[wasm] Refactor wasm-serialization to fix alignment

- Alignment of code sections is only relative to start of header. Code
  should be aligned in the buffer.
- Rewrites Reader/Writer classes to make global alignment easier.
- Rewrites the native serialization to simplify things in preparation
  for switch to streaming serialization APIs.

Bug: chromium:719007
Change-Id: I0397bc84a8d009b4fd7f5286bb5abc527bb0db95
Reviewed-on: https://chromium-review.googlesource.com/1027433
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52860}
This commit is contained in:
Bill Budge 2018-04-27 09:51:54 -07:00 committed by Commit Bot
parent 441e6d4a3c
commit 28e015dbaf

View File

@ -28,102 +28,92 @@ namespace wasm {
namespace {
// TODO(bbudge) Try to unify the various implementations of readers and writers
// in WASM, e.g. StreamProcessor and ZoneBuffer, with these.
class Writer {
public:
explicit Writer(Vector<byte> buffer)
: buffer_(buffer), start_(buffer_.start()) {}
: start_(buffer.start()), end_(buffer.end()), pos_(buffer.start()) {}
size_t bytes_written() const { return pos_ - start_; }
byte* current_location() const { return pos_; }
size_t current_size() const { return end_ - pos_; }
Vector<byte> current_buffer() const {
return {current_location(), current_size()};
}
template <typename T>
void Write(const T& value) {
DCHECK_GE(current_size(), sizeof(T));
WriteUnalignedValue(reinterpret_cast<Address>(current_location()), value);
pos_ += sizeof(T);
if (FLAG_wasm_trace_serialization) {
OFStream os(stdout);
os << "wrote: " << (size_t)value << " sized: " << sizeof(T) << std::endl;
}
DCHECK_GE(buffer_.size(), sizeof(T));
WriteUnalignedValue(reinterpret_cast<Address>(buffer_.start()), value);
buffer_ = buffer_ + sizeof(T);
}
void WriteVector(const Vector<const byte> data) {
DCHECK_GE(buffer_.size(), data.size());
if (data.size() > 0) {
memcpy(buffer_.start(), data.start(), data.size());
buffer_ = buffer_ + data.size();
void WriteVector(const Vector<const byte> v) {
DCHECK_GE(current_size(), v.size());
if (v.size() > 0) {
memcpy(current_location(), v.start(), v.size());
pos_ += v.size();
}
if (FLAG_wasm_trace_serialization) {
OFStream os(stdout);
os << "wrote vector of " << data.size() << " elements" << std::endl;
}
}
Vector<byte> current_buffer() const { return buffer_; }
void Align(size_t alignment) {
size_t num_written_bytes = static_cast<size_t>(buffer_.start() - start_);
if (num_written_bytes % alignment) {
size_t padding = alignment - num_written_bytes % alignment;
buffer_ = buffer_ + padding;
if (FLAG_wasm_trace_serialization) {
OFStream os(stdout);
os << "wrote padding, sized: " << padding << std::endl;
}
os << "wrote vector of " << v.size() << " elements" << std::endl;
}
}
private:
Vector<byte> buffer_;
byte* const start_;
byte* const end_;
byte* pos_;
};
class Reader {
public:
explicit Reader(Vector<const byte> buffer)
: buffer_(buffer), start_(buffer_.start()) {}
: start_(buffer.start()), end_(buffer.end()), pos_(buffer.start()) {}
size_t bytes_read() const { return pos_ - start_; }
const byte* current_location() const { return pos_; }
size_t current_size() const { return end_ - pos_; }
Vector<const byte> current_buffer() const {
return {current_location(), current_size()};
}
template <typename T>
T Read() {
DCHECK_GE(buffer_.size(), sizeof(T));
T ret = ReadUnalignedValue<T>(reinterpret_cast<Address>(buffer_.start()));
buffer_ = buffer_ + sizeof(T);
DCHECK_GE(current_size(), sizeof(T));
T value =
ReadUnalignedValue<T>(reinterpret_cast<Address>(current_location()));
pos_ += sizeof(T);
if (FLAG_wasm_trace_serialization) {
OFStream os(stdout);
os << "read: " << (size_t)ret << " sized: " << sizeof(T) << std::endl;
os << "read: " << (size_t)value << " sized: " << sizeof(T) << std::endl;
}
return ret;
return value;
}
Vector<const byte> GetSubvector(size_t size) {
Vector<const byte> ret = {buffer_.start(), size};
buffer_ = buffer_ + size;
return ret;
}
void ReadIntoVector(const Vector<byte> data) {
if (data.size() > 0) {
DCHECK_GE(buffer_.size(), data.size());
memcpy(data.start(), buffer_.start(), data.size());
buffer_ = buffer_ + data.size();
void ReadVector(Vector<byte> v) {
if (v.size() > 0) {
DCHECK_GE(current_size(), v.size());
memcpy(v.start(), current_location(), v.size());
pos_ += v.size();
}
if (FLAG_wasm_trace_serialization) {
OFStream os(stdout);
os << "read vector of " << data.size() << " elements" << std::endl;
os << "read vector of " << v.size() << " elements" << std::endl;
}
}
Vector<const byte> current_buffer() const { return buffer_; }
void Align(size_t alignment) {
size_t num_read_bytes = static_cast<size_t>(buffer_.start() - start_);
if (num_read_bytes % alignment) {
size_t padding = alignment - num_read_bytes % alignment;
buffer_ = buffer_ + padding;
if (FLAG_wasm_trace_serialization) {
OFStream os(stdout);
os << "read padding, sized: " << padding << std::endl;
}
}
}
void Skip(size_t size) { pos_ += size; }
private:
Vector<const byte> buffer_;
const byte* const start_;
const byte* const end_;
const byte* pos_;
};
// The version header holds four uint32_t fields: magic number, version hash,
// supported CPU features, and flag hash (see WriteVersion below).
constexpr size_t kVersionSize = 4 * sizeof(uint32_t);

// Start from 1 so an encoded stub id is not confused with an encoded builtin.
constexpr int kFirstStubId = 1;
void WriteVersion(Isolate* isolate, Vector<byte> buffer) {
DCHECK_GE(buffer.size(), kVersionSize);
Writer writer(buffer);
writer.Write(SerializedData::ComputeMagicNumber(
void WriteVersion(Isolate* isolate, Writer* writer) {
writer->Write(SerializedData::ComputeMagicNumber(
isolate->heap()->external_reference_table()));
writer.Write(Version::Hash());
writer.Write(static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
writer.Write(FlagList::Hash());
writer->Write(Version::Hash());
writer->Write(static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
writer->Write(FlagList::Hash());
}
// Returns true if {version} starts with exactly the version header this
// build would itself produce, i.e. the serialized data is compatible.
bool IsSupportedVersion(Isolate* isolate, const Vector<const byte> version) {
  if (version.size() < kVersionSize) return false;
  byte current_version[kVersionSize];
  Writer writer({current_version, kVersionSize});
  WriteVersion(isolate, &writer);
  return memcmp(version.start(), current_version, kVersionSize) == 0;
}
// On Intel, call sites are encoded as a displacement. For linking and for
// serialization/deserialization, we want to store/retrieve a tag (the function
// index). On Intel, that means accessing the raw displacement. Everywhere else,
// that simply means accessing the target address.
void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
*(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = tag;
@ -178,110 +165,12 @@ uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
#endif
}
} // namespace
constexpr size_t kHeaderSize =
sizeof(uint32_t) + // total wasm function count
sizeof(uint32_t); // imported functions - i.e. index of first wasm function
enum SerializationSection { Init, Metadata, Stubs, CodeSection, Done };
class V8_EXPORT_PRIVATE NativeModuleSerializer {
public:
explicit NativeModuleSerializer(Isolate*, const NativeModule*);
size_t Measure() const;
size_t Write(Vector<byte>);
bool IsDone() const { return state_ == Done; }
private:
size_t MeasureHeader() const;
static size_t GetCodeHeaderSize();
size_t MeasureCode(const WasmCode*) const;
size_t MeasureCopiedStubs() const;
void BufferHeader();
// we buffer all the stubs because they are small
void BufferCopiedStubs();
void BufferCodeInAllocatedScratch(const WasmCode*);
void BufferCurrentWasmCode();
size_t DrainBuffer(Vector<byte> dest);
uint32_t EncodeBuiltinOrStub(Address);
Isolate* const isolate_ = nullptr;
const NativeModule* const native_module_ = nullptr;
SerializationSection state_ = Init;
uint32_t index_ = 0;
std::vector<byte> scratch_;
Vector<byte> remaining_;
// wasm and copied stubs reverse lookup
std::map<Address, uint32_t> wasm_targets_lookup_;
// immovable builtins and runtime entries lookup
std::map<Address, uint32_t> reference_table_lookup_;
std::map<Address, uint32_t> stub_lookup_;
std::map<Address, uint32_t> builtin_lookup_;
};
class V8_EXPORT_PRIVATE NativeModuleDeserializer {
public:
explicit NativeModuleDeserializer(Isolate*, NativeModule*);
// Currently, we don't support streamed reading, yet albeit the
// API suggests that.
bool Read(Vector<const byte>);
private:
void ExpectHeader();
void Expect(size_t size);
bool ReadHeader();
bool ReadCode();
bool ReadStubs();
Address GetTrampolineOrStubFromTag(uint32_t);
Isolate* const isolate_ = nullptr;
NativeModule* const native_module_ = nullptr;
std::vector<byte> scratch_;
std::vector<Address> stubs_;
Vector<const byte> unread_;
size_t current_expectation_ = 0;
uint32_t index_ = 0;
};
NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
const NativeModule* module)
: isolate_(isolate), native_module_(module) {
DCHECK_NOT_NULL(isolate_);
DCHECK_NOT_NULL(native_module_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
ExternalReferenceTable* table = isolate_->heap()->external_reference_table();
for (uint32_t i = 0; i < table->size(); ++i) {
Address addr = table->address(i);
reference_table_lookup_.insert(std::make_pair(addr, i));
}
// defer populating the stub_lookup_ to when we buffer the stubs
for (auto pair : native_module_->trampolines_) {
v8::internal::Code* code = Code::GetCodeFromTargetAddress(pair.first);
int builtin_index = code->builtin_index();
if (builtin_index >= 0) {
uint32_t tag = static_cast<uint32_t>(builtin_index);
builtin_lookup_.insert(std::make_pair(pair.second, tag));
}
}
BufferHeader();
state_ = Metadata;
}
size_t NativeModuleSerializer::MeasureHeader() const {
return sizeof(uint32_t) + // total wasm fct count
sizeof(uint32_t); // imported fcts - i.e. index of first wasm function
}
void NativeModuleSerializer::BufferHeader() {
size_t metadata_size = MeasureHeader();
scratch_.resize(metadata_size);
remaining_ = {scratch_.data(), metadata_size};
Writer writer(remaining_);
writer.Write(native_module_->FunctionCount());
writer.Write(native_module_->num_imported_functions());
}
size_t NativeModuleSerializer::GetCodeHeaderSize() {
size_t size = sizeof(size_t) + // size of this section
constexpr size_t kCodeHeaderSize =
sizeof(size_t) + // size of code section
sizeof(size_t) + // offset of constant pool
sizeof(size_t) + // offset of safepoint table
sizeof(size_t) + // offset of handler table
@ -291,56 +180,104 @@ size_t NativeModuleSerializer::GetCodeHeaderSize() {
sizeof(size_t) + // source positions size
sizeof(size_t) + // protected instructions size
sizeof(WasmCode::Tier); // tier
return RoundUp(size, sizeof(size_t));
} // namespace
// Serializes the code of a NativeModule into a caller-provided buffer through
// a Writer. Measure() returns the exact number of bytes that Write() will
// emit, so the caller can allocate the buffer up front.
class V8_EXPORT_PRIVATE NativeModuleSerializer {
 public:
  NativeModuleSerializer() = delete;
  NativeModuleSerializer(Isolate*, const NativeModule*);

  // Total number of bytes Write() will produce for this module.
  size_t Measure() const;
  // Serializes the whole module. May only be called once per serializer.
  bool Write(Writer* writer);

 private:
  size_t MeasureCopiedStubs() const;
  size_t MeasureCode(const WasmCode*) const;

  void WriteHeader(Writer* writer);
  void WriteCopiedStubs(Writer* writer);
  void WriteCode(const WasmCode*, Writer* writer);

  uint32_t EncodeBuiltinOrStub(Address);

  Isolate* const isolate_;
  const NativeModule* const native_module_;
  bool write_called_;

  // wasm and copied stubs reverse lookup
  std::map<Address, uint32_t> wasm_targets_lookup_;
  // immovable builtins and runtime entries lookup
  std::map<Address, uint32_t> reference_table_lookup_;
  std::map<Address, uint32_t> stub_lookup_;
  std::map<Address, uint32_t> builtin_lookup_;

  DISALLOW_COPY_AND_ASSIGN(NativeModuleSerializer);
};
NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
                                               const NativeModule* module)
    : isolate_(isolate), native_module_(module), write_called_(false) {
  DCHECK_NOT_NULL(isolate_);
  DCHECK_NOT_NULL(native_module_);
  // TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
  // the unique ones, i.e. the cache.
  // Build a reverse lookup from external reference address to table index.
  ExternalReferenceTable* table = isolate_->heap()->external_reference_table();
  for (uint32_t i = 0; i < table->size(); ++i) {
    Address addr = table->address(i);
    reference_table_lookup_.insert(std::make_pair(addr, i));
  }
  // Defer populating stub_lookup_ to when we write the stubs.
  // Here we only record the trampolines that resolve to builtins.
  for (auto pair : native_module_->trampolines_) {
    v8::internal::Code* code = Code::GetCodeFromTargetAddress(pair.first);
    int builtin_index = code->builtin_index();
    if (builtin_index >= 0) {
      uint32_t tag = static_cast<uint32_t>(builtin_index);
      builtin_lookup_.insert(std::make_pair(pair.second, tag));
    }
  }
}
// One uint32_t for the stub count, plus one uint32_t key per trampoline that
// is not a builtin (builtins are encoded by index and not copied).
size_t NativeModuleSerializer::MeasureCopiedStubs() const {
  size_t size = sizeof(uint32_t);  // number of stubs
  for (auto pair : native_module_->trampolines_) {
    v8::internal::Code* code = Code::GetCodeFromTargetAddress(pair.first);
    int builtin_index = code->builtin_index();
    if (builtin_index < 0) size += sizeof(uint32_t);  // stub key
  }
  return size;
}
// Payload size of one code section: instructions, reloc info, source
// positions and protected-instruction data. Excludes the fixed-size code
// header, which is accounted for separately via kCodeHeaderSize.
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
  return code->instructions().size() + code->reloc_info().size() +
         code->source_positions().size() +
         code->protected_instructions().size() *
             sizeof(trap_handler::ProtectedInstructionData);
}
// Computes the exact byte size that Write() will emit: module header, copied
// stubs, then one (header + payload) per non-imported wasm function.
size_t NativeModuleSerializer::Measure() const {
  size_t size = kHeaderSize + MeasureCopiedStubs();
  uint32_t first_wasm_fn = native_module_->num_imported_functions();
  uint32_t total_fns = native_module_->FunctionCount();
  for (uint32_t i = first_wasm_fn; i < total_fns; ++i) {
    size += kCodeHeaderSize;
    size += MeasureCode(native_module_->GetCode(i));
  }
  return size;
}
size_t NativeModuleSerializer::DrainBuffer(Vector<byte> dest) {
size_t to_write = std::min(dest.size(), remaining_.size());
memcpy(dest.start(), remaining_.start(), to_write);
DCHECK_GE(remaining_.size(), to_write);
remaining_ = remaining_ + to_write;
return to_write;
void NativeModuleSerializer::WriteHeader(Writer* writer) {
writer->Write(native_module_->FunctionCount());
writer->Write(native_module_->num_imported_functions());
}
size_t NativeModuleSerializer::MeasureCopiedStubs() const {
size_t ret = sizeof(uint32_t); // number of stubs
for (auto pair : native_module_->trampolines_) {
v8::internal::Code* code = Code::GetCodeFromTargetAddress(pair.first);
int builtin_index = code->builtin_index();
if (builtin_index < 0) ret += sizeof(uint32_t);
}
return ret;
}
void NativeModuleSerializer::BufferCopiedStubs() {
// We buffer all the stubs together, because they are very likely
// few and small. Each stub is buffered like a WasmCode would,
// and in addition prefaced by its stub key. The whole section is prefaced
// by the number of stubs.
size_t buff_size = MeasureCopiedStubs();
scratch_.resize(buff_size);
remaining_ = {scratch_.data(), buff_size};
Writer writer(remaining_);
writer.Write(
static_cast<uint32_t>((buff_size - sizeof(uint32_t)) / sizeof(uint32_t)));
void NativeModuleSerializer::WriteCopiedStubs(Writer* writer) {
// Write the number of stubs and their keys.
// TODO(all) Serialize the stubs as WasmCode.
size_t stubs_size = MeasureCopiedStubs();
// Get the stub count from the number of keys.
size_t num_stubs = (stubs_size - sizeof(uint32_t)) / sizeof(uint32_t);
writer->Write(static_cast<uint32_t>(num_stubs));
uint32_t stub_id = kFirstStubId;
for (auto pair : native_module_->trampolines_) {
@ -348,48 +285,35 @@ void NativeModuleSerializer::BufferCopiedStubs() {
int builtin_index = code->builtin_index();
if (builtin_index < 0) {
stub_lookup_.insert(std::make_pair(pair.second, stub_id));
writer.Write(code->stub_key());
writer->Write(code->stub_key());
++stub_id;
}
}
}
void NativeModuleSerializer::BufferCurrentWasmCode() {
const WasmCode* code = native_module_->GetCode(index_);
size_t size = MeasureCode(code);
scratch_.resize(size);
remaining_ = {scratch_.data(), size};
BufferCodeInAllocatedScratch(code);
}
void NativeModuleSerializer::BufferCodeInAllocatedScratch(
const WasmCode* code) {
// We write the address, the size, and then copy the code as-is, followed
// by reloc info, followed by source positions.
Writer writer(remaining_);
// write the header
writer.Write(MeasureCode(code));
writer.Write(code->constant_pool_offset());
writer.Write(code->safepoint_table_offset());
writer.Write(code->handler_table_offset());
writer.Write(code->stack_slots());
writer.Write(code->instructions().size());
writer.Write(code->reloc_info().size());
writer.Write(code->source_positions().size());
writer.Write(code->protected_instructions().size());
writer.Write(code->tier());
writer.Align(kPointerSize);
// next is the code, which we have to reloc.
byte* serialized_code_start = writer.current_buffer().start();
// write the code and everything else
writer.WriteVector(code->instructions());
writer.WriteVector(code->reloc_info());
writer.WriteVector(code->source_positions());
writer.WriteVector(
void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
// Write the size of the entire code section, followed by the code header.
writer->Write(MeasureCode(code));
writer->Write(code->constant_pool_offset());
writer->Write(code->safepoint_table_offset());
writer->Write(code->handler_table_offset());
writer->Write(code->stack_slots());
writer->Write(code->instructions().size());
writer->Write(code->reloc_info().size());
writer->Write(code->source_positions().size());
writer->Write(code->protected_instructions().size());
writer->Write(code->tier());
// Get a pointer to the code buffer, which we have to relocate.
byte* serialized_code_start = writer->current_buffer().start();
// Now write the code, reloc info, source positions, and protected code.
writer->WriteVector(code->instructions());
writer->WriteVector(code->reloc_info());
writer->WriteVector(code->source_positions());
writer->WriteVector(
{reinterpret_cast<const byte*>(code->protected_instructions().data()),
sizeof(trap_handler::ProtectedInstructionData) *
code->protected_instructions().size()});
// now relocate the code
// Relocate the code.
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
@ -451,49 +375,20 @@ uint32_t NativeModuleSerializer::EncodeBuiltinOrStub(Address address) {
return tag;
}
size_t NativeModuleSerializer::Write(Vector<byte> dest) {
Vector<byte> original = dest;
while (dest.size() > 0) {
switch (state_) {
case Metadata: {
dest = dest + DrainBuffer(dest);
if (remaining_.size() == 0) {
BufferCopiedStubs();
state_ = Stubs;
bool NativeModuleSerializer::Write(Writer* writer) {
DCHECK(!write_called_);
write_called_ = true;
WriteHeader(writer);
WriteCopiedStubs(writer);
uint32_t total_fns = native_module_->FunctionCount();
uint32_t first_wasm_fn = native_module_->num_imported_functions();
for (uint32_t i = first_wasm_fn; i < total_fns; ++i) {
const WasmCode* code = native_module_->GetCode(i);
WriteCode(code, writer);
}
break;
}
case Stubs: {
dest = dest + DrainBuffer(dest);
if (remaining_.size() == 0) {
index_ = native_module_->num_imported_functions();
if (index_ < native_module_->FunctionCount()) {
BufferCurrentWasmCode();
state_ = CodeSection;
} else {
state_ = Done;
}
}
break;
}
case CodeSection: {
dest = dest + DrainBuffer(dest);
if (remaining_.size() == 0) {
++index_; // Move to next code object.
if (index_ < native_module_->FunctionCount()) {
BufferCurrentWasmCode();
} else {
state_ = Done;
}
}
break;
}
default:
UNREACHABLE();
}
}
DCHECK_GE(original.size(), dest.size());
return original.size() - dest.size();
return true;
}
// static
@ -501,105 +396,113 @@ std::pair<std::unique_ptr<const byte[]>, size_t> SerializeNativeModule(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
NativeModule* native_module = compiled_module->GetNativeModule();
NativeModuleSerializer serializer(isolate, native_module);
size_t version_size = kVersionSize;
size_t buff_size = serializer.Measure() + version_size;
std::unique_ptr<byte[]> ret(new byte[buff_size]);
WriteVersion(isolate, {ret.get(), buff_size});
size_t buffer_size = kVersionSize + serializer.Measure();
std::unique_ptr<byte[]> buffer(new byte[buffer_size]);
size_t written =
serializer.Write({ret.get() + version_size, buff_size - version_size});
if (written != buff_size - version_size) return {};
Writer writer({buffer.get(), buffer_size});
WriteVersion(isolate, &writer);
return {std::move(ret), buff_size};
if (!serializer.Write(&writer)) return {};
if (writer.bytes_written() != buffer_size) return {};
return {std::move(buffer), buffer_size};
}
// Reads data produced by NativeModuleSerializer and materializes the code
// into an existing NativeModule.
class V8_EXPORT_PRIVATE NativeModuleDeserializer {
 public:
  NativeModuleDeserializer() = delete;
  NativeModuleDeserializer(Isolate*, NativeModule*);

  // Deserializes the whole module. May only be called once per deserializer.
  bool Read(Reader* reader);

 private:
  bool ReadHeader(Reader* reader);
  bool ReadCode(uint32_t fn_index, Reader* reader);
  bool ReadStubs(Reader* reader);
  // Maps an encoded tag back to a trampoline or copied-stub address.
  Address GetTrampolineOrStubFromTag(uint32_t);

  Isolate* const isolate_;
  NativeModule* const native_module_;
  std::vector<Address> stubs_;
  bool read_called_;

  DISALLOW_COPY_AND_ASSIGN(NativeModuleDeserializer);
};
NativeModuleDeserializer::NativeModuleDeserializer(Isolate* isolate,
                                                   NativeModule* native_module)
    : isolate_(isolate), native_module_(native_module), read_called_(false) {}
// Reads the serialized module in the same order Write() produced it: header,
// stubs, then each non-imported function. Returns true only if everything
// matched and the reader was consumed exactly. Single-shot.
bool NativeModuleDeserializer::Read(Reader* reader) {
  DCHECK(!read_called_);
  read_called_ = true;

  if (!ReadHeader(reader)) return false;
  if (!ReadStubs(reader)) return false;
  uint32_t total_fns = native_module_->FunctionCount();
  uint32_t first_wasm_fn = native_module_->num_imported_functions();
  for (uint32_t i = first_wasm_fn; i < total_fns; ++i) {
    if (!ReadCode(i, reader)) return false;
  }
  // All bytes must have been consumed, otherwise the data is malformed.
  return reader->current_size() == 0;
}
// Validates the module header: function counts must match the NativeModule
// we are deserializing into.
bool NativeModuleDeserializer::ReadHeader(Reader* reader) {
  size_t functions = reader->Read<uint32_t>();
  size_t imports = reader->Read<uint32_t>();
  return functions == native_module_->FunctionCount() &&
         imports == native_module_->num_imported_functions();
}
// Reads the copied-stub section: a count followed by one stub key per stub.
// Each key is rematerialized as code and its local address recorded in
// stubs_, indexed in the same order the serializer assigned stub ids.
bool NativeModuleDeserializer::ReadStubs(Reader* reader) {
  size_t num_stubs = reader->Read<uint32_t>();
  stubs_.reserve(num_stubs);
  for (size_t i = 0; i < num_stubs; ++i) {
    uint32_t key = reader->Read<uint32_t>();
    v8::internal::Code* stub =
        *(v8::internal::CodeStub::GetCode(isolate_, key).ToHandleChecked());
    stubs_.push_back(native_module_->GetLocalAddressFor(handle(stub)));
  }
  return true;
}
bool NativeModuleDeserializer::ReadCode() {
size_t start_size = unread_.size();
Reader reader(unread_);
size_t code_section_size = reader.Read<size_t>();
bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
size_t code_section_size = reader->Read<size_t>();
USE(code_section_size);
size_t constant_pool_offset = reader.Read<size_t>();
size_t safepoint_table_offset = reader.Read<size_t>();
size_t handler_table_offset = reader.Read<size_t>();
uint32_t stack_slot_count = reader.Read<uint32_t>();
size_t code_size = reader.Read<size_t>();
size_t reloc_size = reader.Read<size_t>();
size_t source_position_size = reader.Read<size_t>();
size_t protected_instructions_size = reader.Read<size_t>();
WasmCode::Tier tier = reader.Read<WasmCode::Tier>();
reader.Align(kPointerSize);
size_t constant_pool_offset = reader->Read<size_t>();
size_t safepoint_table_offset = reader->Read<size_t>();
size_t handler_table_offset = reader->Read<size_t>();
uint32_t stack_slot_count = reader->Read<uint32_t>();
size_t code_size = reader->Read<size_t>();
size_t reloc_size = reader->Read<size_t>();
size_t source_position_size = reader->Read<size_t>();
size_t protected_instructions_size = reader->Read<size_t>();
WasmCode::Tier tier = reader->Read<WasmCode::Tier>();
std::shared_ptr<ProtectedInstructions> protected_instructions(
new ProtectedInstructions(protected_instructions_size));
DCHECK_EQ(protected_instructions_size, protected_instructions->size());
Vector<const byte> code_buffer = reader.GetSubvector(code_size);
Vector<const byte> code_buffer = {reader->current_location(), code_size};
reader->Skip(code_size);
std::unique_ptr<byte[]> reloc_info;
if (reloc_size > 0) {
reloc_info.reset(new byte[reloc_size]);
reader.ReadIntoVector({reloc_info.get(), reloc_size});
reader->ReadVector({reloc_info.get(), reloc_size});
}
std::unique_ptr<byte[]> source_pos;
if (source_position_size > 0) {
source_pos.reset(new byte[source_position_size]);
reader.ReadIntoVector({source_pos.get(), source_position_size});
reader->ReadVector({source_pos.get(), source_position_size});
}
WasmCode* ret = native_module_->AddOwnedCode(
code_buffer, std::move(reloc_info), reloc_size, std::move(source_pos),
source_position_size, Just(index_), WasmCode::kFunction,
source_position_size, Just(fn_index), WasmCode::kFunction,
constant_pool_offset, stack_slot_count, safepoint_table_offset,
handler_table_offset, protected_instructions, tier,
WasmCode::kNoFlushICache);
native_module_->code_table_[index_] = ret;
native_module_->code_table_[fn_index] = ret;
// now relocate the code
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
@ -649,12 +552,12 @@ bool NativeModuleDeserializer::ReadCode() {
ret->instructions().size());
if (protected_instructions_size > 0) {
reader.ReadIntoVector(
{reinterpret_cast<byte*>(protected_instructions->data()),
sizeof(trap_handler::ProtectedInstructionData) *
protected_instructions->size()});
size_t size = sizeof(trap_handler::ProtectedInstructionData) *
protected_instructions->size();
Vector<byte> data(reinterpret_cast<byte*>(protected_instructions->data()),
size);
reader->ReadVector(data);
}
unread_ = unread_ + (start_size - reader.current_buffer().size());
return true;
}
@ -677,7 +580,6 @@ MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
if (!IsSupportedVersion(isolate, data)) {
return {};
}
data = data + kVersionSize;
ModuleResult decode_result =
SyncDecodeWasmModule(isolate, wire_bytes.start(), wire_bytes.end(), false,
i::wasm::kWasmOrigin);
@ -716,11 +618,13 @@ MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
script->set_wasm_compiled_module(*compiled_module);
NativeModuleDeserializer deserializer(isolate,
compiled_module->GetNativeModule());
if (!deserializer.Read(data)) return {};
Reader reader(data + kVersionSize);
if (!deserializer.Read(&reader)) return {};
// TODO(6792): Wrappers below might be cloned using {Factory::CopyCode}. This
// requires unlocking the code space here. This should eventually be moved
// into the allocator.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
CompileJsToWasmWrappers(isolate, compiled_module, isolate->counters());
WasmCompiledModule::ReinitializeAfterDeserialization(isolate,