Revert "[wasm] Add support to allocate in a specific code region"
This reverts commit a3218aa744.
Reason for revert: Needed for another revert (https://chromium-review.googlesource.com/c/v8/v8/+/1735320)
Original change's description:
> [wasm] Add support to allocate in a specific code region
>
> For multiple code spaces with multiple jump tables, we need to allocate
> the space for the jump tables within specific regions (the new code
> spaces) so they are within reachable distance.
> This CL adds support for that, and uses it for the first jump table.
>
> R=mstarzinger@chromium.org
>
> Bug: v8:9477
> Change-Id: Ibdf05c9500c2dfdb2c5f5f920b4422339aaab810
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1733069
> Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#63074}
TBR=mstarzinger@chromium.org,clemensh@chromium.org
Change-Id: I4b1d1e670d5b25fa40205754d9572b2b809c93ea
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:9477
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1735321
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63075}
parent a3218aa744
commit ee0aeefacc
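Editor's note: the reverted change existed because direct branches encode a bounded signed displacement, so a jump table is only usable from code placed within that distance (the "reachable distance" the description mentions). The sketch below illustrates the constraint; the constant and names are illustrative assumptions, not V8 code.

    #include <cstdint>

    // Illustrative only: a direct branch immediate covers a limited range
    // (e.g. +/-128 MB for an AArch64 B/BL instruction), so a jump table must
    // live within that range of every code space that branches to it.
    constexpr int64_t kMaxBranchOffset = int64_t{128} * 1024 * 1024;  // assumed

    inline bool WithinDirectBranchRange(uint64_t from, uint64_t to) {
      int64_t delta = static_cast<int64_t>(to - from);
      return delta >= -kMaxBranchOffset && delta < kMaxBranchOffset;
    }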
src/base/address-region.h
@@ -45,13 +45,6 @@ class AddressRegion {
     return contains(region.address_, region.size_);
   }
 
-  base::AddressRegion GetOverlap(AddressRegion region) const {
-    Address overlap_start = std::max(begin(), region.begin());
-    Address overlap_end =
-        std::max(overlap_start, std::min(end(), region.end()));
-    return {overlap_start, overlap_end - overlap_start};
-  }
-
   bool operator==(AddressRegion other) const {
     return address_ == other.address_ && size_ == other.size_;
   }
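The {GetOverlap} helper removed above intersects two half-open [begin, end) regions, clamping to an empty result when they are disjoint. A self-contained sketch of the same arithmetic, with plain integers standing in for V8's Address and AddressRegion types:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    struct Region {
      uint64_t address;
      uint64_t size;
      uint64_t begin() const { return address; }
      uint64_t end() const { return address + size; }
    };

    // Mirrors the removed GetOverlap: the outer std::max clamps overlap_end
    // to overlap_start, so disjoint inputs yield a zero-sized region rather
    // than an underflowed one.
    Region GetOverlap(Region a, Region b) {
      uint64_t overlap_start = std::max(a.begin(), b.begin());
      uint64_t overlap_end =
          std::max(overlap_start, std::min(a.end(), b.end()));
      return {overlap_start, overlap_end - overlap_start};
    }

    int main() {
      Region partial = GetOverlap({0x1000, 0x100}, {0x1080, 0x200});
      assert(partial.address == 0x1080 && partial.size == 0x80);
      Region disjoint = GetOverlap({0x1000, 0x100}, {0x9000, 0x100});
      assert(disjoint.size == 0);  // clamped, not underflowed
      return 0;
    }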
src/wasm/wasm-code-manager.cc
@@ -88,30 +88,13 @@ base::AddressRegion DisjointAllocationPool::Merge(base::AddressRegion region) {
 }
 
 base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
-  return AllocateInRegion(size,
-                          {kNullAddress, std::numeric_limits<size_t>::max()});
-}
-
-base::AddressRegion DisjointAllocationPool::AllocateInRegion(
-    size_t size, base::AddressRegion region) {
   for (auto it = regions_.begin(), end = regions_.end(); it != end; ++it) {
-    base::AddressRegion overlap = it->GetOverlap(region);
-    if (size > overlap.size()) continue;
-    base::AddressRegion ret{overlap.begin(), size};
+    if (size > it->size()) continue;
+    base::AddressRegion ret{it->begin(), size};
     if (size == it->size()) {
-      // We use the full region --> erase the region from {regions_}.
       regions_.erase(it);
-    } else if (ret.begin() == it->begin()) {
-      // We return a region at the start --> shrink remaining region from front.
-      *it = base::AddressRegion{it->begin() + size, it->size() - size};
-    } else if (ret.end() == it->end()) {
-      // We return a region at the end --> shrink remaining region.
-      *it = base::AddressRegion{it->begin(), it->size() - size};
     } else {
-      // We return something in the middle --> split the remaining region.
-      regions_.insert(
-          it, base::AddressRegion{it->begin(), ret.begin() - it->begin()});
-      *it = base::AddressRegion{ret.end(), it->end() - ret.end()};
+      *it = base::AddressRegion{it->begin() + size, it->size() - size};
     }
     return ret;
   }
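The removed {AllocateInRegion} is first-fit allocation restricted to the part of each free region that overlaps the requested region; that restriction is what makes the shrink-from-front, shrink-from-back, and middle-split cases above all reachable. A runnable sketch of the same bookkeeping, with illustrative types rather than the V8 classes:

    #include <algorithm>
    #include <cstdint>
    #include <limits>
    #include <list>

    struct Region {
      uint64_t address = 0;
      uint64_t size = 0;
      uint64_t begin() const { return address; }
      uint64_t end() const { return address + size; }
    };

    Region GetOverlap(Region a, Region b) {
      uint64_t start = std::max(a.begin(), b.begin());
      uint64_t end = std::max(start, std::min(a.end(), b.end()));
      return {start, end - start};
    }

    // Sketch of the reverted AllocateInRegion: first fit over disjoint,
    // sorted free regions, considering only the overlap with {region}, and
    // splitting a free region when the allocation is carved from its middle.
    class FreeList {
     public:
      explicit FreeList(Region initial) : regions_{initial} {}

      Region AllocateInRegion(uint64_t size, Region region) {
        for (auto it = regions_.begin(); it != regions_.end(); ++it) {
          Region overlap = GetOverlap(*it, region);
          if (size > overlap.size) continue;
          Region ret{overlap.begin(), size};
          if (size == it->size) {
            regions_.erase(it);  // consumed the whole free region
          } else if (ret.begin() == it->begin()) {
            *it = Region{it->begin() + size, it->size - size};  // from front
          } else if (ret.end() == it->end()) {
            *it = Region{it->begin(), it->size - size};  // from back
          } else {
            // Middle: keep the left remainder, shrink *it to the right part.
            regions_.insert(it, Region{it->begin(), ret.begin() - it->begin()});
            *it = Region{ret.end(), it->end() - ret.end()};
          }
          return ret;
        }
        return {};  // empty region signals failure
      }

      Region Allocate(uint64_t size) {
        return AllocateInRegion(size, {0, std::numeric_limits<uint64_t>::max()});
      }

     private:
      std::list<Region> regions_;  // disjoint and sorted
    };

Note how the plain {Allocate} falls out as allocation within an all-encompassing sentinel region, which is exactly the {kNullAddress, std::numeric_limits<size_t>::max()} delegation being removed above.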
@@ -477,26 +460,16 @@ base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
 
 Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
                                                 size_t size) {
-  return AllocateForCodeInRegion(
-      native_module, size, {kNullAddress, std::numeric_limits<size_t>::max()});
-}
-
-Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
-    NativeModule* native_module, size_t size, base::AddressRegion region) {
   base::MutexGuard lock(&mutex_);
   DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
   DCHECK_LT(0, size);
   v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+  // This happens under a lock assumed by the caller.
   size = RoundUp<kCodeAlignment>(size);
-  base::AddressRegion code_space =
-      free_code_space_.AllocateInRegion(size, region);
+  base::AddressRegion code_space = free_code_space_.Allocate(size);
   if (code_space.is_empty()) {
-    const bool in_specific_region =
-        region.size() < std::numeric_limits<size_t>::max();
-    if (!can_request_more_memory_ || in_specific_region) {
-      auto error = in_specific_region ? "wasm code reservation in region"
-                                      : "wasm code reservation";
-      V8::FatalProcessOutOfMemory(nullptr, error);
+    if (!can_request_more_memory_) {
+      V8::FatalProcessOutOfMemory(nullptr, "wasm code reservation");
       UNREACHABLE();
     }
 
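The removed OOM path above distinguishes a region-constrained failure, which cannot be fixed by growing the overall reservation, by testing whether the caller passed anything smaller than the all-encompassing sentinel region. A minimal sketch of that test, assuming illustrative names:

    #include <cstdint>
    #include <limits>

    struct Region {
      uint64_t address = 0;
      uint64_t size = 0;
    };

    constexpr uint64_t kNullAddress = 0;
    // "Anywhere": the sentinel passed by the unconstrained AllocateForCode.
    constexpr Region kAnyRegion{kNullAddress,
                                std::numeric_limits<uint64_t>::max()};

    inline const char* OutOfMemoryMessage(Region requested) {
      // Only a region smaller than the sentinel is a specific-region request.
      bool in_specific_region =
          requested.size < std::numeric_limits<uint64_t>::max();
      return in_specific_region ? "wasm code reservation in region"
                                : "wasm code reservation";
    }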
@@ -692,9 +665,8 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
 
   CHECK_EQ(1, code_space_data_.size());
   // Re-allocate jump table.
-  code_space_data_[0].jump_table = CreateEmptyJumpTableInRegion(
-      JumpTableAssembler::SizeForNumberOfSlots(max_functions),
-      code_space_data_[0].region);
+  code_space_data_[0].jump_table = CreateEmptyJumpTable(
+      JumpTableAssembler::SizeForNumberOfSlots(max_functions));
   main_jump_table_ = code_space_data_[0].jump_table;
 }
@@ -728,10 +700,8 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
   if (!lazy_compile_table_) {
     uint32_t num_slots = module_->num_declared_functions;
     WasmCodeRefScope code_ref_scope;
-    DCHECK_EQ(1, code_space_data_.size());
-    lazy_compile_table_ = CreateEmptyJumpTableInRegion(
-        JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
-        code_space_data_[0].region);
+    lazy_compile_table_ = CreateEmptyJumpTable(
+        JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots));
     JumpTableAssembler::GenerateLazyCompileTable(
         lazy_compile_table_->instruction_start(), num_slots,
         module_->num_imported_functions,
@@ -755,10 +725,9 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
   DCHECK_EQ(kNullAddress, runtime_stub_entries_[0]);  // Only called once.
 #ifdef V8_EMBEDDED_BUILTINS
   WasmCodeRefScope code_ref_scope;
-  DCHECK_EQ(1, code_space_data_.size());
-  WasmCode* jump_table = CreateEmptyJumpTableInRegion(
-      JumpTableAssembler::SizeForNumberOfStubSlots(WasmCode::kRuntimeStubCount),
-      code_space_data_[0].region);
+  WasmCode* jump_table =
+      CreateEmptyJumpTable(JumpTableAssembler::SizeForNumberOfStubSlots(
+          WasmCode::kRuntimeStubCount));
   Address base = jump_table->instruction_start();
   EmbeddedData embedded_data = EmbeddedData::FromBlob();
 #define RUNTIME_STUB(Name) Builtins::k##Name,
@@ -1096,13 +1065,11 @@ bool NativeModule::HasCode(uint32_t index) const {
   return code_table_[index - module_->num_imported_functions] != nullptr;
 }
 
-WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
-    uint32_t jump_table_size, base::AddressRegion region) {
+WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
   // Only call this if we really need a jump table.
   DCHECK_LT(0, jump_table_size);
   Vector<uint8_t> code_space =
-      code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
-  DCHECK(!code_space.empty());
+      code_allocator_.AllocateForCode(this, jump_table_size);
   ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
   std::unique_ptr<WasmCode> code{new WasmCode{
       this,  // native_module
@@ -1155,8 +1122,8 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
       has_functions && is_first_code_space && !implicit_alloc_disabled;
 
   if (needs_jump_table) {
-    jump_table = CreateEmptyJumpTableInRegion(
-        JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region);
+    jump_table = CreateEmptyJumpTable(
+        JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
     CHECK(region.contains(jump_table->instruction_start()));
   }
 
src/wasm/wasm-code-manager.h
@@ -60,10 +60,6 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
   // failure.
   base::AddressRegion Allocate(size_t size);
 
-  // Allocate a contiguous region of size {size} within {region}. Return an
-  // empty pool on failure.
-  base::AddressRegion AllocateInRegion(size_t size, base::AddressRegion);
-
   bool IsEmpty() const { return regions_.empty(); }
   const std::list<base::AddressRegion>& regions() const { return regions_; }
 
@@ -298,11 +294,6 @@ class WasmCodeAllocator {
   // Allocate code space. Returns a valid buffer or fails with OOM (crash).
   Vector<byte> AllocateForCode(NativeModule*, size_t size);
 
-  // Allocate code space within a specific region. Returns a valid buffer or
-  // fails with OOM (crash).
-  Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
-                                       base::AddressRegion);
-
   // Sets permissions of all owned code space to executable, or read-write (if
   // {executable} is false). Returns true on success.
   V8_EXPORT_PRIVATE bool SetExecutable(bool executable);
@@ -526,8 +517,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
   WasmCode* AddAndPublishAnonymousCode(Handle<Code>, WasmCode::Kind kind,
                                        const char* name = nullptr);
 
-  WasmCode* CreateEmptyJumpTableInRegion(uint32_t jump_table_size,
-                                         base::AddressRegion);
+  WasmCode* CreateEmptyJumpTable(uint32_t jump_table_size);
 
   // Called by the {WasmCodeAllocator} to register a new code space.
   void AddCodeSpace(base::AddressRegion);