[wasm] Fix TODO and remove bad tests

In the {Fixed} variant, the {WasmCodeManagerTest} always reserves
1GB of memory, which makes the test run out of memory (OOM) on many
32-bit platforms. Instead of skipping it selectively, this CL removes
the whole test. It caused a lot of trouble in the past, and it
required two test-only methods in the {WasmCodeManager}. Also, the
{Fixed} variant will not be needed any more with the wasm far jump
table, since modules will then always be able to grow.
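
For context, the expensive reservation happens in
{WasmCodeManager::NewNativeModule} (see the first file below):
modules that cannot request more code space later must reserve the
whole {kMaxWasmCodeMemory} up front. A minimal sketch of that
decision, simplified from the real code ({InitialReservationSizeSketch}
and the 1 GiB constant are illustrative stand-ins, not V8
declarations):

  #include <cstddef>

  // Illustrative stand-in for kMaxWasmCodeMemory (1 GiB).
  constexpr std::size_t kMaxWasmCodeMemorySketch = std::size_t{1} << 30;

  // Growable modules reserve only an estimate and add code spaces on
  // demand; fixed modules must reserve the maximum up front -- the
  // allocation that OOMs on 32-bit platforms, where address space is
  // scarce.
  std::size_t InitialReservationSizeSketch(bool can_request_more,
                                           std::size_t code_size_estimate) {
    return can_request_more ? code_size_estimate : kMaxWasmCodeMemorySketch;
  }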

Drive-by: Clean up the unittests status file a bit.

R=mstarzinger@chromium.org

Bug: v8:9477
Change-Id: I5b6f8ed9f800863575c69d49d5df82f21fd23030
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1815251
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Clemens Backes [né Hammacher] <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63942}
Author: Clemens Hammacher <clemensh@chromium.org>
Date: 2019-09-23 15:12:40 +02:00
Committed-by: Commit Bot
parent f7adf5f569
commit ae764cc31b

4 changed files with 33 additions and 297 deletions

--- a/src/wasm/wasm-code-manager.cc
+++ b/src/wasm/wasm-code-manager.cc

@@ -1040,12 +1040,9 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
     // Populate optimized code to the jump table unless there is an active
     // redirection to the interpreter that should be preserved.
-    DCHECK_IMPLIES(
-        main_jump_table_ == nullptr,
-        engine_->code_manager()->IsImplicitAllocationsDisabledForTesting());
-    bool update_jump_table = update_code_table &&
-                             !has_interpreter_redirection(code->index()) &&
-                             main_jump_table_;
+    DCHECK_NOT_NULL(main_jump_table_);
+    bool update_jump_table =
+        update_code_table && !has_interpreter_redirection(code->index());
 
     // Ensure that interpreter entries always populate to the jump table.
     if (code->kind_ == WasmCode::Kind::kInterpreterEntry) {
@@ -1203,8 +1200,6 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
   // space. Otherwise, we are wasting too much memory.
   DCHECK_GE(region.size(),
             2 * OverheadPerCodeSpace(module()->num_declared_functions));
-  const bool implicit_alloc_disabled =
-      engine_->code_manager()->IsImplicitAllocationsDisabledForTesting();
 
 #if defined(V8_OS_WIN64)
   // On some platforms, specifically Win64, we need to reserve some pages at
@@ -1213,8 +1208,7 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
   // https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
   // for details.
   if (engine_->code_manager()
-          ->CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
-      !implicit_alloc_disabled) {
+          ->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
     size_t size = Heap::GetCodeRangeReservedAreaSize();
     DCHECK_LT(0, size);
     Vector<byte> padding =
@@ -1232,10 +1226,7 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
   // TODO(clemensh): Avoid additional jump table if the code space is close
   // enough to another existing code space.
   const bool needs_jump_table =
-      has_functions &&
-      (kNeedsFarJumpsBetweenCodeSpaces || is_first_code_space) &&
-      !implicit_alloc_disabled;
-  const bool needs_far_jump_table = !implicit_alloc_disabled;
+      has_functions && (kNeedsFarJumpsBetweenCodeSpaces || is_first_code_space);
 
   if (needs_jump_table) {
     jump_table = CreateEmptyJumpTableInRegion(
@@ -1243,30 +1234,29 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
     CHECK(region.contains(jump_table->instruction_start()));
   }
 
-  if (needs_far_jump_table) {
-    int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
-    far_jump_table = CreateEmptyJumpTableInRegion(
-        JumpTableAssembler::SizeForNumberOfFarJumpSlots(
-            WasmCode::kRuntimeStubCount, num_function_slots),
-        region);
-    CHECK(region.contains(far_jump_table->instruction_start()));
-    EmbeddedData embedded_data = EmbeddedData::FromBlob();
+  // Always allocate a far jump table, because it contains the runtime stubs.
+  int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
+  far_jump_table = CreateEmptyJumpTableInRegion(
+      JumpTableAssembler::SizeForNumberOfFarJumpSlots(
+          WasmCode::kRuntimeStubCount, num_function_slots),
+      region);
+  CHECK(region.contains(far_jump_table->instruction_start()));
+  EmbeddedData embedded_data = EmbeddedData::FromBlob();
 #define RUNTIME_STUB(Name) Builtins::k##Name,
 #define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
-    Builtins::Name stub_names[WasmCode::kRuntimeStubCount] = {
-        WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
+  Builtins::Name stub_names[WasmCode::kRuntimeStubCount] = {
+      WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
 #undef RUNTIME_STUB
 #undef RUNTIME_STUB_TRAP
-    Address builtin_addresses[WasmCode::kRuntimeStubCount];
-    for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
-      Builtins::Name builtin = stub_names[i];
-      CHECK(embedded_data.ContainsBuiltin(builtin));
-      builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
-    }
-    JumpTableAssembler::GenerateFarJumpTable(
-        far_jump_table->instruction_start(), builtin_addresses,
-        WasmCode::kRuntimeStubCount, num_function_slots);
-  }
+  Address builtin_addresses[WasmCode::kRuntimeStubCount];
+  for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
+    Builtins::Name builtin = stub_names[i];
+    CHECK(embedded_data.ContainsBuiltin(builtin));
+    builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
+  }
+  JumpTableAssembler::GenerateFarJumpTable(
+      far_jump_table->instruction_start(), builtin_addresses,
+      WasmCode::kRuntimeStubCount, num_function_slots);
 
   if (is_first_code_space) main_jump_table_ = jump_table;
@@ -1522,13 +1512,6 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
   return mem;
 }
 
-void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) {
-  // This has to be set before committing any memory.
-  DCHECK_EQ(0, total_committed_code_space_.load());
-  max_committed_code_space_ = limit;
-  critical_committed_code_space_.store(limit / 2);
-}
-
 // static
 size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) {
   constexpr size_t kCodeSizeMultiplier = 4;
@@ -1577,13 +1560,10 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
   }
 
   // If we cannot add code space later, reserve enough address space up front.
-  // TODO(clemensh): Fix WasmCodeManagerTest and use {can_request_more} here.
-  bool can_add_code_space = !NativeModule::kNeedsFarJumpsBetweenCodeSpaces ||
-                            FLAG_wasm_far_jump_table;
   size_t code_vmem_size =
-      can_add_code_space ? ReservationSize(code_size_estimate,
-                                           module->num_declared_functions, 0)
-                         : kMaxWasmCodeMemory;
+      can_request_more ? ReservationSize(code_size_estimate,
+                                         module->num_declared_functions, 0)
+                       : kMaxWasmCodeMemory;
 
   // The '--wasm-max-code-space-reservation' testing flag can be used to reduce
   // the maximum size of the initial code space reservation (in MB).
@@ -1622,8 +1602,7 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
              size);
 
 #if defined(V8_OS_WIN64)
-  if (CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
-      !implicit_allocations_disabled_for_testing_) {
+  if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
     win64_unwindinfo::RegisterNonABICompliantCodeRange(
         reinterpret_cast<void*>(start), size);
   }
@@ -1745,8 +1724,7 @@ void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space,
                code_space.address(), code_space.end(), code_space.size());
 
 #if defined(V8_OS_WIN64)
-    if (CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
-        !implicit_allocations_disabled_for_testing_) {
+    if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
       win64_unwindinfo::UnregisterNonABICompliantCodeRange(
           reinterpret_cast<void*>(code_space.address()));
     }

--- a/src/wasm/wasm-code-manager.h
+++ b/src/wasm/wasm-code-manager.h

@@ -663,16 +663,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
     return total_committed_code_space_.load();
   }
 
-  void SetMaxCommittedMemoryForTesting(size_t limit);
-
-  void DisableImplicitAllocationsForTesting() {
-    implicit_allocations_disabled_for_testing_ = true;
-  }
-  bool IsImplicitAllocationsDisabledForTesting() const {
-    return implicit_allocations_disabled_for_testing_;
-  }
-
   static size_t EstimateNativeModuleCodeSize(const WasmModule* module);
   static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module);
@@ -695,9 +685,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
   void AssignRange(base::AddressRegion, NativeModule*);
 
-  size_t max_committed_code_space_;
-  bool implicit_allocations_disabled_for_testing_ = false;
+  const size_t max_committed_code_space_;
 
   std::atomic<size_t> total_committed_code_space_{0};
 
   // If the committed code space exceeds {critical_committed_code_space_}, then

--- a/test/unittests/unittests.status
+++ b/test/unittests/unittests.status

@@ -14,19 +14,13 @@
   'RandomNumberGenerator.NextSampleInvalidParam': [SKIP],
   'RandomNumberGenerator.NextSampleSlowInvalidParam1': [SKIP],
   'RandomNumberGenerator.NextSampleSlowInvalidParam2': [SKIP],
-}],  # 'system == macos and asan'
-
-['(arch == arm or arch == mips) and not simulator_run', {
-  # Uses too much memory.
-  'Parameterized/WasmCodeManagerTest.GrowingVsFixedModule/Fixed': [SKIP]
-}],  # '(arch == arm or arch == mips) and not simulator_run'
+}],  # system == macos and asan
 
 ##############################################################################
 ['lite_mode or variant == jitless', {
   # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
   'ValueSerializerTestWithSharedArrayBufferClone.RoundTripWebAssemblyMemory': [SKIP],
   'ValueSerializerTestWithWasm.*': [SKIP],
-  'Parameterized/WasmCodeManagerTest.*': [SKIP],
 }],  # lite_mode or variant == jitless
 
 ##############################################################################
@@ -37,18 +31,18 @@
 ['system == windows and asan', {
   # BUG(893437).
   'Torque*': [SKIP],
-}],  # 'system == windows and asan'
+}],  # system == windows and asan
 
 ['system == windows and arch == x64 and mode == release', {
   # BUG(992783).
   'Torque.ConditionalFields': [SKIP],
   'Torque.UsingUnderscorePrefixedIdentifierError': [SKIP],
-}],  # 'system == windows and arch == x64 and mode == release'
+}],  # system == windows and arch == x64 and mode == release
 
 ['tsan == True', {
   # https://crbug.com/v8/9380
   # The test is broken and needs to be fixed to use separate isolates.
   'BackingStoreTest.RacyGrowWasmMemoryInPlace': [SKIP],
-}],  # 'tsan == True'
+}],  # tsan == True
 ]

--- a/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/test/unittests/wasm/wasm-code-manager-unittest.cc

@@ -138,230 +138,6 @@ TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrcWithGap) {
   CheckPool(a, {{10, 5}, {20, 15}, {36, 4}});
 }
 
-enum ModuleStyle : int { Fixed = 0, Growable = 1 };
-
-std::string PrintWasmCodeManageTestParam(
-    ::testing::TestParamInfo<ModuleStyle> info) {
-  switch (info.param) {
-    case Fixed:
-      return "Fixed";
-    case Growable:
-      return "Growable";
-  }
-  UNREACHABLE();
-}
-
-class WasmCodeManagerTest : public TestWithContext,
-                            public ::testing::WithParamInterface<ModuleStyle> {
- public:
-  static constexpr uint32_t kNumFunctions = 10;
-  static size_t allocate_page_size;
-  static size_t commit_page_size;
-
-  WasmCodeManagerTest() {
-    CHECK_EQ(allocate_page_size == 0, commit_page_size == 0);
-    if (allocate_page_size == 0) {
-      allocate_page_size = AllocatePageSize();
-      commit_page_size = CommitPageSize();
-    }
-    CHECK_NE(0, allocate_page_size);
-    CHECK_NE(0, commit_page_size);
-    manager()->DisableImplicitAllocationsForTesting();
-  }
-
-  using NativeModulePtr = std::shared_ptr<NativeModule>;
-
-  NativeModulePtr AllocModule(size_t size, ModuleStyle style) {
-    std::shared_ptr<WasmModule> module(new WasmModule);
-    module->num_declared_functions = kNumFunctions;
-    bool can_request_more = style == Growable;
-    return engine()->NewNativeModule(i_isolate(), kAllWasmFeatures, size,
-                                     can_request_more, std::move(module));
-  }
-
-  WasmCode* AddCode(NativeModule* native_module, uint32_t index, size_t size) {
-    CodeDesc desc;
-    memset(reinterpret_cast<void*>(&desc), 0, sizeof(CodeDesc));
-    std::unique_ptr<byte[]> exec_buff(new byte[size]);
-    desc.buffer = exec_buff.get();
-    desc.instr_size = static_cast<int>(size);
-    std::unique_ptr<WasmCode> code = native_module->AddCode(
-        index, desc, 0, 0, {}, {}, WasmCode::kFunction, ExecutionTier::kNone);
-    return native_module->PublishCode(std::move(code));
-  }
-
-  WasmEngine* engine() { return i_isolate()->wasm_engine(); }
-
-  WasmCodeManager* manager() { return engine()->code_manager(); }
-
-  void SetMaxCommittedMemory(size_t limit) {
-    manager()->SetMaxCommittedMemoryForTesting(limit);
-  }
-};
-
-// static
-size_t WasmCodeManagerTest::allocate_page_size = 0;
-size_t WasmCodeManagerTest::commit_page_size = 0;
-
-INSTANTIATE_TEST_SUITE_P(Parameterized, WasmCodeManagerTest,
-                         ::testing::Values(Fixed, Growable),
-                         PrintWasmCodeManageTestParam);
-
-TEST_P(WasmCodeManagerTest, EmptyCase) {
-  SetMaxCommittedMemory(0);
-  CHECK_EQ(0, manager()->committed_code_space());
-
-  NativeModulePtr native_module = AllocModule(allocate_page_size, GetParam());
-  ASSERT_DEATH_IF_SUPPORTED(AddCode(native_module.get(), 0, kCodeAlignment),
-                            "OOM in wasm code commit");
-}
-
-TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
-  SetMaxCommittedMemory(allocate_page_size);
-  CHECK_EQ(0, manager()->committed_code_space());
-  NativeModulePtr native_module = AllocModule(allocate_page_size, GetParam());
-  CHECK(native_module);
-  CHECK_EQ(0, manager()->committed_code_space());
-  WasmCodeRefScope code_ref_scope;
-  uint32_t index = 0;
-  WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
-  CHECK_NOT_NULL(code);
-  CHECK_EQ(commit_page_size, manager()->committed_code_space());
-
-  code = AddCode(native_module.get(), index++, 3 * kCodeAlignment);
-  CHECK_NOT_NULL(code);
-  CHECK_EQ(commit_page_size, manager()->committed_code_space());
-
-  code = AddCode(native_module.get(), index++,
-                 allocate_page_size - 4 * kCodeAlignment);
-  CHECK_NOT_NULL(code);
-  CHECK_EQ(allocate_page_size, manager()->committed_code_space());
-
-  // This fails in "reservation" if we cannot extend the code space, or in
-  // "commit" if we can (since we hit the allocation limit in the
-  // WasmCodeManager). Hence don't check for that part of the OOM message.
-  ASSERT_DEATH_IF_SUPPORTED(
-      AddCode(native_module.get(), index++, 1 * kCodeAlignment),
-      "OOM in wasm code");
-}
-
-TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
-  SetMaxCommittedMemory(3 * allocate_page_size);
-  NativeModulePtr nm1 = AllocModule(2 * allocate_page_size, GetParam());
-  NativeModulePtr nm2 = AllocModule(2 * allocate_page_size, GetParam());
-  CHECK(nm1);
-  CHECK(nm2);
-  WasmCodeRefScope code_ref_scope;
-  WasmCode* code = AddCode(nm1.get(), 0, 2 * allocate_page_size);
-  CHECK_NOT_NULL(code);
-  ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 2 * allocate_page_size),
-                            "OOM in wasm code commit");
-}
-
-TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
-  SetMaxCommittedMemory(3 * allocate_page_size);
-  NativeModulePtr nm = AllocModule(allocate_page_size, GetParam());
-  size_t module_size =
-      GetParam() == Fixed ? kMaxWasmCodeMemory : allocate_page_size;
-  size_t remaining_space_in_module = module_size;
-  if (GetParam() == Fixed) {
-    // Requesting more than the remaining space fails because the module cannot
-    // grow.
-    ASSERT_DEATH_IF_SUPPORTED(
-        AddCode(nm.get(), 0, remaining_space_in_module + kCodeAlignment),
-        "OOM in wasm code reservation");
-  } else {
-    // The module grows by one page. One page remains uncommitted.
-    WasmCodeRefScope code_ref_scope;
-    CHECK_NOT_NULL(
-        AddCode(nm.get(), 0, remaining_space_in_module + kCodeAlignment));
-    CHECK_EQ(commit_page_size + allocate_page_size,
-             manager()->committed_code_space());
-  }
-}
-
-TEST_P(WasmCodeManagerTest, CommitIncrements) {
-  SetMaxCommittedMemory(10 * allocate_page_size);
-  NativeModulePtr nm = AllocModule(3 * allocate_page_size, GetParam());
-  WasmCodeRefScope code_ref_scope;
-  WasmCode* code = AddCode(nm.get(), 0, kCodeAlignment);
-  CHECK_NOT_NULL(code);
-  CHECK_EQ(commit_page_size, manager()->committed_code_space());
-  code = AddCode(nm.get(), 1, 2 * allocate_page_size);
-  CHECK_NOT_NULL(code);
-  CHECK_EQ(commit_page_size + 2 * allocate_page_size,
-           manager()->committed_code_space());
-  code = AddCode(nm.get(), 2, allocate_page_size - kCodeAlignment);
-  CHECK_NOT_NULL(code);
-  CHECK_EQ(3 * allocate_page_size, manager()->committed_code_space());
-}
-
-TEST_P(WasmCodeManagerTest, Lookup) {
-  SetMaxCommittedMemory(2 * allocate_page_size);
-
-  NativeModulePtr nm1 = AllocModule(allocate_page_size, GetParam());
-  NativeModulePtr nm2 = AllocModule(allocate_page_size, GetParam());
-  Address mid_code1_1;
-  {
-    // The {WasmCodeRefScope} needs to die before {nm1} dies.
-    WasmCodeRefScope code_ref_scope;
-    WasmCode* code1_0 = AddCode(nm1.get(), 0, kCodeAlignment);
-    CHECK_EQ(nm1.get(), code1_0->native_module());
-    WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
-    WasmCode* code2_0 = AddCode(nm2.get(), 0, kCodeAlignment);
-    WasmCode* code2_1 = AddCode(nm2.get(), 1, kCodeAlignment);
-    CHECK_EQ(nm2.get(), code2_1->native_module());
-
-    CHECK_EQ(0, code1_0->index());
-    CHECK_EQ(1, code1_1->index());
-    CHECK_EQ(0, code2_0->index());
-    CHECK_EQ(1, code2_1->index());
-
-    // we know the manager object is allocated here, so we shouldn't
-    // find any WasmCode* associated with that ptr.
-    WasmCode* not_found =
-        manager()->LookupCode(reinterpret_cast<Address>(manager()));
-    CHECK_NULL(not_found);
-    WasmCode* found = manager()->LookupCode(code1_0->instruction_start());
-    CHECK_EQ(found, code1_0);
-    found = manager()->LookupCode(code2_1->instruction_start() +
-                                  (code2_1->instructions().size() / 2));
-    CHECK_EQ(found, code2_1);
-    found = manager()->LookupCode(code2_1->instruction_start() +
-                                  code2_1->instructions().size() - 1);
-    CHECK_EQ(found, code2_1);
-    found = manager()->LookupCode(code2_1->instruction_start() +
-                                  code2_1->instructions().size());
-    CHECK_NULL(found);
-    mid_code1_1 =
-        code1_1->instruction_start() + (code1_1->instructions().size() / 2);
-    CHECK_EQ(code1_1, manager()->LookupCode(mid_code1_1));
-  }
-  nm1.reset();
-  CHECK_NULL(manager()->LookupCode(mid_code1_1));
-}
-
-TEST_P(WasmCodeManagerTest, LookupWorksAfterRewrite) {
-  SetMaxCommittedMemory(2 * allocate_page_size);
-
-  NativeModulePtr nm1 = AllocModule(allocate_page_size, GetParam());
-
-  WasmCodeRefScope code_ref_scope;
-  WasmCode* code0 = AddCode(nm1.get(), 0, kCodeAlignment);
-  WasmCode* code1 = AddCode(nm1.get(), 1, kCodeAlignment);
-  CHECK_EQ(0, code0->index());
-  CHECK_EQ(1, code1->index());
-  CHECK_EQ(code1, manager()->LookupCode(code1->instruction_start()));
-  WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
-  CHECK_EQ(1, code1_1->index());
-  CHECK_EQ(code1, manager()->LookupCode(code1->instruction_start()));
-  CHECK_EQ(code1_1, manager()->LookupCode(code1_1->instruction_start()));
-}
-
 }  // namespace wasm_heap_unittest
 }  // namespace wasm
 }  // namespace internal