// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/instruction.h"
#include "src/compiler/linkage.h"
#include "src/isolate.h"
#include "src/objects-inl.h"

#include "test/cctest/cctest.h"
#include "test/cctest/compiler/function-tester.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

int GetSlotSizeInBytes(MachineRepresentation rep) {
  switch (rep) {
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kFloat32:
      return kPointerSize;
    case MachineRepresentation::kFloat64:
      return kDoubleSize;
    case MachineRepresentation::kSimd128:
      return kSimd128Size;
    default:
      break;
  }
  UNREACHABLE();
}

}  // namespace

// Wrapper around the CodeGenerator with the ability to randomly generate moves
// and swaps which can then be executed. The `slots` map specifies how many
// stack slots to allocate per machine representation; parallel moves are then
// generated by randomly picking among those slots. Constants can be provided
// so that parallel moves may use them as sources.
//
// At the moment, only the following representations are tested:
//   - kTagged
//   - kFloat32
//   - kFloat64
//   - kSimd128
// There is no need to test Word32 or Word64: the code generator treats them
// exactly like Tagged.
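//
// Example usage (a sketch, mirroring the fuzz tests below):
//
//   CodeGeneratorTester c({{MachineRepresentation::kTagged, 8}},
//                         {Constant(42)});
//   ParallelMove* moves = c.GenerateRandomMoves(16);
//   for (const auto m : *moves) {
//     c.CheckAssembleMove(&m->source(), &m->destination());
//   }
//   c.Run();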
class CodeGeneratorTester : public HandleAndZoneScope {
 public:
  CodeGeneratorTester(std::map<MachineRepresentation, int> slots =
                          std::map<MachineRepresentation, int>{},
                      std::initializer_list<Constant> constants = {})
      : info_(ArrayVector("test"), main_isolate(), main_zone(), Code::STUB),
        descriptor_(Linkage::GetStubCallDescriptor(
            main_isolate(), main_zone(), VoidDescriptor(main_isolate()), 0,
            CallDescriptor::kNoFlags, Operator::kNoProperties,
            MachineType::AnyTagged(), 0)),
        linkage_(descriptor_),
        blocks_(main_zone()),
        sequence_(main_isolate(), main_zone(), &blocks_),
        rng_(CcTest::random_number_generator()),
        frame_(descriptor_->CalculateFixedFrameSize()),
        generator_(main_zone(), &frame_, &linkage_, &sequence_, &info_,
                   base::Optional<OsrHelper>(), kNoSourcePosition, nullptr) {
    // Keep track of all supported representations depending on what kind of
    // stack slots are supported.
    for (const auto& slot : slots) {
      supported_reps_.push_back(slot.first);
    }
    // Allocate new slots until we run out of them.
    while (std::any_of(slots.cbegin(), slots.cend(),
                       [](const std::pair<MachineRepresentation, int>& entry) {
                         // True if there are slots left to allocate for this
                         // representation.
                         return entry.second > 0;
                       })) {
      // Pick a random MachineRepresentation from supported_reps_.
      MachineRepresentation rep = CreateRandomMachineRepresentation();
      auto entry = slots.find(rep);
      DCHECK(entry != slots.end());
      // We may have picked a representation for which all slots have already
      // been allocated.
      if (entry->second > 0) {
        // Keep a map of (MachineRepresentation -> std::vector<int>) with
        // allocated slots to pick from for each representation.
        RegisterSlot(rep, frame_.AllocateSpillSlot(GetSlotSizeInBytes(rep)));
        entry->second--;
      }
    }
    for (auto constant : constants) {
      int virtual_register = AllocateConstant(constant);
      // Associate constants with their compatible representations.
      // TODO(all): Test all types of constants.
      switch (constant.type()) {
        // Integer constants are always moved to a tagged location, whatever
        // their size.
        case Constant::kInt32:
        case Constant::kInt64:
          RegisterConstant(MachineRepresentation::kTagged, virtual_register);
          break;
        // FP constants may be moved to a tagged location using a heap number,
        // or directly to a location of the same size.
        case Constant::kFloat32:
          RegisterConstant(MachineRepresentation::kTagged, virtual_register);
          RegisterConstant(MachineRepresentation::kFloat32, virtual_register);
          break;
        case Constant::kFloat64:
          RegisterConstant(MachineRepresentation::kTagged, virtual_register);
          RegisterConstant(MachineRepresentation::kFloat64, virtual_register);
          break;
        default:
          break;
      }
    }
    // Force a frame to be created.
    generator_.frame_access_state()->MarkHasFrame(true);
    generator_.AssembleConstructFrame();
    // TODO(all): Generate a stack check here so that we fail gracefully if the
    // frame is too big.
  }

  int AllocateConstant(Constant constant) {
    int virtual_register = sequence_.NextVirtualRegister();
    sequence_.AddConstant(virtual_register, constant);
    return virtual_register;
  }

  // Register a constant referenced by `virtual_register` as compatible with
  // `rep`.
  void RegisterConstant(MachineRepresentation rep, int virtual_register) {
    auto entry = constants_.find(rep);
    if (entry == constants_.end()) {
      std::vector<int> vregs = {virtual_register};
      constants_.emplace(rep, vregs);
    } else {
      entry->second.push_back(virtual_register);
    }
  }
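
  // Track an allocated stack slot for `rep` so that random operand generation
  // can later pick from it.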
  void RegisterSlot(MachineRepresentation rep, int slot) {
    auto entry = allocated_slots_.find(rep);
    if (entry == allocated_slots_.end()) {
      std::vector<int> slots = {slot};
      allocated_slots_.emplace(rep, slots);
    } else {
      entry->second.push_back(slot);
    }
  }
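
  // Mirror the CodeGenerator's push kinds so the tail call tests below can
  // refer to them directly.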
  enum PushTypeFlag {
    kRegisterPush = CodeGenerator::kRegisterPush,
    kStackSlotPush = CodeGenerator::kStackSlotPush,
    kScalarPush = CodeGenerator::kScalarPush
  };

  enum OperandConstraint {
    kNone,
    // Restrict operands to non-constants. This is useful when generating a
    // destination.
    kCannotBeConstant
  };

  // Generate parallel moves at random. Note that they may not be compatible
  // with each other, as this doesn't matter to the code generator.
  ParallelMove* GenerateRandomMoves(int size) {
    ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());

    for (int i = 0; i < size;) {
      MachineRepresentation rep = CreateRandomMachineRepresentation();
      MoveOperands mo(CreateRandomOperand(kNone, rep),
                      CreateRandomOperand(kCannotBeConstant, rep));
      // It isn't valid to call `AssembleMove` and `AssembleSwap` with
      // redundant moves.
      if (mo.IsRedundant()) continue;
      parallel_move->AddMove(mo.source(), mo.destination());
      // Iterate only when a move was created.
      i++;
    }

    return parallel_move;
  }
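
  // Like GenerateRandomMoves, but generates pairs of operands to swap. The
  // swap is canonicalized so that a register operand, if any, ends up on the
  // left-hand side.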
  ParallelMove* GenerateRandomSwaps(int size) {
    ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());

    for (int i = 0; i < size;) {
      MachineRepresentation rep = CreateRandomMachineRepresentation();
      InstructionOperand lhs = CreateRandomOperand(kCannotBeConstant, rep);
      InstructionOperand rhs = CreateRandomOperand(kCannotBeConstant, rep);
      MoveOperands mo(lhs, rhs);
      // It isn't valid to call `AssembleMove` and `AssembleSwap` with
      // redundant moves.
      if (mo.IsRedundant()) continue;
      // Canonicalize the swap: the register operand has to be the left hand
      // side.
      if (lhs.IsStackSlot() || lhs.IsFPStackSlot()) {
        std::swap(lhs, rhs);
      }
      parallel_move->AddMove(lhs, rhs);
      // Iterate only when a swap was created.
      i++;
    }

    return parallel_move;
  }

  MachineRepresentation CreateRandomMachineRepresentation() {
    int index = rng_->NextInt(static_cast<int>(supported_reps_.size()));
    return supported_reps_[index];
  }

  InstructionOperand CreateRandomOperand(OperandConstraint constraint,
                                         MachineRepresentation rep) {
    // Only generate a Constant if the operand is a source and we have a
    // constant with a compatible representation in stock.
    bool generate_constant = (constraint != kCannotBeConstant) &&
                             (constants_.find(rep) != constants_.end());
    switch (rng_->NextInt(generate_constant ? 3 : 2)) {
      case 0:
        return CreateRandomStackSlotOperand(rep);
      case 1:
        return CreateRandomRegisterOperand(rep);
      case 2:
        return CreateRandomConstant(rep);
    }
    UNREACHABLE();
  }

  InstructionOperand CreateRandomRegisterOperand(MachineRepresentation rep) {
    int code;
    const RegisterConfiguration* conf = RegisterConfiguration::Default();
    switch (rep) {
      case MachineRepresentation::kFloat32: {
        int index = rng_->NextInt(conf->num_allocatable_float_registers());
        code = conf->RegisterConfiguration::GetAllocatableFloatCode(index);
        break;
      }
      case MachineRepresentation::kFloat64: {
        int index = rng_->NextInt(conf->num_allocatable_double_registers());
        code = conf->RegisterConfiguration::GetAllocatableDoubleCode(index);
        break;
      }
      case MachineRepresentation::kSimd128: {
        int index = rng_->NextInt(conf->num_allocatable_simd128_registers());
        code = conf->RegisterConfiguration::GetAllocatableSimd128Code(index);
        break;
      }
      case MachineRepresentation::kTagged: {
        // Pick an allocatable register that is not the return register.
        do {
          int index = rng_->NextInt(conf->num_allocatable_general_registers());
          code = conf->RegisterConfiguration::GetAllocatableGeneralCode(index);
        } while (code == kReturnRegister0.code());
        break;
      }
      default:
        UNREACHABLE();
        break;
    }
    return AllocatedOperand(LocationOperand::REGISTER, rep, code);
  }

  InstructionOperand CreateRandomStackSlotOperand(MachineRepresentation rep) {
    int index = rng_->NextInt(static_cast<int>(allocated_slots_[rep].size()));
    return AllocatedOperand(LocationOperand::STACK_SLOT, rep,
                            allocated_slots_[rep][index]);
  }

  InstructionOperand CreateRandomConstant(MachineRepresentation rep) {
    int index = rng_->NextInt(static_cast<int>(constants_[rep].size()));
    return ConstantOperand(constants_[rep][index]);
  }

  void CheckAssembleTailCallGaps(Instruction* instr,
                                 int first_unused_stack_slot,
                                 CodeGeneratorTester::PushTypeFlag push_type) {
    generator_.AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_S390) || \
    defined(V8_TARGET_ARCH_PPC)
    // Only folding register pushes is supported on these architectures.
    bool supported = ((push_type & CodeGenerator::kRegisterPush) == push_type);
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32) || \
    defined(V8_TARGET_ARCH_X87)
    bool supported = ((push_type & CodeGenerator::kScalarPush) == push_type);
#else
    bool supported = false;
#endif
    if (supported) {
      // Architectures supporting folding adjacent pushes should now have
      // resolved all moves.
      for (const auto& move :
           *instr->parallel_moves()[Instruction::FIRST_GAP_POSITION]) {
        CHECK(move->IsEliminated());
      }
    }
    generator_.AssembleGaps(instr);
    generator_.AssembleTailCallAfterGap(instr, first_unused_stack_slot);
  }
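
  // The two helpers below assemble a single move or swap and simply check
  // that the code generator emitted something for it.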
  void CheckAssembleMove(InstructionOperand* source,
                         InstructionOperand* destination) {
    int start = generator_.tasm()->pc_offset();
    generator_.AssembleMove(source, destination);
    CHECK(generator_.tasm()->pc_offset() > start);
  }

  void CheckAssembleSwap(InstructionOperand* source,
                         InstructionOperand* destination) {
    int start = generator_.tasm()->pc_offset();
    generator_.AssembleSwap(source, destination);
    CHECK(generator_.tasm()->pc_offset() > start);
  }
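
  // Assemble a return sequence, emit safepoints and produce the final Code
  // object.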
  Handle<Code> Finalize() {
    InstructionOperand zero = ImmediateOperand(ImmediateOperand::INLINE, 0);
    generator_.AssembleReturn(&zero);

    generator_.FinishCode();
    generator_.safepoints()->Emit(generator_.tasm(),
                                  frame_.GetTotalFrameSlotCount());
    return generator_.FinalizeCode();
  }
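
  // Finalize the code and optionally print it (pass --print-code).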
  void Disassemble() {
    HandleScope scope(main_isolate());
    Handle<Code> code = Finalize();
    if (FLAG_print_code) {
      code->Print();
    }
  }
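
  // Finalize the code and execute it, either on hardware or on the simulator.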
  void Run() {
    HandleScope scope(main_isolate());
    Handle<Code> code = Finalize();
    if (FLAG_print_code) {
      code->Print();
    }
    FunctionTester ft(code);
    ft.Call();
  }

  v8::base::RandomNumberGenerator* rng() const { return rng_; }

 private:
  CompilationInfo info_;
  CallDescriptor* descriptor_;
  Linkage linkage_;
  ZoneVector<InstructionBlock*> blocks_;
  InstructionSequence sequence_;
  std::vector<MachineRepresentation> supported_reps_;
  std::map<MachineRepresentation, std::vector<int>> allocated_slots_;
  std::map<MachineRepresentation, std::vector<int>> constants_;
  v8::base::RandomNumberGenerator* rng_;
  Frame frame_;
  CodeGenerator generator_;
};

// The following fuzz tests will assemble a lot of moves, wrap them in
// executable native code and run them. At this time, we only check that
// something is actually generated, and that it runs on hardware or the
// simulator.

// TODO(all): It would be great to record the data on the stack after all moves
// are executed so that we could test the functionality in an architecture
// independent way. We would also have to make sure we generate moves compatible
// with each other as the gap-resolver tests do.

TEST(FuzzAssembleMove) {
  // Test small and potentially large ranges separately. Note that the number
  // of slots affects how much stack is allocated when running the generated
  // code. This means we have to be careful not to exceed the stack limit,
  // which is lower on Windows.
  for (auto n : {64, 500}) {
    std::map<MachineRepresentation, int> slots = {
        {MachineRepresentation::kTagged, n},
        {MachineRepresentation::kFloat32, n},
        {MachineRepresentation::kFloat64, n}};
    if (CpuFeatures::SupportsWasmSimd128()) {
      // Generate fewer 128-bit slots.
      slots.emplace(MachineRepresentation::kSimd128, n / 4);
    }
    CodeGeneratorTester c(
        slots,
        {Constant(0), Constant(1), Constant(2), Constant(3), Constant(4),
         Constant(5), Constant(6), Constant(7),
         Constant(static_cast<float>(0.1)), Constant(static_cast<float>(0.2)),
         Constant(static_cast<float>(0.3)), Constant(static_cast<float>(0.4)),
         Constant(static_cast<double>(0.5)),
         Constant(static_cast<double>(0.6)),
         Constant(static_cast<double>(0.7)),
         Constant(static_cast<double>(0.8))});
    ParallelMove* moves = c.GenerateRandomMoves(1000);
    for (const auto m : *moves) {
      c.CheckAssembleMove(&m->source(), &m->destination());
    }
    c.Run();
  }
}

TEST(FuzzAssembleSwap) {
  // Test small and potentially large ranges separately. Note that the number
  // of slots affects how much stack is allocated when running the generated
  // code. This means we have to be careful not to exceed the stack limit,
  // which is lower on Windows.
  for (auto n : {64, 500}) {
    std::map<MachineRepresentation, int> slots = {
        {MachineRepresentation::kTagged, n},
        {MachineRepresentation::kFloat32, n},
        {MachineRepresentation::kFloat64, n}};
    if (CpuFeatures::SupportsWasmSimd128()) {
      // Generate fewer 128-bit slots.
      slots.emplace(MachineRepresentation::kSimd128, n / 4);
    }
    CodeGeneratorTester c(slots);
    ParallelMove* moves = c.GenerateRandomSwaps(1000);
    for (const auto m : *moves) {
      c.CheckAssembleSwap(&m->source(), &m->destination());
    }
    c.Run();
  }
}

TEST(FuzzAssembleMoveAndSwap) {
  // Test small and potentially large ranges separately. Note that the number
  // of slots affects how much stack is allocated when running the generated
  // code. This means we have to be careful not to exceed the stack limit,
  // which is lower on Windows.
  for (auto n : {64, 500}) {
    std::map<MachineRepresentation, int> slots = {
        {MachineRepresentation::kTagged, n},
        {MachineRepresentation::kFloat32, n},
        {MachineRepresentation::kFloat64, n}};
    if (CpuFeatures::SupportsWasmSimd128()) {
      // Generate fewer 128-bit slots.
      slots.emplace(MachineRepresentation::kSimd128, n / 4);
    }
    CodeGeneratorTester c(
        slots,
        {Constant(0), Constant(1), Constant(2), Constant(3), Constant(4),
         Constant(5), Constant(6), Constant(7),
         Constant(static_cast<float>(0.1)), Constant(static_cast<float>(0.2)),
         Constant(static_cast<float>(0.3)), Constant(static_cast<float>(0.4)),
         Constant(static_cast<double>(0.5)),
         Constant(static_cast<double>(0.6)),
         Constant(static_cast<double>(0.7)),
         Constant(static_cast<double>(0.8))});
    for (int i = 0; i < 1000; i++) {
      // Randomly alternate between swaps and moves.
      if (c.rng()->NextInt(2) == 0) {
        MoveOperands* move = c.GenerateRandomMoves(1)->at(0);
        c.CheckAssembleMove(&move->source(), &move->destination());
      } else {
        MoveOperands* move = c.GenerateRandomSwaps(1)->at(0);
        c.CheckAssembleSwap(&move->source(), &move->destination());
      }
    }
    c.Run();
  }
}

TEST(AssembleTailCallGap) {
  const RegisterConfiguration* conf = RegisterConfiguration::Default();

  // This test assumes at least 4 registers are allocatable.
  CHECK_LE(4, conf->num_allocatable_general_registers());

  auto r0 = AllocatedOperand(LocationOperand::REGISTER,
                             MachineRepresentation::kTagged,
                             conf->GetAllocatableGeneralCode(0));
  auto r1 = AllocatedOperand(LocationOperand::REGISTER,
                             MachineRepresentation::kTagged,
                             conf->GetAllocatableGeneralCode(1));
  auto r2 = AllocatedOperand(LocationOperand::REGISTER,
                             MachineRepresentation::kTagged,
                             conf->GetAllocatableGeneralCode(2));
  auto r3 = AllocatedOperand(LocationOperand::REGISTER,
                             MachineRepresentation::kTagged,
                             conf->GetAllocatableGeneralCode(3));

  auto slot_minus_4 = AllocatedOperand(LocationOperand::STACK_SLOT,
                                       MachineRepresentation::kTagged, -4);
  auto slot_minus_3 = AllocatedOperand(LocationOperand::STACK_SLOT,
                                       MachineRepresentation::kTagged, -3);
  auto slot_minus_2 = AllocatedOperand(LocationOperand::STACK_SLOT,
                                       MachineRepresentation::kTagged, -2);
  auto slot_minus_1 = AllocatedOperand(LocationOperand::STACK_SLOT,
                                       MachineRepresentation::kTagged, -1);

  // Avoid slot 0 for architectures which use it to store the return address.
  int first_slot = V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
  auto slot_0 = AllocatedOperand(LocationOperand::STACK_SLOT,
                                 MachineRepresentation::kTagged, first_slot);
  auto slot_1 =
      AllocatedOperand(LocationOperand::STACK_SLOT,
                       MachineRepresentation::kTagged, first_slot + 1);
  auto slot_2 =
      AllocatedOperand(LocationOperand::STACK_SLOT,
                       MachineRepresentation::kTagged, first_slot + 2);
  auto slot_3 =
      AllocatedOperand(LocationOperand::STACK_SLOT,
                       MachineRepresentation::kTagged, first_slot + 3);

  // These tests all generate series of moves that the code generator should
  // detect as adjacent pushes. Depending on the architecture, we make sure
  // these moves get eliminated.
  // Also, disassembling with `--print-code` is useful when debugging.
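  // For example, the first block below moves registers r3..r0 into four
  // adjacent stack slots; architectures supporting push folding can assemble
  // this as a series of pushes instead of individual stores.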

  {
    // Generate a series of register pushes only.
    CodeGeneratorTester c;
    Instruction* instr = Instruction::New(c.main_zone(), kArchNop);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  c.main_zone())
        ->AddMove(r3, slot_0);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  c.main_zone())
        ->AddMove(r2, slot_1);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  c.main_zone())
        ->AddMove(r1, slot_2);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  c.main_zone())
        ->AddMove(r0, slot_3);

    c.CheckAssembleTailCallGaps(instr, first_slot + 4,
                                CodeGeneratorTester::kRegisterPush);
    c.Disassemble();
  }

  {
    // Generate a series of stack pushes only.
    CodeGeneratorTester c;
    Instruction* instr = Instruction::New(c.main_zone(), kArchNop);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  c.main_zone())
        ->AddMove(slot_minus_4, slot_0);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  c.main_zone())
        ->AddMove(slot_minus_3, slot_1);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  c.main_zone())
        ->AddMove(slot_minus_2, slot_2);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  c.main_zone())
        ->AddMove(slot_minus_1, slot_3);

    c.CheckAssembleTailCallGaps(instr, first_slot + 4,
                                CodeGeneratorTester::kStackSlotPush);
    c.Disassemble();
  }

  {
    // Generate a mix of stack and register pushes.
    CodeGeneratorTester c;
    Instruction* instr = Instruction::New(c.main_zone(), kArchNop);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  c.main_zone())
        ->AddMove(slot_minus_2, slot_0);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  c.main_zone())
        ->AddMove(r1, slot_1);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  c.main_zone())
        ->AddMove(slot_minus_1, slot_2);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  c.main_zone())
        ->AddMove(r0, slot_3);

    c.CheckAssembleTailCallGaps(instr, first_slot + 4,
                                CodeGeneratorTester::kScalarPush);
    c.Disassemble();
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8