Land the Fan (disabled)
R=mstarzinger@chromium.org
Review URL: https://codereview.chromium.org/426233002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22709 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Parent: 50869d70a9, commit: a1383e2250
@@ -452,6 +452,9 @@
         'defines': [
           'WIN32',
         ],
         # 4351: VS 2005 and later are warning us that they've fixed a bug
         # present in VS 2003 and earlier.
         'msvs_disabled_warnings': [4351],
+        'msvs_configuration_attributes': {
+          'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
+          'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
@@ -5596,7 +5596,7 @@ class Internals {
   static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
   static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
   static const int kContextHeaderSize = 2 * kApiPointerSize;
-  static const int kContextEmbedderDataIndex = 76;
+  static const int kContextEmbedderDataIndex = 95;
   static const int kFullStringRepresentationMask = 0x07;
   static const int kStringEncodingMask = 0x4;
   static const int kExternalTwoByteRepresentationTag = 0x02;
@@ -1544,6 +1544,15 @@ void Assembler::sdiv(Register dst, Register src1, Register src2,
 }


+void Assembler::udiv(Register dst, Register src1, Register src2,
+                     Condition cond) {
+  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+  ASSERT(IsEnabled(SUDIV));
+  emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
+       src2.code() * B8 | B4 | src1.code());
+}
+
+
 void Assembler::mul(Register dst, Register src1, Register src2,
                     SBit s, Condition cond) {
   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));

@@ -2156,9 +2165,14 @@ void Assembler::vldr(const DwVfpRegister dst,
 void Assembler::vldr(const DwVfpRegister dst,
                      const MemOperand& operand,
                      const Condition cond) {
-  ASSERT(!operand.rm().is_valid());
   ASSERT(operand.am_ == Offset);
-  vldr(dst, operand.rn(), operand.offset(), cond);
+  if (operand.rm().is_valid()) {
+    add(ip, operand.rn(),
+        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+    vldr(dst, ip, 0, cond);
+  } else {
+    vldr(dst, operand.rn(), operand.offset(), cond);
+  }
 }

@@ -2199,9 +2213,14 @@ void Assembler::vldr(const SwVfpRegister dst,
 void Assembler::vldr(const SwVfpRegister dst,
                      const MemOperand& operand,
                      const Condition cond) {
-  ASSERT(!operand.rm().is_valid());
   ASSERT(operand.am_ == Offset);
-  vldr(dst, operand.rn(), operand.offset(), cond);
+  if (operand.rm().is_valid()) {
+    add(ip, operand.rn(),
+        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+    vldr(dst, ip, 0, cond);
+  } else {
+    vldr(dst, operand.rn(), operand.offset(), cond);
+  }
 }

@@ -2242,9 +2261,14 @@ void Assembler::vstr(const DwVfpRegister src,
 void Assembler::vstr(const DwVfpRegister src,
                      const MemOperand& operand,
                      const Condition cond) {
-  ASSERT(!operand.rm().is_valid());
   ASSERT(operand.am_ == Offset);
-  vstr(src, operand.rn(), operand.offset(), cond);
+  if (operand.rm().is_valid()) {
+    add(ip, operand.rn(),
+        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+    vstr(src, ip, 0, cond);
+  } else {
+    vstr(src, operand.rn(), operand.offset(), cond);
+  }
 }

@@ -2284,9 +2308,14 @@ void Assembler::vstr(const SwVfpRegister src,
 void Assembler::vstr(const SwVfpRegister src,
                      const MemOperand& operand,
                      const Condition cond) {
-  ASSERT(!operand.rm().is_valid());
   ASSERT(operand.am_ == Offset);
-  vstr(src, operand.rn(), operand.offset(), cond);
+  if (operand.rm().is_valid()) {
+    add(ip, operand.rn(),
+        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+    vstr(src, ip, 0, cond);
+  } else {
+    vstr(src, operand.rn(), operand.offset(), cond);
+  }
 }

@@ -3125,6 +3154,7 @@ bool Assembler::IsNop(Instr instr, int type) {
 }


+// static
 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
   uint32_t dummy1;
   uint32_t dummy2;
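Note: a sketch of the encoding relationship, based on the ARM ARM rather than on this change itself: SDIV and UDIV share an encoding and differ only in bit 21, which is why the new udiv() emits the same bit pattern sdiv() already uses plus B21.

  // sdiv: cond | 0111 0001 | Rd | 1111 | Rm | 0001 | Rn
  emit(cond | B26 | B25 | B24 | B20 | dst.code() * B16 | 0xf * B12 |
       src2.code() * B8 | B4 | src1.code());
  // udiv: cond | 0111 0011 | Rd | 1111 | Rm | 0001 | Rn   (bit 21 set)
  emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
       src2.code() * B8 | B4 | src1.code());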
@@ -922,6 +922,35 @@ class Assembler : public AssemblerBase {
   void mvn(Register dst, const Operand& src,
            SBit s = LeaveCC, Condition cond = al);

+  // Shift instructions
+
+  void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+           Condition cond = al) {
+    if (src2.is_reg()) {
+      mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
+    } else {
+      mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
+    }
+  }
+
+  void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+           Condition cond = al) {
+    if (src2.is_reg()) {
+      mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
+    } else {
+      mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
+    }
+  }
+
+  void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+           Condition cond = al) {
+    if (src2.is_reg()) {
+      mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
+    } else {
+      mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
+    }
+  }
+
   // Multiply instructions

   void mla(Register dst, Register src1, Register src2, Register srcA,

@@ -933,6 +962,8 @@ class Assembler : public AssemblerBase {
   void sdiv(Register dst, Register src1, Register src2,
             Condition cond = al);

+  void udiv(Register dst, Register src1, Register src2, Condition cond = al);
+
   void mul(Register dst, Register src1, Register src2,
            SBit s = LeaveCC, Condition cond = al);

@@ -1290,7 +1321,7 @@ class Assembler : public AssemblerBase {
   }

   // Check whether an immediate fits an addressing mode 1 instruction.
-  bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+  static bool ImmediateFitsAddrMode1Instruction(int32_t imm32);

   // Check whether an immediate fits an addressing mode 2 instruction.
   bool ImmediateFitsAddrMode2Instruction(int32_t imm32);
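Note: a usage sketch with a hypothetical call site (__ is the usual masm-> macro): the new helpers are conveniences that lower to mov with a shifted operand, since ARM encodes shifts as operand-2 forms of MOV rather than as separate opcodes.

  __ asr(r0, r1, Operand(5));          // emits mov r0, r1, ASR #5
  __ lsl(r2, r3, Operand(r4));         // emits mov r2, r3, LSL r4
  __ lsr(r5, r6, Operand(1), SetCC);   // also sets the condition flags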
@@ -19,7 +19,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r2 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
 }

@@ -27,14 +27,14 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
 void FastNewContextStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r1 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }


 void ToNumberStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }

@@ -42,7 +42,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
 }

@@ -56,9 +56,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
                                     Representation::Smi(),
                                     Representation::Tagged() };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(
-          Runtime::kCreateArrayLiteralStubBailout)->entry,
+      MajorKey(), ARRAY_SIZE(registers), registers,
+      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
       representations);
 }

@@ -67,7 +66,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r3, r2, r1, r0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
 }

@@ -75,7 +74,36 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r2, r3 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void InstanceofStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  Register registers[] = {cp, left(), right()};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallFunctionStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  // r1  function    the function to call
+  Register registers[] = {cp, r1};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallConstructStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  // r0 : number of arguments
+  // r1 : the function to call
+  // r2 : feedback vector
+  // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+  //      vector (Smi)
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {cp, r0, r1, r2};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }

@@ -83,7 +111,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r2, r1, r0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
 }

@@ -93,7 +121,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
   Register registers[] = { cp, r0, r1 };
   Address entry =
       Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(entry));
 }

@@ -101,7 +129,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
 void CompareNilICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(CompareNilIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));

@@ -112,7 +140,7 @@ const Register InterfaceDescriptor::ContextRegister() { return cp; }


 static void InitializeArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // cp -- context

@@ -124,10 +152,8 @@ static void InitializeArrayConstructorDescriptor(

   if (constant_stack_parameter_count == 0) {
     Register registers[] = { cp, r1, r2 };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)

@@ -137,19 +163,16 @@ static void InitializeArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           r0,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }


 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // cp -- context

@@ -160,10 +183,8 @@ static void InitializeInternalArrayConstructorDescriptor(

   if (constant_stack_parameter_count == 0) {
     Register registers[] = { cp, r1 };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)

@@ -172,39 +193,36 @@ static void InitializeInternalArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           r0,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }


 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 0);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }


 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }


 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, -1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }


 void ToBooleanStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(ToBooleanIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));

@@ -213,26 +231,26 @@ void ToBooleanStub::InitializeInterfaceDescriptor(

 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }


 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }


 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }


 void BinaryOpICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r1, r0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));

@@ -242,7 +260,7 @@ void BinaryOpICStub::InitializeInterfaceDescriptor(
 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r2, r1, r0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
 }

@@ -250,9 +268,8 @@ void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
 void StringAddStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r1, r0 };
-  descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+                         Runtime::FunctionForId(Runtime::kStringAdd)->entry);
 }
@@ -1672,8 +1689,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
 void InstanceofStub::Generate(MacroAssembler* masm) {
   // Call site inlining and patching implies arguments in registers.
   ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-  // ReturnTrueFalse is only implemented for inlined call sites.
-  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());

   // Fixed register usage throughout the stub:
   const Register object = r0;  // Object (lhs).

@@ -1695,7 +1710,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {

   // If there is a call site cache don't look in the global cache, but do the
   // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck()) {
+  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
     Label miss;
     __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ b(ne, &miss);

@@ -1751,11 +1766,15 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
   __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
   __ jmp(&loop);
+  Factory* factory = isolate()->factory();

   __ bind(&is_instance);
   if (!HasCallSiteInlineCheck()) {
     __ mov(r0, Operand(Smi::FromInt(0)));
     __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ Move(r0, factory->true_value());
+    }
   } else {
     // Patch the call site to return true.
     __ LoadRoot(r0, Heap::kTrueValueRootIndex);

@@ -1777,6 +1796,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   if (!HasCallSiteInlineCheck()) {
     __ mov(r0, Operand(Smi::FromInt(1)));
     __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ Move(r0, factory->false_value());
+    }
   } else {
     // Patch the call site to return false.
     __ LoadRoot(r0, Heap::kFalseValueRootIndex);

@@ -1806,19 +1828,31 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // Null is not instance of anything.
   __ cmp(scratch, Operand(isolate()->factory()->null_value()));
   __ b(ne, &object_not_null);
-  __ mov(r0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ Move(r0, factory->false_value());
+  } else {
+    __ mov(r0, Operand(Smi::FromInt(1)));
+  }
   __ Ret(HasArgsInRegisters() ? 0 : 2);

   __ bind(&object_not_null);
   // Smi values are not instances of anything.
   __ JumpIfNotSmi(object, &object_not_null_or_smi);
-  __ mov(r0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ Move(r0, factory->false_value());
+  } else {
+    __ mov(r0, Operand(Smi::FromInt(1)));
+  }
   __ Ret(HasArgsInRegisters() ? 0 : 2);

   __ bind(&object_not_null_or_smi);
   // String values are not instances of anything.
   __ IsObjectJSStringType(object, scratch, &slow);
-  __ mov(r0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ Move(r0, factory->false_value());
+  } else {
+    __ mov(r0, Operand(Smi::FromInt(1)));
+  }
   __ Ret(HasArgsInRegisters() ? 0 : 2);

   // Slow-case.  Tail call builtin.
@@ -49,9 +49,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {

   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
 #ifdef DEBUG
   Address prev_call_address = NULL;
 #endif
@@ -1097,13 +1097,16 @@ void Decoder::DecodeType3(Instruction* instr) {
     }
     case db_x: {
-      if (FLAG_enable_sudiv) {
-        if (!instr->HasW()) {
-          if (instr->Bits(5, 4) == 0x1) {
-            if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
-              // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
-              Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
-              break;
-            }
-          }
+      if (instr->Bits(5, 4) == 0x1) {
+        if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+          if (instr->Bit(21) == 0x1) {
+            // UDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+            Format(instr, "udiv'cond'b 'rn, 'rm, 'rs");
+          } else {
+            // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+            Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
+          }
+          break;
         }
       }
@@ -4,10 +4,9 @@

 #include "src/v8.h"

 #include "src/arm/lithium-arm.h"
 #include "src/arm/lithium-codegen-arm.h"
 #include "src/hydrogen-osr.h"
-#include "src/lithium-allocator-inl.h"
+#include "src/lithium-inl.h"

 namespace v8 {
 namespace internal {
@@ -223,6 +223,9 @@ class LInstruction : public ZoneObject {

   virtual bool IsControl() const { return false; }

+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }

@@ -261,11 +264,12 @@ class LInstruction : public ZoneObject {
   void VerifyCall();
 #endif

+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
  private:
   // Iterator support.
   friend class InputIterator;
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;

   friend class TempIterator;
   virtual int TempCount() = 0;
@@ -932,7 +932,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
   Handle<DeoptimizationInputData> data =
-      DeoptimizationInputData::New(isolate(), length, TENURED);
+      DeoptimizationInputData::New(isolate(), length, 0, TENURED);

   Handle<ByteArray> translations =
       translations_.CreateByteArray(isolate()->factory());
@@ -254,7 +254,7 @@ void MacroAssembler::Mls(Register dst, Register src1, Register src2,
     CpuFeatureScope scope(this, MLS);
     mls(dst, src1, src2, srcA, cond);
   } else {
-    ASSERT(!dst.is(srcA));
+    ASSERT(!srcA.is(ip));
     mul(ip, src1, src2, LeaveCC, cond);
     sub(dst, srcA, ip, LeaveCC, cond);
   }
@@ -152,6 +152,9 @@ class MacroAssembler: public Assembler {
   // Register move. May do nothing if the registers are identical.
   void Move(Register dst, Handle<Object> value);
   void Move(Register dst, Register src, Condition cond = al);
+  void Move(Register dst, const Operand& src, Condition cond = al) {
+    if (!src.is_reg() || !src.rm().is(dst)) mov(dst, src, LeaveCC, cond);
+  }
   void Move(DwVfpRegister dst, DwVfpRegister src);

   void Load(Register dst, const MemOperand& src, Representation r);
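Note: the point of the new overload is that callers can invoke it unconditionally; it emits nothing when the operand is already the destination register. A usage sketch (hypothetical call sites):

  __ Move(r0, Operand(r0));  // no instruction emitted: src.rm() is dst
  __ Move(r0, Operand(r1));  // emits mov r0, r1
  __ Move(r0, Operand(42));  // emits mov r0, #42 (not a register operand)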
@@ -2711,28 +2711,30 @@ void Simulator::DecodeType3(Instruction* instr) {
     }
     case db_x: {
-      if (FLAG_enable_sudiv) {
-        if (!instr->HasW()) {
-          if (instr->Bits(5, 4) == 0x1) {
-            if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
-              // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs
-              // Format(instr, "'sdiv'cond'b 'rn, 'rm, 'rs);
-              int rm = instr->RmValue();
-              int32_t rm_val = get_register(rm);
-              int rs = instr->RsValue();
-              int32_t rs_val = get_register(rs);
-              int32_t ret_val = 0;
-              ASSERT(rs_val != 0);
-              if ((rm_val == kMinInt) && (rs_val == -1)) {
-                ret_val = kMinInt;
-              } else {
-                ret_val = rm_val / rs_val;
-              }
-              set_register(rn, ret_val);
-              return;
-            }
-          }
-        }
-      }
+      if (instr->Bits(5, 4) == 0x1) {
+        if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+          // (s/u)div (in V8 notation matching ARM ISA format) rn = rm/rs
+          // Format(instr, "'(s/u)div'cond'b 'rn, 'rm, 'rs);
+          int rm = instr->RmValue();
+          int32_t rm_val = get_register(rm);
+          int rs = instr->RsValue();
+          int32_t rs_val = get_register(rs);
+          int32_t ret_val = 0;
+          ASSERT(rs_val != 0);
+          // udiv
+          if (instr->Bit(21) == 0x1) {
+            ret_val = static_cast<int32_t>(static_cast<uint32_t>(rm_val) /
+                                           static_cast<uint32_t>(rs_val));
+          } else if ((rm_val == kMinInt) && (rs_val == -1)) {
+            ret_val = kMinInt;
+          } else {
+            ret_val = rm_val / rs_val;
+          }
+          set_register(rn, ret_val);
+          return;
+        }
+      }
       // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
       addr = rn_val - shifter_operand;
       if (instr->HasW()) {
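Note: a minimal standalone sketch of the division rules the simulator now implements (function names are illustrative, not from this change). ARM defines SDIV of kMinInt by -1 to yield kMinInt, where plain C++ division would overflow, and UDIV simply reinterprets the operands as unsigned.

  #include <cstdint>
  #include <limits>

  int32_t SimulatedSdiv(int32_t rm, int32_t rs) {
    // rs == 0 is excluded by the ASSERT in the code above.
    if (rm == std::numeric_limits<int32_t>::min() && rs == -1) return rm;
    return rm / rs;
  }

  uint32_t SimulatedUdiv(uint32_t rm, uint32_t rs) {
    return rm / rs;  // unsigned division has no overflow case
  }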
@@ -2772,7 +2774,7 @@ void Simulator::DecodeType3(Instruction* instr) {
           uint32_t rd_val =
               static_cast<uint32_t>(get_register(instr->RdValue()));
           uint32_t bitcount = msbit - lsbit + 1;
-          uint32_t mask = (1 << bitcount) - 1;
+          uint32_t mask = 0xffffffffu >> (32 - bitcount);
           rd_val &= ~(mask << lsbit);
           if (instr->RmValue() != 15) {
             // bfi - bitfield insert.
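Note: the mask rewrite fixes the bitcount == 32 case. A sketch of why (illustrative, not part of the change): shifting a 32-bit int by 32 is undefined behavior in C++ and on common hardware wraps, making the old mask 0, while the unsigned right shift is well defined for bitcount in [1, 32].

  uint32_t bitcount = 32;
  uint32_t old_mask = (1 << bitcount) - 1;             // UB when bitcount == 32
  uint32_t new_mask = 0xffffffffu >> (32 - bitcount);  // 0xffffffff, as intended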
@@ -20,7 +20,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
   // x2: function info
   Register registers[] = { cp, x2 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
 }

@@ -30,7 +30,7 @@ void FastNewContextStub::InitializeInterfaceDescriptor(
   // cp: context
   // x1: function
   Register registers[] = { cp, x1 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }

@@ -39,7 +39,7 @@ void ToNumberStub::InitializeInterfaceDescriptor(
   // cp: context
   // x0: value
   Register registers[] = { cp, x0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }

@@ -49,7 +49,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
   // x0: value
   Register registers[] = { cp, x0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
 }

@@ -67,9 +67,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
                                     Representation::Smi(),
                                     Representation::Tagged() };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(
-          Runtime::kCreateArrayLiteralStubBailout)->entry,
+      MajorKey(), ARRAY_SIZE(registers), registers,
+      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
       representations);
 }

@@ -83,7 +82,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
   // x0: object literal flags
   Register registers[] = { cp, x3, x2, x1, x0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
 }

@@ -94,7 +93,35 @@ void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
   // x2: feedback vector
   // x3: call feedback slot
   Register registers[] = { cp, x2, x3 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void InstanceofStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  Register registers[] = {cp, left(), right()};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallFunctionStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  // x1  function    the function to call
+  Register registers[] = {cp, x1};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallConstructStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  // x0 : number of arguments
+  // x1 : the function to call
+  // x2 : feedback vector
+  // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {cp, x0, x1, x2};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }

@@ -106,7 +133,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
   // x0: string
   Register registers[] = { cp, x2, x1, x0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
 }

@@ -119,7 +146,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
   Register registers[] = { cp, x0, x1 };
   Address entry =
       Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(entry));
 }

@@ -129,7 +156,7 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
   // cp: context
   // x0: value to compare
   Register registers[] = { cp, x0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(CompareNilIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));

@@ -140,7 +167,7 @@ const Register InterfaceDescriptor::ContextRegister() { return cp; }


 static void InitializeArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // cp: context
   // x1: function

@@ -151,10 +178,8 @@ static void InitializeArrayConstructorDescriptor(

   if (constant_stack_parameter_count == 0) {
     Register registers[] = { cp, x1, x2 };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)

@@ -164,37 +189,34 @@ static void InitializeArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           x0,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, x0,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }


 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 0);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }


 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }


 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, -1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }


 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // cp: context
   // x1: constructor function

@@ -204,10 +226,8 @@ static void InitializeInternalArrayConstructorDescriptor(

   if (constant_stack_parameter_count == 0) {
     Register registers[] = { cp, x1 };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)

@@ -216,32 +236,29 @@ static void InitializeInternalArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           x0,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, x0,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }


 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }


 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }


 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }

@@ -250,7 +267,7 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
   // cp: context
   // x0: value
   Register registers[] = { cp, x0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(ToBooleanIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));

@@ -263,7 +280,7 @@ void BinaryOpICStub::InitializeInterfaceDescriptor(
   // x1: left operand
   // x0: right operand
   Register registers[] = { cp, x1, x0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));

@@ -277,7 +294,7 @@ void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
   // x1: left operand
   // x0: right operand
   Register registers[] = { cp, x2, x1, x0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
 }

@@ -288,9 +305,8 @@ void StringAddStub::InitializeInterfaceDescriptor(
   // x1: left operand
   // x0: right operand
   Register registers[] = { cp, x1, x0 };
-  descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+                         Runtime::FunctionForId(Runtime::kStringAdd)->entry);
 }
@@ -32,9 +32,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {

   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
   Address code_start_address = code->instruction_start();
 #ifdef DEBUG
   Address prev_call_address = NULL;
@@ -4,15 +4,13 @@

 #include "src/v8.h"

 #include "src/arm64/lithium-arm64.h"
 #include "src/arm64/lithium-codegen-arm64.h"
 #include "src/hydrogen-osr.h"
-#include "src/lithium-allocator-inl.h"
+#include "src/lithium-inl.h"

 namespace v8 {
 namespace internal {


 #define DEFINE_COMPILE(type)                           \
   void L##type::CompileToNative(LCodeGen* generator) { \
     generator->Do##type(this);                         \
@@ -234,6 +234,9 @@ class LInstruction : public ZoneObject {

   virtual bool IsControl() const { return false; }

+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }
@@ -938,7 +938,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   if (length == 0) return;

   Handle<DeoptimizationInputData> data =
-      DeoptimizationInputData::New(isolate(), length, TENURED);
+      DeoptimizationInputData::New(isolate(), length, 0, TENURED);

   Handle<ByteArray> translations =
       translations_.CreateByteArray(isolate()->factory());
@@ -14,6 +14,7 @@
 #include "src/assembler.h"
 #include "src/disasm.h"
 #include "src/macro-assembler.h"
+#include "src/ostreams.h"

 namespace v8 {
 namespace internal {

@@ -2912,7 +2913,7 @@ T Simulator::FPMaxNM(T a, T b) {
 template <typename T>
 T Simulator::FPMin(T a, T b) {
   // NaNs should be handled elsewhere.
-  ASSERT(!isnan(a) && !isnan(b));
+  ASSERT(!std::isnan(a) && !std::isnan(b));

   if ((a == 0.0) && (b == 0.0) &&
       (copysign(1.0, a) != copysign(1.0, b))) {
@@ -211,6 +211,7 @@ class Simulator : public DecoderVisitor {
  public:
   template<typename T>
   explicit CallArgument(T argument) {
+    bits_ = 0;
     ASSERT(sizeof(argument) <= sizeof(bits_));
     memcpy(&bits_, &argument, sizeof(argument));
     type_ = X_ARG;
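Note: why the added bits_ = 0 matters (a sketch, assuming a 64-bit argument slot): memcpy only writes sizeof(argument) bytes, so for arguments narrower than the slot the upper bytes would otherwise be uninitialized.

  uint64_t bits = 0;                      // clear the whole 64-bit slot first
  int32_t arg = -1;
  std::memcpy(&bits, &arg, sizeof(arg));  // writes only the low 4 bytes
  // bits == 0x00000000ffffffff: the high half is well-defined zero, not garbage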
@@ -88,13 +88,13 @@ inline bool IsQuietNaN(T num) {

 // Convert the NaN in 'num' to a quiet NaN.
 inline double ToQuietNaN(double num) {
-  ASSERT(isnan(num));
+  ASSERT(std::isnan(num));
   return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
 }


 inline float ToQuietNaN(float num) {
-  ASSERT(isnan(num));
+  ASSERT(std::isnan(num));
   return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
 }
src/ast.h (16 changes)
@@ -15,7 +15,6 @@
 #include "src/isolate.h"
 #include "src/jsregexp.h"
 #include "src/list-inl.h"
-#include "src/ostreams.h"
 #include "src/runtime.h"
 #include "src/small-pointer-list.h"
 #include "src/smart-pointers.h"

@@ -113,6 +112,7 @@ class BreakableStatement;
 class Expression;
 class IterationStatement;
 class MaterializedLiteral;
+class OStream;
 class Statement;
 class TargetCollector;
 class TypeFeedbackOracle;

@@ -1516,6 +1516,13 @@ class ObjectLiteral V8_FINAL : public MaterializedLiteral {
   // marked expressions, no store code is emitted.
   void CalculateEmitStore(Zone* zone);

+  // Assemble bitfield of flags for the CreateObjectLiteral helper.
+  int ComputeFlags() const {
+    int flags = fast_elements() ? kFastElements : kNoFlags;
+    flags |= has_function() ? kHasFunction : kNoFlags;
+    return flags;
+  }
+
   enum Flags {
     kNoFlags = 0,
     kFastElements = 1,

@@ -1595,6 +1602,13 @@ class ArrayLiteral V8_FINAL : public MaterializedLiteral {
   // Populate the constant elements fixed array.
   void BuildConstantElements(Isolate* isolate);

+  // Assemble bitfield of flags for the CreateArrayLiteral helper.
+  int ComputeFlags() const {
+    int flags = depth() == 1 ? kShallowElements : kNoFlags;
+    flags |= ArrayLiteral::kDisableMementos;
+    return flags;
+  }
+
   enum Flags {
     kNoFlags = 0,
     kShallowElements = 1,
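Note: a hypothetical consumer of the new helpers, to show the intent — the flags travel as a bitfield to the CreateObjectLiteral / CreateArrayLiteral runtime helpers:

  int flags = object_literal->ComputeFlags();
  bool fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
  bool has_function = (flags & ObjectLiteral::kHasFunction) != 0;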
@@ -152,29 +152,6 @@ inline void CheckNonEqualsHelper(const char* file,
 }


-// Helper function used by the CHECK function when given floating
-// point arguments.  Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
-                              int line,
-                              const char* expected_source,
-                              double expected,
-                              const char* value_source,
-                              double value) {
-  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
-  volatile double* exp = new double[1];
-  *exp = expected;
-  volatile double* val = new double[1];
-  *val = value;
-  if (*exp != *val) {
-    V8_Fatal(file, line,
-             "CHECK_EQ(%s, %s) failed\n#  Expected: %f\n#  Found: %f",
-             expected_source, value_source, *exp, *val);
-  }
-  delete[] exp;
-  delete[] val;
-}
-
-
 inline void CheckNonEqualsHelper(const char* file,
                                  int line,
                                  const char* expected_source,

@@ -189,27 +166,6 @@ inline void CheckNonEqualsHelper(const char* file,
 }


-inline void CheckNonEqualsHelper(const char* file,
-                                 int line,
-                                 const char* expected_source,
-                                 double expected,
-                                 const char* value_source,
-                                 double value) {
-  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
-  volatile double* exp = new double[1];
-  *exp = expected;
-  volatile double* val = new double[1];
-  *val = value;
-  if (*exp == *val) {
-    V8_Fatal(file, line,
-             "CHECK_NE(%s, %s) failed\n#  Value: %f",
-             expected_source, value_source, *val);
-  }
-  delete[] exp;
-  delete[] val;
-}
-
-
 #define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \
   #expected, expected, #value, value)
@@ -1534,6 +1534,38 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
 }


+static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
+                                               const char* holder_expr) {
+  Isolate* isolate = native_context->GetIsolate();
+  Factory* factory = isolate->factory();
+  Handle<GlobalObject> global(native_context->global_object());
+  const char* period_pos = strchr(holder_expr, '.');
+  if (period_pos == NULL) {
+    return Handle<JSObject>::cast(
+        Object::GetPropertyOrElement(
+            global, factory->InternalizeUtf8String(holder_expr))
+            .ToHandleChecked());
+  }
+  const char* inner = period_pos + 1;
+  ASSERT_EQ(NULL, strchr(inner, '.'));
+  Vector<const char> property(holder_expr,
+                              static_cast<int>(period_pos - holder_expr));
+  Handle<String> property_string = factory->InternalizeUtf8String(property);
+  ASSERT(!property_string.is_null());
+  Handle<JSObject> object = Handle<JSObject>::cast(
+      Object::GetProperty(global, property_string).ToHandleChecked());
+  if (strcmp("prototype", inner) == 0) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(object);
+    return Handle<JSObject>(JSObject::cast(function->prototype()));
+  }
+  Handle<String> inner_string = factory->InternalizeUtf8String(inner);
+  ASSERT(!inner_string.is_null());
+  Handle<Object> value =
+      Object::GetProperty(object, inner_string).ToHandleChecked();
+  return Handle<JSObject>::cast(value);
+}
+
+
 #define INSTALL_NATIVE(Type, name, var)                               \
   Handle<String> var##_name =                                         \
       factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR(name)); \
@@ -1541,6 +1573,12 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
       handle(native_context()->builtins()), var##_name).ToHandleChecked(); \
   native_context()->set_##var(Type::cast(*var##_native));

+#define INSTALL_NATIVE_MATH(name)                                    \
+  {                                                                  \
+    Handle<Object> fun =                                             \
+        ResolveBuiltinIdHolder(native_context(), "Math." #name);     \
+    native_context()->set_math_##name##_fun(JSFunction::cast(*fun)); \
+  }

 void Genesis::InstallNativeFunctions() {
   HandleScope scope(isolate());

@@ -1583,6 +1621,26 @@ void Genesis::InstallNativeFunctions() {
                  native_object_get_notifier);
   INSTALL_NATIVE(JSFunction, "NativeObjectNotifierPerformChange",
                  native_object_notifier_perform_change);
+
+  INSTALL_NATIVE_MATH(abs)
+  INSTALL_NATIVE_MATH(acos)
+  INSTALL_NATIVE_MATH(asin)
+  INSTALL_NATIVE_MATH(atan)
+  INSTALL_NATIVE_MATH(atan2)
+  INSTALL_NATIVE_MATH(ceil)
+  INSTALL_NATIVE_MATH(cos)
+  INSTALL_NATIVE_MATH(exp)
+  INSTALL_NATIVE_MATH(floor)
+  INSTALL_NATIVE_MATH(imul)
+  INSTALL_NATIVE_MATH(log)
+  INSTALL_NATIVE_MATH(max)
+  INSTALL_NATIVE_MATH(min)
+  INSTALL_NATIVE_MATH(pow)
+  INSTALL_NATIVE_MATH(random)
+  INSTALL_NATIVE_MATH(round)
+  INSTALL_NATIVE_MATH(sin)
+  INSTALL_NATIVE_MATH(sqrt)
+  INSTALL_NATIVE_MATH(tan)
 }
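Note: for reference, INSTALL_NATIVE_MATH(floor) expands to roughly the following (direct expansion of the macro above):

  {
    Handle<Object> fun =
        ResolveBuiltinIdHolder(native_context(), "Math.floor");
    native_context()->set_math_floor_fun(JSFunction::cast(*fun));
  }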
@@ -2029,28 +2087,6 @@ bool Genesis::InstallExperimentalNatives() {
 }


-static Handle<JSObject> ResolveBuiltinIdHolder(
-    Handle<Context> native_context,
-    const char* holder_expr) {
-  Isolate* isolate = native_context->GetIsolate();
-  Factory* factory = isolate->factory();
-  Handle<GlobalObject> global(native_context->global_object());
-  const char* period_pos = strchr(holder_expr, '.');
-  if (period_pos == NULL) {
-    return Handle<JSObject>::cast(Object::GetPropertyOrElement(
-        global, factory->InternalizeUtf8String(holder_expr)).ToHandleChecked());
-  }
-  ASSERT_EQ(".prototype", period_pos);
-  Vector<const char> property(holder_expr,
-                              static_cast<int>(period_pos - holder_expr));
-  Handle<String> property_string = factory->InternalizeUtf8String(property);
-  ASSERT(!property_string.is_null());
-  Handle<JSFunction> function = Handle<JSFunction>::cast(
-      Object::GetProperty(global, property_string).ToHandleChecked());
-  return Handle<JSObject>(JSObject::cast(function->prototype()));
-}
-
-
 static void InstallBuiltinFunctionId(Handle<JSObject> holder,
                                      const char* function_name,
                                      BuiltinFunctionId id) {
@@ -2336,6 +2372,10 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
         isolate(), builtins, Builtins::GetName(id)).ToHandleChecked();
     Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
     builtins->set_javascript_builtin(id, *function);
+    // TODO(mstarzinger): This is just a temporary hack to make TurboFan work,
+    // the correct solution is to restore the context register after invoking
+    // builtins from full-codegen.
+    function->shared()->set_optimization_disabled(true);
     if (!Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
       return false;
     }
@@ -14,6 +14,50 @@ intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
 } }  // namespace v8::internal


+static bool CheckEqualsStrict(volatile double* exp, volatile double* val) {
+  v8::internal::DoubleRepresentation exp_rep(*exp);
+  v8::internal::DoubleRepresentation val_rep(*val);
+  if (std::isnan(exp_rep.value) && std::isnan(val_rep.value)) return true;
+  return exp_rep.bits == val_rep.bits;
+}
+
+
+void CheckEqualsHelper(const char* file, int line, const char* expected_source,
+                       double expected, const char* value_source,
+                       double value) {
+  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
+  volatile double* exp = new double[1];
+  *exp = expected;
+  volatile double* val = new double[1];
+  *val = value;
+  if (!CheckEqualsStrict(exp, val)) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#  Expected: %f\n#  Found: %f",
+             expected_source, value_source, *exp, *val);
+  }
+  delete[] exp;
+  delete[] val;
+}
+
+
+void CheckNonEqualsHelper(const char* file, int line,
+                          const char* expected_source, double expected,
+                          const char* value_source, double value) {
+  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
+  volatile double* exp = new double[1];
+  *exp = expected;
+  volatile double* val = new double[1];
+  *val = value;
+  if (CheckEqualsStrict(exp, val)) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#  Expected: %f\n#  Found: %f",
+             expected_source, value_source, *exp, *val);
+  }
+  delete[] exp;
+  delete[] val;
+}
+
+
 void CheckEqualsHelper(const char* file,
                        int line,
                        const char* expected_source,
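Note: a standalone sketch of the comparison semantics CheckEqualsStrict introduces (illustrative, not from the change): bit-pattern equality distinguishes -0.0 from 0.0, and all NaNs are treated as equal — neither of which plain == does.

  #include <cmath>
  #include <cstdint>
  #include <cstring>

  bool BitsEqual(double a, double b) {
    if (std::isnan(a) && std::isnan(b)) return true;  // NaN == NaN here
    uint64_t ab, bb;
    std::memcpy(&ab, &a, sizeof(ab));
    std::memcpy(&bb, &b, sizeof(bb));
    return ab == bb;  // -0.0 and 0.0 have different bit patterns
  }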
src/checks.h (10 changes)
@@ -53,8 +53,14 @@ const bool FLAG_enable_slow_asserts = false;
 } }  // namespace v8::internal


-void CheckNonEqualsHelper(const char* file,
-                          int line,
+void CheckNonEqualsHelper(const char* file, int line,
+                          const char* expected_source, double expected,
+                          const char* value_source, double value);
+
+void CheckEqualsHelper(const char* file, int line, const char* expected_source,
+                       double expected, const char* value_source, double value);
+
+void CheckNonEqualsHelper(const char* file, int line,
                           const char* unexpected_source,
                           v8::Handle<v8::Value> unexpected,
                           const char* value_source,
@@ -63,12 +63,10 @@ void InterfaceDescriptor::Initialize(


 void CodeStubInterfaceDescriptor::Initialize(
-    int register_parameter_count,
-    Register* registers,
+    CodeStub::Major major, int register_parameter_count, Register* registers,
     Address deoptimization_handler,
     Representation* register_param_representations,
-    int hint_stack_parameter_count,
-    StubFunctionMode function_mode) {
+    int hint_stack_parameter_count, StubFunctionMode function_mode) {
   InterfaceDescriptor::Initialize(register_parameter_count, registers,
                                   register_param_representations);

@@ -76,22 +74,18 @@ void CodeStubInterfaceDescriptor::Initialize(

   hint_stack_parameter_count_ = hint_stack_parameter_count;
   function_mode_ = function_mode;
+  major_ = major;
 }


 void CodeStubInterfaceDescriptor::Initialize(
-    int register_parameter_count,
-    Register* registers,
-    Register stack_parameter_count,
-    Address deoptimization_handler,
+    CodeStub::Major major, int register_parameter_count, Register* registers,
+    Register stack_parameter_count, Address deoptimization_handler,
     Representation* register_param_representations,
-    int hint_stack_parameter_count,
-    StubFunctionMode function_mode,
+    int hint_stack_parameter_count, StubFunctionMode function_mode,
     HandlerArgumentsMode handler_mode) {
-  Initialize(register_parameter_count, registers,
-             deoptimization_handler,
-             register_param_representations,
-             hint_stack_parameter_count,
+  Initialize(major, register_parameter_count, registers, deoptimization_handler,
+             register_param_representations, hint_stack_parameter_count,
              function_mode);
   stack_parameter_count_ = stack_parameter_count;
   handler_arguments_mode_ = handler_mode;

@@ -591,7 +585,7 @@ void LoadFastElementStub::InitializeInterfaceDescriptor(
                            LoadIC::ReceiverRegister(),
                            LoadIC::NameRegister() };
   STATIC_ASSERT(LoadIC::kParameterCount == 2);
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
 }

@@ -602,7 +596,7 @@ void LoadDictionaryElementStub::InitializeInterfaceDescriptor(
                            LoadIC::ReceiverRegister(),
                            LoadIC::NameRegister() };
   STATIC_ASSERT(LoadIC::kParameterCount == 2);
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
 }

@@ -614,7 +608,7 @@ void KeyedLoadGenericStub::InitializeInterfaceDescriptor(
                            LoadIC::NameRegister() };
   STATIC_ASSERT(LoadIC::kParameterCount == 2);
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry);
 }

@@ -623,7 +617,7 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { InterfaceDescriptor::ContextRegister(),
                            LoadIC::ReceiverRegister() };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }

@@ -632,7 +626,7 @@ void StringLengthStub::InitializeInterfaceDescriptor(
   Register registers[] = { InterfaceDescriptor::ContextRegister(),
                            LoadIC::ReceiverRegister(),
                            LoadIC::NameRegister() };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }

@@ -642,9 +636,8 @@ void StoreFastElementStub::InitializeInterfaceDescriptor(
                            KeyedStoreIC::ReceiverRegister(),
                            KeyedStoreIC::NameRegister(),
                            KeyedStoreIC::ValueRegister() };
-  descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure));
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+                         FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure));
 }

@@ -655,7 +648,7 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
                            MapRegister(),
                            KeyRegister(),
                            ObjectRegister() };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss));
 }

@@ -666,7 +659,7 @@ void StoreGlobalStub::InitializeInterfaceDescriptor(
                            StoreIC::ReceiverRegister(),
                            StoreIC::NameRegister(),
                            StoreIC::ValueRegister() };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(StoreIC_MissFromStubFailure));
 }
src/code-stubs.h
@ -10,6 +10,7 @@
#include "src/codegen.h"
#include "src/globals.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"

namespace v8 {
namespace internal {
@ -348,13 +349,13 @@ class CodeStubInterfaceDescriptor: public InterfaceDescriptor {
 public:
  CodeStubInterfaceDescriptor();

  void Initialize(int register_parameter_count, Register* registers,
                  Address deoptimization_handler = NULL,
  void Initialize(CodeStub::Major major, int register_parameter_count,
                  Register* registers, Address deoptimization_handler = NULL,
                  Representation* register_param_representations = NULL,
                  int hint_stack_parameter_count = -1,
                  StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE);
  void Initialize(int register_parameter_count, Register* registers,
                  Register stack_parameter_count,
  void Initialize(CodeStub::Major major, int register_parameter_count,
                  Register* registers, Register stack_parameter_count,
                  Address deoptimization_handler = NULL,
                  Representation* register_param_representations = NULL,
                  int hint_stack_parameter_count = -1,
@ -394,6 +395,7 @@ class CodeStubInterfaceDescriptor: public InterfaceDescriptor {
  Register stack_parameter_count() const { return stack_parameter_count_; }
  StubFunctionMode function_mode() const { return function_mode_; }
  Address deoptimization_handler() const { return deoptimization_handler_; }
  CodeStub::Major MajorKey() const { return major_; }

 private:
  Register stack_parameter_count_;
@ -407,6 +409,7 @@ class CodeStubInterfaceDescriptor: public InterfaceDescriptor {

  ExternalReference miss_handler_;
  bool has_miss_handler_;
  CodeStub::Major major_;
};
@ -743,6 +746,9 @@ class InstanceofStub: public PlatformCodeStub {

  void Generate(MacroAssembler* masm);

  virtual void InitializeInterfaceDescriptor(
      Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);

 private:
  Major MajorKey() const { return Instanceof; }
  int MinorKey() const { return static_cast<int>(flags_); }
@ -1132,10 +1138,11 @@ class CallApiGetterStub : public PlatformCodeStub {

class BinaryOpICStub : public HydrogenCodeStub {
 public:
  BinaryOpICStub(Isolate* isolate, Token::Value op, OverwriteMode mode)
  BinaryOpICStub(Isolate* isolate, Token::Value op,
                 OverwriteMode mode = NO_OVERWRITE)
      : HydrogenCodeStub(isolate, UNINITIALIZED), state_(isolate, op, mode) {}

  BinaryOpICStub(Isolate* isolate, const BinaryOpIC::State& state)
  explicit BinaryOpICStub(Isolate* isolate, const BinaryOpIC::State& state)
      : HydrogenCodeStub(isolate), state_(state) {}

  static void GenerateAheadOfTime(Isolate* isolate);
@ -1618,6 +1625,9 @@ class CallFunctionStub: public PlatformCodeStub {
    return ArgcBits::decode(minor_key);
  }

  virtual void InitializeInterfaceDescriptor(
      Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);

 private:
  int argc_;
  CallFunctionFlags flags_;
@ -1655,6 +1665,9 @@ class CallConstructStub: public PlatformCodeStub {
    code->set_has_function_cache(RecordCallTarget());
  }

  virtual void InitializeInterfaceDescriptor(
      Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);

 private:
  CallConstructorFlags flags_;
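
The thrust of this header change: both Initialize() overloads now take the stub's CodeStub::Major up front, and the descriptor exposes it back via MajorKey(). A sketch of a stub's wiring under the new signature (MyStub and the r1 argument register are hypothetical):

  void MyStub::InitializeInterfaceDescriptor(
      CodeStubInterfaceDescriptor* descriptor) {
    // Context plus one stub-specific argument register.
    Register registers[] = { InterfaceDescriptor::ContextRegister(), r1 };
    descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
  }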
src/compiler-intrinsics.h
@ -5,6 +5,8 @@
#ifndef V8_COMPILER_INTRINSICS_H_
#define V8_COMPILER_INTRINSICS_H_

#include "src/base/macros.h"

namespace v8 {
namespace internal {
src/compiler.cc
@ -9,6 +9,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compiler/pipeline.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
@ -57,6 +58,19 @@ CompilationInfo::CompilationInfo(Handle<Script> script,
}


CompilationInfo::CompilationInfo(Isolate* isolate, Zone* zone)
    : flags_(StrictModeField::encode(SLOPPY)),
      script_(Handle<Script>::null()),
      osr_ast_id_(BailoutId::None()),
      parameter_count_(0),
      this_has_uses_(true),
      optimization_id_(-1),
      ast_value_factory_(NULL),
      ast_value_factory_owned_(false) {
  Initialize(isolate, STUB, zone);
}


CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
                                 Zone* zone)
    : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
@ -354,15 +368,16 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
    return AbortAndDisableOptimization(kFunctionWithIllegalRedeclaration);
  }

  // Take --hydrogen-filter into account.
  // Check the whitelist for Crankshaft.
  if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
    return AbortOptimization(kHydrogenFilter);
  }

  // Crankshaft requires a version of fullcode with deoptimization support.
  // Recompile the unoptimized version of the code if the current version
  // doesn't have deoptimization support. Alternatively, we may decide to
  // run the full code generator to get a baseline for the compile-time
  // performance of the hydrogen-based compiler.
  // doesn't have deoptimization support already.
  // Otherwise, if we are gathering compilation time and space statistics
  // for hydrogen, gather baseline statistics for a fullcode compilation.
  bool should_recompile = !info()->shared_info()->has_deoptimization_support();
  if (should_recompile || FLAG_hydrogen_stats) {
    base::ElapsedTimer timer;
@ -390,14 +405,20 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
    }
  }

  // Check that the unoptimized, shared code is ready for
  // optimizations. When using the always_opt flag we disregard the
  // optimizable marker in the code object and optimize anyway. This
  // is safe as long as the unoptimized code has deoptimization
  // support.
  ASSERT(FLAG_always_opt || info()->shared_info()->code()->optimizable());
  ASSERT(info()->shared_info()->has_deoptimization_support());

  // Check the whitelist for TurboFan.
  if (info()->closure()->PassesFilter(FLAG_turbo_filter) &&
      // TODO(turbofan): Make try-catch work and remove this bailout.
      info()->function()->dont_optimize_reason() != kTryCatchStatement &&
      info()->function()->dont_optimize_reason() != kTryFinallyStatement &&
      // TODO(turbofan): Make OSR work and remove this bailout.
      !info()->is_osr()) {
    compiler::Pipeline pipeline(info());
    pipeline.GenerateCode();
    return SetLastStatus(SUCCEEDED);
  }

  if (FLAG_trace_hydrogen) {
    Handle<String> name = info()->function()->debug_name();
    PrintF("-----------------------------------------------------------\n");
@ -447,6 +468,11 @@ OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
  DisallowCodeDependencyChange no_dependency_change;

  ASSERT(last_status() == SUCCEEDED);
  // TODO(turbofan): Currently everything is done in the first phase.
  if (!info()->code().is_null()) {
    return last_status();
  }

  Timer t(this, &time_taken_to_optimize_);
  ASSERT(graph_ != NULL);
  BailoutReason bailout_reason = kNoReason;
@ -464,6 +490,12 @@ OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {

OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
  ASSERT(last_status() == SUCCEEDED);
  // TODO(turbofan): Currently everything is done in the first phase.
  if (!info()->code().is_null()) {
    RecordOptimizationStats();
    return last_status();
  }

  ASSERT(!info()->HasAbortedDueToDependencyChange());
  DisallowCodeDependencyChange no_dependency_change;
  DisallowJavascriptExecution no_js(isolate());
@ -1115,6 +1147,9 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
  Handle<Code> code = info->code();
  if (code->kind() != Code::OPTIMIZED_FUNCTION) return;  // Nothing to do.

  // Context specialization folds-in the context, so no sharing can occur.
  if (code->is_turbofanned() && FLAG_context_specialization) return;

  // Cache optimized code.
  if (FLAG_cache_optimized_code) {
    Handle<JSFunction> function = info->closure();
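
FLAG_turbo_filter gates which closures reach the new pipeline, mirroring FLAG_hydrogen_filter for Crankshaft. Assuming V8's usual FLAG_foo to --foo mapping, it would be exercised from the shell roughly like this (script and function name illustrative):

  d8 --turbo-filter=add test.js    # route only functions named 'add' through TurboFan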
src/compiler.h
@ -63,6 +63,7 @@ class ScriptData {
class CompilationInfo {
 public:
  CompilationInfo(Handle<JSFunction> closure, Zone* zone);
  CompilationInfo(Isolate* isolate, Zone* zone);
  virtual ~CompilationInfo();

  Isolate* isolate() const {
@ -391,7 +392,6 @@ class CompilationInfo {
  void Initialize(Isolate* isolate, Mode mode, Zone* zone);

  void SetMode(Mode mode) {
    ASSERT(isolate()->use_crankshaft());
    mode_ = mode;
  }
src/compiler/arm/code-generator-arm.cc (new file, 828 lines)
@ -0,0 +1,828 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm/macro-assembler-arm.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchReg r9


// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter : public InstructionOperandConverter {
 public:
  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  SBit OutputSBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_set:
        return SetCC;
      case kFlags_none:
        return LeaveCC;
    }
    UNREACHABLE();
    return LeaveCC;
  }

  Operand InputImmediate(int index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

  Operand InputOperand2(int first_index) {
    const int index = first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Offset_RI:
      case kMode_Offset_RR:
        break;
      case kMode_Operand2_I:
        return InputImmediate(index + 0);
      case kMode_Operand2_R:
        return Operand(InputRegister(index + 0));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_R:
        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSL_R:
        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_R:
        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
    }
    UNREACHABLE();
    return Operand::Zero();
  }

  MemOperand InputOffset(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_I:
      case kMode_Operand2_R:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ASR_R:
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSL_R:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_LSR_R:
        break;
      case kMode_Offset_RI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_Offset_RR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand InputOffset() {
    int index = 0;
    return InputOffset(&index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    ASSERT(op != NULL);
    ASSERT(!op->IsRegister());
    ASSERT(!op->IsDoubleRegister());
    ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};
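
To make the converter concrete: an instruction selected with a shifted-register second operand carries that operand as a trailing (register, shift-amount) pair, which InputOperand2 folds back into a single assembler Operand. A sketch of the round trip for kMode_Operand2_R_LSL_I (register names illustrative):

  // Selector:  opcode = kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I)
  //            inputs = [x, y, #3]
  // Generator: i.InputRegister(0) -> x
  //            i.InputOperand2(1) -> Operand(y, LSL, 3)
  // Emitted:   add dst, x, y, lsl #3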

// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  ArmOperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchJmp:
      __ b(code_->GetLabel(i.InputBlock(0)));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchNop:
      // don't emit code for nops.
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchRet:
      AssembleReturn();
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchDeoptimize: {
      int deoptimization_id = MiscField::decode(instr->opcode());
      BuildTranslation(instr, deoptimization_id);

      Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
          isolate(), deoptimization_id, Deoptimizer::LAZY);
      __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmAdd:
      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmAnd:
      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
      break;
    case kArmBic:
      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmMul:
      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputSBit());
      break;
    case kArmMla:
      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2), i.OutputSBit());
      break;
    case kArmMls: {
      CpuFeatureScope scope(masm(), MLS);
      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmSdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmMov:
      __ Move(i.OutputRegister(), i.InputOperand2(0));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmMvn:
      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmOrr:
      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmEor:
      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmSub:
      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmRsb:
      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmBfc: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUbfx: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                        Safepoint::kNoLazyDeopt);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ ldr(reg, MemOperand(reg, entry));
        __ Call(reg);
        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                        Safepoint::kNoLazyDeopt);
      }
      bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
      if (lazy_deopt) {
        RecordLazyDeoptimizationEntry(instr);
      }
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmCallJSFunction: {
      Register func = i.InputRegister(0);

      // TODO(jarin) The load of the context should be separated from the call.
      __ ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);

      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);
      RecordLazyDeoptimizationEntry(instr);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmCallAddress: {
      DirectCEntryStub stub(isolate());
      stub.GenerateCall(masm(), i.InputRegister(0));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmPush:
      __ Push(i.InputRegister(0));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmDrop: {
      int words = MiscField::decode(instr->opcode());
      __ Drop(words);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmCmp:
      __ cmp(i.InputRegister(0), i.InputOperand2(1));
      ASSERT_EQ(SetCC, i.OutputSBit());
      break;
    case kArmCmn:
      __ cmn(i.InputRegister(0), i.InputOperand2(1));
      ASSERT_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTst:
      __ tst(i.InputRegister(0), i.InputOperand2(1));
      ASSERT_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTeq:
      __ teq(i.InputRegister(0), i.InputOperand2(1));
      ASSERT_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVcmpF64:
      __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
                               i.InputDoubleRegister(1));
      ASSERT_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVaddF64:
      __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVsubF64:
      __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmulF64:
      __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlaF64:
      __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
              i.InputDoubleRegister(2));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlsF64:
      __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
              i.InputDoubleRegister(2));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVdivF64:
      __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmodF64: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVnegF64:
      __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArmVcvtF64S32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64U32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtS32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
      __ vmov(i.OutputRegister(), scratch);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtU32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
      __ vmov(i.OutputRegister(), scratch);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLoadWord8:
      __ ldrb(i.OutputRegister(), i.InputOffset());
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmStoreWord8: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strb(i.InputRegister(index), operand);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLoadWord16:
      __ ldrh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStoreWord16: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strh(i.InputRegister(index), operand);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLoadWord32:
      __ ldr(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStoreWord32: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ str(i.InputRegister(index), operand);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmFloat64Load:
      __ vldr(i.OutputDoubleRegister(), i.InputOffset());
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmFloat64Store: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputDoubleRegister(index), operand);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmStoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ add(index, object, index);
      __ str(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      __ RecordWrite(object, index, value, lr_status, mode);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
  }
}


// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  ArmOperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  switch (condition) {
    case kUnorderedEqual:
      __ b(vs, flabel);
      // Fall through.
    case kEqual:
      __ b(eq, tlabel);
      break;
    case kUnorderedNotEqual:
      __ b(vs, tlabel);
      // Fall through.
    case kNotEqual:
      __ b(ne, tlabel);
      break;
    case kSignedLessThan:
      __ b(lt, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ b(ge, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ b(le, tlabel);
      break;
    case kSignedGreaterThan:
      __ b(gt, tlabel);
      break;
    case kUnorderedLessThan:
      __ b(vs, flabel);
      // Fall through.
    case kUnsignedLessThan:
      __ b(lo, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ b(vs, tlabel);
      // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ b(hs, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ b(vs, flabel);
      // Fall through.
    case kUnsignedLessThanOrEqual:
      __ b(ls, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ b(vs, tlabel);
      // Fall through.
    case kUnsignedGreaterThan:
      __ b(hi, tlabel);
      break;
  }
  if (!fallthru) __ b(flabel);  // no fallthru to flabel.
  __ bind(&done);
}
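
The unordered cases above handle NaNs by branching on the VFP overflow/unordered flag first, then falling through to the ordinary condition. For kUnorderedLessThan the emitted sequence is roughly (labels schematic):

  bvs flabel    ; a NaN operand makes the compare unordered: take the false block
  blo tlabel    ; otherwise the plain less-than test decides
  b   flabel    ; only emitted when the false block is not the fallthrough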


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  ArmOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value.
  Label check;
  Register reg = i.OutputRegister();
  Condition cc = kNoCondition;
  switch (condition) {
    case kUnorderedEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
      // Fall through.
    case kEqual:
      cc = eq;
      break;
    case kUnorderedNotEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
      // Fall through.
    case kNotEqual:
      cc = ne;
      break;
    case kSignedLessThan:
      cc = lt;
      break;
    case kSignedGreaterThanOrEqual:
      cc = ge;
      break;
    case kSignedLessThanOrEqual:
      cc = le;
      break;
    case kSignedGreaterThan:
      cc = gt;
      break;
    case kUnorderedLessThan:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
      // Fall through.
    case kUnsignedLessThan:
      cc = lo;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
      // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = hs;
      break;
    case kUnorderedLessThanOrEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
      // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = ls;
      break;
    case kUnorderedGreaterThan:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
      // Fall through.
    case kUnsignedGreaterThan:
      cc = hi;
      break;
  }
  __ bind(&check);
  __ mov(reg, Operand(0));
  __ mov(reg, Operand(1), LeaveCC, cc);
  __ bind(&done);
}
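
The tail of AssembleArchBoolean is the usual branchless materialization: write 0 unconditionally, then conditionally overwrite with 1 (the conditional mov uses LeaveCC, so the flags survive). For kSignedLessThan the check block boils down to (r0 illustrative):

  mov   r0, #0    ; assume false
  movlt r0, #1    ; overwrite with true while the lt condition still holds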


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ Push(lr, fp);
    __ mov(fp, sp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
      __ stm(db_w, sp, saves);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
      __ b(ne, &ok);
      __ ldr(r2, GlobalObjectOperand());
      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
      __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
      __ bind(&ok);
    }

  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    __ sub(sp, sp, Operand(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ add(sp, sp, Operand(stack_slots * kPointerSize));
      }
      // Restore registers.
      const RegList saves = descriptor->CalleeSavedRegisters();
      if (saves != 0) {
        __ ldm(ia_w, sp, saves);
      }
    }
    __ mov(sp, fp);
    __ ldm(ia_w, sp, fp.bit() | lr.bit());
    __ Ret();
  } else {
    __ mov(sp, fp);
    __ ldm(ia_w, sp, fp.bit() | lr.bit());
    int pop_count =
        descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ str(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ldr(temp, src);
      __ str(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      Constant src = g.ToConstant(source);
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
          UNREACHABLE();
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
      }
      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
    } else if (destination->IsDoubleRegister()) {
      DwVfpRegister result = g.ToDoubleRegister(destination);
      __ vmov(result, g.ToDouble(source));
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      DwVfpRegister temp = kScratchDoubleReg;
      __ vmov(temp, g.ToDouble(source));
      __ vstr(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ vstr(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(g.ToDoubleRegister(destination), src);
    } else {
      DwVfpRegister temp = kScratchDoubleReg;
      __ vldr(temp, src);
      __ vstr(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ldr(src, dst);
      __ str(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    SwVfpRegister temp_1 = kScratchDoubleReg.low();
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ldr(temp_0, src);
    __ vldr(temp_1, dst);
    __ str(temp_0, dst);
    __ vstr(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister temp = kScratchDoubleReg;
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);  // Complete the swap by writing the saved value.
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ vldr(src, dst);
      __ vstr(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    ASSERT(destination->IsDoubleStackSlot());
    Register temp_0 = kScratchReg;
    DwVfpRegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
    __ vldr(temp_1, dst0);  // Save destination in temp_1.
    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
    __ str(temp_0, dst0);
    __ ldr(temp_0, src1);
    __ str(temp_0, dst1);
    __ vstr(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // On 32-bit ARM we do not insert nops for inlined Smi code.
  UNREACHABLE();
}

#ifdef DEBUG

// Checks whether the code between start_pc and end_pc is a no-op.
bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
                                            int end_pc) {
  return false;
}

#endif  // DEBUG

#undef __

}
}
}  // namespace v8::internal::compiler
src/compiler/arm/instruction-codes-arm.h (new file, 84 lines)
@ -0,0 +1,84 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
#define V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_

namespace v8 {
namespace internal {
namespace compiler {

// ARM-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(ArmAdd)                        \
  V(ArmAnd)                        \
  V(ArmBic)                        \
  V(ArmCmp)                        \
  V(ArmCmn)                        \
  V(ArmTst)                        \
  V(ArmTeq)                        \
  V(ArmOrr)                        \
  V(ArmEor)                        \
  V(ArmSub)                        \
  V(ArmRsb)                        \
  V(ArmMul)                        \
  V(ArmMla)                        \
  V(ArmMls)                        \
  V(ArmSdiv)                       \
  V(ArmUdiv)                       \
  V(ArmMov)                        \
  V(ArmMvn)                        \
  V(ArmBfc)                        \
  V(ArmUbfx)                       \
  V(ArmCallCodeObject)             \
  V(ArmCallJSFunction)             \
  V(ArmCallAddress)                \
  V(ArmPush)                       \
  V(ArmDrop)                       \
  V(ArmVcmpF64)                    \
  V(ArmVaddF64)                    \
  V(ArmVsubF64)                    \
  V(ArmVmulF64)                    \
  V(ArmVmlaF64)                    \
  V(ArmVmlsF64)                    \
  V(ArmVdivF64)                    \
  V(ArmVmodF64)                    \
  V(ArmVnegF64)                    \
  V(ArmVcvtF64S32)                 \
  V(ArmVcvtF64U32)                 \
  V(ArmVcvtS32F64)                 \
  V(ArmVcvtU32F64)                 \
  V(ArmFloat64Load)                \
  V(ArmFloat64Store)               \
  V(ArmLoadWord8)                  \
  V(ArmStoreWord8)                 \
  V(ArmLoadWord16)                 \
  V(ArmStoreWord16)                \
  V(ArmLoadWord32)                 \
  V(ArmStoreWord32)                \
  V(ArmStoreWriteBarrier)


// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
#define TARGET_ADDRESSING_MODE_LIST(V)  \
  V(Offset_RI)        /* [%r0 + K] */   \
  V(Offset_RR)        /* [%r0 + %r1] */ \
  V(Operand2_I)       /* K */           \
  V(Operand2_R)       /* %r0 */         \
  V(Operand2_R_ASR_I) /* %r0 ASR K */   \
  V(Operand2_R_LSL_I) /* %r0 LSL K */   \
  V(Operand2_R_LSR_I) /* %r0 LSR K */   \
  V(Operand2_R_ASR_R) /* %r0 ASR %r1 */ \
  V(Operand2_R_LSL_R) /* %r0 LSL %r1 */ \
  V(Operand2_R_LSR_R) /* %r0 LSR %r1 */

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
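
An InstructionCode packs the arch opcode with the addressing mode (plus misc bits) into a single word through bitfields; the instruction selector composes it and the code generator decodes it. A sketch of the round trip (field layout illustrative):

  InstructionCode opcode =
      kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I);
  ArchOpcode arch = ArchOpcodeField::decode(opcode);          // kArmAdd
  AddressingMode mode = AddressingModeField::decode(opcode);  // kMode_Operand2_R_LSL_I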
src/compiler/arm/instruction-selector-arm.cc (new file, 796 lines)
@ -0,0 +1,796 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler-intrinsics.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds Arm-specific methods for generating InstructionOperands.
class ArmOperandGenerator V8_FINAL : public OperandGenerator {
 public:
  explicit ArmOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool CanBeImmediate(Node* node, InstructionCode opcode) {
    int32_t value;
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
      case IrOpcode::kNumberConstant:
        value = ValueOf<int32_t>(node->op());
        break;
      default:
        return false;
    }
    switch (ArchOpcodeField::decode(opcode)) {
      case kArmAnd:
      case kArmMov:
      case kArmMvn:
      case kArmBic:
        return ImmediateFitsAddrMode1Instruction(value) ||
               ImmediateFitsAddrMode1Instruction(~value);

      case kArmAdd:
      case kArmSub:
      case kArmCmp:
      case kArmCmn:
        return ImmediateFitsAddrMode1Instruction(value) ||
               ImmediateFitsAddrMode1Instruction(-value);

      case kArmTst:
      case kArmTeq:
      case kArmOrr:
      case kArmEor:
      case kArmRsb:
        return ImmediateFitsAddrMode1Instruction(value);

      case kArmFloat64Load:
      case kArmFloat64Store:
        return value >= -1020 && value <= 1020 && (value % 4) == 0;

      case kArmLoadWord8:
      case kArmStoreWord8:
      case kArmLoadWord32:
      case kArmStoreWord32:
      case kArmStoreWriteBarrier:
        return value >= -4095 && value <= 4095;

      case kArmLoadWord16:
      case kArmStoreWord16:
        return value >= -255 && value <= 255;

      case kArchJmp:
      case kArchNop:
      case kArchRet:
      case kArchDeoptimize:
      case kArmMul:
      case kArmMla:
      case kArmMls:
      case kArmSdiv:
      case kArmUdiv:
      case kArmBfc:
      case kArmUbfx:
      case kArmCallCodeObject:
      case kArmCallJSFunction:
      case kArmCallAddress:
      case kArmPush:
      case kArmDrop:
      case kArmVcmpF64:
      case kArmVaddF64:
      case kArmVsubF64:
      case kArmVmulF64:
      case kArmVmlaF64:
      case kArmVmlsF64:
      case kArmVdivF64:
      case kArmVmodF64:
      case kArmVnegF64:
      case kArmVcvtF64S32:
      case kArmVcvtF64U32:
      case kArmVcvtS32F64:
      case kArmVcvtU32F64:
        return false;
    }
    UNREACHABLE();
    return false;
  }

 private:
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    return Assembler::ImmediateFitsAddrMode1Instruction(imm);
  }
};
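
ImmediateFitsAddrMode1Instruction defers to the assembler, which implements the classic ARM addressing-mode-1 rule: an immediate is encodable iff it is an 8-bit value rotated right by an even amount. A self-contained sketch under that assumption:

  static bool FitsAddrMode1(uint32_t imm) {
    // imm == ror(v, rot) for some v <= 0xff and even rot,
    // iff rotating imm left by rot recovers an 8-bit value.
    for (int rot = 0; rot < 32; rot += 2) {
      uint32_t v = rot == 0 ? imm : (imm << rot) | (imm >> (32 - rot));
      if (v <= 0xff) return true;
    }
    return false;
  }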


static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  ArmOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsDoubleRegister(node),
                 g.UseDoubleRegister(node->InputAt(0)),
                 g.UseDoubleRegister(node->InputAt(1)));
}


static Instruction* EmitBinop(InstructionSelector* selector,
                              InstructionCode opcode, size_t output_count,
                              InstructionOperand** outputs, Node* left,
                              Node* right, size_t label_count,
                              InstructionOperand** labels) {
  ArmOperandGenerator g(selector);
  InstructionOperand* inputs[5];
  size_t input_count = 0;

  inputs[input_count++] = g.UseRegister(left);
  if (g.CanBeImmediate(right, opcode)) {
    opcode |= AddressingModeField::encode(kMode_Operand2_I);
    inputs[input_count++] = g.UseImmediate(right);
  } else if (right->opcode() == IrOpcode::kWord32Sar) {
    Int32BinopMatcher mright(right);
    inputs[input_count++] = g.UseRegister(mright.left().node());
    if (mright.right().IsInRange(1, 32)) {
      opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
      inputs[input_count++] = g.UseImmediate(mright.right().node());
    } else {
      opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_R);
      inputs[input_count++] = g.UseRegister(mright.right().node());
    }
  } else if (right->opcode() == IrOpcode::kWord32Shl) {
    Int32BinopMatcher mright(right);
    inputs[input_count++] = g.UseRegister(mright.left().node());
    if (mright.right().IsInRange(0, 31)) {
      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
      inputs[input_count++] = g.UseImmediate(mright.right().node());
    } else {
      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_R);
      inputs[input_count++] = g.UseRegister(mright.right().node());
    }
  } else if (right->opcode() == IrOpcode::kWord32Shr) {
    Int32BinopMatcher mright(right);
    inputs[input_count++] = g.UseRegister(mright.left().node());
    if (mright.right().IsInRange(1, 32)) {
      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
      inputs[input_count++] = g.UseImmediate(mright.right().node());
    } else {
      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_R);
      inputs[input_count++] = g.UseRegister(mright.right().node());
    }
  } else {
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(right);
  }

  // Append the optional labels.
  while (label_count-- != 0) {
    inputs[input_count++] = *labels++;
  }

  ASSERT_NE(0, input_count);
  ASSERT_GE(ARRAY_SIZE(inputs), input_count);
  ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));

  return selector->Emit(opcode, output_count, outputs, input_count, inputs);
}


static Instruction* EmitBinop(InstructionSelector* selector,
                              InstructionCode opcode, Node* node, Node* left,
                              Node* right) {
  ArmOperandGenerator g(selector);
  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
  const size_t output_count = ARRAY_SIZE(outputs);
  return EmitBinop(selector, opcode, output_count, outputs, left, right, 0,
                   NULL);
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, InstructionCode reverse_opcode) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);

  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(m.left().node(), reverse_opcode) ||
      m.left().IsWord32Sar() || m.left().IsWord32Shl() ||
      m.left().IsWord32Shr()) {
    opcode = reverse_opcode;
    std::swap(left, right);
  }

  EmitBinop(selector, opcode, node, left, right);
}


void InstructionSelector::VisitLoad(Node* node) {
  MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  InstructionOperand* result = rep == kMachineFloat64
                                   ? g.DefineAsDoubleRegister(node)
                                   : g.DefineAsRegister(node);

  ArchOpcode opcode;
  switch (rep) {
    case kMachineFloat64:
      opcode = kArmFloat64Load;
      break;
    case kMachineWord8:
      opcode = kArmLoadWord8;
      break;
    case kMachineWord16:
      opcode = kArmLoadWord16;
      break;
    case kMachineTagged:  // Fall through.
    case kMachineWord32:
      opcode = kArmLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
         g.UseRegister(base), g.UseImmediate(index));
  } else if (g.CanBeImmediate(base, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
         g.UseRegister(index), g.UseImmediate(base));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), result,
         g.UseRegister(base), g.UseRegister(index));
  }
}


void InstructionSelector::VisitStore(Node* node) {
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineRepresentation rep = store_rep.rep;
  if (store_rep.write_barrier_kind == kFullWriteBarrier) {
    ASSERT(rep == kMachineTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    //                and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
    Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
         g.UseFixed(index, r5), g.UseFixed(value, r6), ARRAY_SIZE(temps),
         temps);
    return;
  }
  ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
  InstructionOperand* val = rep == kMachineFloat64 ? g.UseDoubleRegister(value)
                                                   : g.UseRegister(value);

  ArchOpcode opcode;
  switch (rep) {
    case kMachineFloat64:
      opcode = kArmFloat64Store;
      break;
    case kMachineWord8:
      opcode = kArmStoreWord8;
      break;
    case kMachineWord16:
      opcode = kArmStoreWord16;
      break;
    case kMachineTagged:  // Fall through.
    case kMachineWord32:
      opcode = kArmStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
         g.UseRegister(base), g.UseImmediate(index), val);
  } else if (g.CanBeImmediate(base, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
         g.UseRegister(index), g.UseImmediate(base), val);
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
         g.UseRegister(base), g.UseRegister(index), val);
  }
}


void InstructionSelector::VisitWord32And(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(-1)) {
      EmitBinop(this, kArmBic, node, m.right().node(), mleft.left().node());
      return;
    }
  }
  if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().Is(-1)) {
      EmitBinop(this, kArmBic, node, m.left().node(), mright.left().node());
      return;
    }
  }
  if (CpuFeatures::IsSupported(ARMv7) && m.right().HasValue()) {
    uint32_t value = m.right().Value();
    uint32_t width = CompilerIntrinsics::CountSetBits(value);
    uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value);
    if (msb + width == 32) {
      ASSERT_EQ(0, CompilerIntrinsics::CountTrailingZeros(value));
      if (m.left().IsWord32Shr()) {
        Int32BinopMatcher mleft(m.left().node());
        if (mleft.right().IsInRange(0, 31)) {
          Emit(kArmUbfx, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
          return;
        }
      }
      Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(0), g.TempImmediate(width));
      return;
    }
    // Try to interpret this AND as BFC.
    width = 32 - width;
    msb = CompilerIntrinsics::CountLeadingZeros(~value);
    uint32_t lsb = CompilerIntrinsics::CountTrailingZeros(~value);
    if (msb + width + lsb == 32) {
      Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
           g.TempImmediate(lsb), g.TempImmediate(width));
      return;
    }
  }
  VisitBinop(this, node, kArmAnd, kArmAnd);
}
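
Two mask shapes are recognized above; worked through on concrete constants (registers illustrative):

  x & 0xff         ; popcount 8, leading zeros 24, 24 + 8 == 32
                   ;   -> ubfx r0, r1, #0, #8   (extract the low byte)
  (x >> 8) & 0xff  ; the shift folds into the extract
                   ;   -> ubfx r0, r1, #8, #8
  x & ~0xff00      ; ~mask is one contiguous run of 8 ones starting at bit 8
                   ;   -> bfc r0, #8, #8        (clear bits 8..15 in place)

Any other constant falls back to a plain AND via VisitBinop.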
|
||||
|
||||
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kArmOrr, kArmOrr);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kArmMvn | AddressingModeField::encode(kMode_Operand2_R),
         g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kArmEor, kArmEor);
  }
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().IsInRange(0, 31)) {
    Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseImmediate(m.right().node()));
  } else {
    Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSL_R),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseRegister(m.right().node()));
  }
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CpuFeatures::IsSupported(ARMv7) && m.left().IsWord32And() &&
      m.right().IsInRange(0, 31)) {
    int32_t lsb = m.right().Value();
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint32_t value = (mleft.right().Value() >> lsb) << lsb;
      uint32_t width = CompilerIntrinsics::CountSetBits(value);
      uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value);
      if (msb + width + lsb == 32) {
        ASSERT_EQ(lsb, CompilerIntrinsics::CountTrailingZeros(value));
        Emit(kArmUbfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(width));
        return;
      }
    }
  }
  if (m.right().IsInRange(1, 32)) {
    Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSR_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseImmediate(m.right().node()));
    return;
  }
  Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSR_R),
       g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().IsInRange(1, 32)) {
    Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseImmediate(m.right().node()));
  } else {
    Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_ASR_R),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseRegister(m.right().node()));
  }
}


void InstructionSelector::VisitInt32Add(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
    return;
  }
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmAdd, kArmAdd);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CpuFeatures::IsSupported(MLS) && m.right().IsInt32Mul() &&
      CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmSub, kArmRsb);
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (IsPowerOf2(value - 1)) {
      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (value < kMaxInt && IsPowerOf2(value + 1)) {
      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      return;
    }
  }
  Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

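The two special cases above use the identities x * (2^k + 1) = x + (x << k), selected as ADD with a shifted operand, and x * (2^k - 1) = (x << k) - x, selected as RSB with a shifted operand. A quick sanity check in plain C++:

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = 1234;
  assert(x * 9 == x + (x << 3));  // 9 = 2^3 + 1 -> kArmAdd with LSL #3
  assert(x * 7 == (x << 3) - x);  // 7 = 2^3 - 1 -> kArmRsb with LSL #3
  return 0;
}
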
static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
                    ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
                    InstructionOperand* result_operand,
                    InstructionOperand* left_operand,
                    InstructionOperand* right_operand) {
  ArmOperandGenerator g(selector);
  if (CpuFeatures::IsSupported(SUDIV)) {
    selector->Emit(div_opcode, result_operand, left_operand, right_operand);
    return;
  }
  InstructionOperand* left_double_operand = g.TempDoubleRegister();
  InstructionOperand* right_double_operand = g.TempDoubleRegister();
  InstructionOperand* result_double_operand = g.TempDoubleRegister();
  selector->Emit(f64i32_opcode, left_double_operand, left_operand);
  selector->Emit(f64i32_opcode, right_double_operand, right_operand);
  selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
                 right_double_operand);
  selector->Emit(i32f64_opcode, result_operand, result_double_operand);
}

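When SUDIV is unavailable, the fallback converts both operands to double, divides in VFP, and truncates back. This is exact: every 32-bit integer is representable in a double (53-bit mantissa), and the truncating convert rounds toward zero, matching integer division. A sketch of the same computation in plain C++ (illustrative only, not the emitted code):

#include <cassert>
#include <cstdint>

int32_t DivViaDouble(int32_t a, int32_t b) {
  // Mirrors vcvt.f64.s32 x2, vdiv.f64, vcvt.s32.f64 (round toward zero).
  return static_cast<int32_t>(static_cast<double>(a) /
                              static_cast<double>(b));
}

int main() {
  assert(DivViaDouble(7, -2) == 7 / -2);  // both truncate toward zero: -3
  return 0;
}
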
static void VisitDiv(InstructionSelector* selector, Node* node,
                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
                     ArchOpcode i32f64_opcode) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
          g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}


static void VisitMod(InstructionSelector* selector, Node* node,
                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
                     ArchOpcode i32f64_opcode) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand* div_operand = g.TempRegister();
  InstructionOperand* result_operand = g.DefineAsRegister(node);
  InstructionOperand* left_operand = g.UseRegister(m.left().node());
  InstructionOperand* right_operand = g.UseRegister(m.right().node());
  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
          left_operand, right_operand);
  if (CpuFeatures::IsSupported(MLS)) {
    selector->Emit(kArmMls, result_operand, div_operand, right_operand,
                   left_operand);
    return;
  }
  InstructionOperand* mul_operand = g.TempRegister();
  selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
  selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
}

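Both paths above recover the remainder from the quotient via a % b = a - (a / b) * b; with MLS that is a single multiply-subtract, otherwise MUL followed by SUB. The same identity in C++, which shares ARM's truncated-division semantics:

#include <cassert>

int main() {
  int a = -7, b = 3;
  int div = a / b;        // kArmSdiv (or the double-based fallback above)
  int mod = a - div * b;  // kArmMls, or kArmMul + kArmSub
  assert(mod == a % b);   // C++ % uses the same truncated semantics: -1
  return 0;
}
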
void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}


void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtF64S32, g.DefineAsDoubleRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
       g.UseDoubleRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
         g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()));
    return;
  }
  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRRFloat64(this, kArmVaddF64, node);
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRRFloat64(this, kArmVsubF64, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.right().Is(-1.0)) {
    Emit(kArmVnegF64, g.DefineAsRegister(node),
         g.UseDoubleRegister(m.left().node()));
  } else {
    VisitRRRFloat64(this, kArmVmulF64, node);
  }
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRRFloat64(this, kArmVdivF64, node);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVmodF64, g.DefineAsFixedDouble(node, d0),
       g.UseFixedDouble(node->InputAt(0), d0),
       g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
}


void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  ArmOperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
  CallBuffer buffer(zone(), descriptor);  // TODO(turbofan): temp zone here?

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
  // register if there are multiple uses of it. Improve constant pool and the
  // heuristics in the register allocator for where to emit constants.
  InitializeCallBuffer(call, &buffer, true, false, continuation,
                       deoptimization);

  // TODO(dcarney): might be possible to use claim/poke instead
  // Push any stack arguments.
  for (int i = buffer.pushed_count - 1; i >= 0; --i) {
    Node* input = buffer.pushed_nodes[i];
    Emit(kArmPush, NULL, g.UseRegister(input));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      bool lazy_deopt = descriptor->CanLazilyDeoptimize();
      opcode = kArmCallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
      break;
    }
    case CallDescriptor::kCallAddress:
      opcode = kArmCallAddress;
      break;
    case CallDescriptor::kCallJSFunction:
      opcode = kArmCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.output_count, buffer.outputs,
           buffer.fixed_and_control_count(), buffer.fixed_and_control_args);

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    ASSERT(continuation != NULL);
    call_instr->MarkAsControl();
  }

  // Caller clean up of stack for C-style calls.
  if (descriptor->kind() == CallDescriptor::kCallAddress &&
      buffer.pushed_count > 0) {
    ASSERT(deoptimization == NULL && continuation == NULL);
    Emit(kArmDrop | MiscField::encode(buffer.pushed_count), NULL);
  }
}


// Shared routine for multiple compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative, bool requires_output) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);

  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(m.left().node(), opcode) || m.left().IsWord32Sar() ||
      m.left().IsWord32Shl() || m.left().IsWord32Shr()) {
    if (!commutative) cont->Commute();
    std::swap(left, right);
  }

  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    InstructionOperand* outputs[1];
    size_t output_count = 0;
    if (requires_output) {
      outputs[output_count++] = g.DefineAsRegister(node);
    }
    InstructionOperand* labels[] = {g.Label(cont->true_block()),
                                    g.Label(cont->false_block())};
    const size_t label_count = ARRAY_SIZE(labels);
    EmitBinop(selector, opcode, output_count, outputs, left, right, label_count,
              labels)->MarkAsControl();
  } else {
    ASSERT(cont->IsSet());
    EmitBinop(selector, opcode, cont->result(), left, right);
  }
}

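When the immediate (or shifter-friendly) operand sits on the left, the routine swaps the operands; for non-commutative comparisons the continuation's condition must be commuted as well, because a < b is equivalent to b > a, not b < a. Illustratively:

#include <cassert>

int main() {
  int a = 1, b = 2;
  // cmp a, b with lt is equivalent to cmp b, a with gt (condition commuted).
  assert((a < b) == (b > a));
  // Swapping operands without commuting the condition changes the meaning.
  assert((a < b) != (b < a));
  return 0;
}
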
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Add:
      return VisitWordCompare(this, node, kArmCmn, cont, true, false);
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kArmCmp, cont, false, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kArmTst, cont, true, false);
    case IrOpcode::kWord32Or:
      return VisitWordCompare(this, node, kArmOrr, cont, true, true);
    case IrOpcode::kWord32Xor:
      return VisitWordCompare(this, node, kArmTeq, cont, true, false);
    default:
      break;
  }

  ArmOperandGenerator g(this);
  InstructionCode opcode =
      cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
  if (cont->IsBranch()) {
    Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node),
         g.Label(cont->true_block()),
         g.Label(cont->false_block()))->MarkAsControl();
  } else {
    Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node),
         g.UseRegister(node));
  }
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArmCmp, cont, false, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (cont->IsBranch()) {
    Emit(cont->Encode(kArmVcmpF64), NULL, g.UseDoubleRegister(m.left().node()),
         g.UseDoubleRegister(m.right().node()), g.Label(cont->true_block()),
         g.Label(cont->false_block()))->MarkAsControl();
  } else {
    ASSERT(cont->IsSet());
    Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
         g.UseDoubleRegister(m.left().node()),
         g.UseDoubleRegister(m.right().node()));
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8

src/compiler/arm/linkage-arm.cc (new file)
@ -0,0 +1,66 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/assembler.h"
#include "src/code-stubs.h"
#include "src/compiler/linkage.h"
#include "src/compiler/linkage-impl.h"
#include "src/zone.h"

namespace v8 {
namespace internal {
namespace compiler {

struct LinkageHelperTraits {
  static Register ReturnValueReg() { return r0; }
  static Register ReturnValue2Reg() { return r1; }
  static Register JSCallFunctionReg() { return r1; }
  static Register ContextReg() { return cp; }
  static Register RuntimeCallFunctionReg() { return r1; }
  static Register RuntimeCallArgCountReg() { return r0; }
  static RegList CCalleeSaveRegisters() {
    return r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() |
           r10.bit();
  }
  static Register CRegisterParameter(int i) {
    static Register register_parameters[] = {r0, r1, r2, r3};
    return register_parameters[i];
  }
  static int CRegisterParametersLength() { return 4; }
};


CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
  return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
      zone, parameter_count);
}


CallDescriptor* Linkage::GetRuntimeCallDescriptor(
    Runtime::FunctionId function, int parameter_count,
    Operator::Property properties,
    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
  return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
      zone, function, parameter_count, properties, can_deoptimize);
}


CallDescriptor* Linkage::GetStubCallDescriptor(
    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
  return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
      this->info_->zone(), descriptor, stack_parameter_count);
}


CallDescriptor* Linkage::GetSimplifiedCDescriptor(
    Zone* zone, int num_params, MachineRepresentation return_type,
    const MachineRepresentation* param_types) {
  return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
      zone, num_params, return_type, param_types);
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8

src/compiler/arm64/code-generator-arm64.cc (new file)
@ -0,0 +1,825 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm64/macro-assembler-arm64.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter V8_FINAL : public InstructionOperandConverter {
 public:
  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Register InputRegister32(int index) {
    return ToRegister(instr_->InputAt(index)).W();
  }

  Register InputRegister64(int index) { return InputRegister(index); }

  Operand InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand InputOperand64(int index) { return InputOperand(index); }

  Operand InputOperand32(int index) {
    return ToOperand32(instr_->InputAt(index));
  }

  Register OutputRegister64() { return OutputRegister(); }

  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }

  MemOperand MemoryOperand(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          SXTW);
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand() {
    int index = 0;
    return MemoryOperand(&index);
  }

  Operand ToOperand(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return ToImmediate(op);
  }

  Operand ToOperand32(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op).W());
    }
    return ToImmediate(op);
  }

  Operand ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
    }
    UNREACHABLE();
    return Operand(-1);
  }

  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
    ASSERT(op != NULL);
    ASSERT(!op->IsRegister());
    ASSERT(!op->IsDoubleRegister());
    ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
                      offset.offset());
  }
};


#define ASSEMBLE_SHIFT(asm_instr, width)                                       \
  do {                                                                         \
    if (instr->InputAt(1)->IsRegister()) {                                     \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),       \
                   i.InputRegister##width(1));                                 \
    } else {                                                                   \
      int64_t imm = i.InputOperand##width(1).immediate().value();              \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
    }                                                                          \
  } while (0);

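For reference, ASSEMBLE_SHIFT(Lsl, 32) expands to roughly the following (with the double underscore standing for masm()->, as defined above); the do/while(0) wrapper keeps the multi-statement body safe inside unbraced if/else:

do {
  if (instr->InputAt(1)->IsRegister()) {
    // Shift amount in a register.
    masm()->Lsl(i.OutputRegister32(), i.InputRegister32(0),
                i.InputRegister32(1));
  } else {
    // Shift amount as an immediate.
    int64_t imm = i.InputOperand32(1).immediate().value();
    masm()->Lsl(i.OutputRegister32(), i.InputRegister32(0), imm);
  }
} while (0);
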
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  Arm64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchJmp:
      __ B(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchDeoptimize: {
      int deoptimization_id = MiscField::decode(instr->opcode());
      BuildTranslation(instr, deoptimization_id);

      Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
          isolate(), deoptimization_id, Deoptimizer::LAZY);
      __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
      break;
    }
    case kArm64Add:
      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Add32:
      __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64And32:
      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mul32:
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Idiv:
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Idiv32:
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Udiv:
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Udiv32:
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Imod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Imod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Umod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Umod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    // TODO(dcarney): use mvn instr??
    case kArm64Not:
      __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
      break;
    case kArm64Not32:
      __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
      break;
    case kArm64Neg:
      __ Neg(i.OutputRegister(), i.InputOperand(0));
      break;
    case kArm64Neg32:
      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
      break;
    case kArm64Or:
      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Or32:
      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Xor:
      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Xor32:
      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Sub:
      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Sub32:
      __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Shl:
      ASSEMBLE_SHIFT(Lsl, 64);
      break;
    case kArm64Shl32:
      ASSEMBLE_SHIFT(Lsl, 32);
      break;
    case kArm64Shr:
      ASSEMBLE_SHIFT(Lsr, 64);
      break;
    case kArm64Shr32:
      ASSEMBLE_SHIFT(Lsr, 32);
      break;
    case kArm64Sar:
      ASSEMBLE_SHIFT(Asr, 64);
      break;
    case kArm64Sar32:
      ASSEMBLE_SHIFT(Asr, 32);
      break;
    case kArm64CallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                        Safepoint::kNoLazyDeopt);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Ldr(reg, MemOperand(reg, entry));
        __ Call(reg);
        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                        Safepoint::kNoLazyDeopt);
      }
      bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
      if (lazy_deopt) {
        RecordLazyDeoptimizationEntry(instr);
      }
      // Meaningless instruction for ICs to overwrite.
      AddNopForSmiCodeInlining();
      break;
    }
    case kArm64CallJSFunction: {
      Register func = i.InputRegister(0);

      // TODO(jarin) The load of the context should be separated from the call.
      __ Ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(x10);

      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);
      RecordLazyDeoptimizationEntry(instr);
      break;
    }
    case kArm64CallAddress: {
      DirectCEntryStub stub(isolate());
      stub.GenerateCall(masm(), i.InputRegister(0));
      break;
    }
    case kArm64Claim: {
      int words = MiscField::decode(instr->opcode());
      __ Claim(words);
      break;
    }
    case kArm64Poke: {
      int slot = MiscField::decode(instr->opcode());
      Operand operand(slot * kPointerSize);
      __ Poke(i.InputRegister(0), operand);
      break;
    }
    case kArm64PokePairZero: {
      // TODO(dcarney): test slot offset and register order.
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
      break;
    }
    case kArm64PokePair: {
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
      break;
    }
    case kArm64Drop: {
      int words = MiscField::decode(instr->opcode());
      __ Drop(words);
      break;
    }
    case kArm64Cmp:
      __ Cmp(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmp32:
      __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Tst:
      __ Tst(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Tst32:
      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Float64Cmp:
      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kArm64Float64Add:
      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Sub:
      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mul:
      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Div:
      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mod: {
      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
      FrameScope scope(masm(), StackFrame::MANUAL);
      ASSERT(d0.is(i.InputDoubleRegister(0)));
      ASSERT(d1.is(i.InputDoubleRegister(1)));
      ASSERT(d0.is(i.OutputDoubleRegister()));
      // TODO(dcarney): make sure this saves all relevant registers.
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      break;
    }
    case kArm64Int32ToInt64:
      __ Sxtw(i.OutputRegister(), i.InputRegister(0));
      break;
    case kArm64Int64ToInt32:
      if (!i.OutputRegister().is(i.InputRegister(0))) {
        __ Mov(i.OutputRegister(), i.InputRegister(0));
      }
      break;
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64LoadWord8:
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64StoreWord8:
      __ Strb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LoadWord16:
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64StoreWord16:
      __ Strh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LoadWord32:
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      break;
    case kArm64StoreWord32:
      __ Str(i.InputRegister32(2), i.MemoryOperand());
      break;
    case kArm64LoadWord64:
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64StoreWord64:
      __ Str(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64Float64Load:
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kArm64Float64Store:
      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
    case kArm64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ Add(index, object, Operand(index, SXTW));
      __ Str(value, MemOperand(index));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      // TODO(dcarney): we shouldn't test write barriers from c calls.
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      UseScratchRegisterScope scope(masm());
      Register temp = no_reg;
      if (csp.is(masm()->StackPointer())) {
        temp = scope.AcquireX();
        lr_status = kLRHasBeenSaved;
        __ Push(lr, temp);  // Need to push a pair
      }
      __ RecordWrite(object, index, value, lr_status, mode);
      if (csp.is(masm()->StackPointer())) {
        __ Pop(temp, lr);
      }
      break;
    }
  }
}


// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  switch (condition) {
    case kUnorderedEqual:
      __ B(vs, flabel);
      // Fall through.
    case kEqual:
      __ B(eq, tlabel);
      break;
    case kUnorderedNotEqual:
      __ B(vs, tlabel);
      // Fall through.
    case kNotEqual:
      __ B(ne, tlabel);
      break;
    case kSignedLessThan:
      __ B(lt, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ B(ge, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ B(le, tlabel);
      break;
    case kSignedGreaterThan:
      __ B(gt, tlabel);
      break;
    case kUnorderedLessThan:
      __ B(vs, flabel);
      // Fall through.
    case kUnsignedLessThan:
      __ B(lo, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ B(vs, tlabel);
      // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ B(hs, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ B(vs, flabel);
      // Fall through.
    case kUnsignedLessThanOrEqual:
      __ B(ls, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ B(vs, tlabel);
      // Fall through.
    case kUnsignedGreaterThan:
      __ B(hi, tlabel);
      break;
  }
  if (!fallthru) __ B(flabel);  // no fallthru to flabel.
  __ Bind(&done);
}

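After an Fcmp whose operands include NaN, the result is "unordered" and the V flag is set, so every kUnordered* case first dispatches on vs: to the false block when NaN must fail the comparison (equality, less-than), or to the true block when NaN must satisfy it (not-equal), before falling through to the ordered test. The IEEE behaviour being encoded, expressed in C++:

#include <cassert>
#include <cmath>

int main() {
  double nan = std::nan("");
  assert(!(nan == 1.0));  // kUnorderedEqual: vs branches to the false block
  assert(nan != 1.0);     // kUnorderedNotEqual: vs branches to the true block
  assert(!(nan < 1.0));   // kUnorderedLessThan: vs branches to the false block
  return 0;
}
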
// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value.
  Label check;
  Register reg = i.OutputRegister();
  Condition cc = nv;
  switch (condition) {
    case kUnorderedEqual:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
      // Fall through.
    case kEqual:
      cc = eq;
      break;
    case kUnorderedNotEqual:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
      // Fall through.
    case kNotEqual:
      cc = ne;
      break;
    case kSignedLessThan:
      cc = lt;
      break;
    case kSignedGreaterThanOrEqual:
      cc = ge;
      break;
    case kSignedLessThanOrEqual:
      cc = le;
      break;
    case kSignedGreaterThan:
      cc = gt;
      break;
    case kUnorderedLessThan:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
      // Fall through.
    case kUnsignedLessThan:
      cc = lo;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
      // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = hs;
      break;
    case kUnorderedLessThanOrEqual:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
      // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = ls;
      break;
    case kUnorderedGreaterThan:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
      // Fall through.
    case kUnsignedGreaterThan:
      cc = hi;
      break;
  }
  __ Bind(&check);
  __ Cset(reg, cc);
  __ B(&done);
  __ Bind(&done);
}


// TODO(dcarney): increase stack slots in frame once before first use.
static int AlignedStackSlots(int stack_slots) {
  if (stack_slots & 1) stack_slots++;
  return stack_slots;
}

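The even rounding here reflects the ARM64 requirement that csp stay 16-byte aligned: with 8-byte slots, an odd count would leave csp misaligned, so the prologue below subtracts the raw slot count from jssp but the aligned count from csp. A trivial check of the rounding:

#include <cassert>

static int AlignedStackSlots(int stack_slots) {  // same logic as above
  if (stack_slots & 1) stack_slots++;
  return stack_slots;
}

int main() {
  assert(AlignedStackSlots(3) == 4);  // 24 bytes rounds up to 32, 16-aligned
  assert(AlignedStackSlots(4) == 4);  // already even, unchanged
  return 0;
}
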
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ SetStackPointer(csp);
    __ Push(lr, fp);
    __ Mov(fp, csp);
    // TODO(dcarney): correct callee saved registers.
    __ PushCalleeSavedRegisters();
    frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ SetStackPointer(jssp);
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ Bind(&ok);
    }

  } else {
    __ SetStackPointer(jssp);
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    Register sp = __ StackPointer();
    if (!sp.Is(csp)) {
      __ Sub(sp, sp, stack_slots * kPointerSize);
    }
    __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
      }
      // Restore registers.
      // TODO(dcarney): correct callee saved registers.
      __ PopCalleeSavedRegisters();
    }
    __ Mov(csp, fp);
    __ Pop(fp, lr);
    __ Ret();
  } else {
    __ Mov(jssp, fp);
    __ Pop(fp, lr);
    int pop_count =
        descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(g.ToRegister(destination), src);
    } else {
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsStackSlot()) {
    MemOperand src = g.ToMemOperand(source, masm());
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    if (destination->IsRegister()) {
      __ Ldr(g.ToRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      UseScratchRegisterScope scope(masm());
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : scope.AcquireX();
      Constant src = g.ToConstant(source);
      if (src.type() == Constant::kHeapObject) {
        __ LoadObject(dst, src.ToHeapObject());
      } else {
        __ Mov(dst, g.ToImmediate(source));
      }
      if (destination->IsStackSlot()) {
        __ Str(dst, g.ToMemOperand(destination, masm()));
      }
    } else if (destination->IsDoubleRegister()) {
      FPRegister result = g.ToDoubleRegister(destination);
      __ Fmov(result, g.ToDouble(constant_source));
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Fmov(temp, g.ToDouble(constant_source));
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleRegister()) {
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(dst, src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleStackSlot()) {
    ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source, masm());
    if (destination->IsDoubleRegister()) {
      __ Ldr(g.ToDoubleRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    UseScratchRegisterScope scope(masm());
    Register temp = scope.AcquireX();
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Mov(temp, src);
      __ Mov(src, dst);
      __ Mov(dst, temp);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Mov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
    UseScratchRegisterScope scope(masm());
    CPURegister temp_0 = scope.AcquireX();
    CPURegister temp_1 = scope.AcquireX();
    MemOperand src = g.ToMemOperand(source, masm());
    MemOperand dst = g.ToMemOperand(destination, masm());
    __ Ldr(temp_0, src);
    __ Ldr(temp_1, dst);
    __ Str(temp_0, dst);
    __ Str(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    UseScratchRegisterScope scope(masm());
    FPRegister temp = scope.AcquireD();
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(temp, src);
      __ Fmov(src, dst);
      __ Fmov(dst, temp);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Fmov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }

#undef __

#if DEBUG

// Checks whether the code between start_pc and end_pc is a no-op.
bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
                                            int end_pc) {
  if (start_pc + 4 != end_pc) {
    return false;
  }
  Address instr_address = code->instruction_start() + start_pc;

  v8::internal::Instruction* instr =
      reinterpret_cast<v8::internal::Instruction*>(instr_address);
  return instr->IsMovz() && instr->Rd() == xzr.code() && instr->SixtyFourBits();
}

#endif  // DEBUG

}  // namespace compiler
}  // namespace internal
}  // namespace v8

src/compiler/arm64/instruction-codes-arm64.h (new file)
@ -0,0 +1,101 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
#define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_

namespace v8 {
namespace internal {
namespace compiler {

// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(Arm64Add)                      \
  V(Arm64Add32)                    \
  V(Arm64And)                      \
  V(Arm64And32)                    \
  V(Arm64Cmp)                      \
  V(Arm64Cmp32)                    \
  V(Arm64Tst)                      \
  V(Arm64Tst32)                    \
  V(Arm64Or)                       \
  V(Arm64Or32)                     \
  V(Arm64Xor)                      \
  V(Arm64Xor32)                    \
  V(Arm64Sub)                      \
  V(Arm64Sub32)                    \
  V(Arm64Mul)                      \
  V(Arm64Mul32)                    \
  V(Arm64Idiv)                     \
  V(Arm64Idiv32)                   \
  V(Arm64Udiv)                     \
  V(Arm64Udiv32)                   \
  V(Arm64Imod)                     \
  V(Arm64Imod32)                   \
  V(Arm64Umod)                     \
  V(Arm64Umod32)                   \
  V(Arm64Not)                      \
  V(Arm64Not32)                    \
  V(Arm64Neg)                      \
  V(Arm64Neg32)                    \
  V(Arm64Shl)                      \
  V(Arm64Shl32)                    \
  V(Arm64Shr)                      \
  V(Arm64Shr32)                    \
  V(Arm64Sar)                      \
  V(Arm64Sar32)                    \
  V(Arm64CallCodeObject)           \
  V(Arm64CallJSFunction)           \
  V(Arm64CallAddress)              \
  V(Arm64Claim)                    \
  V(Arm64Poke)                     \
  V(Arm64PokePairZero)             \
  V(Arm64PokePair)                 \
  V(Arm64Drop)                     \
  V(Arm64Float64Cmp)               \
  V(Arm64Float64Add)               \
  V(Arm64Float64Sub)               \
  V(Arm64Float64Mul)               \
  V(Arm64Float64Div)               \
  V(Arm64Float64Mod)               \
  V(Arm64Int32ToInt64)             \
  V(Arm64Int64ToInt32)             \
  V(Arm64Float64ToInt32)           \
  V(Arm64Int32ToFloat64)           \
  V(Arm64Float64Load)              \
  V(Arm64Float64Store)             \
  V(Arm64LoadWord8)                \
  V(Arm64StoreWord8)               \
  V(Arm64LoadWord16)               \
  V(Arm64StoreWord16)              \
  V(Arm64LoadWord32)               \
  V(Arm64StoreWord32)              \
  V(Arm64LoadWord64)               \
  V(Arm64StoreWord64)              \
  V(Arm64StoreWriteBarrier)


// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MRI) /* [%r0 + K] */               \
  V(MRR) /* [%r0 + %r1] */

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_

src/compiler/arm64/instruction-selector-arm64.cc (new file)
@ -0,0 +1,606 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"

namespace v8 {
namespace internal {
namespace compiler {

enum ImmediateMode {
  kArithimeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
  kShift32Imm,      // 0 - 31
  kShift64Imm,      // 0 - 63
  kLogical32Imm,
  kLogical64Imm,
  kLoadStoreImm,  // unsigned 9 bit or signed 7 bit
  kNoImmediate
};


// Adds Arm64-specific methods for generating operands.
class Arm64OperandGenerator V8_FINAL : public OperandGenerator {
 public:
  explicit Arm64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* UseOperand(Node* node, ImmediateMode mode) {
    if (CanBeImmediate(node, mode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool CanBeImmediate(Node* node, ImmediateMode mode) {
    int64_t value;
    switch (node->opcode()) {
      // TODO(turbofan): SMI number constants as immediates.
      case IrOpcode::kInt32Constant:
        value = ValueOf<int32_t>(node->op());
        break;
      default:
        return false;
    }
    unsigned ignored;
    switch (mode) {
      case kLogical32Imm:
        // TODO(dcarney): some unencodable values can be handled by
        // switching instructions.
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
                                       &ignored, &ignored, &ignored);
      case kLogical64Imm:
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
                                       &ignored, &ignored, &ignored);
      case kArithimeticImm:
        // TODO(dcarney): -values can be handled by instruction swapping
        return Assembler::IsImmAddSub(value);
      case kShift32Imm:
        return 0 <= value && value < 31;
      case kShift64Imm:
        return 0 <= value && value < 63;
      case kLoadStoreImm:
        return (0 <= value && value < (1 << 9)) ||
               (-(1 << 6) <= value && value < (1 << 6));
      case kNoImmediate:
        return false;
    }
    return false;
  }
};

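The kLoadStoreImm case accepts exactly the two ARM64 load/store offset forms noted in the comment: an unsigned 9-bit offset in [0, 512) or a signed 7-bit offset in [-64, 64). A standalone version of the same range check, with a hypothetical helper name:

#include <cassert>
#include <cstdint>

static bool InLoadStoreImmRange(int64_t value) {  // mirrors kLoadStoreImm
  return (0 <= value && value < (1 << 9)) ||       // unsigned 9-bit
         (-(1 << 6) <= value && value < (1 << 6)); // signed 7-bit
}

int main() {
  assert(InLoadStoreImmRange(511));   // fits the unsigned form
  assert(InLoadStoreImmRange(-64));   // fits the signed form
  assert(!InLoadStoreImmRange(-65));  // out of both ranges
  return 0;
}
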
static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
|
||||
Node* node) {
|
||||
Arm64OperandGenerator g(selector);
|
||||
selector->Emit(opcode, g.DefineAsRegister(node),
|
||||
g.UseRegister(node->InputAt(0)));
|
||||
}
|
||||
|
||||
|
||||
static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
|
||||
Node* node) {
|
||||
Arm64OperandGenerator g(selector);
|
||||
selector->Emit(opcode, g.DefineAsRegister(node),
|
||||
g.UseRegister(node->InputAt(0)),
|
||||
g.UseRegister(node->InputAt(1)));
|
||||
}
|
||||
|
||||
|
||||
static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
|
||||
Node* node) {
|
||||
Arm64OperandGenerator g(selector);
|
||||
selector->Emit(opcode, g.DefineAsDoubleRegister(node),
|
||||
g.UseDoubleRegister(node->InputAt(0)),
|
||||
g.UseDoubleRegister(node->InputAt(1)));
|
||||
}
|
||||
|
||||
|
||||
static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
|
||||
Node* node, ImmediateMode operand_mode) {
|
||||
Arm64OperandGenerator g(selector);
|
||||
selector->Emit(opcode, g.DefineAsRegister(node),
|
||||
g.UseRegister(node->InputAt(0)),
|
||||
g.UseOperand(node->InputAt(1), operand_mode));
|
||||
}
|
||||
|
||||
|
||||
// Shared routine for multiple binary operations.
|
||||
static void VisitBinop(InstructionSelector* selector, Node* node,
|
||||
ArchOpcode opcode, ImmediateMode operand_mode,
|
||||
bool commutative) {
|
||||
VisitRRO(selector, opcode, node, operand_mode);
|
||||
}
|
||||
|
||||
|
||||
void InstructionSelector::VisitLoad(Node* node) {
|
||||
MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
|
||||
Arm64OperandGenerator g(this);
|
||||
Node* base = node->InputAt(0);
|
||||
Node* index = node->InputAt(1);
|
||||
|
||||
InstructionOperand* result = rep == kMachineFloat64
|
||||
? g.DefineAsDoubleRegister(node)
|
||||
: g.DefineAsRegister(node);
|
||||
|
||||
ArchOpcode opcode;
|
||||
switch (rep) {
|
||||
case kMachineFloat64:
|
||||
opcode = kArm64Float64Load;
|
||||
break;
|
||||
case kMachineWord8:
|
||||
opcode = kArm64LoadWord8;
|
||||
break;
|
||||
case kMachineWord16:
|
||||
opcode = kArm64LoadWord16;
|
||||
break;
|
||||
case kMachineWord32:
|
||||
opcode = kArm64LoadWord32;
|
||||
break;
|
||||
case kMachineTagged: // Fall through.
|
||||
case kMachineWord64:
|
||||
opcode = kArm64LoadWord64;
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
return;
|
||||
}
|
||||
if (g.CanBeImmediate(index, kLoadStoreImm)) {
|
||||
Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
|
||||
g.UseRegister(base), g.UseImmediate(index));
|
||||
} else if (g.CanBeImmediate(index, kLoadStoreImm)) {
|
||||
Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
|
||||
g.UseRegister(index), g.UseImmediate(base));
|
||||
} else {
|
||||
Emit(opcode | AddressingModeField::encode(kMode_MRR), result,
|
||||
g.UseRegister(base), g.UseRegister(index));
|
||||
}
|
||||
}


void InstructionSelector::VisitStore(Node* node) {
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineRepresentation rep = store_rep.rep;
  if (store_rep.write_barrier_kind == kFullWriteBarrier) {
    ASSERT(rep == kMachineTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    // and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
    Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
         g.UseFixed(index, x11), g.UseFixed(value, x12), ARRAY_SIZE(temps),
         temps);
    return;
  }
  ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
  InstructionOperand* val;
  if (rep == kMachineFloat64) {
    val = g.UseDoubleRegister(value);
  } else {
    val = g.UseRegister(value);
  }
  ArchOpcode opcode;
  switch (rep) {
    case kMachineFloat64:
      opcode = kArm64Float64Store;
      break;
    case kMachineWord8:
      opcode = kArm64StoreWord8;
      break;
    case kMachineWord16:
      opcode = kArm64StoreWord16;
      break;
    case kMachineWord32:
      opcode = kArm64StoreWord32;
      break;
    case kMachineTagged:  // Fall through.
    case kMachineWord64:
      opcode = kArm64StoreWord64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, kLoadStoreImm)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(base), g.UseImmediate(index), val);
  } else if (g.CanBeImmediate(base, kLoadStoreImm)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(index), g.UseImmediate(base), val);
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
         g.UseRegister(base), g.UseRegister(index), val);
  }
}
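// Note that a tagged store needing a full write barrier never reaches the
// addressing-mode selection above: it is emitted as a single
// kArm64StoreWriteBarrier with base, index and value pinned to the fixed
// registers x10-x12 expected by the record-write code.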


void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kArm64And32, kLogical32Imm, true);
}


void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kArm64And, kLogical64Imm, true);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kArm64Or32, kLogical32Imm, true);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kArm64Or, kLogical64Imm, true);
}


template <typename T>
static void VisitXor(InstructionSelector* selector, Node* node,
                     ArchOpcode xor_opcode, ArchOpcode not_opcode) {
  Arm64OperandGenerator g(selector);
  BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
  if (m.right().Is(-1)) {
    selector->Emit(not_opcode, g.DefineAsRegister(node),
                   g.UseRegister(m.left().node()));
  } else {
    VisitBinop(selector, node, xor_opcode, kLogical32Imm, true);
  }
}
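// E.g. Word32Xor(x, Int32Constant(-1)) matches the right-is-minus-one case
// above and is strength-reduced to a single kArm64Not32 instead of an eor
// with an all-ones immediate.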


void InstructionSelector::VisitWord32Xor(Node* node) {
  VisitXor<int32_t>(this, node, kArm64Xor32, kArm64Not32);
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  VisitXor<int64_t>(this, node, kArm64Xor, kArm64Not);
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitRRO(this, kArm64Shl32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  VisitRRO(this, kArm64Shl, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitRRO(this, kArm64Shr32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitRRO(this, kArm64Shr, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitRRO(this, kArm64Sar32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kArm64Sar, node, kShift64Imm);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop(this, node, kArm64Add32, kArithimeticImm, true);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kArm64Add, kArithimeticImm, true);
}


template <typename T>
static void VisitSub(InstructionSelector* selector, Node* node,
                     ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
  Arm64OperandGenerator g(selector);
  BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
  if (m.left().Is(0)) {
    selector->Emit(neg_opcode, g.DefineAsRegister(node),
                   g.UseRegister(m.right().node()));
  } else {
    VisitBinop(selector, node, sub_opcode, kArithimeticImm, false);
  }
}
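// E.g. Int32Sub(Int32Constant(0), x) matches the left-is-zero case above and
// is selected as a single kArm64Neg32 rather than a subtraction from a
// zeroed register.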


void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32);
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitSub<int64_t>(this, node, kArm64Sub, kArm64Neg);
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  VisitRRR(this, kArm64Mul32, node);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitRRR(this, kArm64Mul, node);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kArm64Idiv32, node);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kArm64Idiv, node);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitRRR(this, kArm64Udiv32, node);
}


void InstructionSelector::VisitInt64UDiv(Node* node) {
  VisitRRR(this, kArm64Udiv, node);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kArm64Imod32, node);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kArm64Imod, node);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitRRR(this, kArm64Umod32, node);
}


void InstructionSelector::VisitInt64UMod(Node* node) {
  VisitRRR(this, kArm64Umod, node);
}


void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
  VisitRR(this, kArm64Int32ToInt64, node);
}


void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
  VisitRR(this, kArm64Int64ToInt32, node);
}


void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Int32ToFloat64, g.DefineAsDoubleRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
       g.UseDoubleRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Add, node);
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Sub, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Mul, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Div, node);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixedDouble(node, d0),
       g.UseFixedDouble(node->InputAt(0), d0),
       g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
}
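// Float64Mod has no single ARM64 instruction and is lowered to a call, so
// the inputs and the result are pinned to the fixed argument/result
// registers d0 and d1 and the instruction is marked as a call for the
// register allocator.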


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand* left, InstructionOperand* right,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    ASSERT(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative) {
  Arm64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, kArithimeticImm)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, kArithimeticImm)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}
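// E.g. for Int32LessThan(Int32Constant(5), x) only the left operand is an
// immediate, so the operands are swapped to compare x against #5 and the
// continuation's condition is commuted from "less than" to "greater than"
// to compensate.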


void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kArm64Tst32, cont, true);
    default:
      break;
  }

  Arm64OperandGenerator g(this);
  VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node),
               cont);
}


void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kWord64And:
      return VisitWordCompare(this, node, kArm64Tst, cont, true);
    default:
      break;
  }

  Arm64OperandGenerator g(this);
  VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node), cont);
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArm64Cmp32, cont, false);
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArm64Cmp, cont, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kArm64Float64Cmp, g.UseDoubleRegister(left),
               g.UseDoubleRegister(right), cont);
}


void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  Arm64OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
  CallBuffer buffer(zone(), descriptor);  // TODO(turbofan): temp zone here?

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
  // register if there are multiple uses of it. Improve constant pool and the
  // heuristics in the register allocator for where to emit constants.
  InitializeCallBuffer(call, &buffer, true, false, continuation,
                       deoptimization);

  // Push the arguments to the stack.
  bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress;
  bool pushed_count_uneven = buffer.pushed_count & 1;
  int aligned_push_count = buffer.pushed_count;
  if (is_c_frame && pushed_count_uneven) {
    aligned_push_count++;
  }
  // TODO(dcarney): claim and poke probably take small immediates,
  // loop here or whatever.
  // Bump the stack pointer(s).
  if (aligned_push_count > 0) {
    // TODO(dcarney): it would be better to bump the csp here only
    // and emit paired stores with increment for non c frames.
    Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
  }
  // Move arguments to the stack.
  {
    int slot = buffer.pushed_count - 1;
    // Emit the uneven pushes.
    if (pushed_count_uneven) {
      Node* input = buffer.pushed_nodes[slot];
      ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke;
      Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input));
      slot--;
    }
    // Now all pushes can be done in pairs.
    for (; slot >= 0; slot -= 2) {
      Emit(kArm64PokePair | MiscField::encode(slot), NULL,
           g.UseRegister(buffer.pushed_nodes[slot]),
           g.UseRegister(buffer.pushed_nodes[slot - 1]));
    }
  }
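  // E.g. for a C call with five pushed arguments, aligned_push_count is 6 so
  // that csp stays 16-byte aligned: the claim reserves six slots, the single
  // odd argument is stored together with a zero pad (kArm64PokePairZero), and
  // the remaining four arguments are stored as two kArm64PokePair pairs.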

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      bool lazy_deopt = descriptor->CanLazilyDeoptimize();
      opcode = kArm64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
      break;
    }
    case CallDescriptor::kCallAddress:
      opcode = kArm64CallAddress;
      break;
    case CallDescriptor::kCallJSFunction:
      opcode = kArm64CallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.output_count, buffer.outputs,
           buffer.fixed_and_control_count(), buffer.fixed_and_control_args);

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    ASSERT(continuation != NULL);
    call_instr->MarkAsControl();
  }

  // Caller clean up of stack for C-style calls.
  if (is_c_frame && aligned_push_count > 0) {
    ASSERT(deoptimization == NULL && continuation == NULL);
    Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL);
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8

66  src/compiler/arm64/linkage-arm64.cc  Normal file
@ -0,0 +1,66 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/assembler.h"
#include "src/code-stubs.h"
#include "src/compiler/linkage.h"
#include "src/compiler/linkage-impl.h"
#include "src/zone.h"

namespace v8 {
namespace internal {
namespace compiler {

struct LinkageHelperTraits {
  static Register ReturnValueReg() { return x0; }
  static Register ReturnValue2Reg() { return x1; }
  static Register JSCallFunctionReg() { return x1; }
  static Register ContextReg() { return cp; }
  static Register RuntimeCallFunctionReg() { return x1; }
  static Register RuntimeCallArgCountReg() { return x0; }
  static RegList CCalleeSaveRegisters() {
    // TODO(dcarney): correct callee saved registers.
    return 0;
  }
  static Register CRegisterParameter(int i) {
    static Register register_parameters[] = {x0, x1, x2, x3, x4, x5, x6, x7};
    return register_parameters[i];
  }
  static int CRegisterParametersLength() { return 8; }
};
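// x0-x7 are the integer argument registers of the AArch64 procedure call
// standard, so C argument i (for i < 8) is simply passed in register xi;
// arguments beyond the eighth would have to go on the stack.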


CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
  return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
      zone, parameter_count);
}


CallDescriptor* Linkage::GetRuntimeCallDescriptor(
    Runtime::FunctionId function, int parameter_count,
    Operator::Property properties,
    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
  return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
      zone, function, parameter_count, properties, can_deoptimize);
}


CallDescriptor* Linkage::GetStubCallDescriptor(
    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
  return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
      this->info_->zone(), descriptor, stack_parameter_count);
}


CallDescriptor* Linkage::GetSimplifiedCDescriptor(
    Zone* zone, int num_params, MachineRepresentation return_type,
    const MachineRepresentation* param_types) {
  return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
      zone, num_params, return_type, param_types);
}
}
}
}  // namespace v8::internal::compiler
1990  src/compiler/ast-graph-builder.cc  Normal file
File diff suppressed because it is too large
417  src/compiler/ast-graph-builder.h  Normal file
@ -0,0 +1,417 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_AST_GRAPH_BUILDER_H_
#define V8_COMPILER_AST_GRAPH_BUILDER_H_

#include "src/v8.h"

#include "src/ast.h"
#include "src/compiler/graph-builder.h"
#include "src/compiler/js-graph.h"

namespace v8 {
namespace internal {
namespace compiler {

class ControlBuilder;
class LoopBuilder;
class Graph;

// The AstGraphBuilder produces a high-level IR graph, based on an
// underlying AST. The produced graph can either be compiled into a
// stand-alone function or be wired into another graph for the purposes
// of function inlining.
class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
 public:
  AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph,
                  SourcePositionTable* source_positions_);

  // Creates a graph by visiting the entire AST.
  bool CreateGraph();

 protected:
  class AstContext;
  class AstEffectContext;
  class AstValueContext;
  class AstTestContext;
  class BreakableScope;
  class ContextScope;
  class Environment;

  Environment* environment() {
    return reinterpret_cast<Environment*>(environment_internal());
  }

  AstContext* ast_context() const { return ast_context_; }
  BreakableScope* breakable() const { return breakable_; }
  ContextScope* execution_context() const { return execution_context_; }

  void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
  void set_breakable(BreakableScope* brk) { breakable_ = brk; }
  void set_execution_context(ContextScope* ctx) { execution_context_ = ctx; }

  // Support for control flow builders. The concrete type of the environment
  // depends on the graph builder, but environments themselves are not virtual.
  typedef StructuredGraphBuilder::Environment BaseEnvironment;
  virtual BaseEnvironment* CopyEnvironment(BaseEnvironment* env);

  SourcePositionTable* source_positions() { return source_positions_; }

  // TODO(mstarzinger): The pipeline only needs to be a friend to access the
  // function context. Remove as soon as the context is a parameter.
  friend class Pipeline;

  // Getters for values in the activation record.
  Node* GetFunctionClosure();
  Node* GetFunctionContext();

  //
  // The following build methods all generate graph fragments and return one
  // resulting node. The operand stack height remains the same, variables and
  // other dependencies tracked by the environment might be mutated though.
  //

  // Builder to create a local function context.
  Node* BuildLocalFunctionContext(Node* context, Node* closure);

  // Builder to create an arguments object if it is used.
  Node* BuildArgumentsObject(Variable* arguments);

  // Builders for variable load and assignment.
  Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op);
  Node* BuildVariableDelete(Variable* var);
  Node* BuildVariableLoad(Variable* var, ContextualMode mode = CONTEXTUAL);

  // Builders for accessing the function context.
  Node* BuildLoadBuiltinsObject();
  Node* BuildLoadGlobalObject();
  Node* BuildLoadClosure();

  // Builders for automatic type conversion.
  Node* BuildToBoolean(Node* value);

  // Builders for error reporting at runtime.
  Node* BuildThrowReferenceError(Variable* var);

  // Builders for dynamic hole-checks at runtime.
  Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
  Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole);

  // Builders for binary operations.
  Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);

#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
  // Visiting functions for AST nodes make this an AstVisitor.
  AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT

  // Visiting function for declarations list is overridden.
  virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);

 private:
  CompilationInfo* info_;
  AstContext* ast_context_;
  JSGraph* jsgraph_;
  SourcePositionTable* source_positions_;

  // List of global declarations for functions and variables.
  ZoneList<Handle<Object> > globals_;

  // Stack of breakable statements entered by the visitor.
  BreakableScope* breakable_;

  // Stack of context objects pushed onto the chain by the visitor.
  ContextScope* execution_context_;

  // Nodes representing values in the activation record.
  SetOncePointer<Node> function_closure_;
  SetOncePointer<Node> function_context_;

  CompilationInfo* info() { return info_; }
  StrictMode strict_mode() { return info()->strict_mode(); }
  JSGraph* jsgraph() { return jsgraph_; }
  JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
  ZoneList<Handle<Object> >* globals() { return &globals_; }

  // Current scope during visitation.
  inline Scope* current_scope() const;

  // Process arguments to a call by popping {arity} elements off the operand
  // stack and building a call node using the given call operator.
  Node* ProcessArguments(Operator* op, int arity);

  // Visit statements.
  void VisitIfNotNull(Statement* stmt);

  // Visit expressions.
  void VisitForTest(Expression* expr);
  void VisitForEffect(Expression* expr);
  void VisitForValue(Expression* expr);
  void VisitForValueOrNull(Expression* expr);
  void VisitForValues(ZoneList<Expression*>* exprs);

  // Common for all IterationStatement bodies.
  void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop, int);

  // Dispatched from VisitCallRuntime.
  void VisitCallJSRuntime(CallRuntime* expr);

  // Dispatched from VisitUnaryOperation.
  void VisitDelete(UnaryOperation* expr);
  void VisitVoid(UnaryOperation* expr);
  void VisitTypeof(UnaryOperation* expr);
  void VisitNot(UnaryOperation* expr);

  // Dispatched from VisitBinaryOperation.
  void VisitComma(BinaryOperation* expr);
  void VisitLogicalExpression(BinaryOperation* expr);
  void VisitArithmeticExpression(BinaryOperation* expr);

  // Dispatched from VisitForInStatement.
  void VisitForInAssignment(Expression* expr, Node* value);

  void BuildLazyBailout(Node* node, BailoutId ast_id);

  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
  DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
};


// The abstract execution environment for generated code consists of
// parameter variables, local variables and the operand stack. The
// environment will perform proper SSA-renaming of all tracked nodes
// at split and merge points in the control flow. Internally all the
// values are stored in one list using the following layout:
//
// [parameters (+receiver)] [locals] [operand stack]
//
class AstGraphBuilder::Environment
    : public StructuredGraphBuilder::Environment {
 public:
  Environment(AstGraphBuilder* builder, Scope* scope, Node* control_dependency);
  Environment(const Environment& copy);

  int parameters_count() const { return parameters_count_; }
  int locals_count() const { return locals_count_; }
  int stack_height() {
    return values()->size() - parameters_count_ - locals_count_;
  }
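  // E.g. for a function with two declared parameters and one stack local the
  // layout is [receiver, p0, p1, l0, <operand stack>]; parameters_count_ is 3
  // (the receiver is counted), so with two operands pushed values()->size()
  // is 6 and stack_height() returns 2.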

  // Operations on parameter or local variables. The parameter indices are
  // shifted by 1 (receiver is parameter index -1 but environment index 0).
  void Bind(Variable* variable, Node* node) {
    ASSERT(variable->IsStackAllocated());
    if (variable->IsParameter()) {
      values()->at(variable->index() + 1) = node;
      parameters_dirty_ = true;
    } else {
      ASSERT(variable->IsStackLocal());
      values()->at(variable->index() + parameters_count_) = node;
      locals_dirty_ = true;
    }
  }
  Node* Lookup(Variable* variable) {
    ASSERT(variable->IsStackAllocated());
    if (variable->IsParameter()) {
      return values()->at(variable->index() + 1);
    } else {
      ASSERT(variable->IsStackLocal());
      return values()->at(variable->index() + parameters_count_);
    }
  }
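  // Continuing the example above: parameter 0 lives at environment index 1
  // (index 0 is the receiver) and stack local 0 lives at index
  // 0 + parameters_count_ == 3.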

  // Operations on the operand stack.
  void Push(Node* node) {
    values()->push_back(node);
    stack_dirty_ = true;
  }
  Node* Top() {
    ASSERT(stack_height() > 0);
    return values()->back();
  }
  Node* Pop() {
    ASSERT(stack_height() > 0);
    Node* back = values()->back();
    values()->pop_back();
    return back;
  }

  // Direct mutations of the operand stack.
  void Poke(int depth, Node* node) {
    ASSERT(depth >= 0 && depth < stack_height());
    int index = values()->size() - depth - 1;
    values()->at(index) = node;
  }
  Node* Peek(int depth) {
    ASSERT(depth >= 0 && depth < stack_height());
    int index = values()->size() - depth - 1;
    return values()->at(index);
  }
  void Drop(int depth) {
    ASSERT(depth >= 0 && depth <= stack_height());
    values()->erase(values()->end() - depth, values()->end());
  }

  // Preserve a checkpoint of the environment for the IR graph. Any
  // further mutation of the environment will not affect checkpoints.
  Node* Checkpoint(BailoutId ast_id);

 private:
  int parameters_count_;
  int locals_count_;
  Node* parameters_node_;
  Node* locals_node_;
  Node* stack_node_;
  bool parameters_dirty_;
  bool locals_dirty_;
  bool stack_dirty_;
};


// Each expression in the AST is evaluated in a specific context. This context
// decides how the evaluation result is passed up the visitor.
class AstGraphBuilder::AstContext BASE_EMBEDDED {
 public:
  bool IsEffect() const { return kind_ == Expression::kEffect; }
  bool IsValue() const { return kind_ == Expression::kValue; }
  bool IsTest() const { return kind_ == Expression::kTest; }

  // Plug a node into this expression context. Call this function in tail
  // position in the Visit functions for expressions.
  virtual void ProduceValue(Node* value) = 0;

  // Unplugs a node from this expression context. Call this to retrieve the
  // result of another Visit function that already plugged the context.
  virtual Node* ConsumeValue() = 0;

  // Shortcut for "context->ProduceValue(context->ConsumeValue())".
  void ReplaceValue() { ProduceValue(ConsumeValue()); }

 protected:
  AstContext(AstGraphBuilder* owner, Expression::Context kind);
  virtual ~AstContext();

  AstGraphBuilder* owner() const { return owner_; }
  Environment* environment() const { return owner_->environment(); }

// We want to be able to assert, in a context-specific way, that the stack
// height makes sense when the context is filled.
#ifdef DEBUG
  int original_height_;
#endif

 private:
  Expression::Context kind_;
  AstGraphBuilder* owner_;
  AstContext* outer_;
};


// Context to evaluate expression for its side effects only.
class AstGraphBuilder::AstEffectContext V8_FINAL : public AstContext {
 public:
  explicit AstEffectContext(AstGraphBuilder* owner)
      : AstContext(owner, Expression::kEffect) {}
  virtual ~AstEffectContext();
  virtual void ProduceValue(Node* value) V8_OVERRIDE;
  virtual Node* ConsumeValue() V8_OVERRIDE;
};


// Context to evaluate expression for its value (and side effects).
class AstGraphBuilder::AstValueContext V8_FINAL : public AstContext {
 public:
  explicit AstValueContext(AstGraphBuilder* owner)
      : AstContext(owner, Expression::kValue) {}
  virtual ~AstValueContext();
  virtual void ProduceValue(Node* value) V8_OVERRIDE;
  virtual Node* ConsumeValue() V8_OVERRIDE;
};


// Context to evaluate expression for a condition value (and side effects).
class AstGraphBuilder::AstTestContext V8_FINAL : public AstContext {
 public:
  explicit AstTestContext(AstGraphBuilder* owner)
      : AstContext(owner, Expression::kTest) {}
  virtual ~AstTestContext();
  virtual void ProduceValue(Node* value) V8_OVERRIDE;
  virtual Node* ConsumeValue() V8_OVERRIDE;
};

// Scoped class tracking breakable statements entered by the visitor. Allows
// the visitor to properly 'break' and 'continue' iteration statements as
// well as to 'break' from blocks within switch statements.
class AstGraphBuilder::BreakableScope BASE_EMBEDDED {
 public:
  BreakableScope(AstGraphBuilder* owner, BreakableStatement* target,
                 ControlBuilder* control, int drop_extra)
      : owner_(owner),
        target_(target),
        next_(owner->breakable()),
        control_(control),
        drop_extra_(drop_extra) {
    owner_->set_breakable(this);  // Push.
  }

  ~BreakableScope() {
    owner_->set_breakable(next_);  // Pop.
  }

  // Either 'break' or 'continue' the target statement.
  void BreakTarget(BreakableStatement* target);
  void ContinueTarget(BreakableStatement* target);

 private:
  AstGraphBuilder* owner_;
  BreakableStatement* target_;
  BreakableScope* next_;
  ControlBuilder* control_;
  int drop_extra_;

  // Find the correct scope for the target statement. Note that this also drops
  // extra operands from the environment for each scope skipped along the way.
  BreakableScope* FindBreakable(BreakableStatement* target);
};

// Scoped class tracking context objects created by the visitor. Represents
// mutations of the context chain within the function body and allows the
// current {scope} and {context} to be changed during visitation.
class AstGraphBuilder::ContextScope BASE_EMBEDDED {
 public:
  ContextScope(AstGraphBuilder* owner, Scope* scope, Node* context)
      : owner_(owner),
        next_(owner->execution_context()),
        outer_(owner->current_context()),
        scope_(scope) {
    owner_->set_execution_context(this);  // Push.
    owner_->set_current_context(context);
  }

  ~ContextScope() {
    owner_->set_execution_context(next_);  // Pop.
    owner_->set_current_context(outer_);
  }

  // Current scope during visitation.
  Scope* scope() const { return scope_; }

 private:
  AstGraphBuilder* owner_;
  ContextScope* next_;
  Node* outer_;
  Scope* scope_;
};

Scope* AstGraphBuilder::current_scope() const {
  return execution_context_->scope();
}
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_AST_GRAPH_BUILDER_H_

130  src/compiler/code-generator-impl.h  Normal file
@ -0,0 +1,130 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_CODE_GENERATOR_IMPL_H_
#define V8_COMPILER_CODE_GENERATOR_IMPL_H_

#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/generic-graph.h"
#include "src/compiler/instruction.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"

namespace v8 {
namespace internal {
namespace compiler {

// Converts InstructionOperands from a given instruction to
// architecture-specific registers and operands after they have been assigned
// by the register allocator.
class InstructionOperandConverter {
 public:
  InstructionOperandConverter(CodeGenerator* gen, Instruction* instr)
      : gen_(gen), instr_(instr) {}

  Register InputRegister(int index) {
    return ToRegister(instr_->InputAt(index));
  }

  DoubleRegister InputDoubleRegister(int index) {
    return ToDoubleRegister(instr_->InputAt(index));
  }

  double InputDouble(int index) { return ToDouble(instr_->InputAt(index)); }

  int32_t InputInt32(int index) {
    return ToConstant(instr_->InputAt(index)).ToInt32();
  }

  int8_t InputInt8(int index) { return static_cast<int8_t>(InputInt32(index)); }

  int16_t InputInt16(int index) {
    return static_cast<int16_t>(InputInt32(index));
  }

  uint8_t InputInt5(int index) {
    return static_cast<uint8_t>(InputInt32(index) & 0x1F);
  }

  uint8_t InputInt6(int index) {
    return static_cast<uint8_t>(InputInt32(index) & 0x3F);
  }

  Handle<HeapObject> InputHeapObject(int index) {
    return ToHeapObject(instr_->InputAt(index));
  }

  Label* InputLabel(int index) {
    return gen_->code()->GetLabel(InputBlock(index));
  }

  BasicBlock* InputBlock(int index) {
    NodeId block_id = static_cast<NodeId>(instr_->InputAt(index)->index());
    // operand should be a block id.
    ASSERT(block_id >= 0);
    ASSERT(block_id < gen_->schedule()->BasicBlockCount());
    return gen_->schedule()->GetBlockById(block_id);
  }

  Register OutputRegister() { return ToRegister(instr_->Output()); }

  DoubleRegister OutputDoubleRegister() {
    return ToDoubleRegister(instr_->Output());
  }

  Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); }

  Register ToRegister(InstructionOperand* op) {
    ASSERT(op->IsRegister());
    return Register::FromAllocationIndex(op->index());
  }

  DoubleRegister ToDoubleRegister(InstructionOperand* op) {
    ASSERT(op->IsDoubleRegister());
    return DoubleRegister::FromAllocationIndex(op->index());
  }

  Constant ToConstant(InstructionOperand* operand) {
    if (operand->IsImmediate()) {
      return gen_->code()->GetImmediate(operand->index());
    }
    return gen_->code()->GetConstant(operand->index());
  }

  double ToDouble(InstructionOperand* operand) {
    return ToConstant(operand).ToFloat64();
  }

  Handle<HeapObject> ToHeapObject(InstructionOperand* operand) {
    return ToConstant(operand).ToHeapObject();
  }

  Frame* frame() const { return gen_->frame(); }
  Isolate* isolate() const { return gen_->isolate(); }
  Linkage* linkage() const { return gen_->linkage(); }

 protected:
  CodeGenerator* gen_;
  Instruction* instr_;
};
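// E.g. an architecture-specific backend wraps each instruction as
// InstructionOperandConverter i(this, instr) and then reads its operands as
// i.InputRegister(0), i.InputInt32(1), i.OutputRegister(), and so on.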


// TODO(dcarney): generify this on bleeding_edge and replace this call
// when merged.
static inline void FinishCode(MacroAssembler* masm) {
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
  masm->CheckConstPool(true, false);
#endif
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_CODE_GENERATOR_IMPL_H_
288  src/compiler/code-generator.cc  Normal file
@ -0,0 +1,288 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"

namespace v8 {
namespace internal {
namespace compiler {

CodeGenerator::CodeGenerator(InstructionSequence* code)
    : code_(code),
      current_block_(NULL),
      current_source_position_(SourcePosition::Invalid()),
      masm_(code->zone()->isolate(), NULL, 0),
      resolver_(this),
      safepoints_(code->zone()),
      lazy_deoptimization_entries_(
          LazyDeoptimizationEntries::allocator_type(code->zone())),
      deoptimization_states_(
          DeoptimizationStates::allocator_type(code->zone())),
      deoptimization_literals_(Literals::allocator_type(code->zone())),
      translations_(code->zone()) {
  deoptimization_states_.resize(code->GetDeoptimizationEntryCount(), NULL);
}


Handle<Code> CodeGenerator::GenerateCode() {
  CompilationInfo* info = linkage()->info();

  // Emit a code line info recording start event.
  PositionsRecorder* recorder = masm()->positions_recorder();
  LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));

  // Place function entry hook if requested to do so.
  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm());
  }

  // Architecture-specific, linkage-specific prologue.
  info->set_prologue_offset(masm()->pc_offset());
  AssemblePrologue();

  // Assemble all instructions.
  for (InstructionSequence::const_iterator i = code()->begin();
       i != code()->end(); ++i) {
    AssembleInstruction(*i);
  }

  FinishCode(masm());

  safepoints()->Emit(masm(), frame()->GetSpillSlotCount());

  // TODO(titzer): what are the right code flags here?
  Code::Kind kind = Code::STUB;
  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
    kind = Code::OPTIMIZED_FUNCTION;
  }
  Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
      masm(), Code::ComputeFlags(kind), info);
  result->set_is_turbofanned(true);
  result->set_stack_slots(frame()->GetSpillSlotCount());
  result->set_safepoint_table_offset(safepoints()->GetCodeOffset());

  PopulateDeoptimizationData(result);

  // Emit a code line info recording stop event.
  void* line_info = recorder->DetachJITHandlerData();
  LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));

  return result;
}


void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
                                    int arguments,
                                    Safepoint::DeoptMode deopt_mode) {
  const ZoneList<InstructionOperand*>* operands =
      pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    InstructionOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      Register reg = Register::FromAllocationIndex(pointer->index());
      safepoint.DefinePointerRegister(reg, zone());
    }
  }
}


void CodeGenerator::AssembleInstruction(Instruction* instr) {
  if (instr->IsBlockStart()) {
    // Bind a label for a block start and handle parallel moves.
    BlockStartInstruction* block_start = BlockStartInstruction::cast(instr);
    current_block_ = block_start->block();
    if (FLAG_code_comments) {
      // TODO(titzer): these code comments are a giant memory leak.
      Vector<char> buffer = Vector<char>::New(32);
      SNPrintF(buffer, "-- B%d start --", block_start->block()->id());
      masm()->RecordComment(buffer.start());
    }
    masm()->bind(block_start->label());
  }
  if (instr->IsGapMoves()) {
    // Handle parallel moves associated with the gap instruction.
    AssembleGap(GapInstruction::cast(instr));
  } else if (instr->IsSourcePosition()) {
    AssembleSourcePosition(SourcePositionInstruction::cast(instr));
  } else {
    // Assemble architecture-specific code for the instruction.
    AssembleArchInstruction(instr);

    // Assemble branches or boolean materializations after this instruction.
    FlagsMode mode = FlagsModeField::decode(instr->opcode());
    FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
    switch (mode) {
      case kFlags_none:
        return;
      case kFlags_set:
        return AssembleArchBoolean(instr, condition);
      case kFlags_branch:
        return AssembleArchBranch(instr, condition);
    }
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) {
  SourcePosition source_position = instr->source_position();
  if (source_position == current_source_position_) return;
  ASSERT(!source_position.IsInvalid());
  if (!source_position.IsUnknown()) {
    int code_pos = source_position.raw();
    masm()->positions_recorder()->RecordPosition(source_position.raw());
    masm()->positions_recorder()->WriteRecordedPositions();
    if (FLAG_code_comments) {
      Vector<char> buffer = Vector<char>::New(256);
      CompilationInfo* info = linkage()->info();
      int ln = Script::GetLineNumber(info->script(), code_pos);
      int cn = Script::GetColumnNumber(info->script(), code_pos);
      if (info->script()->name()->IsString()) {
        Handle<String> file(String::cast(info->script()->name()));
        base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --",
                           file->ToCString().get(), ln, cn);
      } else {
        base::OS::SNPrintF(buffer.start(), buffer.length(),
                           "-- <unknown>:%d:%d --", ln, cn);
      }
      masm()->RecordComment(buffer.start());
    }
  }
  current_source_position_ = source_position;
}


void CodeGenerator::AssembleGap(GapInstruction* instr) {
  for (int i = GapInstruction::FIRST_INNER_POSITION;
       i <= GapInstruction::LAST_INNER_POSITION; i++) {
    GapInstruction::InnerPosition inner_pos =
        static_cast<GapInstruction::InnerPosition>(i);
    ParallelMove* move = instr->GetParallelMove(inner_pos);
    if (move != NULL) resolver()->Resolve(move);
  }
}


void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
  CompilationInfo* info = linkage()->info();
  int deopt_count = code()->GetDeoptimizationEntryCount();
  int patch_count = lazy_deoptimization_entries_.size();
  if (patch_count == 0 && deopt_count == 0) return;
  Handle<DeoptimizationInputData> data = DeoptimizationInputData::New(
      isolate(), deopt_count, patch_count, TENURED);

  Handle<ByteArray> translation_array =
      translations_.CreateByteArray(isolate()->factory());

  data->SetTranslationByteArray(*translation_array);
  data->SetInlinedFunctionCount(Smi::FromInt(0));
  data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
  // TODO(jarin) The following code was copied over from Lithium, not sure
  // whether the scope or the IsOptimizing condition are really needed.
  if (info->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
      deoptimization_literals_.size(), TENURED);
  {
    AllowDeferredHandleDereference copy_handles;
    for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  // No OSR in Turbofan yet...
  BailoutId osr_ast_id = BailoutId::None();
  data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(-1));

  // Populate deoptimization entries.
  for (int i = 0; i < deopt_count; i++) {
    FrameStateDescriptor descriptor = code()->GetDeoptimizationEntry(i);
    data->SetAstId(i, descriptor.bailout_id());
    data->SetTranslationIndex(i, Smi::FromInt(0));
    data->SetArgumentsStackHeight(i, Smi::FromInt(0));
    data->SetPc(i, Smi::FromInt(-1));
  }

  // Populate the return address patcher entries.
  for (int i = 0; i < patch_count; ++i) {
    LazyDeoptimizationEntry entry = lazy_deoptimization_entries_[i];
    ASSERT(entry.position_after_call() == entry.continuation()->pos() ||
           IsNopForSmiCodeInlining(code_object, entry.position_after_call(),
                                   entry.continuation()->pos()));
    data->SetReturnAddressPc(i, Smi::FromInt(entry.position_after_call()));
    data->SetPatchedAddressPc(i, Smi::FromInt(entry.deoptimization()->pos()));
  }

  code_object->set_deoptimization_data(*data);
}


void CodeGenerator::RecordLazyDeoptimizationEntry(Instruction* instr) {
  InstructionOperandConverter i(this, instr);

  Label after_call;
  masm()->bind(&after_call);

  // The continuation and deoptimization are the last two inputs:
  BasicBlock* cont_block = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* deopt_block = i.InputBlock(instr->InputCount() - 1);

  Label* cont_label = code_->GetLabel(cont_block);
  Label* deopt_label = code_->GetLabel(deopt_block);

  lazy_deoptimization_entries_.push_back(
      LazyDeoptimizationEntry(after_call.pos(), cont_label, deopt_label));
}


int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.size();
  for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.push_back(literal);
  return result;
}


void CodeGenerator::BuildTranslation(Instruction* instr,
                                     int deoptimization_id) {
  // We should build translation only once.
  ASSERT_EQ(NULL, deoptimization_states_[deoptimization_id]);

  // TODO(jarin) This should build translation codes from the instruction inputs
  // and from the framestate descriptor. At the moment, we only create a dummy
  // translation.

  FrameStateDescriptor descriptor =
      code()->GetDeoptimizationEntry(deoptimization_id);
  Translation translation(&translations_, 1, 1, zone());
  translation.BeginJSFrame(descriptor.bailout_id(), Translation::kSelfLiteralId,
                           0);
  int undefined_literal_id =
      DefineDeoptimizationLiteral(isolate()->factory()->undefined_value());
  translation.StoreLiteral(undefined_literal_id);

  deoptimization_states_[deoptimization_id] =
      new (zone()) DeoptimizationState(translation.index());
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
144  src/compiler/code-generator.h  Normal file
@ -0,0 +1,144 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_CODE_GENERATOR_H_
#define V8_COMPILER_CODE_GENERATOR_H_

#include <deque>

#include "src/compiler/gap-resolver.h"
#include "src/compiler/instruction.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/safepoint-table.h"

namespace v8 {
namespace internal {
namespace compiler {

// Generates native code for a sequence of instructions.
class CodeGenerator V8_FINAL : public GapResolver::Assembler {
 public:
  explicit CodeGenerator(InstructionSequence* code);

  // Generate native code.
  Handle<Code> GenerateCode();

  InstructionSequence* code() const { return code_; }
  Frame* frame() const { return code()->frame(); }
  Graph* graph() const { return code()->graph(); }
  Isolate* isolate() const { return zone()->isolate(); }
  Linkage* linkage() const { return code()->linkage(); }
  Schedule* schedule() const { return code()->schedule(); }

 private:
  MacroAssembler* masm() { return &masm_; }
  GapResolver* resolver() { return &resolver_; }
  SafepointTableBuilder* safepoints() { return &safepoints_; }
  Zone* zone() const { return code()->zone(); }

  // Checks if {block} will appear directly after {current_block_} when
  // assembling code, in which case, a fall-through can be used.
  bool IsNextInAssemblyOrder(const BasicBlock* block) const {
    return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
           block->deferred_ == current_block_->deferred_;
  }
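  // E.g. if the current block has RPO number 7, a non-deferred block with
  // RPO number 8 is next in assembly order, so its branch can be omitted in
  // favor of a fall-through.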

  // Record a safepoint with the given pointer map.
  void RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
                       int arguments, Safepoint::DeoptMode deopt_mode);

  // Assemble code for the specified instruction.
  void AssembleInstruction(Instruction* instr);
  void AssembleSourcePosition(SourcePositionInstruction* instr);
  void AssembleGap(GapInstruction* gap);

  // ===========================================================================
  // ============= Architecture-specific code generation methods. ==============
  // ===========================================================================

  void AssembleArchInstruction(Instruction* instr);
  void AssembleArchBranch(Instruction* instr, FlagsCondition condition);
  void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);

  // Generates an architecture-specific, descriptor-specific prologue
  // to set up a stack frame.
  void AssemblePrologue();
  // Generates an architecture-specific, descriptor-specific return sequence
  // to tear down a stack frame.
  void AssembleReturn();

  // ===========================================================================
  // ============== Architecture-specific gap resolver methods. ================
  // ===========================================================================

  // Interface used by the gap resolver to emit moves and swaps.
  virtual void AssembleMove(InstructionOperand* source,
                            InstructionOperand* destination) V8_OVERRIDE;
  virtual void AssembleSwap(InstructionOperand* source,
                            InstructionOperand* destination) V8_OVERRIDE;

  // ===========================================================================
  // Deoptimization table construction
  void RecordLazyDeoptimizationEntry(Instruction* instr);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);
  void BuildTranslation(Instruction* instr, int deoptimization_id);
  void AddNopForSmiCodeInlining();
#if DEBUG
  static bool IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
                                      int end_pc);
#endif  // DEBUG
  // ===========================================================================

  class LazyDeoptimizationEntry V8_FINAL {
   public:
    LazyDeoptimizationEntry(int position_after_call, Label* continuation,
                            Label* deoptimization)
        : position_after_call_(position_after_call),
          continuation_(continuation),
          deoptimization_(deoptimization) {}

    int position_after_call() const { return position_after_call_; }
    Label* continuation() const { return continuation_; }
    Label* deoptimization() const { return deoptimization_; }

   private:
    int position_after_call_;
    Label* continuation_;
    Label* deoptimization_;
  };

  struct DeoptimizationState : ZoneObject {
    int translation_id_;

    explicit DeoptimizationState(int translation_id)
        : translation_id_(translation_id) {}
  };

  typedef std::deque<LazyDeoptimizationEntry,
                     zone_allocator<LazyDeoptimizationEntry> >
      LazyDeoptimizationEntries;
  typedef std::deque<DeoptimizationState*,
                     zone_allocator<DeoptimizationState*> >
      DeoptimizationStates;
  typedef std::deque<Handle<Object>, zone_allocator<Handle<Object> > > Literals;

  InstructionSequence* code_;
  BasicBlock* current_block_;
  SourcePosition current_source_position_;
  MacroAssembler masm_;
  GapResolver resolver_;
  SafepointTableBuilder safepoints_;
  LazyDeoptimizationEntries lazy_deoptimization_entries_;
  DeoptimizationStates deoptimization_states_;
  Literals deoptimization_literals_;
  TranslationBuffer translations_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_CODE_GENERATOR_H_
51  src/compiler/common-node-cache.h  Normal file
@ -0,0 +1,51 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_COMMON_NODE_CACHE_H_
#define V8_COMPILER_COMMON_NODE_CACHE_H_

#include "src/assembler.h"
#include "src/compiler/node-cache.h"

namespace v8 {
namespace internal {
namespace compiler {

// Bundles various caches for common nodes.
class CommonNodeCache V8_FINAL : public ZoneObject {
 public:
  explicit CommonNodeCache(Zone* zone) : zone_(zone) {}

  Node** FindInt32Constant(int32_t value) {
    return int32_constants_.Find(zone_, value);
  }

  Node** FindFloat64Constant(double value) {
    // We canonicalize double constants at the bit representation level.
    return float64_constants_.Find(zone_, BitCast<int64_t>(value));
  }
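  // Keying on the bit pattern rather than the double value means that +0.0
  // and -0.0 (and NaNs with different payloads) get distinct cache entries,
  // e.g. BitCast<int64_t>(0.0) != BitCast<int64_t>(-0.0).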
|
||||
|
||||
Node** FindExternalConstant(ExternalReference reference) {
|
||||
return external_constants_.Find(zone_, reference.address());
|
||||
}
|
||||
|
||||
Node** FindNumberConstant(double value) {
|
||||
// We canonicalize double constants at the bit representation level.
|
||||
return number_constants_.Find(zone_, BitCast<int64_t>(value));
|
||||
}
|
||||
|
||||
Zone* zone() const { return zone_; }
|
||||
|
||||
private:
|
||||
Int32NodeCache int32_constants_;
|
||||
Int64NodeCache float64_constants_;
|
||||
PtrNodeCache external_constants_;
|
||||
Int64NodeCache number_constants_;
|
||||
Zone* zone_;
|
||||
};
|
||||
}
|
||||
}
|
||||
} // namespace v8::internal::compiler
|
||||
|
||||
#endif // V8_COMPILER_COMMON_NODE_CACHE_H_
|
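For orientation, a minimal sketch of how a pass might canonicalize constants through this cache. It assumes, as the NodeCache types above suggest, that Find() returns a pointer to a cache slot which the caller fills on a miss; graph and common are hypothetical locals:

  Node** loc = cache->FindInt32Constant(42);
  if (*loc == NULL) {
    // First request for 42: build the node and remember it.
    *loc = graph->NewNode(common->Int32Constant(42));
  }
  Node* forty_two = *loc;  // Later requests reuse the same node.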
285 src/compiler/common-operator.h Normal file
@ -0,0 +1,285 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_COMMON_OPERATOR_H_
#define V8_COMPILER_COMMON_OPERATOR_H_

#include "src/v8.h"

#include "src/assembler.h"
#include "src/compiler/linkage.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/unique.h"

namespace v8 {
namespace internal {

class OStream;

namespace compiler {

class ControlOperator : public Operator1<int> {
 public:
  ControlOperator(IrOpcode::Value opcode, uint16_t properties, int inputs,
                  int outputs, int controls, const char* mnemonic)
      : Operator1(opcode, properties, inputs, outputs, mnemonic, controls) {}

  virtual OStream& PrintParameter(OStream& os) const { return os; }  // NOLINT
  int ControlInputCount() const { return parameter(); }
};

class CallOperator : public Operator1<CallDescriptor*> {
 public:
  CallOperator(CallDescriptor* descriptor, const char* mnemonic)
      : Operator1(IrOpcode::kCall, descriptor->properties(),
                  descriptor->InputCount(), descriptor->ReturnCount(), mnemonic,
                  descriptor) {}

  virtual OStream& PrintParameter(OStream& os) const {  // NOLINT
    return os << "[" << *parameter() << "]";
  }
};

class FrameStateDescriptor {
 public:
  explicit FrameStateDescriptor(BailoutId bailout_id)
      : bailout_id_(bailout_id) {}

  BailoutId bailout_id() const { return bailout_id_; }

 private:
  BailoutId bailout_id_;
};

// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
// TODO(titzer): Move the mnemonics into SimpleOperator and Operator1 classes.
class CommonOperatorBuilder {
 public:
  explicit CommonOperatorBuilder(Zone* zone) : zone_(zone) {}

#define CONTROL_OP(name, inputs, controls)                                   \
  return new (zone_) ControlOperator(IrOpcode::k##name, Operator::kFoldable, \
                                     inputs, 0, controls, #name);

  Operator* Start() { CONTROL_OP(Start, 0, 0); }
  Operator* Dead() { CONTROL_OP(Dead, 0, 0); }
  Operator* End() { CONTROL_OP(End, 0, 1); }
  Operator* Branch() { CONTROL_OP(Branch, 1, 1); }
  Operator* IfTrue() { CONTROL_OP(IfTrue, 0, 1); }
  Operator* IfFalse() { CONTROL_OP(IfFalse, 0, 1); }
  Operator* Throw() { CONTROL_OP(Throw, 1, 1); }
  Operator* LazyDeoptimization() { CONTROL_OP(LazyDeoptimization, 0, 1); }
  Operator* Continuation() { CONTROL_OP(Continuation, 0, 1); }

  Operator* Deoptimize() {
    return new (zone_)
        ControlOperator(IrOpcode::kDeoptimize, 0, 1, 0, 1, "Deoptimize");
  }

  Operator* Return() {
    return new (zone_) ControlOperator(IrOpcode::kReturn, 0, 1, 0, 1, "Return");
  }

  Operator* Merge(int controls) {
    return new (zone_) ControlOperator(IrOpcode::kMerge, Operator::kFoldable, 0,
                                       0, controls, "Merge");
  }

  Operator* Loop(int controls) {
    return new (zone_) ControlOperator(IrOpcode::kLoop, Operator::kFoldable, 0,
                                       0, controls, "Loop");
  }

  Operator* Parameter(int index) {
    return new (zone_) Operator1<int>(IrOpcode::kParameter, Operator::kPure, 0,
                                      1, "Parameter", index);
  }
  Operator* Int32Constant(int32_t value) {
    return new (zone_) Operator1<int>(IrOpcode::kInt32Constant, Operator::kPure,
                                      0, 1, "Int32Constant", value);
  }
  Operator* Int64Constant(int64_t value) {
    return new (zone_)
        Operator1<int64_t>(IrOpcode::kInt64Constant, Operator::kPure, 0, 1,
                           "Int64Constant", value);
  }
  Operator* Float64Constant(double value) {
    return new (zone_)
        Operator1<double>(IrOpcode::kFloat64Constant, Operator::kPure, 0, 1,
                          "Float64Constant", value);
  }
  Operator* ExternalConstant(ExternalReference value) {
    return new (zone_) Operator1<ExternalReference>(IrOpcode::kExternalConstant,
                                                    Operator::kPure, 0, 1,
                                                    "ExternalConstant", value);
  }
  Operator* NumberConstant(double value) {
    return new (zone_)
        Operator1<double>(IrOpcode::kNumberConstant, Operator::kPure, 0, 1,
                          "NumberConstant", value);
  }
  Operator* HeapConstant(PrintableUnique<Object> value) {
    return new (zone_) Operator1<PrintableUnique<Object> >(
        IrOpcode::kHeapConstant, Operator::kPure, 0, 1, "HeapConstant", value);
  }
  Operator* Phi(int arguments) {
    ASSERT(arguments > 0);  // Disallow empty phis.
    return new (zone_) Operator1<int>(IrOpcode::kPhi, Operator::kPure,
                                      arguments, 1, "Phi", arguments);
  }
  Operator* EffectPhi(int arguments) {
    ASSERT(arguments > 0);  // Disallow empty phis.
    return new (zone_) Operator1<int>(IrOpcode::kEffectPhi, Operator::kPure, 0,
                                      0, "EffectPhi", arguments);
  }
  Operator* FrameState(const FrameStateDescriptor& descriptor) {
    return new (zone_) Operator1<FrameStateDescriptor>(
        IrOpcode::kFrameState, Operator::kPure, 0, 1, "FrameState", descriptor);
  }
  Operator* Call(CallDescriptor* descriptor) {
    return new (zone_) CallOperator(descriptor, "Call");
  }
  Operator* Projection(int index) {
    return new (zone_) Operator1<int>(IrOpcode::kProjection, Operator::kPure, 1,
                                      1, "Projection", index);
  }

 private:
  Zone* zone_;
};


template <typename T>
struct CommonOperatorTraits {
  static inline bool Equals(T a, T b);
  static inline bool HasValue(Operator* op);
  static inline T ValueOf(Operator* op);
};

template <>
struct CommonOperatorTraits<int32_t> {
  static inline bool Equals(int32_t a, int32_t b) { return a == b; }
  static inline bool HasValue(Operator* op) {
    return op->opcode() == IrOpcode::kInt32Constant ||
           op->opcode() == IrOpcode::kNumberConstant;
  }
  static inline int32_t ValueOf(Operator* op) {
    if (op->opcode() == IrOpcode::kNumberConstant) {
      // TODO(titzer): cache the converted int32 value in NumberConstant.
      return FastD2I(reinterpret_cast<Operator1<double>*>(op)->parameter());
    }
    CHECK_EQ(IrOpcode::kInt32Constant, op->opcode());
    return static_cast<Operator1<int32_t>*>(op)->parameter();
  }
};

template <>
struct CommonOperatorTraits<uint32_t> {
  static inline bool Equals(uint32_t a, uint32_t b) { return a == b; }
  static inline bool HasValue(Operator* op) {
    return CommonOperatorTraits<int32_t>::HasValue(op);
  }
  static inline uint32_t ValueOf(Operator* op) {
    if (op->opcode() == IrOpcode::kNumberConstant) {
      // TODO(titzer): cache the converted uint32 value in NumberConstant.
      return FastD2UI(reinterpret_cast<Operator1<double>*>(op)->parameter());
    }
    return static_cast<uint32_t>(CommonOperatorTraits<int32_t>::ValueOf(op));
  }
};

template <>
struct CommonOperatorTraits<int64_t> {
  static inline bool Equals(int64_t a, int64_t b) { return a == b; }
  static inline bool HasValue(Operator* op) {
    return op->opcode() == IrOpcode::kInt32Constant ||
           op->opcode() == IrOpcode::kInt64Constant ||
           op->opcode() == IrOpcode::kNumberConstant;
  }
  static inline int64_t ValueOf(Operator* op) {
    if (op->opcode() == IrOpcode::kInt32Constant) {
      return static_cast<int64_t>(CommonOperatorTraits<int32_t>::ValueOf(op));
    }
    CHECK_EQ(IrOpcode::kInt64Constant, op->opcode());
    return static_cast<Operator1<int64_t>*>(op)->parameter();
  }
};

template <>
struct CommonOperatorTraits<uint64_t> {
  static inline bool Equals(uint64_t a, uint64_t b) { return a == b; }
  static inline bool HasValue(Operator* op) {
    return CommonOperatorTraits<int64_t>::HasValue(op);
  }
  static inline uint64_t ValueOf(Operator* op) {
    return static_cast<uint64_t>(CommonOperatorTraits<int64_t>::ValueOf(op));
  }
};

template <>
struct CommonOperatorTraits<double> {
  static inline bool Equals(double a, double b) {
    return DoubleRepresentation(a).bits == DoubleRepresentation(b).bits;
  }
  static inline bool HasValue(Operator* op) {
    return op->opcode() == IrOpcode::kFloat64Constant ||
           op->opcode() == IrOpcode::kInt32Constant ||
           op->opcode() == IrOpcode::kNumberConstant;
  }
  static inline double ValueOf(Operator* op) {
    if (op->opcode() == IrOpcode::kFloat64Constant ||
        op->opcode() == IrOpcode::kNumberConstant) {
      return reinterpret_cast<Operator1<double>*>(op)->parameter();
    }
    return static_cast<double>(CommonOperatorTraits<int32_t>::ValueOf(op));
  }
};

template <>
struct CommonOperatorTraits<ExternalReference> {
  static inline bool Equals(ExternalReference a, ExternalReference b) {
    return a == b;
  }
  static inline bool HasValue(Operator* op) {
    return op->opcode() == IrOpcode::kExternalConstant;
  }
  static inline ExternalReference ValueOf(Operator* op) {
    CHECK_EQ(IrOpcode::kExternalConstant, op->opcode());
    return static_cast<Operator1<ExternalReference>*>(op)->parameter();
  }
};

template <typename T>
struct CommonOperatorTraits<PrintableUnique<T> > {
  static inline bool HasValue(Operator* op) {
    return op->opcode() == IrOpcode::kHeapConstant;
  }
  static inline PrintableUnique<T> ValueOf(Operator* op) {
    CHECK_EQ(IrOpcode::kHeapConstant, op->opcode());
    return static_cast<Operator1<PrintableUnique<T> >*>(op)->parameter();
  }
};

template <typename T>
struct CommonOperatorTraits<Handle<T> > {
  static inline bool HasValue(Operator* op) {
    return CommonOperatorTraits<PrintableUnique<T> >::HasValue(op);
  }
  static inline Handle<T> ValueOf(Operator* op) {
    return CommonOperatorTraits<PrintableUnique<T> >::ValueOf(op).handle();
  }
};


template <typename T>
inline T ValueOf(Operator* op) {
  return CommonOperatorTraits<T>::ValueOf(op);
}
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_COMMON_OPERATOR_H_
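To make the traits machinery concrete, a short sketch (not part of the patch itself) that builds a constant operator and reads the value back through the free ValueOf<T> helper; zone is an assumed pre-existing Zone*:

  CommonOperatorBuilder common(zone);
  Operator* op = common.Int32Constant(7);
  // HasValue/ValueOf dispatch on the opcode; a NumberConstant holding 7.0
  // would satisfy the same query via FastD2I.
  ASSERT(CommonOperatorTraits<int32_t>::HasValue(op));
  int32_t value = ValueOf<int32_t>(op);  // Casts op to Operator1<int32_t>.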
144 src/compiler/control-builders.cc Normal file
@ -0,0 +1,144 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/control-builders.h"

namespace v8 {
namespace internal {
namespace compiler {


void IfBuilder::If(Node* condition) {
  builder_->NewBranch(condition);
  else_environment_ = environment()->CopyForConditional();
}


void IfBuilder::Then() { builder_->NewIfTrue(); }


void IfBuilder::Else() {
  builder_->NewMerge();
  then_environment_ = environment();
  set_environment(else_environment_);
  builder_->NewIfFalse();
}


void IfBuilder::End() {
  then_environment_->Merge(environment());
  set_environment(then_environment_);
}


void LoopBuilder::BeginLoop() {
  builder_->NewLoop();
  loop_environment_ = environment()->CopyForLoop();
  continue_environment_ = environment()->CopyAsUnreachable();
  break_environment_ = environment()->CopyAsUnreachable();
}


void LoopBuilder::Continue() {
  continue_environment_->Merge(environment());
  environment()->MarkAsUnreachable();
}


void LoopBuilder::Break() {
  break_environment_->Merge(environment());
  environment()->MarkAsUnreachable();
}


void LoopBuilder::EndBody() {
  continue_environment_->Merge(environment());
  set_environment(continue_environment_);
}


void LoopBuilder::EndLoop() {
  loop_environment_->Merge(environment());
  set_environment(break_environment_);
}


void LoopBuilder::BreakUnless(Node* condition) {
  IfBuilder control_if(builder_);
  control_if.If(condition);
  control_if.Then();
  control_if.Else();
  Break();
  control_if.End();
}


void SwitchBuilder::BeginSwitch() {
  body_environment_ = environment()->CopyAsUnreachable();
  label_environment_ = environment()->CopyAsUnreachable();
  break_environment_ = environment()->CopyAsUnreachable();
  body_environments_.AddBlock(NULL, case_count(), zone());
}


void SwitchBuilder::BeginLabel(int index, Node* condition) {
  builder_->NewBranch(condition);
  label_environment_ = environment()->CopyForConditional();
  builder_->NewIfTrue();
  body_environments_[index] = environment();
}


void SwitchBuilder::EndLabel() {
  set_environment(label_environment_);
  builder_->NewIfFalse();
}


void SwitchBuilder::DefaultAt(int index) {
  label_environment_ = environment()->CopyAsUnreachable();
  body_environments_[index] = environment();
}


void SwitchBuilder::BeginCase(int index) {
  set_environment(body_environments_[index]);
  environment()->Merge(body_environment_);
}


void SwitchBuilder::Break() {
  break_environment_->Merge(environment());
  environment()->MarkAsUnreachable();
}


void SwitchBuilder::EndCase() { body_environment_ = environment(); }


void SwitchBuilder::EndSwitch() {
  break_environment_->Merge(label_environment_);
  break_environment_->Merge(environment());
  set_environment(break_environment_);
}


void BlockBuilder::BeginBlock() {
  break_environment_ = environment()->CopyAsUnreachable();
}


void BlockBuilder::Break() {
  break_environment_->Merge(environment());
  environment()->MarkAsUnreachable();
}


void BlockBuilder::EndBlock() {
  break_environment_->Merge(environment());
  set_environment(break_environment_);
}
}
}
}  // namespace v8::internal::compiler
144 src/compiler/control-builders.h Normal file
@ -0,0 +1,144 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_CONTROL_BUILDERS_H_
#define V8_COMPILER_CONTROL_BUILDERS_H_

#include "src/v8.h"

#include "src/compiler/graph-builder.h"
#include "src/compiler/node.h"

namespace v8 {
namespace internal {
namespace compiler {


// Base class for all control builders. Also provides a common interface for
// control builders to handle 'break' and 'continue' statements when they are
// used to model breakable statements.
class ControlBuilder {
 public:
  explicit ControlBuilder(StructuredGraphBuilder* builder)
      : builder_(builder) {}
  virtual ~ControlBuilder() {}

  // Interface for break and continue.
  virtual void Break() { UNREACHABLE(); }
  virtual void Continue() { UNREACHABLE(); }

 protected:
  typedef StructuredGraphBuilder Builder;
  typedef StructuredGraphBuilder::Environment Environment;

  Zone* zone() const { return builder_->zone(); }
  Environment* environment() { return builder_->environment_internal(); }
  void set_environment(Environment* env) { builder_->set_environment(env); }

  Builder* builder_;
};


// Tracks control flow for a conditional statement.
class IfBuilder : public ControlBuilder {
 public:
  explicit IfBuilder(StructuredGraphBuilder* builder)
      : ControlBuilder(builder),
        then_environment_(NULL),
        else_environment_(NULL) {}

  // Primitive control commands.
  void If(Node* condition);
  void Then();
  void Else();
  void End();

 private:
  Environment* then_environment_;  // Environment after the 'then' body.
  Environment* else_environment_;  // Environment for the 'else' body.
};


// Tracks control flow for an iteration statement.
class LoopBuilder : public ControlBuilder {
 public:
  explicit LoopBuilder(StructuredGraphBuilder* builder)
      : ControlBuilder(builder),
        loop_environment_(NULL),
        continue_environment_(NULL),
        break_environment_(NULL) {}

  // Primitive control commands.
  void BeginLoop();
  void EndBody();
  void EndLoop();

  // Primitive support for break and continue.
  virtual void Continue();
  virtual void Break();

  // Compound control command for conditional break.
  void BreakUnless(Node* condition);

 private:
  Environment* loop_environment_;      // Environment of the loop header.
  Environment* continue_environment_;  // Environment after the loop body.
  Environment* break_environment_;     // Environment after the loop exits.
};


// Tracks control flow for a switch statement.
class SwitchBuilder : public ControlBuilder {
 public:
  explicit SwitchBuilder(StructuredGraphBuilder* builder, int case_count)
      : ControlBuilder(builder),
        body_environment_(NULL),
        label_environment_(NULL),
        break_environment_(NULL),
        body_environments_(case_count, zone()) {}

  // Primitive control commands.
  void BeginSwitch();
  void BeginLabel(int index, Node* condition);
  void EndLabel();
  void DefaultAt(int index);
  void BeginCase(int index);
  void EndCase();
  void EndSwitch();

  // Primitive support for break.
  virtual void Break();

  // The number of cases within a switch is statically known.
  int case_count() const { return body_environments_.capacity(); }

 private:
  Environment* body_environment_;   // Environment after last case body.
  Environment* label_environment_;  // Environment for next label condition.
  Environment* break_environment_;  // Environment after the switch exits.
  ZoneList<Environment*> body_environments_;
};


// Tracks control flow for a block statement.
class BlockBuilder : public ControlBuilder {
 public:
  explicit BlockBuilder(StructuredGraphBuilder* builder)
      : ControlBuilder(builder), break_environment_(NULL) {}

  // Primitive control commands.
  void BeginBlock();
  void EndBlock();

  // Primitive support for break.
  virtual void Break();

 private:
  Environment* break_environment_;  // Environment after the block exits.
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_CONTROL_BUILDERS_H_
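As a quick illustration of the intended call protocol (the same one LoopBuilder::BreakUnless in control-builders.cc uses internally), building control flow for `if (cond) a; else b;` looks roughly like this, where builder is an assumed StructuredGraphBuilder*:

  IfBuilder control_if(builder);
  control_if.If(cond_node);  // Emit the branch on the condition.
  control_if.Then();
  // ... build nodes for 'a' in the then-environment ...
  control_if.Else();
  // ... build nodes for 'b' in the else-environment ...
  control_if.End();          // Merge both environments back together.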
104 src/compiler/frame.h Normal file
@ -0,0 +1,104 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_FRAME_H_
#define V8_COMPILER_FRAME_H_

#include "src/v8.h"

#include "src/data-flow.h"

namespace v8 {
namespace internal {
namespace compiler {

// Collects the spill slot requirements and the allocated general and double
// registers for a compiled function. Frames are usually populated by the
// register allocator and are used by Linkage to generate code for the prologue
// and epilogue to compiled code.
class Frame {
 public:
  Frame()
      : register_save_area_size_(0),
        spill_slot_count_(0),
        double_spill_slot_count_(0),
        allocated_registers_(NULL),
        allocated_double_registers_(NULL) {}

  inline int GetSpillSlotCount() { return spill_slot_count_; }
  inline int GetDoubleSpillSlotCount() { return double_spill_slot_count_; }

  void SetAllocatedRegisters(BitVector* regs) {
    ASSERT(allocated_registers_ == NULL);
    allocated_registers_ = regs;
  }

  void SetAllocatedDoubleRegisters(BitVector* regs) {
    ASSERT(allocated_double_registers_ == NULL);
    allocated_double_registers_ = regs;
  }

  bool DidAllocateDoubleRegisters() {
    return !allocated_double_registers_->IsEmpty();
  }

  void SetRegisterSaveAreaSize(int size) {
    ASSERT(IsAligned(size, kPointerSize));
    register_save_area_size_ = size;
  }

  int GetRegisterSaveAreaSize() { return register_save_area_size_; }

  int AllocateSpillSlot(bool is_double) {
    // If 32-bit, skip one if the new slot is a double.
    if (is_double) {
      if (kDoubleSize > kPointerSize) {
        ASSERT(kDoubleSize == kPointerSize * 2);
        spill_slot_count_++;
        spill_slot_count_ |= 1;
      }
      double_spill_slot_count_++;
    }
    return spill_slot_count_++;
  }

 private:
  int register_save_area_size_;
  int spill_slot_count_;
  int double_spill_slot_count_;
  BitVector* allocated_registers_;
  BitVector* allocated_double_registers_;
};


// Represents an offset from either the stack pointer or frame pointer.
class FrameOffset {
 public:
  inline bool from_stack_pointer() { return (offset_ & 1) == kFromSp; }
  inline bool from_frame_pointer() { return (offset_ & 1) == kFromFp; }
  inline int offset() { return offset_ & ~1; }

  inline static FrameOffset FromStackPointer(int offset) {
    ASSERT((offset & 1) == 0);
    return FrameOffset(offset | kFromSp);
  }

  inline static FrameOffset FromFramePointer(int offset) {
    ASSERT((offset & 1) == 0);
    return FrameOffset(offset | kFromFp);
  }

 private:
  explicit FrameOffset(int offset) : offset_(offset) {}

  int offset_;  // Encodes SP or FP in the low order bit.

  static const int kFromSp = 1;
  static const int kFromFp = 0;
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_FRAME_H_
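Since spill offsets are pointer-aligned, the low bit is free to carry the base-register tag; a small worked example of the round trip:

  FrameOffset fp_off = FrameOffset::FromFramePointer(8);  // Tag kFromFp == 0.
  // fp_off.from_frame_pointer() is true and fp_off.offset() == 8; the ASSERT
  // in the factory guarantees the raw offset's low bit was zero.
  FrameOffset sp_off = FrameOffset::FromStackPointer(8);  // Tag kFromSp == 1.
  // sp_off.offset() masks the tag away and also returns 8.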
135 src/compiler/gap-resolver.cc Normal file
@ -0,0 +1,135 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/gap-resolver.h"

#include <algorithm>
#include <functional>
#include <set>

namespace v8 {
namespace internal {
namespace compiler {

typedef ZoneList<MoveOperands>::iterator op_iterator;

#ifdef ENABLE_SLOW_ASSERTS
// TODO(svenpanne) Brush up InstructionOperand with comparison?
struct InstructionOperandComparator {
  bool operator()(const InstructionOperand* x, const InstructionOperand* y) {
    return (x->kind() < y->kind()) ||
           (x->kind() == y->kind() && x->index() < y->index());
  }
};
#endif

// No operand should be the destination for more than one move.
static void VerifyMovesAreInjective(ZoneList<MoveOperands>* moves) {
#ifdef ENABLE_SLOW_ASSERTS
  std::set<InstructionOperand*, InstructionOperandComparator> seen;
  for (op_iterator i = moves->begin(); i != moves->end(); ++i) {
    SLOW_ASSERT(seen.find(i->destination()) == seen.end());
    seen.insert(i->destination());
  }
#endif
}


void GapResolver::Resolve(ParallelMove* parallel_move) const {
  ZoneList<MoveOperands>* moves = parallel_move->move_operands();
  // TODO(svenpanne) Use the member version of remove_if when we use real lists.
  op_iterator end =
      std::remove_if(moves->begin(), moves->end(),
                     std::mem_fun_ref(&MoveOperands::IsRedundant));
  moves->Rewind(static_cast<int>(end - moves->begin()));

  VerifyMovesAreInjective(moves);

  for (op_iterator move = moves->begin(); move != moves->end(); ++move) {
    if (!move->IsEliminated()) PerformMove(moves, &*move);
  }
}


void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
                              MoveOperands* move) const {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We mark a
  // move as "pending" on entry to PerformMove in order to detect cycles in the
  // move graph. We use operand swaps to resolve cycles, which means that a
  // call to PerformMove could change any source operand in the move graph.
  ASSERT(!move->IsPending());
  ASSERT(!move->IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved on the side.
  ASSERT_NOT_NULL(move->source());  // Or else it will look eliminated.
  InstructionOperand* destination = move->destination();
  move->set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve dependencies.
  // Any unperformed, unpending move with a source the same as this one's
  // destination blocks this one so recursively perform all such moves.
  for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
    if (other->Blocks(destination) && !other->IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does not
      // miss any). Assume there is a non-blocking move with source A and this
      // move is blocked on source B and there is a swap of A and B. Then A and
      // B must be involved in the same cycle (or they would not be swapped).
      // Since this move's destination is B and there is only a single incoming
      // edge to an operand, this move must also be involved in the same cycle.
      // In that case, the blocking move will be created but will be "pending"
      // when we return from PerformMove.
      PerformMove(moves, other);
    }
  }

  // We are about to resolve this move and don't need it marked as pending, so
  // restore its destination.
  move->set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and so
  // it may now be the last move in the cycle. If so remove it.
  InstructionOperand* source = move->source();
  if (source->Equals(destination)) {
    move->Eliminate();
    return;
  }

  // The move may be blocked on a (at most one) pending move, in which case we
  // have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  op_iterator blocker = std::find_if(
      moves->begin(), moves->end(),
      std::bind2nd(std::mem_fun_ref(&MoveOperands::Blocks), destination));
  if (blocker == moves->end()) {
    // The easy case: This move is not blocked.
    assembler_->AssembleMove(source, destination);
    move->Eliminate();
    return;
  }

  ASSERT(blocker->IsPending());
  // Ensure source is a register or both are stack slots, to limit swap cases.
  if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
    std::swap(source, destination);
  }
  assembler_->AssembleSwap(source, destination);
  move->Eliminate();

  // Any unperformed (including pending) move with a source of either this
  // move's source or destination needs to have its source changed to
  // reflect the state of affairs after the swap.
  for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
    if (other->Blocks(source)) {
      other->set_source(destination);
    } else if (other->Blocks(destination)) {
      other->set_source(source);
    }
  }
}
}
}
}  // namespace v8::internal::compiler
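A concrete trace may help: for the two-element cycle {r0 <- r1, r1 <- r0} (hypothetical register operands), the resolver needs exactly one swap and no scratch register:

  // PerformMove(r0 <- r1) clears its destination, marking itself pending,
  // and recurses into r1 <- r0. That inner move finds the pending move
  // blocking its destination, so it takes the swap path and emits
  // AssembleSwap(r0, r1). Back in the outer call, the fix-up loop has
  // rewritten the source to r0, so r0 <- r0 is now trivial and is eliminated.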
46 src/compiler/gap-resolver.h Normal file
@ -0,0 +1,46 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_GAP_RESOLVER_H_
#define V8_COMPILER_GAP_RESOLVER_H_

#include "src/compiler/instruction.h"

namespace v8 {
namespace internal {
namespace compiler {

class GapResolver V8_FINAL {
 public:
  // Interface used by the gap resolver to emit moves and swaps.
  class Assembler {
   public:
    virtual ~Assembler() {}

    // Assemble move.
    virtual void AssembleMove(InstructionOperand* source,
                              InstructionOperand* destination) = 0;
    // Assemble swap.
    virtual void AssembleSwap(InstructionOperand* source,
                              InstructionOperand* destination) = 0;
  };

  explicit GapResolver(Assembler* assembler) : assembler_(assembler) {}

  // Resolve a set of parallel moves, emitting assembler instructions.
  void Resolve(ParallelMove* parallel_move) const;

 private:
  // Perform the given move, possibly requiring other moves to satisfy
  // dependencies.
  void PerformMove(ZoneList<MoveOperands>* moves, MoveOperands* move) const;

  // Assembler used to emit moves and save registers.
  Assembler* const assembler_;
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_GAP_RESOLVER_H_
48 src/compiler/generic-algorithm-inl.h Normal file
@ -0,0 +1,48 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_GENERIC_ALGORITHM_INL_H_
#define V8_COMPILER_GENERIC_ALGORITHM_INL_H_

#include <vector>

#include "src/compiler/generic-algorithm.h"
#include "src/compiler/generic-graph.h"
#include "src/compiler/generic-node.h"
#include "src/compiler/generic-node-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

template <class N>
class NodeInputIterationTraits {
 public:
  typedef N Node;
  typedef typename N::Inputs::iterator Iterator;

  static Iterator begin(Node* node) { return node->inputs().begin(); }
  static Iterator end(Node* node) { return node->inputs().end(); }
  static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
  static Node* to(Iterator iterator) { return *iterator; }
  static Node* from(Iterator iterator) { return iterator.edge().from(); }
};

template <class N>
class NodeUseIterationTraits {
 public:
  typedef N Node;
  typedef typename N::Uses::iterator Iterator;

  static Iterator begin(Node* node) { return node->uses().begin(); }
  static Iterator end(Node* node) { return node->uses().end(); }
  static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
  static Node* to(Iterator iterator) { return *iterator; }
  static Node* from(Iterator iterator) { return iterator.edge().to(); }
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_GENERIC_ALGORITHM_INL_H_
136 src/compiler/generic-algorithm.h Normal file
@ -0,0 +1,136 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_GENERIC_ALGORITHM_H_
#define V8_COMPILER_GENERIC_ALGORITHM_H_

#include <deque>
#include <stack>

#include "src/compiler/generic-graph.h"
#include "src/compiler/generic-node.h"
#include "src/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// GenericGraphVisit allows visitation of graphs of nodes and edges in pre- and
// post-order. Visitation uses an explicitly allocated stack rather than the
// execution stack to avoid stack overflow. Although GenericGraphVisit is
// primarily intended to traverse networks of nodes through their
// dependencies and uses, it can also be used to visit any graph-like network
// by specifying custom traits.
class GenericGraphVisit {
 public:
  enum Control {
    CONTINUE = 0x0,  // Continue depth-first normally
    SKIP = 0x1,      // Skip this node and its successors
    REENTER = 0x2,   // Allow reentering this node
    DEFER = SKIP | REENTER
  };

  // struct Visitor {
  //   Control Pre(Traits::Node* current);
  //   Control Post(Traits::Node* current);
  //   void PreEdge(Traits::Node* from, int index, Traits::Node* to);
  //   void PostEdge(Traits::Node* from, int index, Traits::Node* to);
  // }
  template <class Visitor, class Traits, class RootIterator>
  static void Visit(GenericGraphBase* graph, RootIterator root_begin,
                    RootIterator root_end, Visitor* visitor) {
    // TODO(bmeurer): Pass "local" zone as parameter.
    Zone* zone = graph->zone();
    typedef typename Traits::Node Node;
    typedef typename Traits::Iterator Iterator;
    typedef std::pair<Iterator, Iterator> NodeState;
    typedef zone_allocator<NodeState> ZoneNodeStateAllocator;
    typedef std::deque<NodeState, ZoneNodeStateAllocator> NodeStateDeque;
    typedef std::stack<NodeState, NodeStateDeque> NodeStateStack;
    NodeStateStack stack((NodeStateDeque(ZoneNodeStateAllocator(zone))));
    BoolVector visited(Traits::max_id(graph), false, ZoneBoolAllocator(zone));
    Node* current = *root_begin;
    while (true) {
      ASSERT(current != NULL);
      const int id = current->id();
      ASSERT(id >= 0);
      ASSERT(id < Traits::max_id(graph));  // Must be a valid id.
      bool visit = !GetVisited(&visited, id);
      if (visit) {
        Control control = visitor->Pre(current);
        visit = !IsSkip(control);
        if (!IsReenter(control)) SetVisited(&visited, id, true);
      }
      Iterator begin(visit ? Traits::begin(current) : Traits::end(current));
      Iterator end(Traits::end(current));
      stack.push(NodeState(begin, end));
      Node* post_order_node = current;
      while (true) {
        NodeState top = stack.top();
        if (top.first == top.second) {
          if (visit) {
            Control control = visitor->Post(post_order_node);
            ASSERT(!IsSkip(control));
            SetVisited(&visited, post_order_node->id(), !IsReenter(control));
          }
          stack.pop();
          if (stack.empty()) {
            if (++root_begin == root_end) return;
            current = *root_begin;
            break;
          }
          post_order_node = Traits::from(stack.top().first);
          visit = true;
        } else {
          visitor->PreEdge(Traits::from(top.first), top.first.edge().index(),
                           Traits::to(top.first));
          current = Traits::to(top.first);
          if (!GetVisited(&visited, current->id())) break;
        }
        top = stack.top();
        visitor->PostEdge(Traits::from(top.first), top.first.edge().index(),
                          Traits::to(top.first));
        ++stack.top().first;
      }
    }
  }

  template <class Visitor, class Traits>
  static void Visit(GenericGraphBase* graph, typename Traits::Node* current,
                    Visitor* visitor) {
    typename Traits::Node* array[] = {current};
    Visit<Visitor, Traits>(graph, &array[0], &array[1], visitor);
  }

  template <class B, class S>
  struct NullNodeVisitor {
    Control Pre(GenericNode<B, S>* node) { return CONTINUE; }
    Control Post(GenericNode<B, S>* node) { return CONTINUE; }
    void PreEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
    void PostEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
  };

 private:
  static bool IsSkip(Control c) { return c & SKIP; }
  static bool IsReenter(Control c) { return c & REENTER; }

  // TODO(turbofan): resizing could be optionally templatized away.
  static void SetVisited(BoolVector* visited, int id, bool value) {
    if (id >= static_cast<int>(visited->size())) {
      // Resize and set all values to unvisited.
      visited->resize((3 * id) / 2, false);
    }
    visited->at(id) = value;
  }

  static bool GetVisited(BoolVector* visited, int id) {
    if (id >= static_cast<int>(visited->size())) return false;
    return visited->at(id);
  }
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_GENERIC_ALGORITHM_H_
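A minimal visitor sketch in the shape the commented Visitor interface above requires; MyNode stands in for a concrete GenericNode specialization and is hypothetical:

  struct CountingVisitor {
    int count;
    CountingVisitor() : count(0) {}
    GenericGraphVisit::Control Pre(MyNode* node) {
      ++count;  // Reached once per node, thanks to the visited bit vector.
      return GenericGraphVisit::CONTINUE;
    }
    GenericGraphVisit::Control Post(MyNode* node) {
      return GenericGraphVisit::CONTINUE;
    }
    void PreEdge(MyNode* from, int index, MyNode* to) {}
    void PostEdge(MyNode* from, int index, MyNode* to) {}
  };

Calling GenericGraphVisit::Visit<CountingVisitor, NodeInputIterationTraits<MyNode> >(graph, graph->end(), &visitor) would then count the nodes reachable from the end node through input edges.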
53 src/compiler/generic-graph.h Normal file
@ -0,0 +1,53 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_GENERIC_GRAPH_H_
#define V8_COMPILER_GENERIC_GRAPH_H_

#include "src/compiler/generic-node.h"

namespace v8 {
namespace internal {

class Zone;

namespace compiler {

class GenericGraphBase : public ZoneObject {
 public:
  explicit GenericGraphBase(Zone* zone) : zone_(zone), next_node_id_(0) {}

  Zone* zone() const { return zone_; }

  NodeId NextNodeID() { return next_node_id_++; }
  NodeId NodeCount() const { return next_node_id_; }

 private:
  Zone* zone_;
  NodeId next_node_id_;
};

template <class V>
class GenericGraph : public GenericGraphBase {
 public:
  explicit GenericGraph(Zone* zone)
      : GenericGraphBase(zone), start_(NULL), end_(NULL) {}

  V* start() { return start_; }
  V* end() { return end_; }

  void SetStart(V* start) { start_ = start; }
  void SetEnd(V* end) { end_ = end; }

 private:
  V* start_;
  V* end_;

  DISALLOW_COPY_AND_ASSIGN(GenericGraph);
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_GENERIC_GRAPH_H_
244 src/compiler/generic-node-inl.h Normal file
@ -0,0 +1,244 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_GENERIC_NODE_INL_H_
#define V8_COMPILER_GENERIC_NODE_INL_H_

#include "src/v8.h"

#include "src/compiler/generic-graph.h"
#include "src/compiler/generic-node.h"
#include "src/zone.h"

namespace v8 {
namespace internal {
namespace compiler {

template <class B, class S>
GenericNode<B, S>::GenericNode(GenericGraphBase* graph, int input_count)
    : BaseClass(graph->zone()),
      input_count_(input_count),
      has_appendable_inputs_(false),
      use_count_(0),
      first_use_(NULL),
      last_use_(NULL) {
  // The inline input buffer is allocated directly after the node by New().
  inputs_.static_ = reinterpret_cast<Input*>(this + 1), AssignUniqueID(graph);
}

template <class B, class S>
inline void GenericNode<B, S>::AssignUniqueID(GenericGraphBase* graph) {
  id_ = graph->NextNodeID();
}

template <class B, class S>
inline typename GenericNode<B, S>::Inputs::iterator
GenericNode<B, S>::Inputs::begin() {
  return GenericNode::Inputs::iterator(this->node_, 0);
}

template <class B, class S>
inline typename GenericNode<B, S>::Inputs::iterator
GenericNode<B, S>::Inputs::end() {
  return GenericNode::Inputs::iterator(this->node_, this->node_->InputCount());
}

template <class B, class S>
inline typename GenericNode<B, S>::Uses::iterator
GenericNode<B, S>::Uses::begin() {
  return GenericNode::Uses::iterator(this->node_);
}

template <class B, class S>
inline typename GenericNode<B, S>::Uses::iterator
GenericNode<B, S>::Uses::end() {
  return GenericNode::Uses::iterator();
}

template <class B, class S>
void GenericNode<B, S>::ReplaceUses(GenericNode* replace_to) {
  for (Use* use = first_use_; use != NULL; use = use->next) {
    use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
  }
  if (replace_to->last_use_ == NULL) {
    ASSERT_EQ(NULL, replace_to->first_use_);
    replace_to->first_use_ = first_use_;
  } else {
    ASSERT_NE(NULL, replace_to->first_use_);
    replace_to->last_use_->next = first_use_;
    first_use_->prev = replace_to->last_use_;
  }
  replace_to->last_use_ = last_use_;
  replace_to->use_count_ += use_count_;
  use_count_ = 0;
  first_use_ = NULL;
  last_use_ = NULL;
}

template <class B, class S>
template <class UnaryPredicate>
void GenericNode<B, S>::ReplaceUsesIf(UnaryPredicate pred,
                                      GenericNode* replace_to) {
  for (Use* use = first_use_; use != NULL;) {
    Use* next = use->next;
    if (pred(static_cast<S*>(use->from))) {
      RemoveUse(use);
      replace_to->AppendUse(use);
      use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
    }
    use = next;
  }
}

template <class B, class S>
void GenericNode<B, S>::RemoveAllInputs() {
  for (typename Inputs::iterator iter(inputs().begin()); iter != inputs().end();
       ++iter) {
    iter.GetInput()->Update(NULL);
  }
}

template <class B, class S>
void GenericNode<B, S>::TrimInputCount(int new_input_count) {
  if (new_input_count == input_count_) return;  // Nothing to do.

  ASSERT(new_input_count < input_count_);

  // Update inline inputs.
  for (int i = new_input_count; i < input_count_; i++) {
    GenericNode<B, S>::Input* input = GetInputRecordPtr(i);
    input->Update(NULL);
  }
  input_count_ = new_input_count;
}

template <class B, class S>
void GenericNode<B, S>::ReplaceInput(int index, GenericNode<B, S>* new_to) {
  Input* input = GetInputRecordPtr(index);
  input->Update(new_to);
}

template <class B, class S>
void GenericNode<B, S>::Input::Update(GenericNode<B, S>* new_to) {
  GenericNode* old_to = this->to;
  if (new_to == old_to) return;  // Nothing to do.
  // Snip out the use from where it used to be.
  if (old_to != NULL) {
    old_to->RemoveUse(use);
  }
  to = new_to;
  // And put it into the new node's use list.
  if (new_to != NULL) {
    new_to->AppendUse(use);
  } else {
    use->next = NULL;
    use->prev = NULL;
  }
}

template <class B, class S>
void GenericNode<B, S>::EnsureAppendableInputs(Zone* zone) {
  if (!has_appendable_inputs_) {
    void* deque_buffer = zone->New(sizeof(InputDeque));
    InputDeque* deque = new (deque_buffer) InputDeque(ZoneInputAllocator(zone));
    for (int i = 0; i < input_count_; ++i) {
      deque->push_back(inputs_.static_[i]);
    }
    inputs_.appendable_ = deque;
    has_appendable_inputs_ = true;
  }
}

template <class B, class S>
void GenericNode<B, S>::AppendInput(Zone* zone, GenericNode<B, S>* to_append) {
  EnsureAppendableInputs(zone);
  Use* new_use = new (zone) Use;
  Input new_input;
  new_input.to = to_append;
  new_input.use = new_use;
  inputs_.appendable_->push_back(new_input);
  new_use->input_index = input_count_;
  new_use->from = this;
  to_append->AppendUse(new_use);
  input_count_++;
}

template <class B, class S>
void GenericNode<B, S>::InsertInput(Zone* zone, int index,
                                    GenericNode<B, S>* to_insert) {
  ASSERT(index >= 0 && index < InputCount());
  // TODO(turbofan): Optimize this implementation!
  AppendInput(zone, InputAt(InputCount() - 1));
  for (int i = InputCount() - 1; i > index; --i) {
    ReplaceInput(i, InputAt(i - 1));
  }
  ReplaceInput(index, to_insert);
}

template <class B, class S>
void GenericNode<B, S>::AppendUse(Use* use) {
  use->next = NULL;
  use->prev = last_use_;
  if (last_use_ == NULL) {
    first_use_ = use;
  } else {
    last_use_->next = use;
  }
  last_use_ = use;
  ++use_count_;
}

template <class B, class S>
void GenericNode<B, S>::RemoveUse(Use* use) {
  if (last_use_ == use) {
    last_use_ = use->prev;
  }
  if (use->prev != NULL) {
    use->prev->next = use->next;
  } else {
    first_use_ = use->next;
  }
  if (use->next != NULL) {
    use->next->prev = use->prev;
  }
  --use_count_;
}

template <class B, class S>
inline bool GenericNode<B, S>::OwnedBy(GenericNode* owner) const {
  return first_use_ != NULL && first_use_->from == owner &&
         first_use_->next == NULL;
}

template <class B, class S>
S* GenericNode<B, S>::New(GenericGraphBase* graph, int input_count,
                          S** inputs) {
  size_t node_size = sizeof(GenericNode);
  size_t inputs_size = input_count * sizeof(Input);
  size_t uses_size = input_count * sizeof(Use);
  size_t size = node_size + inputs_size + uses_size;
  Zone* zone = graph->zone();
  void* buffer = zone->New(size);
  S* result = new (buffer) S(graph, input_count);
  Input* input =
      reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
  Use* use =
      reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);

  for (int current = 0; current < input_count; ++current) {
    GenericNode* to = *inputs++;
    input->to = to;
    input->use = use;
    use->input_index = current;
    use->from = result;
    to->AppendUse(use);
    ++use;
    ++input;
  }
  return result;
}
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_GENERIC_NODE_INL_H_
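For orientation, the typical reduction idiom these primitives support (old_node and new_node being hypothetical nodes of a concrete specialization):

  // Redirect every user of old_node to new_node in one pass. ReplaceUses
  // splices the entire use list over, so old_node keeps its own inputs but
  // ends up with no remaining uses.
  old_node->ReplaceUses(new_node);
  ASSERT(old_node->uses().empty());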
271 src/compiler/generic-node.h Normal file
@ -0,0 +1,271 @@
|
||||
// Copyright 2013 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_COMPILER_GENERIC_NODE_H_
|
||||
#define V8_COMPILER_GENERIC_NODE_H_
|
||||
|
||||
#include <deque>
|
||||
|
||||
#include "src/v8.h"
|
||||
|
||||
#include "src/compiler/operator.h"
|
||||
#include "src/zone.h"
|
||||
#include "src/zone-allocator.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
namespace compiler {
|
||||
|
||||
class Operator;
|
||||
class GenericGraphBase;
|
||||
|
||||
typedef int NodeId;
|
||||
|
||||
// A GenericNode<> is the basic primitive of graphs. GenericNode's are
|
||||
// chained together by input/use chains but by default otherwise contain only an
|
||||
// identifying number which specific applications of graphs and nodes can use
|
||||
// to index auxiliary out-of-line data, especially transient data.
|
||||
// Specializations of the templatized GenericNode<> class must provide a base
|
||||
// class B that contains all of the members to be made available in each
|
||||
// specialized Node instance. GenericNode uses a mixin template pattern to
|
||||
// ensure that common accessors and methods expect the derived class S type
|
||||
// rather than the GenericNode<B, S> type.
|
||||
template <class B, class S>
|
||||
class GenericNode : public B {
|
||||
public:
|
||||
typedef B BaseClass;
|
||||
typedef S DerivedClass;
|
||||
|
||||
inline NodeId id() const { return id_; }
|
||||
|
||||
int InputCount() const { return input_count_; }
|
||||
S* InputAt(int index) const {
|
||||
return static_cast<S*>(GetInputRecordPtr(index)->to);
|
||||
}
|
||||
void ReplaceInput(int index, GenericNode* new_input);
|
||||
void AppendInput(Zone* zone, GenericNode* new_input);
|
||||
void InsertInput(Zone* zone, int index, GenericNode* new_input);
|
||||
|
||||
int UseCount() { return use_count_; }
|
||||
S* UseAt(int index) {
|
||||
ASSERT(index < use_count_);
|
||||
Use* current = first_use_;
|
||||
while (index-- != 0) {
|
||||
current = current->next;
|
||||
}
|
||||
return static_cast<S*>(current->from);
|
||||
}
|
||||
inline void ReplaceUses(GenericNode* replace_to);
|
||||
template <class UnaryPredicate>
|
||||
inline void ReplaceUsesIf(UnaryPredicate pred, GenericNode* replace_to);
|
||||
void RemoveAllInputs();
|
||||
|
||||
void TrimInputCount(int input_count);
|
||||
|
||||
class Inputs {
|
||||
public:
|
||||
class iterator;
|
||||
iterator begin();
|
||||
iterator end();
|
||||
|
||||
explicit Inputs(GenericNode* node) : node_(node) {}
|
||||
|
||||
private:
|
||||
GenericNode* node_;
|
||||
};
|
||||
|
||||
Inputs inputs() { return Inputs(this); }
|
||||
|
||||
class Uses {
|
||||
public:
|
||||
class iterator;
|
||||
iterator begin();
|
||||
iterator end();
|
||||
bool empty() { return begin() == end(); }
|
||||
|
||||
explicit Uses(GenericNode* node) : node_(node) {}
|
||||
|
||||
private:
|
||||
GenericNode* node_;
|
||||
};
|
||||
|
||||
Uses uses() { return Uses(this); }
|
||||
|
||||
class Edge;
|
||||
|
||||
bool OwnedBy(GenericNode* owner) const;
|
||||
|
||||
static S* New(GenericGraphBase* graph, int input_count, S** inputs);
|
||||
|
||||
protected:
|
||||
friend class GenericGraphBase;
|
||||
|
||||
class Use : public ZoneObject {
|
||||
public:
|
||||
GenericNode* from;
|
||||
Use* next;
|
||||
Use* prev;
|
||||
int input_index;
|
||||
};
|
||||
|
||||
class Input {
|
||||
public:
|
||||
GenericNode* to;
|
||||
Use* use;
|
||||
|
||||
void Update(GenericNode* new_to);
|
||||
};
|
||||
|
||||
void EnsureAppendableInputs(Zone* zone);
|
||||
|
||||
Input* GetInputRecordPtr(int index) const {
|
||||
if (has_appendable_inputs_) {
|
||||
return &((*inputs_.appendable_)[index]);
|
||||
} else {
|
||||
return inputs_.static_ + index;
|
||||
}
|
||||
}
|
||||
|
||||
void AppendUse(Use* use);
|
||||
void RemoveUse(Use* use);
|
||||
|
||||
void* operator new(size_t, void* location) { return location; }
|
||||
|
||||
GenericNode(GenericGraphBase* graph, int input_count);
|
||||
|
||||
private:
|
||||
void AssignUniqueID(GenericGraphBase* graph);
|
||||
|
||||
typedef zone_allocator<Input> ZoneInputAllocator;
|
||||
typedef std::deque<Input, ZoneInputAllocator> InputDeque;

  NodeId id_;
  int input_count_ : 31;
  bool has_appendable_inputs_ : 1;
  union {
    // When a node is initially allocated, it uses a static buffer to hold its
    // inputs under the assumption that the number of inputs will not increase.
    // When the first input is appended, the static buffer is converted into a
    // deque to allow for space-efficient growing.
    Input* static_;
    InputDeque* appendable_;
  } inputs_;
  int use_count_;
  Use* first_use_;
  Use* last_use_;

  DISALLOW_COPY_AND_ASSIGN(GenericNode);
};


// An encapsulation for information associated with a single use of a node as
// an input from another node, allowing access to both the defining node and
// the node having the input.
template <class B, class S>
class GenericNode<B, S>::Edge {
 public:
  S* from() const { return static_cast<S*>(input_->use->from); }
  S* to() const { return static_cast<S*>(input_->to); }
  int index() const {
    int index = input_->use->input_index;
    ASSERT(index < input_->use->from->input_count_);
    return index;
  }

 private:
  friend class GenericNode<B, S>::Uses::iterator;
  friend class GenericNode<B, S>::Inputs::iterator;

  explicit Edge(typename GenericNode<B, S>::Input* input) : input_(input) {}

  typename GenericNode<B, S>::Input* input_;
};


// A forward iterator to visit the nodes which are depended upon by a node
// in the order of input.
template <class B, class S>
class GenericNode<B, S>::Inputs::iterator {
 public:
  iterator(const typename GenericNode<B, S>::Inputs::iterator& other)  // NOLINT
      : node_(other.node_),
        index_(other.index_) {}

  S* operator*() { return static_cast<S*>(GetInput()->to); }
  typename GenericNode<B, S>::Edge edge() {
    return typename GenericNode::Edge(GetInput());
  }
  bool operator==(const iterator& other) const {
    return other.index_ == index_ && other.node_ == node_;
  }
  bool operator!=(const iterator& other) const { return !(other == *this); }
  iterator& operator++() {
    ASSERT(node_ != NULL);
    ASSERT(index_ < node_->input_count_);
    ++index_;
    return *this;
  }
  int index() { return index_; }

 private:
  friend class GenericNode;

  explicit iterator(GenericNode* node, int index)
      : node_(node), index_(index) {}

  Input* GetInput() const { return node_->GetInputRecordPtr(index_); }

  GenericNode* node_;
  int index_;
};


// A forward iterator to visit the uses of a node. The uses are returned in
// the order in which they were added as inputs.
template <class B, class S>
class GenericNode<B, S>::Uses::iterator {
 public:
  iterator(const typename GenericNode<B, S>::Uses::iterator& other)  // NOLINT
      : current_(other.current_),
        index_(other.index_) {}

  S* operator*() { return static_cast<S*>(current_->from); }
  typename GenericNode<B, S>::Edge edge() {
    return typename GenericNode::Edge(CurrentInput());
  }

  bool operator==(const iterator& other) { return other.current_ == current_; }
  bool operator!=(const iterator& other) { return other.current_ != current_; }
  iterator& operator++() {
    ASSERT(current_ != NULL);
    index_++;
    current_ = current_->next;
    return *this;
  }
  iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
    ASSERT(current_ != NULL);
    index_++;
    typename GenericNode<B, S>::Input* input = CurrentInput();
    current_ = current_->next;
    input->Update(new_to);
    return *this;
  }
  int index() const { return index_; }

 private:
  friend class GenericNode<B, S>::Uses;

  iterator() : current_(NULL), index_(0) {}
  explicit iterator(GenericNode<B, S>* node)
      : current_(node->first_use_), index_(0) {}

  Input* CurrentInput() const {
    return current_->from->GetInputRecordPtr(current_->input_index);
  }

  typename GenericNode<B, S>::Use* current_;
  int index_;
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_GENERIC_NODE_H_
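
// For illustration only: a minimal sketch of walking the iterators defined
// above, assuming the concrete Node type from src/compiler/node.h.
// PrintNeighbors is a hypothetical helper, not part of this commit.
static inline void PrintNeighbors(Node* node) {
  // Visit the defining nodes of all inputs, in input order.
  Node::Inputs inputs = node->inputs();
  for (Node::Inputs::iterator it = inputs.begin(); it != inputs.end(); ++it) {
    PrintF("input %d <- node #%d\n", it.index(), (*it)->id());
  }
  // Visit every node that uses {node}, in the order the uses were added.
  Node::Uses uses = node->uses();
  for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
    PrintF("use %d -> node #%d\n", it.index(), (*it)->id());
  }
}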

src/compiler/graph-builder.cc (new file, 253 lines)
@@ -0,0 +1,253 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/graph-builder.h"

#include "src/compiler.h"
#include "src/compiler/generic-graph.h"
#include "src/compiler/generic-node.h"
#include "src/compiler/generic-node-inl.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node-properties-inl.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/operator-properties-inl.h"

namespace v8 {
namespace internal {
namespace compiler {


StructuredGraphBuilder::StructuredGraphBuilder(Graph* graph,
                                               CommonOperatorBuilder* common)
    : GraphBuilder(graph),
      common_(common),
      environment_(NULL),
      current_context_(NULL),
      exit_control_(NULL) {}


Node* StructuredGraphBuilder::MakeNode(Operator* op, int value_input_count,
                                       Node** value_inputs) {
  bool has_context = OperatorProperties::HasContextInput(op);
  bool has_control = OperatorProperties::GetControlInputCount(op) == 1;
  bool has_effect = OperatorProperties::GetEffectInputCount(op) == 1;

  ASSERT(OperatorProperties::GetControlInputCount(op) < 2);
  ASSERT(OperatorProperties::GetEffectInputCount(op) < 2);

  Node* result = NULL;
  if (!has_context && !has_control && !has_effect) {
    result = graph()->NewNode(op, value_input_count, value_inputs);
  } else {
    int input_count_with_deps = value_input_count;
    if (has_context) ++input_count_with_deps;
    if (has_control) ++input_count_with_deps;
    if (has_effect) ++input_count_with_deps;
    void* raw_buffer = alloca(kPointerSize * input_count_with_deps);
    Node** buffer = reinterpret_cast<Node**>(raw_buffer);
    memcpy(buffer, value_inputs, kPointerSize * value_input_count);
    Node** current_input = buffer + value_input_count;
    if (has_context) {
      *current_input++ = current_context();
    }
    if (has_effect) {
      *current_input++ = environment_->GetEffectDependency();
    }
    if (has_control) {
      *current_input++ = GetControlDependency();
    }
    result = graph()->NewNode(op, input_count_with_deps, buffer);
    if (has_effect) {
      environment_->UpdateEffectDependency(result);
    }
    if (NodeProperties::HasControlOutput(result) &&
        !environment_internal()->IsMarkedAsUnreachable()) {
      UpdateControlDependency(result);
    }
  }

  return result;
}


Node* StructuredGraphBuilder::GetControlDependency() {
  return environment_->GetControlDependency();
}


void StructuredGraphBuilder::UpdateControlDependency(Node* new_control) {
  environment_->UpdateControlDependency(new_control);
}


void StructuredGraphBuilder::UpdateControlDependencyToLeaveFunction(
    Node* exit) {
  if (environment_internal()->IsMarkedAsUnreachable()) return;
  if (exit_control() != NULL) {
    exit = MergeControl(exit_control(), exit);
  }
  environment_internal()->MarkAsUnreachable();
  set_exit_control(exit);
}


StructuredGraphBuilder::Environment* StructuredGraphBuilder::CopyEnvironment(
    Environment* env) {
  return new (zone()) Environment(*env);
}


StructuredGraphBuilder::Environment::Environment(
    StructuredGraphBuilder* builder, Node* control_dependency)
    : builder_(builder),
      control_dependency_(control_dependency),
      effect_dependency_(control_dependency),
      values_(NodeVector::allocator_type(zone())) {}


StructuredGraphBuilder::Environment::Environment(const Environment& copy)
    : builder_(copy.builder()),
      control_dependency_(copy.control_dependency_),
      effect_dependency_(copy.effect_dependency_),
      values_(copy.values_) {}


void StructuredGraphBuilder::Environment::Merge(Environment* other) {
  ASSERT(values_.size() == other->values_.size());

  // Nothing to do if the other environment is dead.
  if (other->IsMarkedAsUnreachable()) return;

  // Resurrect a dead environment by copying the contents of the other one and
  // placing a singleton merge as the new control dependency.
  if (this->IsMarkedAsUnreachable()) {
    Node* other_control = other->control_dependency_;
    control_dependency_ = graph()->NewNode(common()->Merge(1), other_control);
    effect_dependency_ = other->effect_dependency_;
    values_ = other->values_;
    return;
  }

  // Create a merge of the control dependencies of both environments and update
  // the current environment's control dependency accordingly.
  Node* control = builder_->MergeControl(this->GetControlDependency(),
                                         other->GetControlDependency());
  UpdateControlDependency(control);

  // Create a merge of the effect dependencies of both environments and update
  // the current environment's effect dependency accordingly.
  Node* effect = builder_->MergeEffect(this->GetEffectDependency(),
                                       other->GetEffectDependency(), control);
  UpdateEffectDependency(effect);

  // Introduce Phi nodes for values that have differing input at merge points,
  // potentially extending an existing Phi node if possible.
  for (int i = 0; i < static_cast<int>(values_.size()); ++i) {
    if (values_[i] == NULL) continue;
    values_[i] = builder_->MergeValue(values_[i], other->values_[i], control);
  }
}


void StructuredGraphBuilder::Environment::PrepareForLoop() {
  Node* control = GetControlDependency();
  for (int i = 0; i < static_cast<int>(values()->size()); ++i) {
    if (values()->at(i) == NULL) continue;
    Node* phi = builder_->NewPhi(1, values()->at(i), control);
    values()->at(i) = phi;
  }
  Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
  UpdateEffectDependency(effect);
}


Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) {
  Operator* phi_op = common()->Phi(count);
  void* raw_buffer = alloca(kPointerSize * (count + 1));
  Node** buffer = reinterpret_cast<Node**>(raw_buffer);
  MemsetPointer(buffer, input, count);
  buffer[count] = control;
  return graph()->NewNode(phi_op, count + 1, buffer);
}


// TODO(mstarzinger): Revisit this once we have proper effect states.
Node* StructuredGraphBuilder::NewEffectPhi(int count, Node* input,
                                           Node* control) {
  Operator* phi_op = common()->EffectPhi(count);
  void* raw_buffer = alloca(kPointerSize * (count + 1));
  Node** buffer = reinterpret_cast<Node**>(raw_buffer);
  MemsetPointer(buffer, input, count);
  buffer[count] = control;
  return graph()->NewNode(phi_op, count + 1, buffer);
}


Node* StructuredGraphBuilder::MergeControl(Node* control, Node* other) {
  int inputs = NodeProperties::GetControlInputCount(control) + 1;
  if (control->opcode() == IrOpcode::kLoop) {
    // Control node for loop exists, add input.
    Operator* op = common()->Loop(inputs);
    control->AppendInput(zone(), other);
    control->set_op(op);
  } else if (control->opcode() == IrOpcode::kMerge) {
    // Control node for merge exists, add input.
    Operator* op = common()->Merge(inputs);
    control->AppendInput(zone(), other);
    control->set_op(op);
  } else {
    // Control node is a singleton, introduce a merge.
    Operator* op = common()->Merge(inputs);
    control = graph()->NewNode(op, control, other);
  }
  return control;
}


Node* StructuredGraphBuilder::MergeEffect(Node* value, Node* other,
                                          Node* control) {
  int inputs = NodeProperties::GetControlInputCount(control);
  if (value->opcode() == IrOpcode::kEffectPhi &&
      NodeProperties::GetControlInput(value) == control) {
    // Phi already exists, add input.
    value->set_op(common()->EffectPhi(inputs));
    value->InsertInput(zone(), inputs - 1, other);
  } else if (value != other) {
    // Phi does not exist yet, introduce one.
    value = NewEffectPhi(inputs, value, control);
    value->ReplaceInput(inputs - 1, other);
  }
  return value;
}


Node* StructuredGraphBuilder::MergeValue(Node* value, Node* other,
                                         Node* control) {
  int inputs = NodeProperties::GetControlInputCount(control);
  if (value->opcode() == IrOpcode::kPhi &&
      NodeProperties::GetControlInput(value) == control) {
    // Phi already exists, add input.
    value->set_op(common()->Phi(inputs));
    value->InsertInput(zone(), inputs - 1, other);
  } else if (value != other) {
    // Phi does not exist yet, introduce one.
    value = NewPhi(inputs, value, control);
    value->ReplaceInput(inputs - 1, other);
  }
  return value;
}


Node* StructuredGraphBuilder::dead_control() {
  if (!dead_control_.is_set()) {
    Node* dead_node = graph()->NewNode(common_->Dead());
    dead_control_.set(dead_node);
    return dead_node;
  }
  return dead_control_.get();
}
}
}
}  // namespace v8::internal::compiler

src/compiler/graph-builder.h (new file, 232 lines)
@@ -0,0 +1,232 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_GRAPH_BUILDER_H_
#define V8_COMPILER_GRAPH_BUILDER_H_

#include "src/v8.h"

#include "src/allocation.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/unique.h"

namespace v8 {
namespace internal {
namespace compiler {

class Node;

// A common base class for anything that creates nodes in a graph.
class GraphBuilder {
 public:
  explicit GraphBuilder(Graph* graph) : graph_(graph) {}
  virtual ~GraphBuilder() {}

  Node* NewNode(Operator* op) {
    return MakeNode(op, 0, static_cast<Node**>(NULL));
  }

  Node* NewNode(Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }

  Node* NewNode(Operator* op, Node* n1, Node* n2) {
    Node* buffer[] = {n1, n2};
    return MakeNode(op, ARRAY_SIZE(buffer), buffer);
  }

  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3) {
    Node* buffer[] = {n1, n2, n3};
    return MakeNode(op, ARRAY_SIZE(buffer), buffer);
  }

  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
    Node* buffer[] = {n1, n2, n3, n4};
    return MakeNode(op, ARRAY_SIZE(buffer), buffer);
  }

  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
                Node* n5) {
    Node* buffer[] = {n1, n2, n3, n4, n5};
    return MakeNode(op, ARRAY_SIZE(buffer), buffer);
  }

  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, Node* n5,
                Node* n6) {
    Node* nodes[] = {n1, n2, n3, n4, n5, n6};
    return MakeNode(op, ARRAY_SIZE(nodes), nodes);
  }

  Node* NewNode(Operator* op, int value_input_count, Node** value_inputs) {
    return MakeNode(op, value_input_count, value_inputs);
  }

  Graph* graph() const { return graph_; }

 protected:
  // Base implementation used by all factory methods.
  virtual Node* MakeNode(Operator* op, int value_input_count,
                         Node** value_inputs) = 0;

 private:
  Graph* graph_;
};


// The StructuredGraphBuilder produces a high-level IR graph. It is used as the
// base class for concrete implementations (e.g. the AstGraphBuilder or the
// StubGraphBuilder).
class StructuredGraphBuilder : public GraphBuilder {
 public:
  StructuredGraphBuilder(Graph* graph, CommonOperatorBuilder* common);
  virtual ~StructuredGraphBuilder() {}

  // Creates a new Phi node having {count} input values.
  Node* NewPhi(int count, Node* input, Node* control);
  Node* NewEffectPhi(int count, Node* input, Node* control);

  // Helpers for merging control, effect or value dependencies.
  Node* MergeControl(Node* control, Node* other);
  Node* MergeEffect(Node* value, Node* other, Node* control);
  Node* MergeValue(Node* value, Node* other, Node* control);

  // Helpers to create new control nodes.
  Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
  Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
  Node* NewMerge() { return NewNode(common()->Merge(1)); }
  Node* NewLoop() { return NewNode(common()->Loop(1)); }
  Node* NewBranch(Node* condition) {
    return NewNode(common()->Branch(), condition);
  }

 protected:
  class Environment;
  friend class ControlBuilder;

  // The following method creates a new node having the specified operator and
  // ensures effect and control dependencies are wired up. The dependencies
  // tracked by the environment might be mutated.
  virtual Node* MakeNode(Operator* op, int value_input_count,
                         Node** value_inputs);

  Environment* environment_internal() const { return environment_; }
  void set_environment(Environment* env) { environment_ = env; }

  Node* current_context() const { return current_context_; }
  void set_current_context(Node* context) { current_context_ = context; }

  Node* exit_control() const { return exit_control_; }
  void set_exit_control(Node* node) { exit_control_ = node; }

  Node* dead_control();

  // TODO(mstarzinger): Use phase-local zone instead!
  Zone* zone() const { return graph()->zone(); }
  Isolate* isolate() const { return zone()->isolate(); }
  CommonOperatorBuilder* common() const { return common_; }

  // Helper to wrap a Handle<T> into a Unique<T>.
  template <class T>
  PrintableUnique<T> MakeUnique(Handle<T> object) {
    return PrintableUnique<T>::CreateUninitialized(zone(), object);
  }

  // Support for control flow builders. The concrete type of the environment
  // depends on the graph builder, but environments themselves are not virtual.
  virtual Environment* CopyEnvironment(Environment* env);

  // Helper when creating a node that depends on control.
  Node* GetControlDependency();

  // Helper when creating a node that updates control.
  void UpdateControlDependency(Node* new_control);

  // Helper to indicate that a node exits the function body.
  void UpdateControlDependencyToLeaveFunction(Node* exit);

 private:
  CommonOperatorBuilder* common_;
  Environment* environment_;

  // Node representing the control dependency for dead code.
  SetOncePointer<Node> dead_control_;

  // Node representing the current context within the function body.
  Node* current_context_;

  // Merge of all control nodes that exit the function body.
  Node* exit_control_;

  DISALLOW_COPY_AND_ASSIGN(StructuredGraphBuilder);
};


// The abstract execution environment contains static knowledge about
// execution state at arbitrary control-flow points. It allows for
// simulation of the control-flow at compile time.
class StructuredGraphBuilder::Environment : public ZoneObject {
 public:
  Environment(StructuredGraphBuilder* builder, Node* control_dependency);
  Environment(const Environment& copy);

  // Control dependency tracked by this environment.
  Node* GetControlDependency() { return control_dependency_; }
  void UpdateControlDependency(Node* dependency) {
    control_dependency_ = dependency;
  }

  // Effect dependency tracked by this environment.
  Node* GetEffectDependency() { return effect_dependency_; }
  void UpdateEffectDependency(Node* dependency) {
    effect_dependency_ = dependency;
  }

  // Mark this environment as being unreachable.
  void MarkAsUnreachable() {
    UpdateControlDependency(builder()->dead_control());
  }
  bool IsMarkedAsUnreachable() {
    return GetControlDependency()->opcode() == IrOpcode::kDead;
  }

  // Merge another environment into this one.
  void Merge(Environment* other);

  // Copies this environment at a control-flow split point.
  Environment* CopyForConditional() { return builder()->CopyEnvironment(this); }

  // Copies this environment to a potentially unreachable control-flow point.
  Environment* CopyAsUnreachable() {
    Environment* env = builder()->CopyEnvironment(this);
    env->MarkAsUnreachable();
    return env;
  }

  // Copies this environment at a loop header control-flow point.
  Environment* CopyForLoop() {
    PrepareForLoop();
    return builder()->CopyEnvironment(this);
  }

 protected:
  // TODO(mstarzinger): Use phase-local zone instead!
  Zone* zone() const { return graph()->zone(); }
  Graph* graph() const { return builder_->graph(); }
  StructuredGraphBuilder* builder() const { return builder_; }
  CommonOperatorBuilder* common() { return builder_->common(); }
  NodeVector* values() { return &values_; }

  // Prepare the environment to be used as a loop header.
  void PrepareForLoop();

 private:
  StructuredGraphBuilder* builder_;
  Node* control_dependency_;
  Node* effect_dependency_;
  NodeVector values_;
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_GRAPH_BUILDER_H_
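
// For illustration only: a minimal sketch of a concrete GraphBuilder that
// forwards MakeNode straight to the graph, with no context, effect, or
// control wiring. PlainGraphBuilder is a hypothetical name, not a class in
// this commit.
class PlainGraphBuilder : public GraphBuilder {
 public:
  explicit PlainGraphBuilder(Graph* graph) : GraphBuilder(graph) {}

 protected:
  virtual Node* MakeNode(Operator* op, int value_input_count,
                         Node** value_inputs) {
    // No extra dependencies; hand the value inputs directly to the graph.
    return graph()->NewNode(op, value_input_count, value_inputs);
  }
};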

src/compiler/graph-inl.h (new file, 37 lines)
@@ -0,0 +1,37 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_GRAPH_INL_H_
#define V8_COMPILER_GRAPH_INL_H_

#include "src/compiler/generic-algorithm-inl.h"
#include "src/compiler/graph.h"

namespace v8 {
namespace internal {
namespace compiler {

template <class Visitor>
void Graph::VisitNodeUsesFrom(Node* node, Visitor* visitor) {
  GenericGraphVisit::Visit<Visitor, NodeUseIterationTraits<Node> >(this, node,
                                                                   visitor);
}


template <class Visitor>
void Graph::VisitNodeUsesFromStart(Visitor* visitor) {
  VisitNodeUsesFrom(start(), visitor);
}


template <class Visitor>
void Graph::VisitNodeInputsFromEnd(Visitor* visitor) {
  GenericGraphVisit::Visit<Visitor, NodeInputIterationTraits<Node> >(
      this, end(), visitor);
}
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_GRAPH_INL_H_
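
// For illustration only: the Visit* templates above expect a visitor in the
// style of NullNodeVisitor (compare GraphReducerVisitor in graph-reducer.cc
// below), whose Pre/Post hooks return a GenericGraphVisit::Control value.
// IdPrinter is a hypothetical visitor, not part of this commit.
class IdPrinter : public NullNodeVisitor {
 public:
  GenericGraphVisit::Control Post(Node* node) {
    PrintF("#%d\n", node->id());  // Emitted in post-order from the end node.
    return GenericGraphVisit::CONTINUE;
  }
};
// Typical usage, given a Graph* graph:
//   IdPrinter printer;
//   graph->VisitNodeInputsFromEnd(&printer);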

src/compiler/graph-reducer.cc (new file, 94 lines)
@@ -0,0 +1,94 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/graph-reducer.h"

#include <functional>

#include "src/compiler/graph-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

GraphReducer::GraphReducer(Graph* graph)
    : graph_(graph), reducers_(Reducers::allocator_type(graph->zone())) {}


static bool NodeIdIsLessThan(const Node* node, NodeId id) {
  return node->id() < id;
}


void GraphReducer::ReduceNode(Node* node) {
  Reducers::iterator skip = reducers_.end();
  static const unsigned kMaxAttempts = 16;
  bool reduce = true;
  for (unsigned attempts = 0; attempts <= kMaxAttempts; ++attempts) {
    if (!reduce) return;
    reduce = false;  // Assume we don't need to rerun any reducers.
    int before = graph_->NodeCount();
    for (Reducers::iterator i = reducers_.begin(); i != reducers_.end(); ++i) {
      if (i == skip) continue;  // Skip this reducer.
      Reduction reduction = (*i)->Reduce(node);
      Node* replacement = reduction.replacement();
      if (replacement == NULL) {
        // No change from this reducer.
      } else if (replacement == node) {
        // {replacement == node} represents an in-place reduction.
        // Rerun all the reducers except the current one for this node,
        // as now there may be more opportunities for reduction.
        reduce = true;
        skip = i;
        break;
      } else {
        if (node == graph_->start()) graph_->SetStart(replacement);
        if (node == graph_->end()) graph_->SetEnd(replacement);
        // If {node} was replaced by an old node, unlink {node} and assume that
        // {replacement} was already reduced and finish.
        if (replacement->id() < before) {
          node->RemoveAllInputs();
          node->ReplaceUses(replacement);
          return;
        }
        // Otherwise, {node} was replaced by a new node. Replace all old uses of
        // {node} with {replacement}. New nodes created by this reduction can
        // use {node}.
        node->ReplaceUsesIf(
            std::bind2nd(std::ptr_fun(&NodeIdIsLessThan), before), replacement);
        // Unlink {node} if it's no longer used.
        if (node->uses().empty()) node->RemoveAllInputs();
        // Rerun all the reductions on the {replacement}.
        skip = reducers_.end();
        node = replacement;
        reduce = true;
        break;
      }
    }
  }
}


// A helper class to reuse the node traversal algorithm.
struct GraphReducerVisitor V8_FINAL : public NullNodeVisitor {
  explicit GraphReducerVisitor(GraphReducer* reducer) : reducer_(reducer) {}
  GenericGraphVisit::Control Post(Node* node) {
    reducer_->ReduceNode(node);
    return GenericGraphVisit::CONTINUE;
  }
  GraphReducer* reducer_;
};


void GraphReducer::ReduceGraph() {
  GraphReducerVisitor visitor(this);
  // Perform a post-order reduction of all nodes starting from the end.
  graph()->VisitNodeInputsFromEnd(&visitor);
}


// TODO(titzer): partial graph reductions.
}
}
}  // namespace v8::internal::compiler

src/compiler/graph-reducer.h (new file, 77 lines)
@@ -0,0 +1,77 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_GRAPH_REDUCER_H_
#define V8_COMPILER_GRAPH_REDUCER_H_

#include <list>

#include "src/zone-allocator.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
class Graph;
class Node;


// Represents the result of trying to reduce a node in the graph.
class Reduction V8_FINAL {
 public:
  explicit Reduction(Node* replacement = NULL) : replacement_(replacement) {}

  Node* replacement() const { return replacement_; }
  bool Changed() const { return replacement() != NULL; }

 private:
  Node* replacement_;
};


// A reducer can reduce or simplify a given node based on its operator and
// inputs. This class functions as an extension point for the graph reducer:
// language-specific reductions (e.g. reduction based on types or constant
// folding of low-level operators) can be integrated into the graph reduction
// phase.
class Reducer {
 public:
  virtual ~Reducer() {}

  // Try to reduce a node if possible.
  virtual Reduction Reduce(Node* node) = 0;

  // Helper functions for subclasses to produce reductions for a node.
  static Reduction NoChange() { return Reduction(); }
  static Reduction Replace(Node* node) { return Reduction(node); }
  static Reduction Changed(Node* node) { return Reduction(node); }
};


// Performs an iterative reduction of a node graph.
class GraphReducer V8_FINAL {
 public:
  explicit GraphReducer(Graph* graph);

  Graph* graph() const { return graph_; }

  void AddReducer(Reducer* reducer) { reducers_.push_back(reducer); }

  // Reduce a single node.
  void ReduceNode(Node* node);
  // Reduce the whole graph.
  void ReduceGraph();

 private:
  typedef std::list<Reducer*, zone_allocator<Reducer*> > Reducers;

  Graph* graph_;
  Reducers reducers_;
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_GRAPH_REDUCER_H_
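
// For illustration only: a sketch of how the classes above compose, assuming
// a hypothetical reducer that never fires. A real reducer inspects node->op()
// and the node's inputs, then returns Changed(node) for an in-place rewrite,
// Replace(other) to substitute a different node, or NoChange() to let the
// GraphReducer move on.
class NopReducer V8_FINAL : public Reducer {
 public:
  virtual Reduction Reduce(Node* node) { return NoChange(); }
};

// Typical driver code, given a Graph* graph:
//   NopReducer nop;
//   GraphReducer reducer(graph);
//   reducer.AddReducer(&nop);
//   reducer.ReduceGraph();  // Post-order pass from the end node.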

src/compiler/graph-replay.cc (new file, 81 lines)
@@ -0,0 +1,81 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/graph-replay.h"

#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/graph-inl.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/compiler/operator-properties-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

#ifdef DEBUG

void GraphReplayPrinter::PrintReplay(Graph* graph) {
  GraphReplayPrinter replay;
  PrintF("  Node* nil = graph.NewNode(common_builder.Dead());\n");
  graph->VisitNodeInputsFromEnd(&replay);
}


GenericGraphVisit::Control GraphReplayPrinter::Pre(Node* node) {
  PrintReplayOpCreator(node->op());
  PrintF("  Node* n%d = graph.NewNode(op", node->id());
  for (int i = 0; i < node->InputCount(); ++i) {
    PrintF(", nil");
  }
  PrintF("); USE(n%d);\n", node->id());
  return GenericGraphVisit::CONTINUE;
}


void GraphReplayPrinter::PostEdge(Node* from, int index, Node* to) {
  PrintF("  n%d->ReplaceInput(%d, n%d);\n", from->id(), index, to->id());
}


void GraphReplayPrinter::PrintReplayOpCreator(Operator* op) {
  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
  const char* builder =
      IrOpcode::IsCommonOpcode(opcode) ? "common_builder" : "js_builder";
  const char* mnemonic = IrOpcode::IsCommonOpcode(opcode)
                             ? IrOpcode::Mnemonic(opcode)
                             : IrOpcode::Mnemonic(opcode) + 2;
  PrintF("  op = %s.%s(", builder, mnemonic);
  switch (opcode) {
    case IrOpcode::kParameter:
    case IrOpcode::kNumberConstant:
      PrintF("0");
      break;
    case IrOpcode::kLoad:
      PrintF("unique_name");
      break;
    case IrOpcode::kHeapConstant:
      PrintF("unique_constant");
      break;
    case IrOpcode::kPhi:
      PrintF("%d", op->InputCount());
      break;
    case IrOpcode::kEffectPhi:
      PrintF("%d", OperatorProperties::GetEffectInputCount(op));
      break;
    case IrOpcode::kLoop:
    case IrOpcode::kMerge:
      PrintF("%d", OperatorProperties::GetControlInputCount(op));
      break;
    default:
      break;
  }
  PrintF(");\n");
}

#endif  // DEBUG
}
}
}  // namespace v8::internal::compiler

src/compiler/graph-replay.h (new file, 44 lines)
@@ -0,0 +1,44 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_GRAPH_REPLAY_H_
#define V8_COMPILER_GRAPH_REPLAY_H_

#include "src/v8.h"

#include "src/compiler/node.h"

namespace v8 {
namespace internal {
namespace compiler {

class Graph;
class Operator;

// Helper class to print a full replay of a graph. This replay can be used to
// materialize the same graph within a C++ unit test and hence test subsequent
// optimization passes on a graph without going through the construction steps.
class GraphReplayPrinter : public NullNodeVisitor {
 public:
#ifdef DEBUG
  static void PrintReplay(Graph* graph);
#else
  static void PrintReplay(Graph* graph) {}
#endif

  GenericGraphVisit::Control Pre(Node* node);
  void PostEdge(Node* from, int index, Node* to);

 private:
  GraphReplayPrinter() {}

  static void PrintReplayOpCreator(Operator* op);

  DISALLOW_COPY_AND_ASSIGN(GraphReplayPrinter);
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_GRAPH_REPLAY_H_
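
// For illustration only: in a debug build the printer emits C++ statements
// that rebuild the same graph inside a unit test; in release builds the call
// is a no-op.
//
//   GraphReplayPrinter::PrintReplay(graph);  // graph is a Graph*.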

src/compiler/graph-visualizer.cc (new file, 260 lines)
@@ -0,0 +1,260 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/graph-visualizer.h"

#include "src/compiler/generic-algorithm.h"
#include "src/compiler/generic-node.h"
#include "src/compiler/generic-node-inl.h"
#include "src/compiler/graph.h"
#include "src/compiler/graph-inl.h"
#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node-properties-inl.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/ostreams.h"

namespace v8 {
namespace internal {
namespace compiler {

#define DEAD_COLOR "#999999"

class GraphVisualizer : public NullNodeVisitor {
 public:
  GraphVisualizer(OStream& os, const Graph* graph);  // NOLINT

  void Print();

  GenericGraphVisit::Control Pre(Node* node);
  GenericGraphVisit::Control PreEdge(Node* from, int index, Node* to);

 private:
  void AnnotateNode(Node* node);
  void PrintEdge(Node* from, int index, Node* to);

  NodeSet all_nodes_;
  NodeSet white_nodes_;
  bool use_to_def_;
  OStream& os_;
  const Graph* const graph_;

  DISALLOW_COPY_AND_ASSIGN(GraphVisualizer);
};


static Node* GetControlCluster(Node* node) {
  if (NodeProperties::IsBasicBlockBegin(node)) {
    return node;
  } else if (NodeProperties::GetControlInputCount(node) == 1) {
    Node* control = NodeProperties::GetControlInput(node, 0);
    return NodeProperties::IsBasicBlockBegin(control) ? control : NULL;
  } else {
    return NULL;
  }
}


GenericGraphVisit::Control GraphVisualizer::Pre(Node* node) {
  if (all_nodes_.count(node) == 0) {
    Node* control_cluster = GetControlCluster(node);
    if (control_cluster != NULL) {
      os_ << "  subgraph cluster_BasicBlock" << control_cluster->id() << " {\n";
    }
    os_ << "  ID" << node->id() << " [\n";
    AnnotateNode(node);
    os_ << "  ]\n";
    if (control_cluster != NULL) os_ << "  }\n";
    all_nodes_.insert(node);
    if (use_to_def_) white_nodes_.insert(node);
  }
  return GenericGraphVisit::CONTINUE;
}


GenericGraphVisit::Control GraphVisualizer::PreEdge(Node* from, int index,
                                                    Node* to) {
  if (use_to_def_) return GenericGraphVisit::CONTINUE;
  // When going from def to use, only consider white -> other edges, which are
  // the dead nodes that use live nodes. We're probably not interested in
  // dead nodes that only use other dead nodes.
  if (white_nodes_.count(from) > 0) return GenericGraphVisit::CONTINUE;
  return GenericGraphVisit::SKIP;
}


class Escaped {
 public:
  explicit Escaped(const OStringStream& os) : str_(os.c_str()) {}

  friend OStream& operator<<(OStream& os, const Escaped& e) {
    for (const char* s = e.str_; *s != '\0'; ++s) {
      if (needs_escape(*s)) os << "\\";
      os << *s;
    }
    return os;
  }

 private:
  static bool needs_escape(char ch) {
    switch (ch) {
      case '>':
      case '<':
      case '|':
      case '}':
      case '{':
        return true;
      default:
        return false;
    }
  }

  const char* const str_;
};


static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
  if (from->opcode() == IrOpcode::kPhi ||
      from->opcode() == IrOpcode::kEffectPhi) {
    Node* control = NodeProperties::GetControlInput(from, 0);
    return control->opcode() != IrOpcode::kMerge && control != to && index != 0;
  } else if (from->opcode() == IrOpcode::kLoop) {
    return index != 0;
  } else {
    return false;
  }
}


void GraphVisualizer::AnnotateNode(Node* node) {
  if (!use_to_def_) {
    os_ << "    style=\"filled\"\n"
        << "    fillcolor=\"" DEAD_COLOR "\"\n";
  }

  os_ << "    shape=\"record\"\n";
  switch (node->opcode()) {
    case IrOpcode::kEnd:
    case IrOpcode::kDead:
    case IrOpcode::kStart:
      os_ << "    style=\"diagonals\"\n";
      break;
    case IrOpcode::kMerge:
    case IrOpcode::kIfTrue:
    case IrOpcode::kIfFalse:
    case IrOpcode::kLoop:
      os_ << "    style=\"rounded\"\n";
      break;
    default:
      break;
  }

  OStringStream label;
  label << *node->op();
  os_ << "    label=\"{{#" << node->id() << ":" << Escaped(label);

  InputIter i = node->inputs().begin();
  for (int j = NodeProperties::GetValueInputCount(node); j > 0; ++i, j--) {
    os_ << "|<I" << i.index() << ">#" << (*i)->id();
  }
  for (int j = NodeProperties::GetContextInputCount(node); j > 0; ++i, j--) {
    os_ << "|<I" << i.index() << ">X #" << (*i)->id();
  }
  for (int j = NodeProperties::GetEffectInputCount(node); j > 0; ++i, j--) {
    os_ << "|<I" << i.index() << ">E #" << (*i)->id();
  }

  if (!use_to_def_ || NodeProperties::IsBasicBlockBegin(node) ||
      GetControlCluster(node) == NULL) {
    for (int j = NodeProperties::GetControlInputCount(node); j > 0; ++i, j--) {
      os_ << "|<I" << i.index() << ">C #" << (*i)->id();
    }
  }
  os_ << "}";

  if (FLAG_trace_turbo_types && !NodeProperties::IsControl(node)) {
    Bounds bounds = NodeProperties::GetBounds(node);
    OStringStream upper;
    bounds.upper->PrintTo(upper);
    OStringStream lower;
    bounds.lower->PrintTo(lower);
    os_ << "|" << Escaped(upper) << "|" << Escaped(lower);
  }
  os_ << "}\"\n";
}


void GraphVisualizer::PrintEdge(Node* from, int index, Node* to) {
  bool unconstrained = IsLikelyBackEdge(from, index, to);
  os_ << "  ID" << from->id();
  if (all_nodes_.count(to) == 0) {
    os_ << ":I" << index << ":n -> DEAD_INPUT";
  } else if (NodeProperties::IsBasicBlockBegin(from) ||
             GetControlCluster(from) == NULL ||
             (NodeProperties::GetControlInputCount(from) > 0 &&
              NodeProperties::GetControlInput(from) != to)) {
    os_ << ":I" << index << ":n -> ID" << to->id() << ":s";
    if (unconstrained) os_ << " [constraint=false,style=dotted]";
  } else {
    os_ << " -> ID" << to->id() << ":s [color=transparent"
        << (unconstrained ? ", constraint=false" : "") << "]";
  }
  os_ << "\n";
}


void GraphVisualizer::Print() {
  os_ << "digraph D {\n"
      << "  node [fontsize=8,height=0.25]\n"
      << "  rankdir=\"BT\"\n"
      << "  \n";

  // Make sure all nodes have been output before writing out the edges.
  use_to_def_ = true;
  // TODO(svenpanne) Remove the need for the const_casts.
  const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this);
  white_nodes_.insert(const_cast<Graph*>(graph_)->start());

  // Visit all uses of white nodes.
  use_to_def_ = false;
  GenericGraphVisit::Visit<GraphVisualizer, NodeUseIterationTraits<Node> >(
      const_cast<Graph*>(graph_), white_nodes_.begin(), white_nodes_.end(),
      this);

  os_ << "  DEAD_INPUT [\n"
      << "    style=\"filled\"\n"
      << "    fillcolor=\"" DEAD_COLOR "\"\n"
      << "  ]\n"
      << "\n";

  // With all the nodes written, add the edges.
  for (NodeSetIter i = all_nodes_.begin(); i != all_nodes_.end(); ++i) {
    Node::Inputs inputs = (*i)->inputs();
    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
         ++iter) {
      PrintEdge(iter.edge().from(), iter.edge().index(), iter.edge().to());
    }
  }
  os_ << "}\n";
}


GraphVisualizer::GraphVisualizer(OStream& os, const Graph* graph)  // NOLINT
    : all_nodes_(NodeSet::key_compare(),
                 NodeSet::allocator_type(graph->zone())),
      white_nodes_(NodeSet::key_compare(),
                   NodeSet::allocator_type(graph->zone())),
      use_to_def_(true),
      os_(os),
      graph_(graph) {}


OStream& operator<<(OStream& os, const AsDOT& ad) {
  GraphVisualizer(os, &ad.graph).Print();
  return os;
}
}
}
}  // namespace v8::internal::compiler

src/compiler/graph-visualizer.h (new file, 29 lines)
@@ -0,0 +1,29 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_GRAPH_VISUALIZER_H_
#define V8_COMPILER_GRAPH_VISUALIZER_H_

#include "src/v8.h"

namespace v8 {
namespace internal {

class OStream;

namespace compiler {

class Graph;

struct AsDOT {
  explicit AsDOT(const Graph& g) : graph(g) {}
  const Graph& graph;
};

OStream& operator<<(OStream& os, const AsDOT& ad);
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_GRAPH_VISUALIZER_H_
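
// For illustration only: AsDOT is a stream wrapper, so dumping a graph in
// GraphViz format is a single insertion. OFStream is assumed here to be the
// file-backed OStream from src/ostreams.h; it is not defined in this commit.
//
//   OFStream os(stdout);
//   os << AsDOT(*graph);  // graph is a Graph*.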

src/compiler/graph.cc (new file, 53 lines)
@@ -0,0 +1,53 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/graph.h"

#include "src/compiler/common-operator.h"
#include "src/compiler/generic-node-inl.h"
#include "src/compiler/graph-inl.h"
#include "src/compiler/node.h"
#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node-properties-inl.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/operator-properties-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

Graph::Graph(Zone* zone)
    : GenericGraph(zone), decorators_(DecoratorVector::allocator_type(zone)) {}


Node* Graph::NewNode(Operator* op, int input_count, Node** inputs) {
  ASSERT(op->InputCount() <= input_count);
  Node* result = Node::New(this, input_count, inputs);
  result->Initialize(op);
  for (DecoratorVector::iterator i = decorators_.begin();
       i != decorators_.end(); ++i) {
    (*i)->Decorate(result);
  }
  return result;
}


void Graph::ChangeOperator(Node* node, Operator* op) { node->set_op(op); }


void Graph::DeleteNode(Node* node) {
#if DEBUG
  // Nodes can't be deleted if they have uses.
  Node::Uses::iterator use_iterator(node->uses().begin());
  ASSERT(use_iterator == node->uses().end());
#endif

#if DEBUG
  memset(node, 0xDE, sizeof(Node));
#endif
}
}
}
}  // namespace v8::internal::compiler

src/compiler/graph.h (new file, 97 lines)
@@ -0,0 +1,97 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_GRAPH_H_
#define V8_COMPILER_GRAPH_H_

#include <map>
#include <set>

#include "src/compiler/generic-algorithm.h"
#include "src/compiler/node.h"
#include "src/compiler/node-aux-data.h"
#include "src/compiler/source-position.h"

namespace v8 {
namespace internal {
namespace compiler {

class GraphDecorator;


class Graph : public GenericGraph<Node> {
 public:
  explicit Graph(Zone* zone);

  // Base implementation used by all factory methods.
  Node* NewNode(Operator* op, int input_count, Node** inputs);

  // Factories for nodes with static input counts.
  Node* NewNode(Operator* op) {
    return NewNode(op, 0, static_cast<Node**>(NULL));
  }
  Node* NewNode(Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
  Node* NewNode(Operator* op, Node* n1, Node* n2) {
    Node* nodes[] = {n1, n2};
    return NewNode(op, ARRAY_SIZE(nodes), nodes);
  }
  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3) {
    Node* nodes[] = {n1, n2, n3};
    return NewNode(op, ARRAY_SIZE(nodes), nodes);
  }
  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
    Node* nodes[] = {n1, n2, n3, n4};
    return NewNode(op, ARRAY_SIZE(nodes), nodes);
  }
  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
                Node* n5) {
    Node* nodes[] = {n1, n2, n3, n4, n5};
    return NewNode(op, ARRAY_SIZE(nodes), nodes);
  }
  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, Node* n5,
                Node* n6) {
    Node* nodes[] = {n1, n2, n3, n4, n5, n6};
    return NewNode(op, ARRAY_SIZE(nodes), nodes);
  }

  void ChangeOperator(Node* node, Operator* op);
  void DeleteNode(Node* node);

  template <class Visitor>
  void VisitNodeUsesFrom(Node* node, Visitor* visitor);

  template <class Visitor>
  void VisitNodeUsesFromStart(Visitor* visitor);

  template <class Visitor>
  void VisitNodeInputsFromEnd(Visitor* visitor);

  void AddDecorator(GraphDecorator* decorator) {
    decorators_.push_back(decorator);
  }

  void RemoveDecorator(GraphDecorator* decorator) {
    DecoratorVector::iterator it =
        std::find(decorators_.begin(), decorators_.end(), decorator);
    ASSERT(it != decorators_.end());
    decorators_.erase(it, it + 1);
  }

 private:
  typedef std::vector<GraphDecorator*, zone_allocator<GraphDecorator*> >
      DecoratorVector;
  DecoratorVector decorators_;
};


class GraphDecorator : public ZoneObject {
 public:
  virtual ~GraphDecorator() {}
  virtual void Decorate(Node* node) = 0;
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_GRAPH_H_
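
// For illustration only: a sketch of a GraphDecorator, which Graph::NewNode
// calls for every node it creates. CountingDecorator and its counting
// behaviour are hypothetical, not part of this commit.
class CountingDecorator V8_FINAL : public GraphDecorator {
 public:
  CountingDecorator() : count_(0) {}
  virtual void Decorate(Node* node) { ++count_; }
  int count() const { return count_; }

 private:
  int count_;
};

// Typical usage, given a Graph* graph (decorators are ZoneObjects, so they
// are allocated in the graph's zone):
//   CountingDecorator* counter = new (graph->zone()) CountingDecorator();
//   graph->AddDecorator(counter);
//   ... build nodes ...
//   graph->RemoveDecorator(counter);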

src/compiler/ia32/code-generator-ia32.cc (new file, 929 lines)
@@ -0,0 +1,929 @@
// Copyright 2013 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/compiler/code-generator.h"
|
||||
|
||||
#include "src/compiler/code-generator-impl.h"
|
||||
#include "src/compiler/gap-resolver.h"
|
||||
#include "src/compiler/node-matchers.h"
|
||||
#include "src/compiler/node-properties-inl.h"
|
||||
#include "src/ia32/assembler-ia32.h"
|
||||
#include "src/ia32/macro-assembler-ia32.h"
|
||||
#include "src/scopes.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
namespace compiler {
|
||||
|
||||
#define __ masm()->
|
||||
|
||||
|
||||
// Adds IA-32 specific methods for decoding operands.
|
||||
class IA32OperandConverter : public InstructionOperandConverter {
|
||||
public:
|
||||
IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
|
||||
: InstructionOperandConverter(gen, instr) {}
|
||||
|
||||
Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
|
||||
|
||||
Immediate InputImmediate(int index) {
|
||||
return ToImmediate(instr_->InputAt(index));
|
||||
}
|
||||
|
||||
Operand OutputOperand() { return ToOperand(instr_->Output()); }
|
||||
|
||||
Operand TempOperand(int index) { return ToOperand(instr_->TempAt(index)); }
|
||||
|
||||
Operand ToOperand(InstructionOperand* op, int extra = 0) {
|
||||
if (op->IsRegister()) {
|
||||
ASSERT(extra == 0);
|
||||
return Operand(ToRegister(op));
|
||||
} else if (op->IsDoubleRegister()) {
|
||||
ASSERT(extra == 0);
|
||||
return Operand(ToDoubleRegister(op));
|
||||
}
|
||||
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
|
||||
// The linkage computes where all spill slots are located.
|
||||
FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
|
||||
return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
|
||||
}
|
||||
|
||||
Operand HighOperand(InstructionOperand* op) {
|
||||
ASSERT(op->IsDoubleStackSlot());
|
||||
return ToOperand(op, kPointerSize);
|
||||
}
|
||||
|
||||
Immediate ToImmediate(InstructionOperand* operand) {
|
||||
Constant constant = ToConstant(operand);
|
||||
switch (constant.type()) {
|
||||
case Constant::kInt32:
|
||||
return Immediate(constant.ToInt32());
|
||||
case Constant::kFloat64:
|
||||
return Immediate(
|
||||
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
|
||||
case Constant::kExternalReference:
|
||||
return Immediate(constant.ToExternalReference());
|
||||
case Constant::kHeapObject:
|
||||
return Immediate(constant.ToHeapObject());
|
||||
case Constant::kInt64:
|
||||
break;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return Immediate(-1);
|
||||
}
|
||||
|
||||
Operand MemoryOperand(int* first_input) {
|
||||
const int offset = *first_input;
|
||||
switch (AddressingModeField::decode(instr_->opcode())) {
|
||||
case kMode_MR1I:
|
||||
*first_input += 2;
|
||||
return Operand(InputRegister(offset + 0), InputRegister(offset + 1),
|
||||
times_1,
|
||||
0); // TODO(dcarney): K != 0
|
||||
case kMode_MRI:
|
||||
*first_input += 2;
|
||||
return Operand::ForRegisterPlusImmediate(InputRegister(offset + 0),
|
||||
InputImmediate(offset + 1));
|
||||
case kMode_MI:
|
||||
*first_input += 1;
|
||||
return Operand(InputImmediate(offset + 0));
|
||||
default:
|
||||
UNREACHABLE();
|
||||
return Operand(no_reg);
|
||||
}
|
||||
}
|
||||
|
||||
Operand MemoryOperand() {
|
||||
int first_input = 0;
|
||||
return MemoryOperand(&first_input);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
static bool HasImmediateInput(Instruction* instr, int index) {
|
||||
return instr->InputAt(index)->IsImmediate();
|
||||
}
|
||||
|
||||
|
||||
// Assembles an instruction after register allocation, producing machine code.
|
||||
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
||||
IA32OperandConverter i(this, instr);
|
||||
|
||||
switch (ArchOpcodeField::decode(instr->opcode())) {
|
||||
case kArchJmp:
|
||||
__ jmp(code()->GetLabel(i.InputBlock(0)));
|
||||
break;
|
||||
case kArchNop:
|
||||
// don't emit code for nops.
|
||||
break;
|
||||
case kArchRet:
|
||||
AssembleReturn();
|
||||
break;
|
||||
case kArchDeoptimize: {
|
||||
int deoptimization_id = MiscField::decode(instr->opcode());
|
||||
BuildTranslation(instr, deoptimization_id);
|
||||
|
||||
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
|
||||
isolate(), deoptimization_id, Deoptimizer::LAZY);
|
||||
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
|
||||
break;
|
||||
}
|
||||
case kIA32Add:
|
||||
if (HasImmediateInput(instr, 1)) {
|
||||
__ add(i.InputOperand(0), i.InputImmediate(1));
|
||||
} else {
|
||||
__ add(i.InputRegister(0), i.InputOperand(1));
|
||||
}
|
||||
break;
|
||||
case kIA32And:
|
||||
if (HasImmediateInput(instr, 1)) {
|
||||
__ and_(i.InputOperand(0), i.InputImmediate(1));
|
||||
} else {
|
||||
__ and_(i.InputRegister(0), i.InputOperand(1));
|
||||
}
|
||||
break;
|
||||
case kIA32Cmp:
|
||||
if (HasImmediateInput(instr, 1)) {
|
||||
__ cmp(i.InputOperand(0), i.InputImmediate(1));
|
||||
} else {
|
||||
__ cmp(i.InputRegister(0), i.InputOperand(1));
|
||||
}
|
||||
break;
|
||||
case kIA32Test:
|
||||
if (HasImmediateInput(instr, 1)) {
|
||||
__ test(i.InputOperand(0), i.InputImmediate(1));
|
||||
} else {
|
||||
__ test(i.InputRegister(0), i.InputOperand(1));
|
||||
}
|
||||
break;
|
||||
case kIA32Imul:
|
||||
if (HasImmediateInput(instr, 1)) {
|
||||
__ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
|
||||
} else {
|
||||
__ imul(i.OutputRegister(), i.InputOperand(1));
|
||||
}
|
||||
break;
|
||||
case kIA32Idiv:
|
||||
__ cdq();
|
||||
__ idiv(i.InputOperand(1));
|
||||
break;
|
||||
case kIA32Udiv:
|
||||
__ xor_(edx, edx);
|
||||
__ div(i.InputOperand(1));
|
||||
break;
|
||||
case kIA32Not:
|
||||
__ not_(i.OutputOperand());
|
||||
break;
|
||||
case kIA32Neg:
|
||||
__ neg(i.OutputOperand());
|
||||
break;
|
||||
case kIA32Or:
|
||||
if (HasImmediateInput(instr, 1)) {
|
||||
__ or_(i.InputOperand(0), i.InputImmediate(1));
|
||||
} else {
|
||||
__ or_(i.InputRegister(0), i.InputOperand(1));
|
||||
}
|
||||
break;
|
||||
case kIA32Xor:
|
||||
if (HasImmediateInput(instr, 1)) {
|
||||
__ xor_(i.InputOperand(0), i.InputImmediate(1));
|
||||
} else {
|
||||
__ xor_(i.InputRegister(0), i.InputOperand(1));
|
||||
}
|
||||
break;
|
||||
case kIA32Sub:
|
||||
if (HasImmediateInput(instr, 1)) {
|
||||
__ sub(i.InputOperand(0), i.InputImmediate(1));
|
||||
} else {
|
||||
__ sub(i.InputRegister(0), i.InputOperand(1));
|
||||
}
|
||||
break;
|
||||
case kIA32Shl:
|
||||
if (HasImmediateInput(instr, 1)) {
|
||||
__ shl(i.OutputRegister(), i.InputInt5(1));
|
||||
} else {
|
||||
__ shl_cl(i.OutputRegister());
|
||||
}
|
||||
break;
|
    case kIA32Shr:
      if (HasImmediateInput(instr, 1)) {
        __ shr(i.OutputRegister(), i.InputInt5(1));
      } else {
        __ shr_cl(i.OutputRegister());
      }
      break;
    case kIA32Sar:
      if (HasImmediateInput(instr, 1)) {
        __ sar(i.OutputRegister(), i.InputInt5(1));
      } else {
        __ sar_cl(i.OutputRegister());
      }
      break;
    case kIA32Push:
      if (HasImmediateInput(instr, 0)) {
        __ push(i.InputImmediate(0));
      } else {
        __ push(i.InputOperand(0));
      }
      break;
    case kIA32CallCodeObject: {
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ call(Operand(reg, entry));
      }
      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);

      bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
      if (lazy_deopt) {
        RecordLazyDeoptimizationEntry(instr);
      }
      AddNopForSmiCodeInlining();
      break;
    }
    case kIA32CallAddress:
      if (HasImmediateInput(instr, 0)) {
        // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY.
        __ call(reinterpret_cast<byte*>(i.InputInt32(0)),
                RelocInfo::RUNTIME_ENTRY);
      } else {
        __ call(i.InputRegister(0));
      }
      break;
    case kPopStack: {
      int words = MiscField::decode(instr->opcode());
      __ add(esp, Immediate(kPointerSize * words));
      break;
    }
    case kIA32CallJSFunction: {
      Register func = i.InputRegister(0);

      // TODO(jarin) The load of the context should be separated from the call.
      __ mov(esi, FieldOperand(func, JSFunction::kContextOffset));
      __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));

      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);
      RecordLazyDeoptimizationEntry(instr);
      break;
    }
    case kSSEFloat64Cmp:
      __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mod: {
      // TODO(dcarney): alignment is wrong.
      __ sub(esp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(esp, 0));
      __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(esp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use eax.
      __ fnstsw_ax();
      __ sahf();
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(esp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64ToInt32:
      __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      break;
    case kSSEInt32ToFloat64:
      __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSELoad:
      __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kSSEStore: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movsd(operand, i.InputDoubleRegister(index));
      break;
    }
    case kIA32LoadWord8:
      __ movzx_b(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32StoreWord8: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ mov_b(operand, i.InputRegister(index));
      break;
    }
    case kIA32StoreWord8I: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ mov_b(operand, i.InputInt8(index));
      break;
    }
    case kIA32LoadWord16:
      __ movzx_w(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32StoreWord16: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ mov_w(operand, i.InputRegister(index));
      break;
    }
    case kIA32StoreWord16I: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ mov_w(operand, i.InputInt16(index));
      break;
    }
    case kIA32LoadWord32:
      __ mov(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32StoreWord32: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ mov(operand, i.InputRegister(index));
      break;
    }
    case kIA32StoreWord32I: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ mov(operand, i.InputImmediate(index));
      break;
    }
    case kIA32StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ mov(Operand(object, index, times_1, 0), value);
      __ lea(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
  }
}

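// Note (illustrative, not part of this commit's code): the lazy-deopt bit
// decoded above via MiscField::decode(instr->opcode()) is the same MiscField
// bit the instruction selector sets when it builds the call opcode, e.g.:
//
//   opcode = kIA32CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
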
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  IA32OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
  }
  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
  __ bind(&done);
}

// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  IA32OperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value.
  Label check;
  Register reg = i.OutputRegister();
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
  }
  __ bind(&check);
  if (reg.is_byte_register()) {
    // setcc for byte registers (al, bl, cl, dl).
    __ setcc(cc, reg);
    __ movzx_b(reg, reg);
  } else {
    // Emit a branch to set a register to either 1 or 0.
    Label set;
    __ j(cc, &set, Label::kNear);
    __ mov(reg, Immediate(0));
    __ jmp(&done, Label::kNear);
    __ bind(&set);
    __ mov(reg, Immediate(1));
  }
  __ bind(&done);
}

// The calling convention for JSFunctions on IA32 passes arguments on the
// stack and the JSFunction and context in EDI and ESI, respectively, thus
// the steps of the call look as follows:

// --{ before the call instruction }--------------------------------------------
//                                                      | caller frame |
//                                                      ^ esp           ^ ebp

// --{ push arguments and setup ESI, EDI }--------------------------------------
//                                    | args + receiver | caller frame |
//                                    ^ esp                             ^ ebp
//              [edi = JSFunction, esi = context]

// --{ call [edi + kCodeEntryOffset] }------------------------------------------
//                              | RET | args + receiver | caller frame |
//                              ^ esp                                   ^ ebp

// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
//                         | FP | RET | args + receiver | caller frame |
//                         ^ esp                                        ^ ebp

// --{ mov ebp, esp }-----------------------------------------------------------
//                         | FP | RET | args + receiver | caller frame |
//                         ^ ebp,esp

// --{ push esi }---------------------------------------------------------------
//                   | CTX | FP | RET | args + receiver | caller frame |
//                   ^esp  ^ ebp

// --{ push edi }---------------------------------------------------------------
//             | FNC | CTX | FP | RET | args + receiver | caller frame |
//             ^esp        ^ ebp

// --{ subi esp, #N }-----------------------------------------------------------
// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
// ^esp                       ^ ebp

// =={ body of called function }================================================

// =={ epilogue of called function }============================================
// --{ mov esp, ebp }-----------------------------------------------------------
//                         | FP | RET | args + receiver | caller frame |
//                         ^ esp,ebp

// --{ pop ebp }-----------------------------------------------------------------
//                         |    | RET | args + receiver | caller frame |
//                              ^ esp                                   ^ ebp

// --{ ret #A+1 }----------------------------------------------------------------
//                                                      |    | caller frame |
//                                                           ^ esp           ^ ebp

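// For orientation only (not part of this commit's code): a minimal sketch of
// the call site this diagram describes, assuming edi already holds the
// JSFunction, mirroring the kIA32CallJSFunction case above:
//
//   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));  // context
//   __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));    // code entry
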
// Runtime function calls are accomplished by doing a stub call to the
// CEntryStub (a real code object). On IA32 this passes arguments on the
// stack, the number of arguments in EAX, the address of the runtime function
// in EBX, and the context in ESI.

// --{ before the call instruction }--------------------------------------------
//                                                      | caller frame |
//                                                      ^ esp           ^ ebp

// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
//                                    | args + receiver | caller frame |
//                                    ^ esp                             ^ ebp
//              [eax = #args, ebx = runtime function, esi = context]

// --{ call #CEntryStub }-------------------------------------------------------
//                              | RET | args + receiver | caller frame |
//                              ^ esp                                   ^ ebp

// =={ body of runtime function }===============================================

// --{ runtime returns }--------------------------------------------------------
//                                                      | caller frame |
//                                                      ^ esp           ^ ebp

// Other custom linkages (e.g. for calling directly into and out of C++) may
// need to save callee-saved registers on the stack, which is done in the
// function prologue of generated code.

// --{ before the call instruction }--------------------------------------------
//                                                      | caller frame |
//                                                      ^ esp           ^ ebp

// --{ set up arguments in registers and on the stack }-------------------------
//                                               | args | caller frame |
//                                               ^ esp                  ^ ebp
//                  [r0 = arg0, r1 = arg1, ...]

// --{ call code }--------------------------------------------------------------
//                                         | RET | args | caller frame |
//                                         ^ esp                        ^ ebp

// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
//                                    | FP | RET | args | caller frame |
//                                    ^ esp                             ^ ebp

// --{ mov ebp, esp }-----------------------------------------------------------
//                                    | FP | RET | args | caller frame |
//                                    ^ ebp,esp

// --{ save registers }---------------------------------------------------------
//                             | regs | FP | RET | args | caller frame |
//                             ^ esp         ^ ebp

// --{ subi esp, #N }-----------------------------------------------------------
//              | callee frame | regs | FP | RET | args | caller frame |
//              ^esp                        ^ ebp

// =={ body of called function }================================================

// =={ epilogue of called function }============================================
// --{ restore registers }------------------------------------------------------
//                             | regs | FP | RET | args | caller frame |
//                             ^ esp         ^ ebp

// --{ mov esp, ebp }-----------------------------------------------------------
//                                    | FP | RET | args | caller frame |
//                                    ^ esp,ebp

// --{ pop ebp }----------------------------------------------------------------
//                                         | RET | args | caller frame |
//                                         ^ esp                        ^ ebp

void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  Frame* frame = code_->frame();
  int stack_slots = frame->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    // Assemble a prologue similar to the cdecl calling convention.
    __ push(ebp);
    __ mov(ebp, esp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ push(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with
    // the global proxy when called as functions (without an explicit
    // receiver object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize));
      __ cmp(ecx, isolate()->factory()->undefined_value());
      __ j(not_equal, &ok, Label::kNear);
      __ mov(ecx, GlobalObjectOperand());
      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
      __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx);
      __ bind(&ok);
    }

  } else {
    __ StubPrologue();
    frame->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ sub(esp, Immediate(stack_slots * kPointerSize));
  }
}

void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ add(esp, Immediate(stack_slots * kPointerSize));
      }
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ pop(Register::from_code(i));
        }
      }
      __ pop(ebp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
      __ pop(ebp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
    __ pop(ebp);       // Pop caller's frame pointer.
    int pop_count =
        descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
    __ ret(pop_count * kPointerSize);
  }
}

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  IA32OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ mov(dst, src);
  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mov(dst, src);
    } else {
      Operand dst = g.ToOperand(destination);
      __ push(src);
      __ pop(dst);
    }
  } else if (source->IsConstant()) {
    Constant src_constant = g.ToConstant(source);
    if (src_constant.type() == Constant::kHeapObject) {
      Handle<HeapObject> src = src_constant.ToHeapObject();
      if (destination->IsRegister()) {
        Register dst = g.ToRegister(destination);
        __ LoadHeapObject(dst, src);
      } else {
        ASSERT(destination->IsStackSlot());
        Operand dst = g.ToOperand(destination);
        AllowDeferredHandleDereference embedding_raw_address;
        if (isolate()->heap()->InNewSpace(*src)) {
          __ PushHeapObject(src);
          __ pop(dst);
        } else {
          __ mov(dst, src);
        }
      }
    } else if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mov(dst, g.ToImmediate(source));
    } else if (destination->IsStackSlot()) {
      Operand dst = g.ToOperand(destination);
      __ mov(dst, g.ToImmediate(source));
    } else {
      double v = g.ToDouble(source);
      uint64_t int_val = BitCast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, v);
      } else {
        ASSERT(destination->IsDoubleStackSlot());
        Operand dst0 = g.ToOperand(destination);
        Operand dst1 = g.HighOperand(destination);
        __ mov(dst0, Immediate(lower));
        __ mov(dst1, Immediate(upper));
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  IA32OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = g.ToRegister(source);
    Register dst = g.ToRegister(destination);
    __ xchg(dst, src);
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    // Register-memory.
    __ xchg(g.ToRegister(source), g.ToOperand(destination));
  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ push(dst);
    __ push(src);
    __ pop(dst);
    __ pop(src);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    // (Note: the condition tests the destination; as originally written it
    // tested the source twice and could never be taken.)
    XMMRegister reg = g.ToDoubleRegister(source);
    Operand other = g.ToOperand(destination);
    __ movsd(xmm0, other);
    __ movsd(other, reg);
    __ movaps(reg, xmm0);
  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    // Double-width memory-to-memory.
    Operand src0 = g.ToOperand(source);
    Operand src1 = g.HighOperand(source);
    Operand dst0 = g.ToOperand(destination);
    Operand dst1 = g.HighOperand(destination);
    __ movsd(xmm0, dst0);  // Save destination in xmm0.
    __ push(src0);         // Then use stack to copy source to destination.
    __ pop(dst0);
    __ push(src1);
    __ pop(dst1);
    __ movsd(src0, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }

#undef __

#ifdef DEBUG

// Checks whether the code between start_pc and end_pc is a no-op.
bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
                                            int end_pc) {
  if (start_pc + 1 != end_pc) {
    return false;
  }
  return *(code->instruction_start() + start_pc) ==
         v8::internal::Assembler::kNopByte;
}

#endif  // DEBUG
}
}
}  // namespace v8::internal::compiler
86 src/compiler/ia32/instruction-codes-ia32.h Normal file
@ -0,0 +1,86 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
#define V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_

namespace v8 {
namespace internal {
namespace compiler {

// IA32-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(IA32Add)                       \
  V(IA32And)                       \
  V(IA32Cmp)                       \
  V(IA32Test)                      \
  V(IA32Or)                        \
  V(IA32Xor)                       \
  V(IA32Sub)                       \
  V(IA32Imul)                      \
  V(IA32Idiv)                      \
  V(IA32Udiv)                      \
  V(IA32Not)                       \
  V(IA32Neg)                       \
  V(IA32Shl)                       \
  V(IA32Shr)                       \
  V(IA32Sar)                       \
  V(IA32Push)                      \
  V(IA32CallCodeObject)            \
  V(IA32CallAddress)               \
  V(PopStack)                      \
  V(IA32CallJSFunction)            \
  V(SSEFloat64Cmp)                 \
  V(SSEFloat64Add)                 \
  V(SSEFloat64Sub)                 \
  V(SSEFloat64Mul)                 \
  V(SSEFloat64Div)                 \
  V(SSEFloat64Mod)                 \
  V(SSEFloat64ToInt32)             \
  V(SSEInt32ToFloat64)             \
  V(SSELoad)                       \
  V(SSEStore)                      \
  V(IA32LoadWord8)                 \
  V(IA32StoreWord8)                \
  V(IA32StoreWord8I)               \
  V(IA32LoadWord16)                \
  V(IA32StoreWord16)               \
  V(IA32StoreWord16I)              \
  V(IA32LoadWord32)                \
  V(IA32StoreWord32)               \
  V(IA32StoreWord32I)              \
  V(IA32StoreWriteBarrier)


// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MR = [register]
// MI = [immediate]
// MRN = [register + register * N in {1, 2, 4, 8}]
// MRI = [register + immediate]
// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MI)   /* [K] */                    \
  V(MR)   /* [%r0] */                  \
  V(MRI)  /* [%r0 + K] */              \
  V(MR1I) /* [%r0 + %r1 * 1 + K] */    \
  V(MR2I) /* [%r0 + %r1 * 2 + K] */    \
  V(MR4I) /* [%r0 + %r1 * 4 + K] */    \
  V(MR8I) /* [%r0 + %r1 * 8 + K] */

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
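A sketch of how a backend consumes these lists (mirroring the IA32 instruction selector later in this commit; the snippet is illustrative, not part of the header): the addressing mode is OR-ed into the opcode via AddressingModeField from the shared instruction-codes.h, and the memory operands follow in the instruction's input list in the order the mode dictates.

    // Illustrative only: emit a word32 load with [%base + #index] addressing.
    Emit(kIA32LoadWord32 | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
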
504 src/compiler/ia32/instruction-selector-ia32.cc Normal file
@ -0,0 +1,504 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds IA32-specific methods for generating operands.
class IA32OperandGenerator V8_FINAL : public OperandGenerator {
 public:
  explicit IA32OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* UseByteRegister(Node* node) {
    // TODO(dcarney): relax constraint.
    return UseFixed(node, edx);
  }

  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
      case IrOpcode::kNumberConstant:
      case IrOpcode::kExternalConstant:
        return true;
      case IrOpcode::kHeapConstant: {
        // Constants in new space cannot be used as immediates in V8 because
        // the GC does not scan code objects when collecting the new
        // generation.
        Handle<HeapObject> value = ValueOf<Handle<HeapObject> >(node->op());
        return !isolate()->heap()->InNewSpace(*value);
      }
      default:
        return false;
    }
  }
};


void InstructionSelector::VisitLoad(Node* node) {
  MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
  IA32OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  InstructionOperand* output = rep == kMachineFloat64
                                   ? g.DefineAsDoubleRegister(node)
                                   : g.DefineAsRegister(node);
  ArchOpcode opcode;
  switch (rep) {
    case kMachineFloat64:
      opcode = kSSELoad;
      break;
    case kMachineWord8:
      opcode = kIA32LoadWord8;
      break;
    case kMachineWord16:
      opcode = kIA32LoadWord16;
      break;
    case kMachineTagged:  // Fall through.
    case kMachineWord32:
      opcode = kIA32LoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(base)) {
    if (Int32Matcher(index).Is(0)) {  // load [#base + #0]
      Emit(opcode | AddressingModeField::encode(kMode_MI), output,
           g.UseImmediate(base));
    } else {  // load [#base + %index]
      Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
           g.UseRegister(index), g.UseImmediate(base));
    }
  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
    Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
         g.UseRegister(base), g.UseImmediate(index));
  } else {  // load [%base + %index + K]
    Emit(opcode | AddressingModeField::encode(kMode_MR1I), output,
         g.UseRegister(base), g.UseRegister(index));
  }
  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
}


void InstructionSelector::VisitStore(Node* node) {
  IA32OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineRepresentation rep = store_rep.rep;
  if (store_rep.write_barrier_kind == kFullWriteBarrier) {
    ASSERT_EQ(kMachineTagged, rep);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    //                and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
    Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx),
         g.UseFixed(index, ecx), g.UseFixed(value, edx), ARRAY_SIZE(temps),
         temps);
    return;
  }
  ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
  bool is_immediate = false;
  InstructionOperand* val;
  if (rep == kMachineFloat64) {
    val = g.UseDoubleRegister(value);
  } else {
    is_immediate = g.CanBeImmediate(value);
    if (is_immediate) {
      val = g.UseImmediate(value);
    } else if (rep == kMachineWord8) {
      val = g.UseByteRegister(value);
    } else {
      val = g.UseRegister(value);
    }
  }
  ArchOpcode opcode;
  switch (rep) {
    case kMachineFloat64:
      opcode = kSSEStore;
      break;
    case kMachineWord8:
      opcode = is_immediate ? kIA32StoreWord8I : kIA32StoreWord8;
      break;
    case kMachineWord16:
      opcode = is_immediate ? kIA32StoreWord16I : kIA32StoreWord16;
      break;
    case kMachineTagged:  // Fall through.
    case kMachineWord32:
      opcode = is_immediate ? kIA32StoreWord32I : kIA32StoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(base)) {
    if (Int32Matcher(index).Is(0)) {  // store [#base], %|#value
      Emit(opcode | AddressingModeField::encode(kMode_MI), NULL,
           g.UseImmediate(base), val);
    } else {  // store [#base + %index], %|#value
      Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
           g.UseRegister(index), g.UseImmediate(base), val);
    }
  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(base), g.UseImmediate(index), val);
  } else {  // store [%base + %index], %|#value
    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
         g.UseRegister(base), g.UseRegister(index), val);
  }
  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
}


// Shared routine for multiple binary operations.
static inline void VisitBinop(InstructionSelector* selector, Node* node,
                              ArchOpcode opcode) {
  IA32OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  // TODO(turbofan): match complex addressing modes.
  // TODO(turbofan): if commutative, pick the non-live-in operand as the left
  // as this might be the last use and therefore its register can be reused.
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
                   g.UseImmediate(right));
  } else if (g.CanBeImmediate(left) &&
             node->op()->HasProperty(Operator::kCommutative)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(right),
                   g.UseImmediate(left));
  } else {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}


void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kIA32And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kIA32Or);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  IA32OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kIA32Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kIA32Xor);
  }
}


// Shared routine for multiple shift operations.
static inline void VisitShift(InstructionSelector* selector, Node* node,
                              ArchOpcode opcode) {
  IA32OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // TODO(turbofan): assembler only supports some addressing modes for shifts.
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    Int32BinopMatcher m(node);
    if (m.right().IsWord32And()) {
      Int32BinopMatcher mright(right);
      if (mright.right().Is(0x1F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, ecx));
  }
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitShift(this, node, kIA32Shl);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitShift(this, node, kIA32Shr);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitShift(this, node, kIA32Sar);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop(this, node, kIA32Add);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  IA32OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
  } else {
    VisitBinop(this, node, kIA32Sub);
  }
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  IA32OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (g.CanBeImmediate(right)) {
    Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
         g.UseImmediate(right));
  } else if (g.CanBeImmediate(left)) {
    Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(right),
         g.UseImmediate(left));
  } else {
    // TODO(turbofan): select better left operand.
    Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
         g.Use(right));
  }
}


static inline void VisitDiv(InstructionSelector* selector, Node* node,
                            ArchOpcode opcode) {
  IA32OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(edx)};
  size_t temp_count = ARRAY_SIZE(temps);
  selector->Emit(opcode, g.DefineAsFixed(node, eax),
                 g.UseFixed(node->InputAt(0), eax),
                 g.UseUnique(node->InputAt(1)), temp_count, temps);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kIA32Idiv);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitDiv(this, node, kIA32Udiv);
}


static inline void VisitMod(InstructionSelector* selector, Node* node,
                            ArchOpcode opcode) {
  IA32OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
  size_t temp_count = ARRAY_SIZE(temps);
  selector->Emit(opcode, g.DefineAsFixed(node, edx),
                 g.UseFixed(node->InputAt(0), eax),
                 g.UseUnique(node->InputAt(1)), temp_count, temps);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kIA32Idiv);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitMod(this, node, kIA32Udiv);
}


void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node),
       g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
       g.UseDoubleRegister(node->InputAt(0)),
       g.UseDoubleRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
       g.UseDoubleRegister(node->InputAt(0)),
       g.UseDoubleRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
       g.UseDoubleRegister(node->InputAt(0)),
       g.UseDoubleRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
       g.UseDoubleRegister(node->InputAt(0)),
       g.UseDoubleRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand* temps[] = {g.TempRegister(eax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseDoubleRegister(node->InputAt(0)),
       g.UseDoubleRegister(node->InputAt(1)), 1, temps);
}


// Shared routine for multiple compare operations.
static inline void VisitCompare(InstructionSelector* selector,
                                InstructionCode opcode,
                                InstructionOperand* left,
                                InstructionOperand* right,
                                FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  if (cont->IsBranch()) {
    selector->Emit(cont->Encode(opcode), NULL, left, right,
                   g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    ASSERT(cont->IsSet());
    // TODO(titzer): Needs byte register.
    selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
                   left, right);
  }
}


// Shared routine for multiple word compare operations.
static inline void VisitWordCompare(InstructionSelector* selector, Node* node,
                                    InstructionCode opcode,
                                    FlagsContinuation* cont, bool commutative) {
  IA32OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right)) {
    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
  } else if (g.CanBeImmediate(left)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
  }
}


void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kIA32Cmp, cont, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kIA32Test, cont, true);
    default:
      break;
  }

  IA32OperandGenerator g(this);
  VisitCompare(this, kIA32Test, g.Use(node), g.TempImmediate(-1), cont);
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kIA32Cmp, cont, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  IA32OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kSSEFloat64Cmp, g.UseDoubleRegister(left), g.Use(right),
               cont);
}


void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  IA32OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
  CallBuffer buffer(zone(), descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(call, &buffer, true, true, continuation,
                       deoptimization);

  // Push any stack arguments.
  for (int i = buffer.pushed_count - 1; i >= 0; --i) {
    Node* input = buffer.pushed_nodes[i];
    // TODO(titzer): handle pushing double parameters.
    Emit(kIA32Push, NULL,
         g.CanBeImmediate(input) ? g.UseImmediate(input) : g.Use(input));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      bool lazy_deopt = descriptor->CanLazilyDeoptimize();
      opcode = kIA32CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
      break;
    }
    case CallDescriptor::kCallAddress:
      opcode = kIA32CallAddress;
      break;
    case CallDescriptor::kCallJSFunction:
      opcode = kIA32CallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.output_count, buffer.outputs,
           buffer.fixed_and_control_count(), buffer.fixed_and_control_args);

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    ASSERT(continuation != NULL);
    call_instr->MarkAsControl();
  }

  // Caller clean up of stack for C-style calls.
  if (descriptor->kind() == CallDescriptor::kCallAddress &&
      buffer.pushed_count > 0) {
    ASSERT(deoptimization == NULL && continuation == NULL);
    Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL);
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
62 src/compiler/ia32/linkage-ia32.cc Normal file
@ -0,0 +1,62 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/assembler.h"
#include "src/code-stubs.h"
#include "src/compiler/linkage.h"
#include "src/compiler/linkage-impl.h"
#include "src/zone.h"

namespace v8 {
namespace internal {
namespace compiler {

struct LinkageHelperTraits {
  static Register ReturnValueReg() { return eax; }
  static Register ReturnValue2Reg() { return edx; }
  static Register JSCallFunctionReg() { return edi; }
  static Register ContextReg() { return esi; }
  static Register RuntimeCallFunctionReg() { return ebx; }
  static Register RuntimeCallArgCountReg() { return eax; }
  static RegList CCalleeSaveRegisters() {
    return esi.bit() | edi.bit() | ebx.bit();
  }
  static Register CRegisterParameter(int i) { return no_reg; }
  static int CRegisterParametersLength() { return 0; }
};


CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
  return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
      zone, parameter_count);
}


CallDescriptor* Linkage::GetRuntimeCallDescriptor(
    Runtime::FunctionId function, int parameter_count,
    Operator::Property properties,
    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
  return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
      zone, function, parameter_count, properties, can_deoptimize);
}


CallDescriptor* Linkage::GetStubCallDescriptor(
    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
  return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
      this->info_->zone(), descriptor, stack_parameter_count);
}


CallDescriptor* Linkage::GetSimplifiedCDescriptor(
    Zone* zone, int num_params, MachineRepresentation return_type,
    const MachineRepresentation* param_types) {
  return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
      zone, num_params, return_type, param_types);
}
}
}
}  // namespace v8::internal::compiler
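As a usage sketch (a hypothetical call site, not from this commit): the traits above are what make a JS call descriptor built through this linkage place the callee in edi and the context in esi on IA32.

    // Illustrative only: build a call descriptor for a JS call with two
    // parameters, using the zone that owns the compilation's allocations.
    CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(2, zone);
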
114 src/compiler/instruction-codes.h Normal file
@ -0,0 +1,114 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_INSTRUCTION_CODES_H_
#define V8_COMPILER_INSTRUCTION_CODES_H_

#if V8_TARGET_ARCH_ARM
#include "src/compiler/arm/instruction-codes-arm.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/compiler/arm64/instruction-codes-arm64.h"
#elif V8_TARGET_ARCH_IA32
#include "src/compiler/ia32/instruction-codes-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/x64/instruction-codes-x64.h"
#else
#error "Unsupported target architecture."
#endif
#include "src/utils.h"

namespace v8 {
namespace internal {

class OStream;

namespace compiler {

// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define ARCH_OPCODE_LIST(V) \
  V(ArchDeoptimize)         \
  V(ArchJmp)                \
  V(ArchNop)                \
  V(ArchRet)                \
  TARGET_ARCH_OPCODE_LIST(V)

enum ArchOpcode {
#define DECLARE_ARCH_OPCODE(Name) k##Name,
  ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
#undef DECLARE_ARCH_OPCODE
#define COUNT_ARCH_OPCODE(Name) +1
  kLastArchOpcode = -1 ARCH_OPCODE_LIST(COUNT_ARCH_OPCODE)
#undef COUNT_ARCH_OPCODE
};

OStream& operator<<(OStream& os, const ArchOpcode& ao);

// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
#define ADDRESSING_MODE_LIST(V) \
  V(None)                       \
  TARGET_ADDRESSING_MODE_LIST(V)

enum AddressingMode {
#define DECLARE_ADDRESSING_MODE(Name) kMode_##Name,
  ADDRESSING_MODE_LIST(DECLARE_ADDRESSING_MODE)
#undef DECLARE_ADDRESSING_MODE
#define COUNT_ADDRESSING_MODE(Name) +1
  kLastAddressingMode = -1 ADDRESSING_MODE_LIST(COUNT_ADDRESSING_MODE)
#undef COUNT_ADDRESSING_MODE
};

OStream& operator<<(OStream& os, const AddressingMode& am);

// The mode of the flags continuation (see below).
enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, kFlags_set = 2 };

OStream& operator<<(OStream& os, const FlagsMode& fm);

// The condition of the flags continuation (see below).
enum FlagsCondition {
  kEqual,
  kNotEqual,
  kSignedLessThan,
  kSignedGreaterThanOrEqual,
  kSignedLessThanOrEqual,
  kSignedGreaterThan,
  kUnsignedLessThan,
  kUnsignedGreaterThanOrEqual,
  kUnsignedLessThanOrEqual,
  kUnsignedGreaterThan,
  kUnorderedEqual,
  kUnorderedNotEqual,
  kUnorderedLessThan,
  kUnorderedGreaterThanOrEqual,
  kUnorderedLessThanOrEqual,
  kUnorderedGreaterThan
};

OStream& operator<<(OStream& os, const FlagsCondition& fc);

// The InstructionCode is an opaque, target-specific integer that encodes
// what code to emit for an instruction in the code generator. It is not
// interesting to the register allocator, as the inputs and flags on the
// instructions specify everything of interest.
typedef int32_t InstructionCode;

// Helpers for encoding / decoding InstructionCode into the fields needed
// for code generation. We encode the instruction, addressing mode, and flags
// continuation into a single InstructionCode which is stored as part of
// the instruction.
typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
typedef BitField<AddressingMode, 7, 4> AddressingModeField;
typedef BitField<FlagsMode, 11, 2> FlagsModeField;
typedef BitField<FlagsCondition, 13, 4> FlagsConditionField;
typedef BitField<int, 13, 19> MiscField;

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_CODES_H_
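A minimal sketch (assuming V8's standard BitField encode/decode helpers from src/utils.h) of how a single InstructionCode packs and unpacks the fields declared above:

    // Illustrative only: pack an opcode plus flags, then pull the fields out.
    InstructionCode code = ArchOpcodeField::encode(kArchJmp) |
                           AddressingModeField::encode(kMode_None) |
                           FlagsModeField::encode(kFlags_none);
    ArchOpcode arch_opcode = ArchOpcodeField::decode(code);   // kArchJmp
    AddressingMode mode = AddressingModeField::decode(code);  // kMode_None
    FlagsMode flags = FlagsModeField::decode(code);           // kFlags_none
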
352 src/compiler/instruction-selector-impl.h Normal file
@ -0,0 +1,352 @@
// Copyright 2014 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
|
||||
#define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
|
||||
|
||||
#include "src/compiler/instruction.h"
|
||||
#include "src/compiler/instruction-selector.h"
|
||||
#include "src/compiler/linkage.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
namespace compiler {
|
||||
|
||||
// A helper class for the instruction selector that simplifies construction of
|
||||
// Operands. This class implements a base for architecture-specific helpers.
|
||||
class OperandGenerator {
|
||||
public:
|
||||
explicit OperandGenerator(InstructionSelector* selector)
|
||||
: selector_(selector) {}
|
||||
|
||||
InstructionOperand* DefineAsRegister(Node* node) {
|
||||
return Define(node, new (zone())
|
||||
UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
|
||||
}
|
||||
|
||||
InstructionOperand* DefineAsDoubleRegister(Node* node) {
|
||||
return Define(node, new (zone())
|
||||
UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
|
||||
}
|
||||
|
||||
InstructionOperand* DefineSameAsFirst(Node* result) {
|
||||
return Define(result, new (zone())
|
||||
UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT));
|
||||
}
|
||||
|
||||
InstructionOperand* DefineAsFixed(Node* node, Register reg) {
|
||||
return Define(node, new (zone())
|
||||
UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
|
||||
Register::ToAllocationIndex(reg)));
|
||||
}
|
||||
|
||||
InstructionOperand* DefineAsFixedDouble(Node* node, DoubleRegister reg) {
|
||||
return Define(node, new (zone())
|
||||
UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
|
||||
DoubleRegister::ToAllocationIndex(reg)));
|
||||
}
|
||||
|
||||
InstructionOperand* DefineAsConstant(Node* node) {
|
||||
sequence()->AddConstant(node->id(), ToConstant(node));
|
||||
return ConstantOperand::Create(node->id(), zone());
|
||||
}
|
||||
|
||||
InstructionOperand* DefineAsLocation(Node* node, LinkageLocation location) {
|
||||
return Define(node, ToUnallocatedOperand(location));
|
||||
}
|
||||
|
||||
InstructionOperand* Use(Node* node) {
|
||||
return Use(node,
|
||||
new (zone()) UnallocatedOperand(
|
||||
UnallocatedOperand::ANY, UnallocatedOperand::USED_AT_START));
|
||||
}
|
||||
|
||||
InstructionOperand* UseRegister(Node* node) {
|
||||
return Use(node, new (zone())
|
||||
        UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
                           UnallocatedOperand::USED_AT_START));
  }

  InstructionOperand* UseDoubleRegister(Node* node) {
    return Use(node, new (zone())
        UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
                           UnallocatedOperand::USED_AT_START));
  }

  // Use register or operand for the node. If a register is chosen, it won't
  // alias any temporary or output registers.
  InstructionOperand* UseUnique(Node* node) {
    return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::ANY));
  }

  // Use a unique register for the node that does not alias any temporary or
  // output registers.
  InstructionOperand* UseUniqueRegister(Node* node) {
    return Use(node, new (zone())
        UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
  }

  // Use a unique double register for the node that does not alias any temporary
  // or output double registers.
  InstructionOperand* UseUniqueDoubleRegister(Node* node) {
    return Use(node, new (zone())
        UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
  }

  InstructionOperand* UseFixed(Node* node, Register reg) {
    return Use(node, new (zone())
        UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
                           Register::ToAllocationIndex(reg)));
  }

  InstructionOperand* UseFixedDouble(Node* node, DoubleRegister reg) {
    return Use(node, new (zone())
        UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
                           DoubleRegister::ToAllocationIndex(reg)));
  }

  InstructionOperand* UseImmediate(Node* node) {
    int index = sequence()->AddImmediate(ToConstant(node));
    return ImmediateOperand::Create(index, zone());
  }

  InstructionOperand* UseLocation(Node* node, LinkageLocation location) {
    return Use(node, ToUnallocatedOperand(location));
  }

  InstructionOperand* TempRegister() {
    UnallocatedOperand* op =
        new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
                                        UnallocatedOperand::USED_AT_START);
    op->set_virtual_register(sequence()->NextVirtualRegister());
    return op;
  }

  InstructionOperand* TempDoubleRegister() {
    UnallocatedOperand* op =
        new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
                                        UnallocatedOperand::USED_AT_START);
    op->set_virtual_register(sequence()->NextVirtualRegister());
    sequence()->MarkAsDouble(op->virtual_register());
    return op;
  }

  InstructionOperand* TempRegister(Register reg) {
    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
                                           Register::ToAllocationIndex(reg));
  }

  InstructionOperand* TempImmediate(int32_t imm) {
    int index = sequence()->AddImmediate(Constant(imm));
    return ImmediateOperand::Create(index, zone());
  }

  InstructionOperand* Label(BasicBlock* block) {
    // TODO(bmeurer): We misuse ImmediateOperand here.
    return ImmediateOperand::Create(block->id(), zone());
  }

 protected:
  Graph* graph() const { return selector()->graph(); }
  InstructionSelector* selector() const { return selector_; }
  InstructionSequence* sequence() const { return selector()->sequence(); }
  Isolate* isolate() const { return zone()->isolate(); }
  Zone* zone() const { return selector()->instruction_zone(); }

 private:
  static Constant ToConstant(const Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
        return Constant(ValueOf<int32_t>(node->op()));
      case IrOpcode::kInt64Constant:
        return Constant(ValueOf<int64_t>(node->op()));
      case IrOpcode::kNumberConstant:
      case IrOpcode::kFloat64Constant:
        return Constant(ValueOf<double>(node->op()));
      case IrOpcode::kExternalConstant:
        return Constant(ValueOf<ExternalReference>(node->op()));
      case IrOpcode::kHeapConstant:
        return Constant(ValueOf<Handle<HeapObject> >(node->op()));
      default:
        break;
    }
    UNREACHABLE();
    return Constant(static_cast<int32_t>(0));
  }

  UnallocatedOperand* Define(Node* node, UnallocatedOperand* operand) {
    ASSERT_NOT_NULL(node);
    ASSERT_NOT_NULL(operand);
    operand->set_virtual_register(node->id());
    return operand;
  }

  UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
    selector_->MarkAsUsed(node);
    return Define(node, operand);
  }

  UnallocatedOperand* ToUnallocatedOperand(LinkageLocation location) {
    if (location.location_ == LinkageLocation::ANY_REGISTER) {
      return new (zone())
          UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER);
    }
    if (location.location_ < 0) {
      return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
                                             location.location_);
    }
    if (location.rep_ == kMachineFloat64) {
      return new (zone()) UnallocatedOperand(
          UnallocatedOperand::FIXED_DOUBLE_REGISTER, location.location_);
    }
    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
                                           location.location_);
  }

  InstructionSelector* selector_;
};

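// A minimal sketch (not part of this file) of how a platform backend might
// combine these helpers when lowering a node. kArchAdd is a placeholder
// opcode, and UseRegister/DefineAsRegister are assumed from the helpers
// declared earlier in this class:
//
//   void VisitAdd(InstructionSelector* selector, Node* node) {
//     OperandGenerator g(selector);
//     selector->Emit(kArchAdd, g.DefineAsRegister(node),
//                    g.UseRegister(node->InputAt(0)),
//                    g.UseRegister(node->InputAt(1)));
//   }
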
// The flags continuation is a way to combine a branch or a materialization
// of a boolean value with an instruction that sets the flags register.
// The whole instruction is treated as a unit by the register allocator, and
// thus no spills or moves can be introduced between the flags-setting
// instruction and the branch or set it should be combined with.
class FlagsContinuation V8_FINAL {
 public:
  // Creates a new flags continuation from the given condition and true/false
  // blocks.
  FlagsContinuation(FlagsCondition condition, BasicBlock* true_block,
                    BasicBlock* false_block)
      : mode_(kFlags_branch),
        condition_(condition),
        true_block_(true_block),
        false_block_(false_block) {
    ASSERT_NOT_NULL(true_block);
    ASSERT_NOT_NULL(false_block);
  }

  // Creates a new flags continuation from the given condition and result node.
  FlagsContinuation(FlagsCondition condition, Node* result)
      : mode_(kFlags_set), condition_(condition), result_(result) {
    ASSERT_NOT_NULL(result);
  }

  bool IsNone() const { return mode_ == kFlags_none; }
  bool IsBranch() const { return mode_ == kFlags_branch; }
  bool IsSet() const { return mode_ == kFlags_set; }
  FlagsCondition condition() const { return condition_; }
  Node* result() const {
    ASSERT(IsSet());
    return result_;
  }
  BasicBlock* true_block() const {
    ASSERT(IsBranch());
    return true_block_;
  }
  BasicBlock* false_block() const {
    ASSERT(IsBranch());
    return false_block_;
  }

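  // Note: Negate() below relies on the FlagsCondition enumerators being laid
  // out in adjacent negation pairs (kEqual/kNotEqual, kSignedLessThan/
  // kSignedGreaterThanOrEqual, and so on, as the Commute() table suggests),
  // so toggling the lowest bit of the enumerator yields the negated condition.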
  void Negate() { condition_ = static_cast<FlagsCondition>(condition_ ^ 1); }

  void Commute() {
    switch (condition_) {
      case kEqual:
      case kNotEqual:
        return;
      case kSignedLessThan:
        condition_ = kSignedGreaterThan;
        return;
      case kSignedGreaterThanOrEqual:
        condition_ = kSignedLessThanOrEqual;
        return;
      case kSignedLessThanOrEqual:
        condition_ = kSignedGreaterThanOrEqual;
        return;
      case kSignedGreaterThan:
        condition_ = kSignedLessThan;
        return;
      case kUnsignedLessThan:
        condition_ = kUnsignedGreaterThan;
        return;
      case kUnsignedGreaterThanOrEqual:
        condition_ = kUnsignedLessThanOrEqual;
        return;
      case kUnsignedLessThanOrEqual:
        condition_ = kUnsignedGreaterThanOrEqual;
        return;
      case kUnsignedGreaterThan:
        condition_ = kUnsignedLessThan;
        return;
      case kUnorderedEqual:
      case kUnorderedNotEqual:
        return;
      case kUnorderedLessThan:
        condition_ = kUnorderedGreaterThan;
        return;
      case kUnorderedGreaterThanOrEqual:
        condition_ = kUnorderedLessThanOrEqual;
        return;
      case kUnorderedLessThanOrEqual:
        condition_ = kUnorderedGreaterThanOrEqual;
        return;
      case kUnorderedGreaterThan:
        condition_ = kUnorderedLessThan;
        return;
    }
    UNREACHABLE();
  }

  void OverwriteAndNegateIfEqual(FlagsCondition condition) {
    bool negate = condition_ == kEqual;
    condition_ = condition;
    if (negate) Negate();
  }

  void SwapBlocks() { std::swap(true_block_, false_block_); }

  // Encodes this flags continuation into the given opcode.
  InstructionCode Encode(InstructionCode opcode) {
    return opcode | FlagsModeField::encode(mode_) |
           FlagsConditionField::encode(condition_);
  }

 private:
  FlagsMode mode_;
  FlagsCondition condition_;
  Node* result_;             // Only valid if mode_ == kFlags_set.
  BasicBlock* true_block_;   // Only valid if mode_ == kFlags_branch.
  BasicBlock* false_block_;  // Only valid if mode_ == kFlags_branch.
};

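// A rough usage sketch: an architecture backend builds a continuation, folds
// its mode and condition into the opcode via Encode(), and passes the labels
// as extra inputs. kArchCmp32 is a placeholder opcode, not one defined here:
//
//   FlagsContinuation cont(kEqual, tbranch, fbranch);
//   InstructionCode opcode = cont.Encode(kArchCmp32);
//   selector->Emit(opcode, NULL, g.UseRegister(left), g.Use(right),
//                  g.Label(cont.true_block()), g.Label(cont.false_block()));
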
// An internal helper class for generating the operands to calls.
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
struct CallBuffer {
  CallBuffer(Zone* zone, CallDescriptor* descriptor);

  int output_count;
  CallDescriptor* descriptor;
  Node** output_nodes;
  InstructionOperand** outputs;
  InstructionOperand** fixed_and_control_args;
  int fixed_count;
  Node** pushed_nodes;
  int pushed_count;

  int input_count() { return descriptor->InputCount(); }

  int control_count() { return descriptor->CanLazilyDeoptimize() ? 2 : 0; }

  int fixed_and_control_count() { return fixed_count + control_count(); }
};

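// For illustration (numbers assumed, not taken from a real descriptor): a
// call with one return value, two stack arguments, and lazy deoptimization
// support would end up with output_count == 1 and pushed_count == 2, while
// fixed_and_control_args holds the callee, the register arguments, and the
// two control labels -- fixed_and_control_count() entries in total.
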
}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_

873
src/compiler/instruction-selector.cc
Normal file
@@ -0,0 +1,873 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector.h"

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

InstructionSelector::InstructionSelector(InstructionSequence* sequence,
                                         SourcePositionTable* source_positions)
    : zone_(sequence->isolate()),
      sequence_(sequence),
      source_positions_(source_positions),
      current_block_(NULL),
      instructions_(InstructionDeque::allocator_type(zone())),
      used_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())) {}


void InstructionSelector::SelectInstructions() {
  // Mark the inputs of all phis in loop headers as used.
  BasicBlockVector* blocks = schedule()->rpo_order();
  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
    BasicBlock* block = *i;
    if (!block->IsLoopHeader()) continue;
    ASSERT_NE(0, block->PredecessorCount());
    ASSERT_NE(1, block->PredecessorCount());
    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
         ++j) {
      Node* phi = *j;
      if (phi->opcode() != IrOpcode::kPhi) continue;

      // Mark all inputs as used.
      Node::Inputs inputs = phi->inputs();
      for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
        MarkAsUsed(*k);
      }
    }
  }

  // Visit each basic block in post order.
  for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
    VisitBlock(*i);
  }

  // Schedule the selected instructions.
  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
    BasicBlock* block = *i;
    size_t end = block->code_end_;
    size_t start = block->code_start_;
    sequence()->StartBlock(block);
    while (start-- > end) {
      sequence()->AddInstruction(instructions_[start], block);
    }
    sequence()->EndBlock(block);
  }
}

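// Note on the loop above: VisitBlock() appends each block's instructions to
// instructions_ in reverse, so code_start_ is the larger index and code_end_
// the smaller one. The `while (start-- > end)` walk therefore reads the
// deque backwards and hands the instructions to the sequence in forward
// (assembly) order.
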
Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a,
                                       InstructionOperand* b, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b};
  size_t input_count = ARRAY_SIZE(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a,
                                       InstructionOperand* b,
                                       InstructionOperand* c, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b, c};
  size_t input_count = ARRAY_SIZE(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(
    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
    size_t temp_count, InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b, c, d};
  size_t input_count = ARRAY_SIZE(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(
    InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
    size_t input_count, InstructionOperand** inputs, size_t temp_count,
    InstructionOperand** temps) {
  Instruction* instr =
      Instruction::New(instruction_zone(), opcode, output_count, outputs,
                       input_count, inputs, temp_count, temps);
  return Emit(instr);
}


Instruction* InstructionSelector::Emit(Instruction* instr) {
  instructions_.push_back(instr);
  return instr;
}


bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
  return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
         block->deferred_ == current_block_->deferred_;
}


bool InstructionSelector::CanCover(Node* user, Node* node) const {
  return node->OwnedBy(user) &&
         schedule()->block(node) == schedule()->block(user);
}


bool InstructionSelector::IsUsed(Node* node) const {
  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
  NodeId id = node->id();
  ASSERT(id >= 0);
  ASSERT(id < static_cast<NodeId>(used_.size()));
  return used_[id];
}


void InstructionSelector::MarkAsUsed(Node* node) {
  ASSERT_NOT_NULL(node);
  NodeId id = node->id();
  ASSERT(id >= 0);
  ASSERT(id < static_cast<NodeId>(used_.size()));
  used_[id] = true;
}


bool InstructionSelector::IsDouble(const Node* node) const {
  ASSERT_NOT_NULL(node);
  return sequence()->IsDouble(node->id());
}


void InstructionSelector::MarkAsDouble(Node* node) {
  ASSERT_NOT_NULL(node);
  ASSERT(!IsReference(node));
  sequence()->MarkAsDouble(node->id());

  // Propagate "doubleness" throughout phis.
  for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
    Node* user = *i;
    if (user->opcode() != IrOpcode::kPhi) continue;
    if (IsDouble(user)) continue;
    MarkAsDouble(user);
  }
}

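// Note: the phi propagation above terminates because MarkAsDouble() only
// recurses into phi uses that are not yet marked (the IsDouble(user) check),
// so each virtual register is processed at most once; MarkAsReference()
// below follows the same scheme.
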
bool InstructionSelector::IsReference(const Node* node) const {
  ASSERT_NOT_NULL(node);
  return sequence()->IsReference(node->id());
}


void InstructionSelector::MarkAsReference(Node* node) {
  ASSERT_NOT_NULL(node);
  ASSERT(!IsDouble(node));
  sequence()->MarkAsReference(node->id());

  // Propagate "referenceness" throughout phis.
  for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
    Node* user = *i;
    if (user->opcode() != IrOpcode::kPhi) continue;
    if (IsReference(user)) continue;
    MarkAsReference(user);
  }
}


void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
                                               Node* node) {
  ASSERT_NOT_NULL(node);
  if (rep == kMachineFloat64) MarkAsDouble(node);
  if (rep == kMachineTagged) MarkAsReference(node);
}


// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d)
    : output_count(0),
      descriptor(d),
      output_nodes(zone->NewArray<Node*>(d->ReturnCount())),
      outputs(zone->NewArray<InstructionOperand*>(d->ReturnCount())),
      fixed_and_control_args(
          zone->NewArray<InstructionOperand*>(input_count() + control_count())),
      fixed_count(0),
      pushed_nodes(zone->NewArray<Node*>(input_count())),
      pushed_count(0) {
  if (d->ReturnCount() > 1) {
    memset(output_nodes, 0, sizeof(Node*) * d->ReturnCount());  // NOLINT
  }
  memset(pushed_nodes, 0, sizeof(Node*) * input_count());  // NOLINT
}


// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
                                               bool call_code_immediate,
                                               bool call_address_immediate,
                                               BasicBlock* cont_node,
                                               BasicBlock* deopt_node) {
  OperandGenerator g(this);
  ASSERT_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
  ASSERT_EQ(NodeProperties::GetValueInputCount(call), buffer->input_count());

  if (buffer->descriptor->ReturnCount() > 0) {
    // Collect the projections that represent multiple outputs from this call.
    if (buffer->descriptor->ReturnCount() == 1) {
      buffer->output_nodes[0] = call;
    } else {
      // Iterate over all uses of {call} and collect the projections into the
      // {result} buffer.
      for (UseIter i = call->uses().begin(); i != call->uses().end(); ++i) {
        if ((*i)->opcode() == IrOpcode::kProjection) {
          int index = OpParameter<int32_t>(*i);
          ASSERT_GE(index, 0);
          ASSERT_LT(index, buffer->descriptor->ReturnCount());
          ASSERT_EQ(NULL, buffer->output_nodes[index]);
          buffer->output_nodes[index] = *i;
        }
      }
    }

    // Filter out the outputs that aren't live because no projection uses them.
    for (int i = 0; i < buffer->descriptor->ReturnCount(); i++) {
      if (buffer->output_nodes[i] != NULL) {
        Node* output = buffer->output_nodes[i];
        LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
        MarkAsRepresentation(location.representation(), output);
        buffer->outputs[buffer->output_count++] =
            g.DefineAsLocation(output, location);
      }
    }
  }

  buffer->fixed_count = 1;  // First argument is always the callee.
  Node* callee = call->InputAt(0);
  switch (buffer->descriptor->kind()) {
    case CallDescriptor::kCallCodeObject:
      buffer->fixed_and_control_args[0] =
          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
              ? g.UseImmediate(callee)
              : g.UseRegister(callee);
      break;
    case CallDescriptor::kCallAddress:
      buffer->fixed_and_control_args[0] =
          (call_address_immediate &&
           (callee->opcode() == IrOpcode::kInt32Constant ||
            callee->opcode() == IrOpcode::kInt64Constant))
              ? g.UseImmediate(callee)
              : g.UseRegister(callee);
      break;
    case CallDescriptor::kCallJSFunction:
      buffer->fixed_and_control_args[0] =
          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0));
      break;
  }

  int input_count = buffer->input_count();

  // Split the arguments into pushed_nodes and fixed_args. Pushed arguments
  // require an explicit push instruction before the call and do not appear
  // as arguments to the call. Everything else ends up as an InstructionOperand
  // argument to the call.
  InputIter iter(call->inputs().begin());
  for (int index = 0; index < input_count; ++iter, ++index) {
    ASSERT(iter != call->inputs().end());
    ASSERT(index == iter.index());
    if (index == 0) continue;  // The first argument (callee) is already done.
    InstructionOperand* op =
        g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index));
    if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
      int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
      ASSERT(buffer->pushed_nodes[stack_index] == NULL);
      buffer->pushed_nodes[stack_index] = *iter;
      buffer->pushed_count++;
    } else {
      buffer->fixed_and_control_args[buffer->fixed_count] = op;
      buffer->fixed_count++;
    }
  }

  // If the call can deoptimize, we add the continuation and deoptimization
  // block labels.
  if (buffer->descriptor->CanLazilyDeoptimize()) {
    ASSERT(cont_node != NULL);
    ASSERT(deopt_node != NULL);
    buffer->fixed_and_control_args[buffer->fixed_count] = g.Label(cont_node);
    buffer->fixed_and_control_args[buffer->fixed_count + 1] =
        g.Label(deopt_node);
  } else {
    ASSERT(cont_node == NULL);
    ASSERT(deopt_node == NULL);
  }

  ASSERT(input_count == (buffer->fixed_count + buffer->pushed_count));
}


void InstructionSelector::VisitBlock(BasicBlock* block) {
  ASSERT_EQ(NULL, current_block_);
  current_block_ = block;
  size_t current_block_end = instructions_.size();

  // Generate code for the block control "top down", but schedule the code
  // "bottom up".
  VisitControl(block);
  std::reverse(instructions_.begin() + current_block_end, instructions_.end());

  // Visit code in reverse control flow order, because architecture-specific
  // matching may cover more than one node at a time.
  for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
       ++i) {
    Node* node = *i;
    if (!IsUsed(node)) continue;
    // Generate code for this node "top down", but schedule the code "bottom
    // up".
    size_t current_node_end = instructions_.size();
    VisitNode(node);
    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
  }

  // We're done with the block.
  // TODO(bmeurer): We should not mutate the schedule.
  block->code_end_ = current_block_end;
  block->code_start_ = instructions_.size();

  current_block_ = NULL;
}

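// The reverse-and-read-backwards idea used by VisitBlock() and
// SelectInstructions(), reduced to a standalone sketch on plain ints
// (illustrative only, not part of this file):
//
//   #include <algorithm>
//   #include <vector>
//
//   int main() {
//     std::vector<int> out;
//     // Units are visited in reverse program order; each unit's code is
//     // appended forwards and then reversed in place.
//     size_t start = out.size();
//     out.push_back(1); out.push_back(2);  // unit A (later in the program)
//     std::reverse(out.begin() + start, out.end());  // stored as 2, 1
//     start = out.size();
//     out.push_back(3);                    // unit B (earlier in the program)
//     std::reverse(out.begin() + start, out.end());  // stored as 3
//     // Reading out back-to-front yields 3, 1, 2: unit B first, then unit A
//     // with its instructions in forward order -- the same recovery the
//     // `while (start-- > end)` loop performs.
//     return 0;
//   }
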
static inline void CheckNoPhis(const BasicBlock* block) {
#ifdef DEBUG
  // Branch targets should not have phis.
  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
    const Node* node = *i;
    CHECK_NE(IrOpcode::kPhi, node->opcode());
  }
#endif
}


void InstructionSelector::VisitControl(BasicBlock* block) {
  Node* input = block->control_input_;
  switch (block->control_) {
    case BasicBlockData::kGoto:
      return VisitGoto(block->SuccessorAt(0));
    case BasicBlockData::kBranch: {
      ASSERT_EQ(IrOpcode::kBranch, input->opcode());
      BasicBlock* tbranch = block->SuccessorAt(0);
      BasicBlock* fbranch = block->SuccessorAt(1);
      // SSA deconstruction requires targets of branches not to have phis.
      // Edge split form guarantees this property, but is more strict.
      CheckNoPhis(tbranch);
      CheckNoPhis(fbranch);
      if (tbranch == fbranch) return VisitGoto(tbranch);
      return VisitBranch(input, tbranch, fbranch);
    }
    case BasicBlockData::kReturn: {
      // If the result itself is a return, return its input.
      Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
                        ? input->InputAt(0)
                        : input;
      return VisitReturn(value);
    }
    case BasicBlockData::kThrow:
      return VisitThrow(input);
    case BasicBlockData::kDeoptimize:
      return VisitDeoptimization(input);
    case BasicBlockData::kCall: {
      BasicBlock* deoptimization = block->SuccessorAt(0);
      BasicBlock* continuation = block->SuccessorAt(1);
      VisitCall(input, continuation, deoptimization);
      break;
    }
    case BasicBlockData::kNone: {
      // TODO(titzer): exit block doesn't have control.
      ASSERT(input == NULL);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void InstructionSelector::VisitNode(Node* node) {
  ASSERT_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
  SourcePosition source_position = source_positions_->GetSourcePosition(node);
  if (!source_position.IsUnknown()) {
    ASSERT(!source_position.IsInvalid());
    if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
      Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
    }
  }
  switch (node->opcode()) {
    case IrOpcode::kStart:
    case IrOpcode::kLoop:
    case IrOpcode::kEnd:
    case IrOpcode::kBranch:
    case IrOpcode::kIfTrue:
    case IrOpcode::kIfFalse:
    case IrOpcode::kEffectPhi:
    case IrOpcode::kMerge:
    case IrOpcode::kProjection:
    case IrOpcode::kLazyDeoptimization:
    case IrOpcode::kContinuation:
      // No code needed for these graph artifacts.
      return;
    case IrOpcode::kPhi:
      return VisitPhi(node);
    case IrOpcode::kParameter: {
      int index = OpParameter<int>(node);
      MachineRepresentation rep = linkage()
                                      ->GetIncomingDescriptor()
                                      ->GetInputLocation(index)
                                      .representation();
      MarkAsRepresentation(rep, node);
      return VisitParameter(node);
    }
    case IrOpcode::kInt32Constant:
    case IrOpcode::kInt64Constant:
    case IrOpcode::kExternalConstant:
      return VisitConstant(node);
    case IrOpcode::kFloat64Constant:
      return MarkAsDouble(node), VisitConstant(node);
    case IrOpcode::kHeapConstant:
    case IrOpcode::kNumberConstant:
      // TODO(turbofan): only mark non-smis as references.
      return MarkAsReference(node), VisitConstant(node);
    case IrOpcode::kCall:
      return VisitCall(node, NULL, NULL);
    case IrOpcode::kFrameState:
      // TODO(titzer): state nodes should be combined into their users.
      return;
    case IrOpcode::kLoad: {
      MachineRepresentation load_rep = OpParameter<MachineRepresentation>(node);
      MarkAsRepresentation(load_rep, node);
      return VisitLoad(node);
    }
    case IrOpcode::kStore:
      return VisitStore(node);
    case IrOpcode::kWord32And:
      return VisitWord32And(node);
    case IrOpcode::kWord32Or:
      return VisitWord32Or(node);
    case IrOpcode::kWord32Xor:
      return VisitWord32Xor(node);
    case IrOpcode::kWord32Shl:
      return VisitWord32Shl(node);
    case IrOpcode::kWord32Shr:
      return VisitWord32Shr(node);
    case IrOpcode::kWord32Sar:
      return VisitWord32Sar(node);
    case IrOpcode::kWord32Equal:
      return VisitWord32Equal(node);
    case IrOpcode::kWord64And:
      return VisitWord64And(node);
    case IrOpcode::kWord64Or:
      return VisitWord64Or(node);
    case IrOpcode::kWord64Xor:
      return VisitWord64Xor(node);
    case IrOpcode::kWord64Shl:
      return VisitWord64Shl(node);
    case IrOpcode::kWord64Shr:
      return VisitWord64Shr(node);
    case IrOpcode::kWord64Sar:
      return VisitWord64Sar(node);
    case IrOpcode::kWord64Equal:
      return VisitWord64Equal(node);
    case IrOpcode::kInt32Add:
      return VisitInt32Add(node);
    case IrOpcode::kInt32Sub:
      return VisitInt32Sub(node);
    case IrOpcode::kInt32Mul:
      return VisitInt32Mul(node);
    case IrOpcode::kInt32Div:
      return VisitInt32Div(node);
    case IrOpcode::kInt32UDiv:
      return VisitInt32UDiv(node);
    case IrOpcode::kInt32Mod:
      return VisitInt32Mod(node);
    case IrOpcode::kInt32UMod:
      return VisitInt32UMod(node);
    case IrOpcode::kInt32LessThan:
      return VisitInt32LessThan(node);
    case IrOpcode::kInt32LessThanOrEqual:
      return VisitInt32LessThanOrEqual(node);
    case IrOpcode::kUint32LessThan:
      return VisitUint32LessThan(node);
    case IrOpcode::kUint32LessThanOrEqual:
      return VisitUint32LessThanOrEqual(node);
    case IrOpcode::kInt64Add:
      return VisitInt64Add(node);
    case IrOpcode::kInt64Sub:
      return VisitInt64Sub(node);
    case IrOpcode::kInt64Mul:
      return VisitInt64Mul(node);
    case IrOpcode::kInt64Div:
      return VisitInt64Div(node);
    case IrOpcode::kInt64UDiv:
      return VisitInt64UDiv(node);
    case IrOpcode::kInt64Mod:
      return VisitInt64Mod(node);
    case IrOpcode::kInt64UMod:
      return VisitInt64UMod(node);
    case IrOpcode::kInt64LessThan:
      return VisitInt64LessThan(node);
    case IrOpcode::kInt64LessThanOrEqual:
      return VisitInt64LessThanOrEqual(node);
    case IrOpcode::kConvertInt32ToInt64:
      return VisitConvertInt32ToInt64(node);
    case IrOpcode::kConvertInt64ToInt32:
      return VisitConvertInt64ToInt32(node);
    case IrOpcode::kConvertInt32ToFloat64:
      return MarkAsDouble(node), VisitConvertInt32ToFloat64(node);
    case IrOpcode::kConvertFloat64ToInt32:
      return VisitConvertFloat64ToInt32(node);
    case IrOpcode::kFloat64Add:
      return MarkAsDouble(node), VisitFloat64Add(node);
    case IrOpcode::kFloat64Sub:
      return MarkAsDouble(node), VisitFloat64Sub(node);
    case IrOpcode::kFloat64Mul:
      return MarkAsDouble(node), VisitFloat64Mul(node);
    case IrOpcode::kFloat64Div:
      return MarkAsDouble(node), VisitFloat64Div(node);
    case IrOpcode::kFloat64Mod:
      return MarkAsDouble(node), VisitFloat64Mod(node);
    case IrOpcode::kFloat64Equal:
      return VisitFloat64Equal(node);
    case IrOpcode::kFloat64LessThan:
      return VisitFloat64LessThan(node);
    case IrOpcode::kFloat64LessThanOrEqual:
      return VisitFloat64LessThanOrEqual(node);
    default:
      V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
               node->opcode(), node->op()->mnemonic(), node->id());
  }
}


void InstructionSelector::VisitWord32Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWord32Test(m.left().node(), &cont);
  }
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWord64Test(m.left().node(), &cont);
  }
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kUnorderedEqual, node);
  VisitFloat64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kUnorderedLessThan, node);
  VisitFloat64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
  VisitFloat64Compare(node, &cont);
}


// 32 bit targets do not implement the following instructions.
#if V8_TARGET_ARCH_32_BIT

void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  UNIMPLEMENTED();
}

#endif  // V8_TARGET_ARCH_32_BIT


void InstructionSelector::VisitPhi(Node* node) {
  // TODO(bmeurer): Emit a PhiInstruction here.
  for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
    MarkAsUsed(*i);
  }
}


void InstructionSelector::VisitParameter(Node* node) {
  OperandGenerator g(this);
  Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
                                              OpParameter<int>(node))));
}


void InstructionSelector::VisitConstant(Node* node) {
  // We must emit a NOP here because every live range needs a defining
  // instruction in the register allocator.
  OperandGenerator g(this);
  Emit(kArchNop, g.DefineAsConstant(node));
}


void InstructionSelector::VisitGoto(BasicBlock* target) {
  if (IsNextInAssemblyOrder(target)) {
    // fall through to the next block.
    Emit(kArchNop, NULL)->MarkAsControl();
  } else {
    // jump to the next block.
    OperandGenerator g(this);
    Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
  }
}


void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // If we can fall through to the true block, invert the branch.
  if (IsNextInAssemblyOrder(tbranch)) {
    cont.Negate();
    cont.SwapBlocks();
  }

  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
        return VisitFloat64Compare(value, &cont);
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit compare against 0.
  VisitWord32Test(value, &cont);
}

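// Worked example for the loop above: a graph for `if (!(a < b)) ...` arrives
// as Branch(Word32Equal(Int32LessThan(a, b), 0)). The while loop consumes
// the Word32Equal-against-zero (the graph's encoding of boolean negation)
// and negates the continuation, and the switch then fuses the Int32LessThan
// directly into the branch via VisitWord32Compare(), so no boolean value is
// ever materialized.
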
void InstructionSelector::VisitReturn(Node* value) {
  OperandGenerator g(this);
  if (value != NULL) {
    Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation()));
  } else {
    Emit(kArchRet, NULL);
  }
}


void InstructionSelector::VisitThrow(Node* value) {
  UNIMPLEMENTED();  // TODO(titzer)
}


void InstructionSelector::VisitDeoptimization(Node* deopt) {
  ASSERT(deopt->op()->opcode() == IrOpcode::kDeoptimize);
  Node* state = deopt->InputAt(0);
  ASSERT(state->op()->opcode() == IrOpcode::kFrameState);
  FrameStateDescriptor descriptor = OpParameter<FrameStateDescriptor>(state);
  // TODO(jarin) We should also add an instruction input for every input to
  // the framestate node (and recurse for the inlined framestates).
  int deoptimization_id = sequence()->AddDeoptimizationEntry(descriptor);
  Emit(kArchDeoptimize | MiscField::encode(deoptimization_id), NULL);
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8

169
src/compiler/instruction-selector.h
Normal file
@@ -0,0 +1,169 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_INSTRUCTION_SELECTOR_H_

#include <deque>

#include "src/compiler/common-operator.h"
#include "src/compiler/instruction.h"
#include "src/compiler/machine-operator.h"
#include "src/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
struct CallBuffer;  // TODO(bmeurer): Remove this.
class FlagsContinuation;

class InstructionSelector V8_FINAL {
 public:
  explicit InstructionSelector(InstructionSequence* sequence,
                               SourcePositionTable* source_positions);

  // Visit code for the entire graph with the included schedule.
  void SelectInstructions();

  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    size_t temp_count = 0, InstructionOperand* *temps = NULL);
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    InstructionOperand* a, size_t temp_count = 0,
                    InstructionOperand* *temps = NULL);
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    InstructionOperand* a, InstructionOperand* b,
                    size_t temp_count = 0, InstructionOperand* *temps = NULL);
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    InstructionOperand* a, InstructionOperand* b,
                    InstructionOperand* c, size_t temp_count = 0,
                    InstructionOperand* *temps = NULL);
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    InstructionOperand* a, InstructionOperand* b,
                    InstructionOperand* c, InstructionOperand* d,
                    size_t temp_count = 0, InstructionOperand* *temps = NULL);
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand** outputs, size_t input_count,
                    InstructionOperand** inputs, size_t temp_count = 0,
                    InstructionOperand* *temps = NULL);
  Instruction* Emit(Instruction* instr);

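  // A sketch of how the temps parameters might be used (kArchCustom is a
  // placeholder opcode; the operand helpers come from OperandGenerator):
  //
  //   InstructionOperand* temps[] = {g.TempRegister(), g.TempDoubleRegister()};
  //   selector->Emit(kArchCustom, g.DefineAsRegister(node),
  //                  g.UseRegister(input), ARRAY_SIZE(temps), temps);
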
 private:
  friend class OperandGenerator;

  // ===========================================================================
  // ============ Architecture-independent graph covering methods. =============
  // ===========================================================================

  // Checks if {block} will appear directly after {current_block_} when
  // assembling code, in which case a fall-through can be used.
  bool IsNextInAssemblyOrder(const BasicBlock* block) const;

  // Used in pattern matching during code generation.
  // Check if {node} can be covered while generating code for the current
  // instruction. A node can be covered if {user} owns the node's only value
  // edge and the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Checks if {node} is marked as double.
  bool IsDouble(const Node* node) const;

  // Inform the register allocator of a double result.
  void MarkAsDouble(Node* node);

  // Checks if {node} is marked as reference.
  bool IsReference(const Node* node) const;

  // Inform the register allocator of a reference result.
  void MarkAsReference(Node* node);

  // Inform the register allocator of the representation of the value produced
  // by {node}.
  void MarkAsRepresentation(MachineRepresentation rep, Node* node);

  // Initialize the call buffer with the InstructionOperands, nodes, etc.,
  // corresponding to the inputs and outputs of the call.
  // {call_code_immediate} to generate immediate operands to calls of code.
  // {call_address_immediate} to generate immediate operands to address calls.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            bool call_code_immediate,
                            bool call_address_immediate, BasicBlock* cont_node,
                            BasicBlock* deopt_node);

  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ===============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR

  void VisitWord32Test(Node* node, FlagsContinuation* cont);
  void VisitWord64Test(Node* node, FlagsContinuation* cont);
  void VisitWord32Compare(Node* node, FlagsContinuation* cont);
  void VisitWord64Compare(Node* node, FlagsContinuation* cont);
  void VisitFloat64Compare(Node* node, FlagsContinuation* cont);

  void VisitPhi(Node* node);
  void VisitParameter(Node* node);
  void VisitConstant(Node* node);
  void VisitCall(Node* call, BasicBlock* continuation,
                 BasicBlock* deoptimization);
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
  void VisitReturn(Node* value);
  void VisitThrow(Node* value);
  void VisitDeoptimization(Node* deopt);

  // ===========================================================================

  Graph* graph() const { return sequence()->graph(); }
  Linkage* linkage() const { return sequence()->linkage(); }
  Schedule* schedule() const { return sequence()->schedule(); }
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
  Zone* zone() { return &zone_; }

  // ===========================================================================

  typedef zone_allocator<Instruction*> InstructionPtrZoneAllocator;
  typedef std::deque<Instruction*, InstructionPtrZoneAllocator> Instructions;

  Zone zone_;
  InstructionSequence* sequence_;
  SourcePositionTable* source_positions_;
  BasicBlock* current_block_;
  Instructions instructions_;
  BoolVector used_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_H_

479
src/compiler/instruction.cc
Normal file
@@ -0,0 +1,479 @@
// Copyright 2014 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/compiler/instruction.h"
|
||||
|
||||
#include "src/compiler/common-operator.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
namespace compiler {
|
||||
|
||||
OStream& operator<<(OStream& os, const InstructionOperand& op) {
|
||||
switch (op.kind()) {
|
||||
case InstructionOperand::INVALID:
|
||||
return os << "(0)";
|
||||
case InstructionOperand::UNALLOCATED: {
|
||||
const UnallocatedOperand* unalloc = UnallocatedOperand::cast(&op);
|
||||
os << "v" << unalloc->virtual_register();
|
||||
if (unalloc->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
|
||||
return os << "(=" << unalloc->fixed_slot_index() << "S)";
|
||||
}
|
||||
switch (unalloc->extended_policy()) {
|
||||
case UnallocatedOperand::NONE:
|
||||
return os;
|
||||
case UnallocatedOperand::FIXED_REGISTER:
|
||||
return os << "(=" << Register::AllocationIndexToString(
|
||||
unalloc->fixed_register_index()) << ")";
|
||||
case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
|
||||
return os << "(=" << DoubleRegister::AllocationIndexToString(
|
||||
unalloc->fixed_register_index()) << ")";
|
||||
case UnallocatedOperand::MUST_HAVE_REGISTER:
|
||||
return os << "(R)";
|
||||
case UnallocatedOperand::SAME_AS_FIRST_INPUT:
|
||||
return os << "(1)";
|
||||
case UnallocatedOperand::ANY:
|
||||
return os << "(-)";
|
||||
}
|
||||
}
|
||||
case InstructionOperand::CONSTANT:
|
||||
return os << "[constant:" << op.index() << "]";
|
||||
case InstructionOperand::IMMEDIATE:
|
||||
return os << "[immediate:" << op.index() << "]";
|
||||
case InstructionOperand::STACK_SLOT:
|
||||
return os << "[stack:" << op.index() << "]";
|
||||
case InstructionOperand::DOUBLE_STACK_SLOT:
|
||||
return os << "[double_stack:" << op.index() << "]";
|
||||
case InstructionOperand::REGISTER:
|
||||
return os << "[" << Register::AllocationIndexToString(op.index())
|
||||
<< "|R]";
|
||||
case InstructionOperand::DOUBLE_REGISTER:
|
||||
return os << "[" << DoubleRegister::AllocationIndexToString(op.index())
|
||||
<< "|R]";
|
||||
}
|
||||
UNREACHABLE();
|
||||
return os;
|
||||
}
|
||||
|
||||
|
||||
template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
|
||||
SubKindOperand<kOperandKind, kNumCachedOperands>*
|
||||
SubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
|
||||
|
||||
|
||||
template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
|
||||
void SubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
|
||||
if (cache) return;
|
||||
cache = new SubKindOperand[kNumCachedOperands];
|
||||
for (int i = 0; i < kNumCachedOperands; i++) {
|
||||
cache[i].ConvertTo(kOperandKind, i);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
|
||||
void SubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
|
||||
delete[] cache;
|
||||
}
|
||||
|
||||
|
||||
void InstructionOperand::SetUpCaches() {
|
||||
#define INSTRUCTION_OPERAND_SETUP(name, type, number) \
|
||||
name##Operand::SetUpCache();
|
||||
INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_SETUP)
|
||||
#undef INSTRUCTION_OPERAND_SETUP
|
||||
}
|
||||
|
||||
|
||||
void InstructionOperand::TearDownCaches() {
|
||||
#define INSTRUCTION_OPERAND_TEARDOWN(name, type, number) \
|
||||
name##Operand::TearDownCache();
|
||||
INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_TEARDOWN)
|
||||
#undef INSTRUCTION_OPERAND_TEARDOWN
|
||||
}
|
||||
|
||||
|
||||
OStream& operator<<(OStream& os, const MoveOperands& mo) {
|
||||
os << *mo.destination();
|
||||
if (!mo.source()->Equals(mo.destination())) os << " = " << *mo.source();
|
||||
return os << ";";
|
||||
}
|
||||
|
||||
|
||||
bool ParallelMove::IsRedundant() const {
|
||||
for (int i = 0; i < move_operands_.length(); ++i) {
|
||||
if (!move_operands_[i].IsRedundant()) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
OStream& operator<<(OStream& os, const ParallelMove& pm) {
|
||||
bool first = true;
|
||||
for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
|
||||
move != pm.move_operands()->end(); ++move) {
|
||||
if (move->IsEliminated()) continue;
|
||||
if (!first) os << " ";
|
||||
first = false;
|
||||
os << *move;
|
||||
}
|
||||
return os;
|
||||
}
|
||||
|
||||
|
||||
void PointerMap::RecordPointer(InstructionOperand* op, Zone* zone) {
|
||||
// Do not record arguments as pointers.
|
||||
if (op->IsStackSlot() && op->index() < 0) return;
|
||||
ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
|
||||
pointer_operands_.Add(op, zone);
|
||||
}
|
||||
|
||||
|
||||
void PointerMap::RemovePointer(InstructionOperand* op) {
|
||||
// Do not record arguments as pointers.
|
||||
if (op->IsStackSlot() && op->index() < 0) return;
|
||||
ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
|
||||
for (int i = 0; i < pointer_operands_.length(); ++i) {
|
||||
if (pointer_operands_[i]->Equals(op)) {
|
||||
pointer_operands_.Remove(i);
|
||||
--i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void PointerMap::RecordUntagged(InstructionOperand* op, Zone* zone) {
|
||||
// Do not record arguments as pointers.
|
||||
if (op->IsStackSlot() && op->index() < 0) return;
|
||||
ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
|
||||
untagged_operands_.Add(op, zone);
|
||||
}
|
||||
|
||||
|
||||
OStream& operator<<(OStream& os, const PointerMap& pm) {
|
||||
os << "{";
|
||||
for (ZoneList<InstructionOperand*>::iterator op =
|
||||
pm.pointer_operands_.begin();
|
||||
op != pm.pointer_operands_.end(); ++op) {
|
||||
if (op != pm.pointer_operands_.begin()) os << ";";
|
||||
os << *op;
|
||||
}
|
||||
return os << "}";
|
||||
}
|
||||
|
||||
|
||||
OStream& operator<<(OStream& os, const ArchOpcode& ao) {
|
||||
switch (ao) {
|
||||
#define CASE(Name) \
|
||||
case k##Name: \
|
||||
return os << #Name;
|
||||
ARCH_OPCODE_LIST(CASE)
|
||||
#undef CASE
|
||||
}
|
||||
UNREACHABLE();
|
||||
return os;
|
||||
}
|
||||
|
||||
|
||||
OStream& operator<<(OStream& os, const AddressingMode& am) {
|
||||
switch (am) {
|
||||
case kMode_None:
|
||||
return os;
|
||||
#define CASE(Name) \
|
||||
case kMode_##Name: \
|
||||
return os << #Name;
|
||||
TARGET_ADDRESSING_MODE_LIST(CASE)
|
||||
#undef CASE
|
||||
}
|
||||
UNREACHABLE();
|
||||
return os;
|
||||
}
|
||||
|
||||
|
||||
OStream& operator<<(OStream& os, const FlagsMode& fm) {
|
||||
switch (fm) {
|
||||
case kFlags_none:
|
||||
return os;
|
||||
case kFlags_branch:
|
||||
return os << "branch";
|
||||
case kFlags_set:
|
||||
return os << "set";
|
||||
}
|
||||
UNREACHABLE();
|
||||
return os;
|
||||
}


OStream& operator<<(OStream& os, const FlagsCondition& fc) {
  switch (fc) {
    case kEqual:
      return os << "equal";
    case kNotEqual:
      return os << "not equal";
    case kSignedLessThan:
      return os << "signed less than";
    case kSignedGreaterThanOrEqual:
      return os << "signed greater than or equal";
    case kSignedLessThanOrEqual:
      return os << "signed less than or equal";
    case kSignedGreaterThan:
      return os << "signed greater than";
    case kUnsignedLessThan:
      return os << "unsigned less than";
    case kUnsignedGreaterThanOrEqual:
      return os << "unsigned greater than or equal";
    case kUnsignedLessThanOrEqual:
      return os << "unsigned less than or equal";
    case kUnsignedGreaterThan:
      return os << "unsigned greater than";
    case kUnorderedEqual:
      return os << "unordered equal";
    case kUnorderedNotEqual:
      return os << "unordered not equal";
    case kUnorderedLessThan:
      return os << "unordered less than";
    case kUnorderedGreaterThanOrEqual:
      return os << "unordered greater than or equal";
    case kUnorderedLessThanOrEqual:
      return os << "unordered less than or equal";
    case kUnorderedGreaterThan:
      return os << "unordered greater than";
  }
  UNREACHABLE();
  return os;
}


OStream& operator<<(OStream& os, const Instruction& instr) {
  if (instr.OutputCount() > 1) os << "(";
  for (size_t i = 0; i < instr.OutputCount(); i++) {
    if (i > 0) os << ", ";
    os << *instr.OutputAt(i);
  }

  if (instr.OutputCount() > 1) os << ") = ";
  if (instr.OutputCount() == 1) os << " = ";

  if (instr.IsGapMoves()) {
    const GapInstruction* gap = GapInstruction::cast(&instr);
    os << (instr.IsBlockStart() ? " block-start" : "gap ");
    for (int i = GapInstruction::FIRST_INNER_POSITION;
         i <= GapInstruction::LAST_INNER_POSITION; i++) {
      os << "(";
      if (gap->parallel_moves_[i] != NULL) os << *gap->parallel_moves_[i];
      os << ") ";
    }
  } else if (instr.IsSourcePosition()) {
    const SourcePositionInstruction* pos =
        SourcePositionInstruction::cast(&instr);
    os << "position (" << pos->source_position().raw() << ")";
  } else {
    os << ArchOpcodeField::decode(instr.opcode());
    AddressingMode am = AddressingModeField::decode(instr.opcode());
    if (am != kMode_None) {
      os << " : " << AddressingModeField::decode(instr.opcode());
    }
    FlagsMode fm = FlagsModeField::decode(instr.opcode());
    if (fm != kFlags_none) {
      os << " && " << fm << " if "
         << FlagsConditionField::decode(instr.opcode());
    }
  }
  if (instr.InputCount() > 0) {
    for (size_t i = 0; i < instr.InputCount(); i++) {
      os << " " << *instr.InputAt(i);
    }
  }
  return os << "\n";
}


OStream& operator<<(OStream& os, const Constant& constant) {
  switch (constant.type()) {
    case Constant::kInt32:
      return os << constant.ToInt32();
    case Constant::kInt64:
      return os << constant.ToInt64() << "l";
    case Constant::kFloat64:
      return os << constant.ToFloat64();
    case Constant::kExternalReference:
      return os << constant.ToExternalReference().address();
    case Constant::kHeapObject:
      return os << Brief(*constant.ToHeapObject());
  }
  UNREACHABLE();
  return os;
}


Label* InstructionSequence::GetLabel(BasicBlock* block) {
  return GetBlockStart(block)->label();
}


BlockStartInstruction* InstructionSequence::GetBlockStart(BasicBlock* block) {
  return BlockStartInstruction::cast(InstructionAt(block->code_start_));
}


void InstructionSequence::StartBlock(BasicBlock* block) {
  block->code_start_ = instructions_.size();
  BlockStartInstruction* block_start =
      BlockStartInstruction::New(zone(), block);
  AddInstruction(block_start, block);
}


void InstructionSequence::EndBlock(BasicBlock* block) {
  int end = instructions_.size();
  ASSERT(block->code_start_ >= 0 && block->code_start_ < end);
  block->code_end_ = end;
}


int InstructionSequence::AddInstruction(Instruction* instr, BasicBlock* block) {
  // TODO(titzer): the order of these gaps is a holdover from Lithium.
  GapInstruction* gap = GapInstruction::New(zone());
  if (instr->IsControl()) instructions_.push_back(gap);
  int index = instructions_.size();
  instructions_.push_back(instr);
  if (!instr->IsControl()) instructions_.push_back(gap);
  if (instr->NeedsPointerMap()) {
    ASSERT(instr->pointer_map() == NULL);
    PointerMap* pointer_map = new (zone()) PointerMap(zone());
    pointer_map->set_instruction_position(index);
    instr->set_pointer_map(pointer_map);
    pointer_maps_.push_back(pointer_map);
  }
  return index;
}
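
// Reader note (added for clarity; not part of the original change): the gap
// ordering above means a control instruction is emitted as [gap, instr],
// while every other instruction is emitted as [instr, gap]; 'index' always
// refers to the instruction itself, never to its gap.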


BasicBlock* InstructionSequence::GetBasicBlock(int instruction_index) {
  // TODO(turbofan): Optimize this.
  for (;;) {
    ASSERT_LE(0, instruction_index);
    Instruction* instruction = InstructionAt(instruction_index--);
    if (instruction->IsBlockStart()) {
      return BlockStartInstruction::cast(instruction)->block();
    }
  }
}


bool InstructionSequence::IsReference(int virtual_register) const {
  return references_.find(virtual_register) != references_.end();
}


bool InstructionSequence::IsDouble(int virtual_register) const {
  return doubles_.find(virtual_register) != doubles_.end();
}


void InstructionSequence::MarkAsReference(int virtual_register) {
  references_.insert(virtual_register);
}


void InstructionSequence::MarkAsDouble(int virtual_register) {
  doubles_.insert(virtual_register);
}


void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
                                     InstructionOperand* to) {
  GapAt(index)->GetOrCreateParallelMove(GapInstruction::START, zone())->AddMove(
      from, to, zone());
}


int InstructionSequence::AddDeoptimizationEntry(
    const FrameStateDescriptor& descriptor) {
  int deoptimization_id = deoptimization_entries_.size();
  deoptimization_entries_.push_back(descriptor);
  return deoptimization_id;
}

FrameStateDescriptor InstructionSequence::GetDeoptimizationEntry(
    int deoptimization_id) {
  return deoptimization_entries_[deoptimization_id];
}


int InstructionSequence::GetDeoptimizationEntryCount() {
  return deoptimization_entries_.size();
}


OStream& operator<<(OStream& os, const InstructionSequence& code) {
  for (size_t i = 0; i < code.immediates_.size(); ++i) {
    Constant constant = code.immediates_[i];
    os << "IMM#" << i << ": " << constant << "\n";
  }
  int i = 0;
  for (ConstantMap::const_iterator it = code.constants_.begin();
       it != code.constants_.end(); ++i, ++it) {
    os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
  }
  for (int i = 0; i < code.BasicBlockCount(); i++) {
    BasicBlock* block = code.BlockAt(i);

    int bid = block->id();
    os << "RPO#" << block->rpo_number_ << ": B" << bid;
    CHECK(block->rpo_number_ == i);
    if (block->IsLoopHeader()) {
      os << " loop blocks: [" << block->rpo_number_ << ", " << block->loop_end_
         << ")";
    }
    os << " instructions: [" << block->code_start_ << ", " << block->code_end_
       << ")\n predecessors:";

    BasicBlock::Predecessors predecessors = block->predecessors();
    for (BasicBlock::Predecessors::iterator iter = predecessors.begin();
         iter != predecessors.end(); ++iter) {
      os << " B" << (*iter)->id();
    }
    os << "\n";

    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
         ++j) {
      Node* phi = *j;
      if (phi->opcode() != IrOpcode::kPhi) continue;
      os << " phi: v" << phi->id() << " =";
      Node::Inputs inputs = phi->inputs();
      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
           ++iter) {
        os << " v" << (*iter)->id();
      }
      os << "\n";
    }

    Vector<char> buf = Vector<char>::New(32);
    for (int j = block->first_instruction_index();
         j <= block->last_instruction_index(); j++) {
      // TODO(svenpanne) Add some basic formatting to our streams.
      SNPrintF(buf, "%5d", j);
      os << " " << buf.start() << ": " << *code.InstructionAt(j);
    }

    os << " " << block->control_;

    if (block->control_input_ != NULL) {
      os << " v" << block->control_input_->id();
    }

    BasicBlock::Successors successors = block->successors();
    for (BasicBlock::Successors::iterator iter = successors.begin();
         iter != successors.end(); ++iter) {
      os << " B" << (*iter)->id();
    }
    os << "\n";
  }
  return os;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
843 src/compiler/instruction.h Normal file
@ -0,0 +1,843 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_INSTRUCTION_H_
#define V8_COMPILER_INSTRUCTION_H_

#include <deque>
#include <map>
#include <set>

// TODO(titzer): don't include the assembler?
#include "src/assembler.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/frame.h"
#include "src/compiler/graph.h"
#include "src/compiler/instruction-codes.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/schedule.h"
#include "src/zone-allocator.h"

namespace v8 {
namespace internal {

// Forward declarations.
class OStream;

namespace compiler {

// Forward declarations.
class Linkage;

// A couple of opcodes are reserved for internal use.
const InstructionCode kGapInstruction = -1;
const InstructionCode kBlockStartInstruction = -2;
const InstructionCode kSourcePositionInstruction = -3;


#define INSTRUCTION_OPERAND_LIST(V)         \
  V(Constant, CONSTANT, 128)                \
  V(Immediate, IMMEDIATE, 128)              \
  V(StackSlot, STACK_SLOT, 128)             \
  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
  V(Register, REGISTER, Register::kNumRegisters) \
  V(DoubleRegister, DOUBLE_REGISTER, DoubleRegister::kMaxNumRegisters)

class InstructionOperand : public ZoneObject {
 public:
  enum Kind {
    INVALID,
    UNALLOCATED,
    CONSTANT,
    IMMEDIATE,
    STACK_SLOT,
    DOUBLE_STACK_SLOT,
    REGISTER,
    DOUBLE_REGISTER
  };

  InstructionOperand() : value_(KindField::encode(INVALID)) {}
  InstructionOperand(Kind kind, int index) { ConvertTo(kind, index); }

  Kind kind() const { return KindField::decode(value_); }
  int index() const { return static_cast<int>(value_) >> KindField::kSize; }
#define INSTRUCTION_OPERAND_PREDICATE(name, type, number) \
  bool Is##name() const { return kind() == type; }
  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_PREDICATE)
  INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
  INSTRUCTION_OPERAND_PREDICATE(Ignored, INVALID, 0)
#undef INSTRUCTION_OPERAND_PREDICATE
  bool Equals(InstructionOperand* other) const {
    return value_ == other->value_;
  }

  void ConvertTo(Kind kind, int index) {
    if (kind == REGISTER || kind == DOUBLE_REGISTER) ASSERT(index >= 0);
    value_ = KindField::encode(kind);
    value_ |= index << KindField::kSize;
    ASSERT(this->index() == index);
  }

  // Calls SetUpCache()/TearDownCache() for each subclass.
  static void SetUpCaches();
  static void TearDownCaches();

 protected:
  typedef BitField<Kind, 0, 3> KindField;

  unsigned value_;
};

OStream& operator<<(OStream& os, const InstructionOperand& op);

class UnallocatedOperand : public InstructionOperand {
 public:
  enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY };

  enum ExtendedPolicy {
    NONE,
    ANY,
    FIXED_REGISTER,
    FIXED_DOUBLE_REGISTER,
    MUST_HAVE_REGISTER,
    SAME_AS_FIRST_INPUT
  };

  // Lifetime of operand inside the instruction.
  enum Lifetime {
    // A USED_AT_START operand is guaranteed to be live only at instruction
    // start. The register allocator is free to assign the same register to
    // some other operand used inside the instruction (i.e. a temporary or
    // an output).
    USED_AT_START,

    // A USED_AT_END operand is treated as live until the end of the
    // instruction. This means that the register allocator will not reuse its
    // register for any other operand inside the instruction.
    USED_AT_END
  };

  explicit UnallocatedOperand(ExtendedPolicy policy)
      : InstructionOperand(UNALLOCATED, 0) {
    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
    value_ |= ExtendedPolicyField::encode(policy);
    value_ |= LifetimeField::encode(USED_AT_END);
  }

  UnallocatedOperand(BasicPolicy policy, int index)
      : InstructionOperand(UNALLOCATED, 0) {
    ASSERT(policy == FIXED_SLOT);
    value_ |= BasicPolicyField::encode(policy);
    value_ |= index << FixedSlotIndexField::kShift;
    ASSERT(this->fixed_slot_index() == index);
  }

  UnallocatedOperand(ExtendedPolicy policy, int index)
      : InstructionOperand(UNALLOCATED, 0) {
    ASSERT(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
    value_ |= ExtendedPolicyField::encode(policy);
    value_ |= LifetimeField::encode(USED_AT_END);
    value_ |= FixedRegisterField::encode(index);
  }

  UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime)
      : InstructionOperand(UNALLOCATED, 0) {
    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
    value_ |= ExtendedPolicyField::encode(policy);
    value_ |= LifetimeField::encode(lifetime);
  }

  UnallocatedOperand* CopyUnconstrained(Zone* zone) {
    UnallocatedOperand* result = new (zone) UnallocatedOperand(ANY);
    result->set_virtual_register(virtual_register());
    return result;
  }

  static const UnallocatedOperand* cast(const InstructionOperand* op) {
    ASSERT(op->IsUnallocated());
    return static_cast<const UnallocatedOperand*>(op);
  }

  static UnallocatedOperand* cast(InstructionOperand* op) {
    ASSERT(op->IsUnallocated());
    return static_cast<UnallocatedOperand*>(op);
  }

  // The encoding used for UnallocatedOperand operands depends on the policy
  // that is stored within the operand. The FIXED_SLOT policy uses a compact
  // encoding because it accommodates a larger payload.
  //
  // For FIXED_SLOT policy:
  //     +------------------------------------------+
  //     |      slot_index       | vreg | 0 | 001   |
  //     +------------------------------------------+
  //
  // For all other (extended) policies:
  //     +------------------------------------------+
  //     | reg_index | L | PPP |  vreg  | 1 | 001   |    L ... Lifetime
  //     +------------------------------------------+    P ... Policy
  //
  // The slot index is a signed value which requires us to decode it manually
  // instead of using the BitField utility class.

  // The superclass has a KindField.
  STATIC_ASSERT(KindField::kSize == 3);

  // BitFields for all unallocated operands.
  class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
  class VirtualRegisterField : public BitField<unsigned, 4, 18> {};

  // BitFields specific to BasicPolicy::FIXED_SLOT.
  class FixedSlotIndexField : public BitField<int, 22, 10> {};

  // BitFields specific to BasicPolicy::EXTENDED_POLICY.
  class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
  class LifetimeField : public BitField<Lifetime, 25, 1> {};
  class FixedRegisterField : public BitField<int, 26, 6> {};

  static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
  static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
  static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
  static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));

  // Predicates for the operand policy.
  bool HasAnyPolicy() const {
    return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
  }
  bool HasFixedPolicy() const {
    return basic_policy() == FIXED_SLOT ||
           extended_policy() == FIXED_REGISTER ||
           extended_policy() == FIXED_DOUBLE_REGISTER;
  }
  bool HasRegisterPolicy() const {
    return basic_policy() == EXTENDED_POLICY &&
           extended_policy() == MUST_HAVE_REGISTER;
  }
  bool HasSameAsInputPolicy() const {
    return basic_policy() == EXTENDED_POLICY &&
           extended_policy() == SAME_AS_FIRST_INPUT;
  }
  bool HasFixedSlotPolicy() const { return basic_policy() == FIXED_SLOT; }
  bool HasFixedRegisterPolicy() const {
    return basic_policy() == EXTENDED_POLICY &&
           extended_policy() == FIXED_REGISTER;
  }
  bool HasFixedDoubleRegisterPolicy() const {
    return basic_policy() == EXTENDED_POLICY &&
           extended_policy() == FIXED_DOUBLE_REGISTER;
  }

  // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
  BasicPolicy basic_policy() const { return BasicPolicyField::decode(value_); }

  // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
  ExtendedPolicy extended_policy() const {
    ASSERT(basic_policy() == EXTENDED_POLICY);
    return ExtendedPolicyField::decode(value_);
  }

  // [fixed_slot_index]: Only for FIXED_SLOT.
  int fixed_slot_index() const {
    ASSERT(HasFixedSlotPolicy());
    return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
  }

  // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
  int fixed_register_index() const {
    ASSERT(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
    return FixedRegisterField::decode(value_);
  }

  // [virtual_register]: The virtual register ID for this operand.
  int virtual_register() const { return VirtualRegisterField::decode(value_); }
  void set_virtual_register(unsigned id) {
    value_ = VirtualRegisterField::update(value_, id);
  }

  // [lifetime]: Only for non-FIXED_SLOT.
  bool IsUsedAtStart() {
    ASSERT(basic_policy() == EXTENDED_POLICY);
    return LifetimeField::decode(value_) == USED_AT_START;
  }
};
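
// Reader sketch (added for illustration; not part of the original change):
// how the fields above compose for a fixed-register operand, assuming the
// BitField layout declared in UnallocatedOperand.
//
//   UnallocatedOperand op(UnallocatedOperand::FIXED_REGISTER, 2);
//   op.set_virtual_register(7);
//   // bits 0-2   (KindField)            == UNALLOCATED
//   // bit  3     (BasicPolicyField)     == EXTENDED_POLICY
//   // bits 4-21  (VirtualRegisterField) == 7
//   // bits 22-24 (ExtendedPolicyField)  == FIXED_REGISTER
//   // bits 26-31 (FixedRegisterField)   == 2
//   // hence op.HasFixedRegisterPolicy() and op.fixed_register_index() == 2.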


class MoveOperands V8_FINAL BASE_EMBEDDED {
 public:
  MoveOperands(InstructionOperand* source, InstructionOperand* destination)
      : source_(source), destination_(destination) {}

  InstructionOperand* source() const { return source_; }
  void set_source(InstructionOperand* operand) { source_ = operand; }

  InstructionOperand* destination() const { return destination_; }
  void set_destination(InstructionOperand* operand) { destination_ = operand; }

  // The gap resolver marks moves as "in-progress" by clearing the
  // destination (but not the source).
  bool IsPending() const { return destination_ == NULL && source_ != NULL; }

  // True if this move is not eliminated and its source is the given operand;
  // such a move blocks overwriting that operand.
  bool Blocks(InstructionOperand* operand) const {
    return !IsEliminated() && source()->Equals(operand);
  }

  // A move is redundant if it's been eliminated, if its source and
  // destination are the same, or if its destination is unneeded or constant.
  bool IsRedundant() const {
    return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
           (destination_ != NULL && destination_->IsConstant());
  }

  bool IsIgnored() const {
    return destination_ != NULL && destination_->IsIgnored();
  }

  // We clear both operands to indicate a move that has been eliminated.
  void Eliminate() { source_ = destination_ = NULL; }
  bool IsEliminated() const {
    ASSERT(source_ != NULL || destination_ == NULL);
    return source_ == NULL;
  }

 private:
  InstructionOperand* source_;
  InstructionOperand* destination_;
};
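
// Reader note (added for illustration; not part of the original change):
// the NULL-ness of the two fields encodes the three states of a move:
//   source_ != NULL, destination_ != NULL  -> live move
//   source_ != NULL, destination_ == NULL  -> pending (IsPending())
//   source_ == NULL, destination_ == NULL  -> eliminated (IsEliminated())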

OStream& operator<<(OStream& os, const MoveOperands& mo);

template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
class SubKindOperand V8_FINAL : public InstructionOperand {
 public:
  static SubKindOperand* Create(int index, Zone* zone) {
    ASSERT(index >= 0);
    if (index < kNumCachedOperands) return &cache[index];
    return new (zone) SubKindOperand(index);
  }

  static SubKindOperand* cast(InstructionOperand* op) {
    ASSERT(op->kind() == kOperandKind);
    return reinterpret_cast<SubKindOperand*>(op);
  }

  static void SetUpCache();
  static void TearDownCache();

 private:
  static SubKindOperand* cache;

  SubKindOperand() : InstructionOperand() {}
  explicit SubKindOperand(int index)
      : InstructionOperand(kOperandKind, index) {}
};


#define INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
  typedef SubKindOperand<InstructionOperand::type, number> name##Operand;
INSTRUCTION_OPERAND_LIST(INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS)
#undef INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS


class ParallelMove V8_FINAL : public ZoneObject {
 public:
  explicit ParallelMove(Zone* zone) : move_operands_(4, zone) {}

  void AddMove(InstructionOperand* from, InstructionOperand* to, Zone* zone) {
    move_operands_.Add(MoveOperands(from, to), zone);
  }

  bool IsRedundant() const;

  ZoneList<MoveOperands>* move_operands() { return &move_operands_; }
  const ZoneList<MoveOperands>* move_operands() const {
    return &move_operands_;
  }

 private:
  ZoneList<MoveOperands> move_operands_;
};

OStream& operator<<(OStream& os, const ParallelMove& pm);

class PointerMap V8_FINAL : public ZoneObject {
 public:
  explicit PointerMap(Zone* zone)
      : pointer_operands_(8, zone),
        untagged_operands_(0, zone),
        instruction_position_(-1) {}

  const ZoneList<InstructionOperand*>* GetNormalizedOperands() {
    for (int i = 0; i < untagged_operands_.length(); ++i) {
      RemovePointer(untagged_operands_[i]);
    }
    untagged_operands_.Clear();
    return &pointer_operands_;
  }
  int instruction_position() const { return instruction_position_; }

  void set_instruction_position(int pos) {
    ASSERT(instruction_position_ == -1);
    instruction_position_ = pos;
  }

  void RecordPointer(InstructionOperand* op, Zone* zone);
  void RemovePointer(InstructionOperand* op);
  void RecordUntagged(InstructionOperand* op, Zone* zone);

 private:
  friend OStream& operator<<(OStream& os, const PointerMap& pm);

  ZoneList<InstructionOperand*> pointer_operands_;
  ZoneList<InstructionOperand*> untagged_operands_;
  int instruction_position_;
};

OStream& operator<<(OStream& os, const PointerMap& pm);

// TODO(titzer): s/PointerMap/ReferenceMap/
class Instruction : public ZoneObject {
 public:
  size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
  InstructionOperand* Output() const { return OutputAt(0); }
  InstructionOperand* OutputAt(size_t i) const {
    ASSERT(i < OutputCount());
    return operands_[i];
  }

  size_t InputCount() const { return InputCountField::decode(bit_field_); }
  InstructionOperand* InputAt(size_t i) const {
    ASSERT(i < InputCount());
    return operands_[OutputCount() + i];
  }

  size_t TempCount() const { return TempCountField::decode(bit_field_); }
  InstructionOperand* TempAt(size_t i) const {
    ASSERT(i < TempCount());
    return operands_[OutputCount() + InputCount() + i];
  }

  InstructionCode opcode() const { return opcode_; }
  ArchOpcode arch_opcode() const { return ArchOpcodeField::decode(opcode()); }
  AddressingMode addressing_mode() const {
    return AddressingModeField::decode(opcode());
  }
  FlagsMode flags_mode() const { return FlagsModeField::decode(opcode()); }
  FlagsCondition flags_condition() const {
    return FlagsConditionField::decode(opcode());
  }

  // TODO(titzer): make control and call into flags.
  static Instruction* New(Zone* zone, InstructionCode opcode) {
    return New(zone, opcode, 0, NULL, 0, NULL, 0, NULL);
  }

  static Instruction* New(Zone* zone, InstructionCode opcode,
                          size_t output_count, InstructionOperand** outputs,
                          size_t input_count, InstructionOperand** inputs,
                          size_t temp_count, InstructionOperand** temps) {
    ASSERT(opcode >= 0);
    ASSERT(output_count == 0 || outputs != NULL);
    ASSERT(input_count == 0 || inputs != NULL);
    ASSERT(temp_count == 0 || temps != NULL);
    InstructionOperand* none = NULL;
    USE(none);
    size_t size = RoundUp(sizeof(Instruction), kPointerSize) +
                  (output_count + input_count + temp_count - 1) * sizeof(none);
    return new (zone->New(size)) Instruction(
        opcode, output_count, outputs, input_count, inputs, temp_count, temps);
  }

  // TODO(titzer): another holdover from lithium days; register allocator
  // should not need to know about control instructions.
  Instruction* MarkAsControl() {
    bit_field_ = IsControlField::update(bit_field_, true);
    return this;
  }
  Instruction* MarkAsCall() {
    bit_field_ = IsCallField::update(bit_field_, true);
    return this;
  }
  bool IsControl() const { return IsControlField::decode(bit_field_); }
  bool IsCall() const { return IsCallField::decode(bit_field_); }
  bool NeedsPointerMap() const { return IsCall(); }
  bool HasPointerMap() const { return pointer_map_ != NULL; }

  bool IsGapMoves() const {
    return opcode() == kGapInstruction || opcode() == kBlockStartInstruction;
  }
  bool IsBlockStart() const { return opcode() == kBlockStartInstruction; }
  bool IsSourcePosition() const {
    return opcode() == kSourcePositionInstruction;
  }

  bool ClobbersRegisters() const { return IsCall(); }
  bool ClobbersTemps() const { return IsCall(); }
  bool ClobbersDoubleRegisters() const { return IsCall(); }
  PointerMap* pointer_map() const { return pointer_map_; }

  void set_pointer_map(PointerMap* map) {
    ASSERT(NeedsPointerMap());
    ASSERT_EQ(NULL, pointer_map_);
    pointer_map_ = map;
  }

  // Placement new operator so that we can smash instructions into
  // zone-allocated memory.
  void* operator new(size_t, void* location) { return location; }

 protected:
  explicit Instruction(InstructionCode opcode)
      : opcode_(opcode),
        bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
                   TempCountField::encode(0) | IsCallField::encode(false) |
                   IsControlField::encode(false)),
        pointer_map_(NULL) {}

  Instruction(InstructionCode opcode, size_t output_count,
              InstructionOperand** outputs, size_t input_count,
              InstructionOperand** inputs, size_t temp_count,
              InstructionOperand** temps)
      : opcode_(opcode),
        bit_field_(OutputCountField::encode(output_count) |
                   InputCountField::encode(input_count) |
                   TempCountField::encode(temp_count) |
                   IsCallField::encode(false) | IsControlField::encode(false)),
        pointer_map_(NULL) {
    for (size_t i = 0; i < output_count; ++i) {
      operands_[i] = outputs[i];
    }
    for (size_t i = 0; i < input_count; ++i) {
      operands_[output_count + i] = inputs[i];
    }
    for (size_t i = 0; i < temp_count; ++i) {
      operands_[output_count + input_count + i] = temps[i];
    }
  }

 protected:
  typedef BitField<size_t, 0, 8> OutputCountField;
  typedef BitField<size_t, 8, 16> InputCountField;
  typedef BitField<size_t, 24, 6> TempCountField;
  typedef BitField<bool, 30, 1> IsCallField;
  typedef BitField<bool, 31, 1> IsControlField;

  InstructionCode opcode_;
  uint32_t bit_field_;
  PointerMap* pointer_map_;
  InstructionOperand* operands_[1];
};
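
// Reader note (added for illustration; not part of the original change):
// Instruction uses the trailing-array idiom. operands_[1] is over-allocated
// by New(), which sizes the zone allocation as
//   RoundUp(sizeof(Instruction), kPointerSize) +
//       (output_count + input_count + temp_count - 1) * kPointerSize
// so all outputs, inputs, and temps are stored inline after the header.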

OStream& operator<<(OStream& os, const Instruction& instr);

// Represents moves inserted before an instruction due to register allocation.
// TODO(titzer): squash GapInstruction back into Instruction, since essentially
// every instruction can possibly have moves inserted before it.
class GapInstruction : public Instruction {
 public:
  enum InnerPosition {
    BEFORE,
    START,
    END,
    AFTER,
    FIRST_INNER_POSITION = BEFORE,
    LAST_INNER_POSITION = AFTER
  };

  ParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
    if (parallel_moves_[pos] == NULL) {
      parallel_moves_[pos] = new (zone) ParallelMove(zone);
    }
    return parallel_moves_[pos];
  }

  ParallelMove* GetParallelMove(InnerPosition pos) {
    return parallel_moves_[pos];
  }

  static GapInstruction* New(Zone* zone) {
    void* buffer = zone->New(sizeof(GapInstruction));
    return new (buffer) GapInstruction(kGapInstruction);
  }

  static GapInstruction* cast(Instruction* instr) {
    ASSERT(instr->IsGapMoves());
    return static_cast<GapInstruction*>(instr);
  }

  static const GapInstruction* cast(const Instruction* instr) {
    ASSERT(instr->IsGapMoves());
    return static_cast<const GapInstruction*>(instr);
  }

 protected:
  explicit GapInstruction(InstructionCode opcode) : Instruction(opcode) {
    parallel_moves_[BEFORE] = NULL;
    parallel_moves_[START] = NULL;
    parallel_moves_[END] = NULL;
    parallel_moves_[AFTER] = NULL;
  }

 private:
  friend OStream& operator<<(OStream& os, const Instruction& instr);
  ParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
};


// This special kind of gap move instruction represents the beginning of a
// block of code.
// TODO(titzer): move code_start and code_end from BasicBlock to here.
class BlockStartInstruction V8_FINAL : public GapInstruction {
 public:
  BasicBlock* block() const { return block_; }
  Label* label() { return &label_; }

  static BlockStartInstruction* New(Zone* zone, BasicBlock* block) {
    void* buffer = zone->New(sizeof(BlockStartInstruction));
    return new (buffer) BlockStartInstruction(block);
  }

  static BlockStartInstruction* cast(Instruction* instr) {
    ASSERT(instr->IsBlockStart());
    return static_cast<BlockStartInstruction*>(instr);
  }

 private:
  explicit BlockStartInstruction(BasicBlock* block)
      : GapInstruction(kBlockStartInstruction), block_(block) {}

  BasicBlock* block_;
  Label label_;
};


class SourcePositionInstruction V8_FINAL : public Instruction {
 public:
  static SourcePositionInstruction* New(Zone* zone, SourcePosition position) {
    void* buffer = zone->New(sizeof(SourcePositionInstruction));
    return new (buffer) SourcePositionInstruction(position);
  }

  SourcePosition source_position() const { return source_position_; }

  static SourcePositionInstruction* cast(Instruction* instr) {
    ASSERT(instr->IsSourcePosition());
    return static_cast<SourcePositionInstruction*>(instr);
  }

  static const SourcePositionInstruction* cast(const Instruction* instr) {
    ASSERT(instr->IsSourcePosition());
    return static_cast<const SourcePositionInstruction*>(instr);
  }

 private:
  explicit SourcePositionInstruction(SourcePosition source_position)
      : Instruction(kSourcePositionInstruction),
        source_position_(source_position) {
    ASSERT(!source_position_.IsInvalid());
    ASSERT(!source_position_.IsUnknown());
  }

  SourcePosition source_position_;
};


class Constant V8_FINAL {
 public:
  enum Type { kInt32, kInt64, kFloat64, kExternalReference, kHeapObject };

  explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
  explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
  explicit Constant(double v) : type_(kFloat64), value_(BitCast<int64_t>(v)) {}
  explicit Constant(ExternalReference ref)
      : type_(kExternalReference), value_(BitCast<intptr_t>(ref)) {}
  explicit Constant(Handle<HeapObject> obj)
      : type_(kHeapObject), value_(BitCast<intptr_t>(obj)) {}

  Type type() const { return type_; }

  int32_t ToInt32() const {
    ASSERT_EQ(kInt32, type());
    return static_cast<int32_t>(value_);
  }

  int64_t ToInt64() const {
    if (type() == kInt32) return ToInt32();
    ASSERT_EQ(kInt64, type());
    return value_;
  }

  double ToFloat64() const {
    if (type() == kInt32) return ToInt32();
    ASSERT_EQ(kFloat64, type());
    return BitCast<double>(value_);
  }

  ExternalReference ToExternalReference() const {
    ASSERT_EQ(kExternalReference, type());
    return BitCast<ExternalReference>(static_cast<intptr_t>(value_));
  }

  Handle<HeapObject> ToHeapObject() const {
    ASSERT_EQ(kHeapObject, type());
    return BitCast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
  }

 private:
  Type type_;
  int64_t value_;
};
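
// Reader sketch (added for illustration; not part of the original change):
// the conversion helpers widen a kInt32 constant on demand, so one stored
// value can be read back at several representations.
//   Constant c(static_cast<int32_t>(42));
//   c.ToInt32();    // 42
//   c.ToInt64();    // 42   (kInt32 widens to int64)
//   c.ToFloat64();  // 42.0 (kInt32 widens to double)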

OStream& operator<<(OStream& os, const Constant& constant);

typedef std::deque<Constant, zone_allocator<Constant> > ConstantDeque;
typedef std::map<int, Constant, std::less<int>,
                 zone_allocator<std::pair<int, Constant> > > ConstantMap;


typedef std::deque<Instruction*, zone_allocator<Instruction*> >
    InstructionDeque;
typedef std::deque<PointerMap*, zone_allocator<PointerMap*> > PointerMapDeque;
typedef std::vector<FrameStateDescriptor, zone_allocator<FrameStateDescriptor> >
    DeoptimizationVector;


// Represents architecture-specific generated code before, during, and after
// register allocation.
// TODO(titzer): s/IsDouble/IsFloat64/
class InstructionSequence V8_FINAL {
 public:
  InstructionSequence(Linkage* linkage, Graph* graph, Schedule* schedule)
      : graph_(graph),
        linkage_(linkage),
        schedule_(schedule),
        constants_(ConstantMap::key_compare(),
                   ConstantMap::allocator_type(zone())),
        immediates_(ConstantDeque::allocator_type(zone())),
        instructions_(InstructionDeque::allocator_type(zone())),
        next_virtual_register_(graph->NodeCount()),
        pointer_maps_(PointerMapDeque::allocator_type(zone())),
        doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
        references_(std::less<int>(),
                    VirtualRegisterSet::allocator_type(zone())),
        deoptimization_entries_(DeoptimizationVector::allocator_type(zone())) {}

  int NextVirtualRegister() { return next_virtual_register_++; }
  int VirtualRegisterCount() const { return next_virtual_register_; }

  int ValueCount() const { return graph_->NodeCount(); }

  int BasicBlockCount() const {
    return static_cast<int>(schedule_->rpo_order()->size());
  }

  BasicBlock* BlockAt(int rpo_number) const {
    return (*schedule_->rpo_order())[rpo_number];
  }

  BasicBlock* GetContainingLoop(BasicBlock* block) {
    return block->loop_header_;
  }

  int GetLoopEnd(BasicBlock* block) const { return block->loop_end_; }

  BasicBlock* GetBasicBlock(int instruction_index);

  int GetVirtualRegister(Node* node) const { return node->id(); }

  bool IsReference(int virtual_register) const;
  bool IsDouble(int virtual_register) const;

  void MarkAsReference(int virtual_register);
  void MarkAsDouble(int virtual_register);

  void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);

  Label* GetLabel(BasicBlock* block);
  BlockStartInstruction* GetBlockStart(BasicBlock* block);

  typedef InstructionDeque::const_iterator const_iterator;
  const_iterator begin() const { return instructions_.begin(); }
  const_iterator end() const { return instructions_.end(); }

  GapInstruction* GapAt(int index) const {
    return GapInstruction::cast(InstructionAt(index));
  }
  bool IsGapAt(int index) const { return InstructionAt(index)->IsGapMoves(); }
  Instruction* InstructionAt(int index) const {
    ASSERT(index >= 0);
    ASSERT(index < static_cast<int>(instructions_.size()));
    return instructions_[index];
  }

  Frame* frame() { return &frame_; }
  Graph* graph() const { return graph_; }
  Isolate* isolate() const { return zone()->isolate(); }
  Linkage* linkage() const { return linkage_; }
  Schedule* schedule() const { return schedule_; }
  const PointerMapDeque* pointer_maps() const { return &pointer_maps_; }
  Zone* zone() const { return graph_->zone(); }

  // Used by the code generator while adding instructions.
  int AddInstruction(Instruction* instr, BasicBlock* block);
  void StartBlock(BasicBlock* block);
  void EndBlock(BasicBlock* block);

  void AddConstant(int virtual_register, Constant constant) {
    ASSERT(constants_.find(virtual_register) == constants_.end());
    constants_.insert(std::make_pair(virtual_register, constant));
  }
  Constant GetConstant(int virtual_register) const {
    ConstantMap::const_iterator it = constants_.find(virtual_register);
    ASSERT(it != constants_.end());
    ASSERT_EQ(virtual_register, it->first);
    return it->second;
  }

  typedef ConstantDeque Immediates;
  const Immediates& immediates() const { return immediates_; }

  int AddImmediate(Constant constant) {
    int index = immediates_.size();
    immediates_.push_back(constant);
    return index;
  }
  Constant GetImmediate(int index) const {
    ASSERT(index >= 0);
    ASSERT(index < static_cast<int>(immediates_.size()));
    return immediates_[index];
  }

  int AddDeoptimizationEntry(const FrameStateDescriptor& descriptor);
  FrameStateDescriptor GetDeoptimizationEntry(int deoptimization_id);
  int GetDeoptimizationEntryCount();

 private:
  friend OStream& operator<<(OStream& os, const InstructionSequence& code);

  typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;

  Graph* graph_;
  Linkage* linkage_;
  Schedule* schedule_;
  ConstantMap constants_;
  ConstantDeque immediates_;
  InstructionDeque instructions_;
  int next_virtual_register_;
  PointerMapDeque pointer_maps_;
  VirtualRegisterSet doubles_;
  VirtualRegisterSet references_;
  Frame frame_;
  DeoptimizationVector deoptimization_entries_;
};

OStream& operator<<(OStream& os, const InstructionSequence& code);

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_H_
0 src/compiler/ir-operations.txt Normal file
93 src/compiler/js-context-specialization.cc Normal file
@ -0,0 +1,93 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/common-operator.h"
#include "src/compiler/generic-node-inl.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

// TODO(titzer): factor this out to a common routine with js-typed-lowering.
static void ReplaceEffectfulWithValue(Node* node, Node* value) {
  Node* effect = NodeProperties::GetEffectInput(node);

  // Requires distinguishing between value and effect edges.
  UseIter iter = node->uses().begin();
  while (iter != node->uses().end()) {
    if (NodeProperties::IsEffectEdge(iter.edge())) {
      iter = iter.UpdateToAndIncrement(effect);
    } else {
      iter = iter.UpdateToAndIncrement(value);
    }
  }
}


void JSContextSpecializer::SpecializeToContext() {
  ValueMatcher<Handle<Context> > match(context_);

  // Iterate over all uses of the context and try to replace {LoadContext}
  // nodes with their values from the constant context.
  UseIter iter = match.node()->uses().begin();
  while (iter != match.node()->uses().end()) {
    Node* use = *iter;
    if (use->opcode() == IrOpcode::kJSLoadContext) {
      Reduction r = ReduceJSLoadContext(use);
      if (r.Changed() && r.replacement() != use) {
        ReplaceEffectfulWithValue(use, r.replacement());
      }
    }
    ++iter;
  }
}


Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
  ASSERT_EQ(IrOpcode::kJSLoadContext, node->opcode());

  ContextAccess access =
      static_cast<Operator1<ContextAccess>*>(node->op())->parameter();

  // Find the right parent context.
  Context* context = *info_->context();
  for (int i = access.depth(); i > 0; --i) {
    context = context->previous();
  }

  // If the access itself is mutable, only fold in the parent.
  if (!access.immutable()) {
    // If the access does not have to look up a parent, there is nothing to
    // fold.
    if (access.depth() == 0) {
      return Reducer::NoChange();
    }
    Operator* op = jsgraph_->javascript()->LoadContext(0, access.index(),
                                                       access.immutable());
    node->set_op(op);
    Handle<Object> context_handle = Handle<Object>(context, info_->isolate());
    node->ReplaceInput(0, jsgraph_->Constant(context_handle));
    return Reducer::Changed(node);
  }
  Handle<Object> value =
      Handle<Object>(context->get(access.index()), info_->isolate());

  // Even though the context slot is immutable, the context might have escaped
  // before the function to which it belongs has initialized the slot.
  // We must be conservative and check if the value in the slot is currently
  // the hole or undefined. If it is neither of these, then it must be
  // initialized.
  if (value->IsUndefined() || value->IsTheHole()) return Reducer::NoChange();

  // Success. The context load can be replaced with the constant.
  // TODO(titzer): record the specialization for sharing code across multiple
  // contexts that have the same value in the corresponding context slot.
  return Reducer::Replace(jsgraph_->Constant(value));
}
}
}
}  // namespace v8::internal::compiler
36 src/compiler/js-context-specialization.h Normal file
@ -0,0 +1,36 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
#define V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_

#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/contexts.h"
#include "src/v8.h"

namespace v8 {
namespace internal {
namespace compiler {

// Specializes a given JSGraph to a given context, potentially constant folding
// some {LoadContext} nodes.
class JSContextSpecializer {
 public:
  JSContextSpecializer(CompilationInfo* info, JSGraph* jsgraph, Node* context)
      : info_(info), jsgraph_(jsgraph), context_(context) {}

  void SpecializeToContext();
  Reduction ReduceJSLoadContext(Node* node);

 private:
  CompilationInfo* info_;
  JSGraph* jsgraph_;
  Node* context_;
};
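
// Typical use, sketched for the reader (not part of the original change;
// 'info', 'jsgraph' and 'context_node' are assumed to come from the
// surrounding compilation pipeline):
//   JSContextSpecializer spec(info, jsgraph, context_node);
//   spec.SpecializeToContext();  // folds immutable JSLoadContext uses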
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
425 src/compiler/js-generic-lowering.cc Normal file
@ -0,0 +1,425 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/code-stubs.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-inl.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-properties-inl.h"
#include "src/unique.h"

namespace v8 {
namespace internal {
namespace compiler {


// TODO(mstarzinger): This is a temporary workaround for non-hydrogen stubs
// for which we don't have an interface descriptor yet. Use
// ReplaceWithICStubCall once these stubs have been made into a
// HydrogenCodeStub.
template <typename T>
static CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate,
                                                           T* stub) {
  CodeStub::Major key = static_cast<CodeStub*>(stub)->MajorKey();
  CodeStubInterfaceDescriptor* d = isolate->code_stub_interface_descriptor(key);
  stub->InitializeInterfaceDescriptor(isolate, d);
  return d;
}


JSGenericLowering::JSGenericLowering(CompilationInfo* info, JSGraph* jsgraph,
                                     MachineOperatorBuilder* machine,
                                     SourcePositionTable* source_positions)
    : LoweringBuilder(jsgraph->graph(), source_positions),
      info_(info),
      jsgraph_(jsgraph),
      linkage_(new (jsgraph->zone()) Linkage(info)),
      machine_(machine) {}


void JSGenericLowering::PatchOperator(Node* node, Operator* op) {
  node->set_op(op);
}


void JSGenericLowering::PatchInsertInput(Node* node, int index, Node* input) {
  node->InsertInput(zone(), index, input);
}


Node* JSGenericLowering::SmiConstant(int32_t immediate) {
  return jsgraph()->SmiConstant(immediate);
}


Node* JSGenericLowering::Int32Constant(int immediate) {
  return jsgraph()->Int32Constant(immediate);
}


Node* JSGenericLowering::CodeConstant(Handle<Code> code) {
  return jsgraph()->HeapConstant(code);
}


Node* JSGenericLowering::FunctionConstant(Handle<JSFunction> function) {
  return jsgraph()->HeapConstant(function);
}


Node* JSGenericLowering::ExternalConstant(ExternalReference ref) {
  return jsgraph()->ExternalConstant(ref);
}


void JSGenericLowering::Lower(Node* node) {
  Node* replacement = NULL;
  // Dispatch according to the opcode.
  switch (node->opcode()) {
#define DECLARE_CASE(x) \
  case IrOpcode::k##x:  \
    replacement = Lower##x(node); \
    break;
    DECLARE_CASE(Branch)
    JS_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
    default:
      // Nothing to see.
      return;
  }

  // Nothing to do if lowering was done by patching the existing node.
  if (replacement == node) return;

  // Iterate through uses of the original node and replace uses accordingly.
  UNIMPLEMENTED();
}
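
// Reader aid (added for illustration; not part of the original change): for
// JSAdd, for example, the DECLARE_CASE macro above expands to
//   case IrOpcode::kJSAdd:
//     replacement = LowerJSAdd(node);
//     break;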


#define REPLACE_IC_STUB_CALL(op, StubDeclaration) \
  Node* JSGenericLowering::Lower##op(Node* node) { \
    StubDeclaration;                               \
    ReplaceWithICStubCall(node, &stub);            \
    return node;                                   \
  }
REPLACE_IC_STUB_CALL(JSBitwiseOr, BinaryOpICStub stub(isolate(), Token::BIT_OR))
REPLACE_IC_STUB_CALL(JSBitwiseXor,
                     BinaryOpICStub stub(isolate(), Token::BIT_XOR))
REPLACE_IC_STUB_CALL(JSBitwiseAnd,
                     BinaryOpICStub stub(isolate(), Token::BIT_AND))
REPLACE_IC_STUB_CALL(JSShiftLeft, BinaryOpICStub stub(isolate(), Token::SHL))
REPLACE_IC_STUB_CALL(JSShiftRight, BinaryOpICStub stub(isolate(), Token::SAR))
REPLACE_IC_STUB_CALL(JSShiftRightLogical,
                     BinaryOpICStub stub(isolate(), Token::SHR))
REPLACE_IC_STUB_CALL(JSAdd, BinaryOpICStub stub(isolate(), Token::ADD))
REPLACE_IC_STUB_CALL(JSSubtract, BinaryOpICStub stub(isolate(), Token::SUB))
REPLACE_IC_STUB_CALL(JSMultiply, BinaryOpICStub stub(isolate(), Token::MUL))
REPLACE_IC_STUB_CALL(JSDivide, BinaryOpICStub stub(isolate(), Token::DIV))
REPLACE_IC_STUB_CALL(JSModulus, BinaryOpICStub stub(isolate(), Token::MOD))
REPLACE_IC_STUB_CALL(JSToNumber, ToNumberStub stub(isolate()))
#undef REPLACE_IC_STUB_CALL


#define REPLACE_COMPARE_IC_CALL(op, token, pure) \
  Node* JSGenericLowering::Lower##op(Node* node) { \
    ReplaceWithCompareIC(node, token, pure);       \
    return node;                                   \
  }
REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ, false)
REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE, false)
REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT, true)
REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT, true)
REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT, false)
REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT, false)
REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE, false)
REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE, false)
#undef REPLACE_COMPARE_IC_CALL


#define REPLACE_RUNTIME_CALL(op, fun)   \
  Node* JSGenericLowering::Lower##op(Node* node) { \
    ReplaceWithRuntimeCall(node, fun);             \
    return node;                                   \
  }
REPLACE_RUNTIME_CALL(JSTypeOf, Runtime::kTypeof)
REPLACE_RUNTIME_CALL(JSCreate, Runtime::kAbort)
REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
REPLACE_RUNTIME_CALL(JSCreateCatchContext, Runtime::kPushCatchContext)
REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
REPLACE_RUNTIME_CALL(JSCreateBlockContext, Runtime::kPushBlockContext)
REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
REPLACE_RUNTIME_CALL(JSCreateGlobalContext, Runtime::kAbort)
#undef REPLACE_RUNTIME_CALL


#define REPLACE_UNIMPLEMENTED(op) \
  Node* JSGenericLowering::Lower##op(Node* node) { \
    UNIMPLEMENTED();                               \
    return node;                                   \
  }
REPLACE_UNIMPLEMENTED(JSToString)
REPLACE_UNIMPLEMENTED(JSToName)
REPLACE_UNIMPLEMENTED(JSYield)
REPLACE_UNIMPLEMENTED(JSDebugger)
#undef REPLACE_UNIMPLEMENTED


void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
                                             bool pure) {
  BinaryOpICStub stub(isolate(), Token::ADD);  // TODO(mstarzinger): Hack.
  CodeStubInterfaceDescriptor* d = stub.GetInterfaceDescriptor();
  CallDescriptor* desc_compare = linkage()->GetStubCallDescriptor(d);
  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), token);
  Node* compare;
  if (pure) {
    // A pure (strict) comparison doesn't have an effect or control.
    // But for the graph, we need to add these inputs.
    compare = graph()->NewNode(common()->Call(desc_compare), CodeConstant(ic),
                               NodeProperties::GetValueInput(node, 0),
                               NodeProperties::GetValueInput(node, 1),
                               NodeProperties::GetContextInput(node),
                               graph()->start(), graph()->start());
  } else {
    compare = graph()->NewNode(common()->Call(desc_compare), CodeConstant(ic),
                               NodeProperties::GetValueInput(node, 0),
                               NodeProperties::GetValueInput(node, 1),
                               NodeProperties::GetContextInput(node),
                               NodeProperties::GetEffectInput(node),
                               NodeProperties::GetControlInput(node));
  }
  node->ReplaceInput(0, compare);
  node->ReplaceInput(1, SmiConstant(token));
  ReplaceWithRuntimeCall(node, Runtime::kBooleanize);
}


void JSGenericLowering::ReplaceWithICStubCall(Node* node,
                                              HydrogenCodeStub* stub) {
  CodeStubInterfaceDescriptor* d = stub->GetInterfaceDescriptor();
  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d);
  Node* stub_code = CodeConstant(stub->GetCode());
  PatchInsertInput(node, 0, stub_code);
  PatchOperator(node, common()->Call(desc));
}


void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
                                               Builtins::JavaScript id,
                                               int nargs) {
  CallFunctionStub stub(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
  CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, nargs);
  // TODO(mstarzinger): Accessing the builtins object this way prevents sharing
  // of code across native contexts. Fix this by loading from given context.
  Handle<JSFunction> function(
      JSFunction::cast(info()->context()->builtins()->javascript_builtin(id)));
  Node* stub_code = CodeConstant(stub.GetCode());
  Node* function_node = FunctionConstant(function);
  PatchInsertInput(node, 0, stub_code);
  PatchInsertInput(node, 1, function_node);
  PatchOperator(node, common()->Call(desc));
}


void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
                                               Runtime::FunctionId f,
                                               int nargs_override) {
  Operator::Property props = node->op()->properties();
  const Runtime::Function* fun = Runtime::FunctionForId(f);
  int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
  CallDescriptor::DeoptimizationSupport deopt =
      NodeProperties::CanLazilyDeoptimize(node)
          ? CallDescriptor::kCanDeoptimize
          : CallDescriptor::kCannotDeoptimize;
  CallDescriptor* desc =
      linkage()->GetRuntimeCallDescriptor(f, nargs, props, deopt);
  Node* ref = ExternalConstant(ExternalReference(f, isolate()));
  Node* arity = Int32Constant(nargs);
  if (!centrystub_constant_.is_set()) {
    centrystub_constant_.set(CodeConstant(CEntryStub(isolate(), 1).GetCode()));
  }
  PatchInsertInput(node, 0, centrystub_constant_.get());
  PatchInsertInput(node, nargs + 1, ref);
  PatchInsertInput(node, nargs + 2, arity);
  PatchOperator(node, common()->Call(desc));
}


Node* JSGenericLowering::LowerBranch(Node* node) {
  Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
                                jsgraph()->TrueConstant());
  node->ReplaceInput(0, test);
  return node;
}


Node* JSGenericLowering::LowerJSUnaryNot(Node* node) {
  ToBooleanStub stub(isolate());
  CodeStubInterfaceDescriptor* d = stub.GetInterfaceDescriptor();
  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d);
  Node* to_bool =
      graph()->NewNode(common()->Call(desc), CodeConstant(stub.GetCode()),
                       NodeProperties::GetValueInput(node, 0),
                       NodeProperties::GetContextInput(node),
                       NodeProperties::GetEffectInput(node),
                       NodeProperties::GetControlInput(node));
  node->ReplaceInput(0, to_bool);
  PatchInsertInput(node, 1, SmiConstant(Token::EQ));
  ReplaceWithRuntimeCall(node, Runtime::kBooleanize);
  return node;
}


Node* JSGenericLowering::LowerJSToBoolean(Node* node) {
  ToBooleanStub stub(isolate());
  CodeStubInterfaceDescriptor* d = stub.GetInterfaceDescriptor();
  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d);
  Node* to_bool =
      graph()->NewNode(common()->Call(desc), CodeConstant(stub.GetCode()),
                       NodeProperties::GetValueInput(node, 0),
                       NodeProperties::GetContextInput(node),
                       NodeProperties::GetEffectInput(node),
                       NodeProperties::GetControlInput(node));
  node->ReplaceInput(0, to_bool);
  PatchInsertInput(node, 1, SmiConstant(Token::NE));
  ReplaceWithRuntimeCall(node, Runtime::kBooleanize);
  return node;
}


Node* JSGenericLowering::LowerJSToObject(Node* node) {
  ReplaceWithBuiltinCall(node, Builtins::TO_OBJECT, 1);
  return node;
}


Node* JSGenericLowering::LowerJSLoadProperty(Node* node) {
  if (FLAG_compiled_keyed_generic_loads) {
    KeyedLoadGenericStub stub(isolate());
    ReplaceWithICStubCall(node, &stub);
  } else {
    ReplaceWithRuntimeCall(node, Runtime::kKeyedGetProperty);
  }
  return node;
}


Node* JSGenericLowering::LowerJSLoadNamed(Node* node) {
  Node* key =
      jsgraph()->HeapConstant(OpParameter<PrintableUnique<Name> >(node));
  PatchInsertInput(node, 1, key);
  // TODO(mstarzinger): We cannot yet use KeyedLoadGenericElementStub here,
  // because named interceptors would not fire correctly yet.
  ReplaceWithRuntimeCall(node, Runtime::kGetProperty);
  return node;
}


Node* JSGenericLowering::LowerJSStoreProperty(Node* node) {
  // TODO(mstarzinger): The strict_mode needs to be carried along in the
  // operator so that graphs are fully compositional for inlining.
  StrictMode strict_mode = info()->strict_mode();
  PatchInsertInput(node, 3, SmiConstant(strict_mode));
  ReplaceWithRuntimeCall(node, Runtime::kSetProperty, 4);
  return node;
}


Node* JSGenericLowering::LowerJSStoreNamed(Node* node) {
  // TODO(mstarzinger): The strict_mode needs to be carried along in the
  // operator so that graphs are fully compositional for inlining.
  StrictMode strict_mode = info()->strict_mode();
  Node* key =
      jsgraph()->HeapConstant(OpParameter<PrintableUnique<Name> >(node));
  PatchInsertInput(node, 1, key);
  PatchInsertInput(node, 3, SmiConstant(strict_mode));
  ReplaceWithRuntimeCall(node, Runtime::kSetProperty, 4);
  return node;
}


Node* JSGenericLowering::LowerJSDeleteProperty(Node* node) {
  StrictMode strict_mode = OpParameter<StrictMode>(node);
  PatchInsertInput(node, 2, SmiConstant(strict_mode));
  ReplaceWithBuiltinCall(node, Builtins::DELETE, 3);
  return node;
}


Node* JSGenericLowering::LowerJSHasProperty(Node* node) {
  ReplaceWithBuiltinCall(node, Builtins::IN, 2);
  return node;
}


Node* JSGenericLowering::LowerJSInstanceOf(Node* node) {
  InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
      InstanceofStub::kReturnTrueFalseObject |
      InstanceofStub::kArgsInRegisters);
  InstanceofStub stub(isolate(), flags);
  CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, 0);
  Node* stub_code = CodeConstant(stub.GetCode());
  PatchInsertInput(node, 0, stub_code);
  PatchOperator(node, common()->Call(desc));
  return node;
}


Node* JSGenericLowering::LowerJSLoadContext(Node* node) {
  ContextAccess access = OpParameter<ContextAccess>(node);
  PatchInsertInput(node, 1, SmiConstant(access.depth()));
  PatchInsertInput(node, 2, SmiConstant(access.index()));
  ReplaceWithRuntimeCall(node, Runtime::kLoadContextRelative, 3);
  return node;
}


Node* JSGenericLowering::LowerJSStoreContext(Node* node) {
  ContextAccess access = OpParameter<ContextAccess>(node);
  PatchInsertInput(node, 1, SmiConstant(access.depth()));
  PatchInsertInput(node, 2, SmiConstant(access.index()));
  ReplaceWithRuntimeCall(node, Runtime::kStoreContextRelative, 4);
  return node;
}


Node* JSGenericLowering::LowerJSCallConstruct(Node* node) {
  int arity = OpParameter<int>(node);
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, arity);
  Node* stub_code = CodeConstant(stub.GetCode());
  Node* construct = NodeProperties::GetValueInput(node, 0);
  PatchInsertInput(node, 0, stub_code);
  PatchInsertInput(node, 1, Int32Constant(arity - 1));
  PatchInsertInput(node, 2, construct);
  PatchInsertInput(node, 3, jsgraph()->UndefinedConstant());
  PatchOperator(node, common()->Call(desc));
  return node;
}


Node* JSGenericLowering::LowerJSCallFunction(Node* node) {
  CallParameters p = OpParameter<CallParameters>(node);
  CallFunctionStub stub(isolate(), p.arity - 2, p.flags);
|
||||
CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
|
||||
CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, p.arity - 1);
|
||||
Node* stub_code = CodeConstant(stub.GetCode());
|
||||
PatchInsertInput(node, 0, stub_code);
|
||||
PatchOperator(node, common()->Call(desc));
|
||||
return node;
|
||||
}
|
||||
|
||||
|
||||
Node* JSGenericLowering::LowerJSCallRuntime(Node* node) {
|
||||
Runtime::FunctionId function = OpParameter<Runtime::FunctionId>(node);
|
||||
int arity = NodeProperties::GetValueInputCount(node);
|
||||
ReplaceWithRuntimeCall(node, function, arity);
|
||||
return node;
|
||||
}
|
||||
}
|
||||
}
|
||||
} // namespace v8::internal::compiler
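
Every Lower* method above follows the same recipe: splice new inputs into the node's input list, then swap the operator for a generic Call. A minimal toy model of the PatchInsertInput shuffle (not the V8 API; a node's inputs are modeled here as a plain vector of strings, and the input names are illustrative):

#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for a node's input list; the real Node class is V8-internal.
using Inputs = std::vector<std::string>;

// Models PatchInsertInput(node, index, input): inputs at >= index shift right.
void PatchInsertInput(Inputs* inputs, int index, const std::string& input) {
  inputs->insert(inputs->begin() + index, input);
}

int main() {
  // A runtime-call node with two value inputs, plus context/effect/control.
  Inputs node = {"arg0", "arg1", "context", "effect", "control"};
  int nargs = 2;
  PatchInsertInput(&node, 0, "centry_stub");       // code object goes first
  PatchInsertInput(&node, nargs + 1, "func_ref");  // after the arguments
  PatchInsertInput(&node, nargs + 2, "arity");
  for (const std::string& s : node) std::cout << s << " ";
  std::cout << "\n";  // centry_stub arg0 arg1 func_ref arity context ...
}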

83  src/compiler/js-generic-lowering.h  Normal file
@ -0,0 +1,83 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_JS_GENERIC_LOWERING_H_
#define V8_COMPILER_JS_GENERIC_LOWERING_H_

#include "src/v8.h"

#include "src/allocation.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/lowering-builder.h"
#include "src/compiler/opcodes.h"
#include "src/unique.h"

namespace v8 {
namespace internal {

// Forward declarations.
class HydrogenCodeStub;

namespace compiler {

// Forward declarations.
class CommonOperatorBuilder;
class MachineOperatorBuilder;
class Linkage;

// Lowers JS-level operators to runtime and IC calls in the "generic" case.
class JSGenericLowering : public LoweringBuilder {
 public:
  JSGenericLowering(CompilationInfo* info, JSGraph* graph,
                    MachineOperatorBuilder* machine,
                    SourcePositionTable* source_positions);
  virtual ~JSGenericLowering() {}

  virtual void Lower(Node* node);

 protected:
  // Dispatched depending on opcode.
#define DECLARE_LOWER(x) Node* Lower##x(Node* node);
  ALL_OP_LIST(DECLARE_LOWER)
#undef DECLARE_LOWER

  // Helpers to create new constant nodes.
  Node* SmiConstant(int immediate);
  Node* Int32Constant(int immediate);
  Node* CodeConstant(Handle<Code> code);
  Node* FunctionConstant(Handle<JSFunction> function);
  Node* ExternalConstant(ExternalReference ref);

  // Helpers to patch existing nodes in the graph.
  void PatchOperator(Node* node, Operator* new_op);
  void PatchInsertInput(Node* node, int index, Node* input);

  // Helpers to replace existing nodes with a generic call.
  void ReplaceWithCompareIC(Node* node, Token::Value token, bool pure);
  void ReplaceWithICStubCall(Node* node, HydrogenCodeStub* stub);
  void ReplaceWithBuiltinCall(Node* node, Builtins::JavaScript id, int args);
  void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);

  Zone* zone() const { return graph()->zone(); }
  Isolate* isolate() const { return zone()->isolate(); }
  JSGraph* jsgraph() const { return jsgraph_; }
  Graph* graph() const { return jsgraph()->graph(); }
  Linkage* linkage() const { return linkage_; }
  CompilationInfo* info() const { return info_; }
  CommonOperatorBuilder* common() const { return jsgraph()->common(); }
  MachineOperatorBuilder* machine() const { return machine_; }

 private:
  CompilationInfo* info_;
  JSGraph* jsgraph_;
  Linkage* linkage_;
  MachineOperatorBuilder* machine_;
  SetOncePointer<Node> centrystub_constant_;
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_JS_GENERIC_LOWERING_H_
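
The DECLARE_LOWER block above is the classic X-macro pattern: ALL_OP_LIST invokes DECLARE_LOWER once per opcode, generating one Lower##x declaration each. A self-contained sketch of the same expansion, using a hypothetical two-entry op list standing in for V8's actual ALL_OP_LIST:

#include <iostream>

struct Node;  // opaque, just as in the header above

// Hypothetical op list; V8's ALL_OP_LIST enumerates every IR opcode.
#define MY_OP_LIST(V) \
  V(JSAdd)            \
  V(JSSubtract)

struct DemoLowering {
  // Expands to:
  //   Node* LowerJSAdd(Node* node);
  //   Node* LowerJSSubtract(Node* node);
#define DECLARE_LOWER(x) Node* Lower##x(Node* node);
  MY_OP_LIST(DECLARE_LOWER)
#undef DECLARE_LOWER
};

int main() {
  std::cout << "DemoLowering declares one Lower* method per list entry\n";
}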

174  src/compiler/js-graph.cc  Normal file
@ -0,0 +1,174 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties-inl.h"
#include "src/compiler/typer.h"

namespace v8 {
namespace internal {
namespace compiler {

Node* JSGraph::ImmovableHeapConstant(Handle<Object> object) {
  PrintableUnique<Object> unique =
      PrintableUnique<Object>::CreateImmovable(zone(), object);
  return NewNode(common()->HeapConstant(unique));
}


Node* JSGraph::NewNode(Operator* op) {
  Node* node = graph()->NewNode(op);
  typer_->Init(node);
  return node;
}


Node* JSGraph::UndefinedConstant() {
  if (!undefined_constant_.is_set()) {
    undefined_constant_.set(
        ImmovableHeapConstant(factory()->undefined_value()));
  }
  return undefined_constant_.get();
}


Node* JSGraph::TheHoleConstant() {
  if (!the_hole_constant_.is_set()) {
    the_hole_constant_.set(ImmovableHeapConstant(factory()->the_hole_value()));
  }
  return the_hole_constant_.get();
}


Node* JSGraph::TrueConstant() {
  if (!true_constant_.is_set()) {
    true_constant_.set(ImmovableHeapConstant(factory()->true_value()));
  }
  return true_constant_.get();
}


Node* JSGraph::FalseConstant() {
  if (!false_constant_.is_set()) {
    false_constant_.set(ImmovableHeapConstant(factory()->false_value()));
  }
  return false_constant_.get();
}


Node* JSGraph::NullConstant() {
  if (!null_constant_.is_set()) {
    null_constant_.set(ImmovableHeapConstant(factory()->null_value()));
  }
  return null_constant_.get();
}


Node* JSGraph::ZeroConstant() {
  if (!zero_constant_.is_set()) zero_constant_.set(NumberConstant(0.0));
  return zero_constant_.get();
}


Node* JSGraph::OneConstant() {
  if (!one_constant_.is_set()) one_constant_.set(NumberConstant(1.0));
  return one_constant_.get();
}


Node* JSGraph::NaNConstant() {
  if (!nan_constant_.is_set()) {
    nan_constant_.set(NumberConstant(base::OS::nan_value()));
  }
  return nan_constant_.get();
}
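
Each canonical constant above is built lazily through a SetOncePointer, so the node is created on first use and shared by every later request. A minimal sketch of that set-once pattern (a toy class under assumed semantics, not V8's actual SetOncePointer):

#include <cassert>
#include <iostream>

// Toy version of a set-once, lazily-initialized pointer.
template <typename T>
class SetOncePtr {
 public:
  bool is_set() const { return ptr_ != nullptr; }
  void set(T* value) {
    assert(!is_set());  // may only be initialized once
    ptr_ = value;
  }
  T* get() const {
    assert(is_set());
    return ptr_;
  }

 private:
  T* ptr_ = nullptr;
};

int main() {
  static int the_constant = 42;  // stands in for a canonical graph node
  SetOncePtr<int> cache;
  if (!cache.is_set()) cache.set(&the_constant);  // first use creates it
  // A second lookup takes the cached pointer instead of creating again.
  std::cout << *cache.get() << "\n";  // 42
}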


Node* JSGraph::HeapConstant(PrintableUnique<Object> value) {
  // TODO(turbofan): canonicalize heap constants using Unique<T>
  return NewNode(common()->HeapConstant(value));
}


Node* JSGraph::HeapConstant(Handle<Object> value) {
  // TODO(titzer): We could also match against the addresses of immortal
  // immovables here, even without access to the heap, thus always
  // canonicalizing references to them.
  return HeapConstant(
      PrintableUnique<Object>::CreateUninitialized(zone(), value));
}


Node* JSGraph::Constant(Handle<Object> value) {
  // Dereference the handle to determine if a number constant or other
  // canonicalized node can be used.
  if (value->IsNumber()) {
    return Constant(value->Number());
  } else if (value->IsUndefined()) {
    return UndefinedConstant();
  } else if (value->IsTrue()) {
    return TrueConstant();
  } else if (value->IsFalse()) {
    return FalseConstant();
  } else if (value->IsNull()) {
    return NullConstant();
  } else if (value->IsTheHole()) {
    return TheHoleConstant();
  } else {
    return HeapConstant(value);
  }
}


Node* JSGraph::Constant(double value) {
  if (BitCast<int64_t>(value) == BitCast<int64_t>(0.0)) return ZeroConstant();
  if (BitCast<int64_t>(value) == BitCast<int64_t>(1.0)) return OneConstant();
  return NumberConstant(value);
}
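
Constant(double) compares bit patterns instead of using ==, because -0.0 == 0.0 numerically even though the two values are distinguishable; the bit-cast keeps -0.0 from being wrongly canonicalized to the ZeroConstant node. A standalone demonstration of why that matters (BitCastToInt64 here is a portable stand-in for V8's BitCast):

#include <cstdint>
#include <cstring>
#include <iostream>

// Reinterpret a double's bit pattern as an integer, without UB.
int64_t BitCastToInt64(double value) {
  int64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}

int main() {
  double pos_zero = 0.0;
  double neg_zero = -0.0;
  std::cout << (pos_zero == neg_zero) << "\n";  // 1: numerically equal
  std::cout << (BitCastToInt64(pos_zero) == BitCastToInt64(neg_zero))
            << "\n";                            // 0: different bit patterns
  std::cout << (1.0 / neg_zero) << "\n";        // -inf: the sign is observable
}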


Node* JSGraph::Constant(int32_t value) {
  if (value == 0) return ZeroConstant();
  if (value == 1) return OneConstant();
  return NumberConstant(value);
}


Node* JSGraph::Int32Constant(int32_t value) {
  Node** loc = cache_.FindInt32Constant(value);
  if (*loc == NULL) {
    *loc = NewNode(common()->Int32Constant(value));
  }
  return *loc;
}


Node* JSGraph::NumberConstant(double value) {
  Node** loc = cache_.FindNumberConstant(value);
  if (*loc == NULL) {
    *loc = NewNode(common()->NumberConstant(value));
  }
  return *loc;
}


Node* JSGraph::Float64Constant(double value) {
  Node** loc = cache_.FindFloat64Constant(value);
  if (*loc == NULL) {
    *loc = NewNode(common()->Float64Constant(value));
  }
  return *loc;
}


Node* JSGraph::ExternalConstant(ExternalReference reference) {
  Node** loc = cache_.FindExternalConstant(reference);
  if (*loc == NULL) {
    *loc = NewNode(common()->ExternalConstant(reference));
  }
  return *loc;
}
}  // namespace compiler
}  // namespace internal
}  // namespace v8
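
The Find*Constant calls above return the address of a cache slot, letting the caller fill a miss in place. A rough sketch of such a cache for number constants, keyed by bit pattern so that 0.0 and -0.0 get distinct entries (toy code under assumed semantics, not V8's CommonNodeCache):

#include <cstdint>
#include <cstring>
#include <iostream>
#include <unordered_map>

struct Node { double value; };  // toy graph node

class NumberConstantCache {
 public:
  // Returns the address of the slot for {value}; a null slot means a miss,
  // mirroring the FindNumberConstant protocol used above.
  Node** Find(double value) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    return &map_[bits];
  }

 private:
  std::unordered_map<uint64_t, Node*> map_;
};

int main() {
  NumberConstantCache cache;
  Node** loc = cache.Find(3.14);
  if (*loc == nullptr) *loc = new Node{3.14};  // miss: create the node
  Node** again = cache.Find(3.14);
  std::cout << (*loc == *again) << "\n";  // 1: same canonical node
  delete *loc;
}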

107  src/compiler/js-graph.h  Normal file
@ -0,0 +1,107 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_JS_GRAPH_H_
#define V8_COMPILER_JS_GRAPH_H_

#include "src/compiler/common-node-cache.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

class Typer;

// Implements a facade on a Graph, enhancing the graph with JS-specific
// notions, including a builder for JS* operators, canonicalized global
// constants, and various helper methods.
class JSGraph : public ZoneObject {
 public:
  JSGraph(Graph* graph, CommonOperatorBuilder* common, Typer* typer)
      : graph_(graph),
        common_(common),
        javascript_(zone()),
        typer_(typer),
        cache_(zone()) {}

  // Canonicalized global constants.
  Node* UndefinedConstant();
  Node* TheHoleConstant();
  Node* TrueConstant();
  Node* FalseConstant();
  Node* NullConstant();
  Node* ZeroConstant();
  Node* OneConstant();
  Node* NaNConstant();

  // Creates a HeapConstant node, possibly canonicalized, without inspecting
  // the object.
  Node* HeapConstant(PrintableUnique<Object> value);

  // Creates a HeapConstant node, possibly canonicalized, and may access the
  // heap to inspect the object.
  Node* HeapConstant(Handle<Object> value);

  // Creates a Constant node of the appropriate type for the given object.
  // Accesses the heap to inspect the object and determine whether one of the
  // canonicalized globals or a number constant should be returned.
  Node* Constant(Handle<Object> value);

  // Creates a NumberConstant node, usually canonicalized.
  Node* Constant(double value);

  // Creates a NumberConstant node, usually canonicalized.
  Node* Constant(int32_t value);

  // Creates an Int32Constant node, usually canonicalized.
  Node* Int32Constant(int32_t value);

  // Creates a Float64Constant node, usually canonicalized.
  Node* Float64Constant(double value);

  // Creates an ExternalConstant node, usually canonicalized.
  Node* ExternalConstant(ExternalReference ref);

  Node* SmiConstant(int32_t immediate) {
    ASSERT(Smi::IsValid(immediate));
    return Constant(immediate);
  }

  JSOperatorBuilder* javascript() { return &javascript_; }
  CommonOperatorBuilder* common() { return common_; }
  Graph* graph() { return graph_; }
  Zone* zone() { return graph()->zone(); }

 private:
  Graph* graph_;
  CommonOperatorBuilder* common_;
  JSOperatorBuilder javascript_;
  Typer* typer_;

  SetOncePointer<Node> undefined_constant_;
  SetOncePointer<Node> the_hole_constant_;
  SetOncePointer<Node> true_constant_;
  SetOncePointer<Node> false_constant_;
  SetOncePointer<Node> null_constant_;
  SetOncePointer<Node> zero_constant_;
  SetOncePointer<Node> one_constant_;
  SetOncePointer<Node> nan_constant_;

  CommonNodeCache cache_;

  Node* ImmovableHeapConstant(Handle<Object> value);
  Node* NumberConstant(double value);
  Node* NewNode(Operator* op);

  Factory* factory() { return zone()->isolate()->factory(); }
};
}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_JS_GRAPH_H_

204  src/compiler/js-operator.h  Normal file
@ -0,0 +1,204 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_JS_OPERATOR_H_
#define V8_COMPILER_JS_OPERATOR_H_

#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/unique.h"
#include "src/zone.h"

namespace v8 {
namespace internal {
namespace compiler {

// Defines the location of a context slot relative to a specific scope. This is
// used as a parameter by JSLoadContext and JSStoreContext operators and allows
// accessing a context-allocated variable without keeping track of the scope.
class ContextAccess {
 public:
  ContextAccess(int depth, int index, bool immutable)
      : immutable_(immutable), depth_(depth), index_(index) {
    ASSERT(0 <= depth && depth <= kMaxUInt16);
    ASSERT(0 <= index && static_cast<uint32_t>(index) <= kMaxUInt32);
  }
  int depth() const { return depth_; }
  int index() const { return index_; }
  bool immutable() const { return immutable_; }

 private:
  // For space reasons, we keep this tightly packed, otherwise we could just
  // use a simple int/int/bool POD.
  const bool immutable_;
  const uint16_t depth_;
  const uint32_t index_;
};
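
The bool/uint16/uint32 layout above typically packs into 8 bytes where the int/int/bool POD mentioned in the comment would take 12; exact sizes are ABI-dependent, so treat this as an illustration rather than a guarantee. A quick check:

#include <cstdint>
#include <iostream>

struct Packed {   // mirrors ContextAccess's field order
  bool immutable;
  uint16_t depth;
  uint32_t index;
};

struct Plain {    // the "simple int/int/bool POD" alternative
  int depth;
  int index;
  bool immutable;
};

int main() {
  // Commonly prints "8 12" on mainstream ABIs; padding rules may vary.
  std::cout << sizeof(Packed) << " " << sizeof(Plain) << "\n";
}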

// Defines the arity and the call flags for a JavaScript function call. This is
// used as a parameter by JSCall operators.
struct CallParameters {
  int arity;
  CallFunctionFlags flags;
};

// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
// graphs.
class JSOperatorBuilder {
 public:
  explicit JSOperatorBuilder(Zone* zone) : zone_(zone) {}

#define SIMPLE(name, properties, inputs, outputs) \
  return new (zone_)                              \
      SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);

#define NOPROPS(name, inputs, outputs) \
  SIMPLE(name, Operator::kNoProperties, inputs, outputs)

#define OP1(name, ptype, pname, properties, inputs, outputs)                 \
  return new (zone_) Operator1<ptype>(IrOpcode::k##name, properties, inputs, \
                                      outputs, #name, pname)

#define BINOP(name) NOPROPS(name, 2, 1)
#define UNOP(name) NOPROPS(name, 1, 1)

#define PURE_BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)

  Operator* Equal() { BINOP(JSEqual); }
  Operator* NotEqual() { BINOP(JSNotEqual); }
  Operator* StrictEqual() { PURE_BINOP(JSStrictEqual); }
  Operator* StrictNotEqual() { PURE_BINOP(JSStrictNotEqual); }
  Operator* LessThan() { BINOP(JSLessThan); }
  Operator* GreaterThan() { BINOP(JSGreaterThan); }
  Operator* LessThanOrEqual() { BINOP(JSLessThanOrEqual); }
  Operator* GreaterThanOrEqual() { BINOP(JSGreaterThanOrEqual); }
  Operator* BitwiseOr() { BINOP(JSBitwiseOr); }
  Operator* BitwiseXor() { BINOP(JSBitwiseXor); }
  Operator* BitwiseAnd() { BINOP(JSBitwiseAnd); }
  Operator* ShiftLeft() { BINOP(JSShiftLeft); }
  Operator* ShiftRight() { BINOP(JSShiftRight); }
  Operator* ShiftRightLogical() { BINOP(JSShiftRightLogical); }
  Operator* Add() { BINOP(JSAdd); }
  Operator* Subtract() { BINOP(JSSubtract); }
  Operator* Multiply() { BINOP(JSMultiply); }
  Operator* Divide() { BINOP(JSDivide); }
  Operator* Modulus() { BINOP(JSModulus); }

  Operator* UnaryNot() { UNOP(JSUnaryNot); }
  Operator* ToBoolean() { UNOP(JSToBoolean); }
  Operator* ToNumber() { UNOP(JSToNumber); }
  Operator* ToString() { UNOP(JSToString); }
  Operator* ToName() { UNOP(JSToName); }
  Operator* ToObject() { UNOP(JSToObject); }
  Operator* Yield() { UNOP(JSYield); }

  Operator* Create() { SIMPLE(JSCreate, Operator::kEliminatable, 0, 1); }

  Operator* Call(int arguments, CallFunctionFlags flags) {
    CallParameters parameters = {arguments, flags};
    OP1(JSCallFunction, CallParameters, parameters, Operator::kNoProperties,
        arguments, 1);
  }

  Operator* CallNew(int arguments) {
    return new (zone_)
        Operator1<int>(IrOpcode::kJSCallConstruct, Operator::kNoProperties,
                       arguments, 1, "JSCallConstruct", arguments);
  }

  Operator* LoadProperty() { BINOP(JSLoadProperty); }
  Operator* LoadNamed(PrintableUnique<Name> name) {
    OP1(JSLoadNamed, PrintableUnique<Name>, name, Operator::kNoProperties, 1,
        1);
  }

  Operator* StoreProperty() { NOPROPS(JSStoreProperty, 3, 0); }
  Operator* StoreNamed(PrintableUnique<Name> name) {
    OP1(JSStoreNamed, PrintableUnique<Name>, name, Operator::kNoProperties, 2,
        0);
  }

  Operator* DeleteProperty(StrictMode strict_mode) {
    OP1(JSDeleteProperty, StrictMode, strict_mode, Operator::kNoProperties, 2,
        1);
  }

  Operator* HasProperty() { NOPROPS(JSHasProperty, 2, 1); }

  Operator* LoadContext(uint16_t depth, uint32_t index, bool immutable) {
    ContextAccess access(depth, index, immutable);
    OP1(JSLoadContext, ContextAccess, access,
        Operator::kEliminatable | Operator::kNoWrite, 1, 1);
  }
  Operator* StoreContext(uint16_t depth, uint32_t index) {
    ContextAccess access(depth, index, false);
    OP1(JSStoreContext, ContextAccess, access, Operator::kNoProperties, 2, 1);
  }

  Operator* TypeOf() { SIMPLE(JSTypeOf, Operator::kPure, 1, 1); }
  Operator* InstanceOf() { NOPROPS(JSInstanceOf, 2, 1); }
  Operator* Debugger() { NOPROPS(JSDebugger, 0, 0); }

  // TODO(titzer): nail down the static parts of each of these context flavors.
  Operator* CreateFunctionContext() { NOPROPS(JSCreateFunctionContext, 1, 1); }
  Operator* CreateCatchContext(PrintableUnique<String> name) {
    OP1(JSCreateCatchContext, PrintableUnique<String>, name,
        Operator::kNoProperties, 1, 1);
  }
  Operator* CreateWithContext() { NOPROPS(JSCreateWithContext, 2, 1); }
  Operator* CreateBlockContext() { NOPROPS(JSCreateBlockContext, 2, 1); }
  Operator* CreateModuleContext() { NOPROPS(JSCreateModuleContext, 2, 1); }
  Operator* CreateGlobalContext() { NOPROPS(JSCreateGlobalContext, 2, 1); }

  Operator* Runtime(Runtime::FunctionId function, int arguments) {
    const Runtime::Function* f = Runtime::FunctionForId(function);
    ASSERT(f->nargs == -1 || f->nargs == arguments);
    OP1(JSCallRuntime, Runtime::FunctionId, function, Operator::kNoProperties,
        arguments, f->result_size);
  }

#undef SIMPLE
#undef NOPROPS
#undef OP1
#undef BINOP
#undef UNOP

 private:
  Zone* zone_;
};

// Specialization for static parameters of type {ContextAccess}.
template <>
struct StaticParameterTraits<ContextAccess> {
  static OStream& PrintTo(OStream& os, ContextAccess val) {  // NOLINT
    return os << val.depth() << "," << val.index()
              << (val.immutable() ? ",imm" : "");
  }
  static int HashCode(ContextAccess val) {
    return (val.depth() << 16) | (val.index() & 0xffff);
  }
  static bool Equals(ContextAccess a, ContextAccess b) {
    return a.immutable() == b.immutable() && a.depth() == b.depth() &&
           a.index() == b.index();
  }
};

// Specialization for static parameters of type {Runtime::FunctionId}.
template <>
struct StaticParameterTraits<Runtime::FunctionId> {
  static OStream& PrintTo(OStream& os, Runtime::FunctionId val) {  // NOLINT
    const Runtime::Function* f = Runtime::FunctionForId(val);
    return os << (f->name ? f->name : "?Runtime?");
  }
  static int HashCode(Runtime::FunctionId val) { return static_cast<int>(val); }
  static bool Equals(Runtime::FunctionId a, Runtime::FunctionId b) {
    return a == b;
  }
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_JS_OPERATOR_H_
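
The OP1 macro above allocates an Operator1<T> so the static parameter travels with the operator itself rather than as a graph input. A stripped-down sketch of such a parameterized operator (toy types; the real Operator1 also carries the opcode, properties, and input/output counts):

#include <iostream>

// Toy base class; V8's Operator also stores opcode and property flags.
struct Operator {
  explicit Operator(const char* mnemonic) : mnemonic_(mnemonic) {}
  virtual ~Operator() {}
  const char* mnemonic() const { return mnemonic_; }

 private:
  const char* mnemonic_;
};

// A parameterized operator: the static parameter rides along with the op.
template <typename T>
struct Operator1 : public Operator {
  Operator1(const char* mnemonic, T parameter)
      : Operator(mnemonic), parameter_(parameter) {}
  const T& parameter() const { return parameter_; }

 private:
  T parameter_;
};

int main() {
  // Like CallNew(3): the arity is baked into the operator instance.
  Operator1<int> call_new("JSCallConstruct", 3);
  std::cout << call_new.mnemonic() << " arity=" << call_new.parameter()
            << "\n";
}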

604  src/compiler/js-typed-lowering.cc  Normal file
@ -0,0 +1,604 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/graph-inl.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-properties-inl.h"
#include "src/types.h"

namespace v8 {
namespace internal {
namespace compiler {

// TODO(turbofan): js-typed-lowering improvements possible
// - immediately put in type bounds for all new nodes
// - relax effects from generic but not-side-effecting operations
// - relax effects for ToNumber(mixed)

// Replace value uses of {node} with {value} and effect uses of {node} with
// {effect}. If {effect == NULL}, then use the effect input to {node}.
// TODO(titzer): move into a GraphEditor?
static void ReplaceUses(Node* node, Node* value, Node* effect) {
  if (value == effect) {
    // Effect and value updates are the same; no special iteration needed.
    if (value != node) node->ReplaceUses(value);
    return;
  }

  if (effect == NULL) effect = NodeProperties::GetEffectInput(node);

  // The iteration requires distinguishing between value and effect edges.
  UseIter iter = node->uses().begin();
  while (iter != node->uses().end()) {
    if (NodeProperties::IsEffectEdge(iter.edge())) {
      iter = iter.UpdateToAndIncrement(effect);
    } else {
      iter = iter.UpdateToAndIncrement(value);
    }
  }
}


// Relax the effects of {node} by immediately replacing effect uses of {node}
// with the effect input to {node}.
// TODO(turbofan): replace the effect input to {node} with {graph->start()}.
// TODO(titzer): move into a GraphEditor?
static void RelaxEffects(Node* node) { ReplaceUses(node, node, NULL); }


Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
  ReplaceUses(old, node, node);
  return Reducer::Changed(node);
}


// A helper class to simplify the process of reducing a single binop node with a
// JSOperator. This class manages the rewriting of context, control, and effect
// dependencies during lowering of a binop and contains numerous helper
// functions for matching the types of inputs to an operation.
class JSBinopReduction {
 public:
  JSBinopReduction(JSTypedLowering* lowering, Node* node)
      : lowering_(lowering),
        node_(node),
        left_type_(NodeProperties::GetBounds(node->InputAt(0)).upper),
        right_type_(NodeProperties::GetBounds(node->InputAt(1)).upper) {}

  void ConvertInputsToNumber() {
    node_->ReplaceInput(0, ConvertToNumber(left()));
    node_->ReplaceInput(1, ConvertToNumber(right()));
  }

  void ConvertInputsToInt32(bool left_signed, bool right_signed) {
    node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
    node_->ReplaceInput(1, ConvertToI32(right_signed, right()));
  }

  void ConvertInputsToString() {
    node_->ReplaceInput(0, ConvertToString(left()));
    node_->ReplaceInput(1, ConvertToString(right()));
  }

  // Convert inputs for bitwise shift operation (ES5 spec 11.7).
  void ConvertInputsForShift(bool left_signed) {
    node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
    Node* rnum = ConvertToI32(false, right());
    node_->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rnum,
                                            jsgraph()->Int32Constant(0x1F)));
  }

  void SwapInputs() {
    Node* l = left();
    Node* r = right();
    node_->ReplaceInput(0, r);
    node_->ReplaceInput(1, l);
    std::swap(left_type_, right_type_);
  }

  // Remove all effect and control inputs and outputs to this node and change
  // to the pure operator {op}, possibly inserting a boolean inversion.
  Reduction ChangeToPureOperator(Operator* op, bool invert = false) {
    ASSERT_EQ(0, OperatorProperties::GetEffectInputCount(op));
    ASSERT_EQ(false, OperatorProperties::HasContextInput(op));
    ASSERT_EQ(0, OperatorProperties::GetControlInputCount(op));
    ASSERT_EQ(2, OperatorProperties::GetValueInputCount(op));

    // Remove the effects from the node, if any, and update its effect usages.
    if (OperatorProperties::GetEffectInputCount(node_->op()) > 0) {
      RelaxEffects(node_);
    }
    // Remove the inputs corresponding to context, effect, and control.
    NodeProperties::RemoveNonValueInputs(node_);
    // Finally, update the operator to the new one.
    node_->set_op(op);

    if (invert) {
      // Insert a boolean not to invert the value.
      Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
      node_->ReplaceUses(value);
      // Note: ReplaceUses() smashes all uses, so smash it back here.
      value->ReplaceInput(0, node_);
      return lowering_->ReplaceWith(value);
    }
    return lowering_->Changed(node_);
  }

  bool OneInputIs(Type* t) { return left_type_->Is(t) || right_type_->Is(t); }

  bool BothInputsAre(Type* t) {
    return left_type_->Is(t) && right_type_->Is(t);
  }

  bool OneInputCannotBe(Type* t) {
    return !left_type_->Maybe(t) || !right_type_->Maybe(t);
  }

  bool NeitherInputCanBe(Type* t) {
    return !left_type_->Maybe(t) && !right_type_->Maybe(t);
  }

  Node* effect() { return NodeProperties::GetEffectInput(node_); }
  Node* control() { return NodeProperties::GetControlInput(node_); }
  Node* context() { return NodeProperties::GetContextInput(node_); }
  Node* left() { return NodeProperties::GetValueInput(node_, 0); }
  Node* right() { return NodeProperties::GetValueInput(node_, 1); }
  Type* left_type() { return left_type_; }
  Type* right_type() { return right_type_; }

  SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
  Graph* graph() { return lowering_->graph(); }
  JSGraph* jsgraph() { return lowering_->jsgraph(); }
  JSOperatorBuilder* javascript() { return lowering_->javascript(); }
  MachineOperatorBuilder* machine() { return lowering_->machine(); }

 private:
  JSTypedLowering* lowering_;  // The containing lowering instance.
  Node* node_;                 // The original node.
  Type* left_type_;            // Cache of the left input's type.
  Type* right_type_;           // Cache of the right input's type.

  Node* ConvertToString(Node* node) {
    // Avoid introducing too many eager ToString() operations.
    Reduction reduced = lowering_->ReduceJSToStringInput(node);
    if (reduced.Changed()) return reduced.replacement();
    Node* n = graph()->NewNode(javascript()->ToString(), node, context(),
                               effect(), control());
    update_effect(n);
    return n;
  }

  Node* ConvertToNumber(Node* node) {
    // Avoid introducing too many eager ToNumber() operations.
    Reduction reduced = lowering_->ReduceJSToNumberInput(node);
    if (reduced.Changed()) return reduced.replacement();
    Node* n = graph()->NewNode(javascript()->ToNumber(), node, context(),
                               effect(), control());
    update_effect(n);
    return n;
  }

  // Try narrowing a double or number operation to an Int32 operation.
  bool TryNarrowingToI32(Type* type, Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kFloat64Add:
      case IrOpcode::kNumberAdd: {
        JSBinopReduction r(lowering_, node);
        if (r.BothInputsAre(Type::Integral32())) {
          node->set_op(lowering_->machine()->Int32Add());
          // TODO(titzer): narrow bounds instead of overwriting.
          NodeProperties::SetBounds(node, Bounds(type));
          return true;
        }
      }
      case IrOpcode::kFloat64Sub:
      case IrOpcode::kNumberSubtract: {
        JSBinopReduction r(lowering_, node);
        if (r.BothInputsAre(Type::Integral32())) {
          node->set_op(lowering_->machine()->Int32Sub());
          // TODO(titzer): narrow bounds instead of overwriting.
          NodeProperties::SetBounds(node, Bounds(type));
          return true;
        }
      }
      default:
        return false;
    }
  }

  Node* ConvertToI32(bool is_signed, Node* node) {
    Type* type = is_signed ? Type::Signed32() : Type::Unsigned32();
    if (node->OwnedBy(node_)) {
      // If this node {node_} has the only edge to {node}, then try narrowing
      // its operation to an Int32 add or subtract.
      if (TryNarrowingToI32(type, node)) return node;
    } else {
      // Otherwise, {node} has multiple uses. Leave it as is and let the
      // further lowering passes deal with it, which use a full backwards
      // fixpoint.
    }

    // Avoid introducing too many eager NumberToXXnt32() operations.
    node = ConvertToNumber(node);
    Type* input_type = NodeProperties::GetBounds(node).upper;

    if (input_type->Is(type)) return node;  // already in the value range.

    Operator* op = is_signed ? simplified()->NumberToInt32()
                             : simplified()->NumberToUint32();
    Node* n = graph()->NewNode(op, node);
    return n;
  }

  void update_effect(Node* effect) {
    NodeProperties::ReplaceEffectInput(node_, effect);
  }
};
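
ConvertInputsForShift masks the shift count with 0x1F because ES5 11.7 defines shifts on the count modulo 32; the Word32And also keeps the machine-level shift well defined. A standalone check of those semantics:

#include <cstdint>
#include <iostream>

// ES5 11.7: the effective shift count is ToUint32(rhs) & 0x1F.
int32_t JSShiftLeft(int32_t lhs, uint32_t rhs) {
  return static_cast<int32_t>(static_cast<uint32_t>(lhs) << (rhs & 0x1F));
}

int main() {
  std::cout << JSShiftLeft(1, 3) << "\n";   // 8
  std::cout << JSShiftLeft(1, 35) << "\n";  // also 8: 35 & 31 == 3
  std::cout << JSShiftLeft(1, 32) << "\n";  // 1: 32 & 31 == 0
}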


Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
  JSBinopReduction r(this, node);
  if (r.OneInputIs(Type::String())) {
    r.ConvertInputsToString();
    return r.ChangeToPureOperator(simplified()->StringAdd());
  } else if (r.NeitherInputCanBe(Type::String())) {
    r.ConvertInputsToNumber();
    return r.ChangeToPureOperator(simplified()->NumberAdd());
  }
  return NoChange();
}


Reduction JSTypedLowering::ReduceNumberBinop(Node* node, Operator* numberOp) {
  JSBinopReduction r(this, node);
  if (r.OneInputIs(Type::Primitive())) {
    // If at least one input is a primitive, then insert appropriate conversions
    // to number and reduce this operator to the given numeric one.
    // TODO(turbofan): make this heuristic configurable for code size.
    r.ConvertInputsToNumber();
    return r.ChangeToPureOperator(numberOp);
  }
  // TODO(turbofan): relax/remove the effects of this operator in other cases.
  return NoChange();
}


Reduction JSTypedLowering::ReduceI32Binop(Node* node, bool left_signed,
                                          bool right_signed, Operator* intOp) {
  JSBinopReduction r(this, node);
  // TODO(titzer): some Smi bitwise operations don't really require going
  // all the way to int32, which can save tagging/untagging for some operations
  // on some platforms.
  // TODO(turbofan): make this heuristic configurable for code size.
  r.ConvertInputsToInt32(left_signed, right_signed);
  return r.ChangeToPureOperator(intOp);
}


Reduction JSTypedLowering::ReduceI32Shift(Node* node, bool left_signed,
                                          Operator* shift_op) {
  JSBinopReduction r(this, node);
  r.ConvertInputsForShift(left_signed);
  return r.ChangeToPureOperator(shift_op);
}


Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
  JSBinopReduction r(this, node);
  if (r.BothInputsAre(Type::String())) {
    // If both inputs are definitely strings, perform a string comparison.
    Operator* stringOp;
    switch (node->opcode()) {
      case IrOpcode::kJSLessThan:
        stringOp = simplified()->StringLessThan();
        break;
      case IrOpcode::kJSGreaterThan:
        stringOp = simplified()->StringLessThan();
        r.SwapInputs();  // a > b => b < a
        break;
      case IrOpcode::kJSLessThanOrEqual:
        stringOp = simplified()->StringLessThanOrEqual();
        break;
      case IrOpcode::kJSGreaterThanOrEqual:
        stringOp = simplified()->StringLessThanOrEqual();
        r.SwapInputs();  // a >= b => b <= a
        break;
      default:
        return NoChange();
    }
    return r.ChangeToPureOperator(stringOp);
  } else if (r.OneInputCannotBe(Type::String())) {
    // If one input cannot be a string, then emit a number comparison.
    Operator* less_than;
    Operator* less_than_or_equal;
    if (r.BothInputsAre(Type::Unsigned32())) {
      less_than = machine()->Uint32LessThan();
      less_than_or_equal = machine()->Uint32LessThanOrEqual();
    } else if (r.BothInputsAre(Type::Signed32())) {
      less_than = machine()->Int32LessThan();
      less_than_or_equal = machine()->Int32LessThanOrEqual();
    } else {
      // TODO(turbofan): mixed signed/unsigned int32 comparisons.
      r.ConvertInputsToNumber();
      less_than = simplified()->NumberLessThan();
      less_than_or_equal = simplified()->NumberLessThanOrEqual();
    }
    Operator* comparison;
    switch (node->opcode()) {
      case IrOpcode::kJSLessThan:
        comparison = less_than;
        break;
      case IrOpcode::kJSGreaterThan:
        comparison = less_than;
        r.SwapInputs();  // a > b => b < a
        break;
      case IrOpcode::kJSLessThanOrEqual:
        comparison = less_than_or_equal;
        break;
      case IrOpcode::kJSGreaterThanOrEqual:
        comparison = less_than_or_equal;
        r.SwapInputs();  // a >= b => b <= a
        break;
      default:
        return NoChange();
    }
    return r.ChangeToPureOperator(comparison);
  }
  // TODO(turbofan): relax/remove effects of this operator in other cases.
  return NoChange();  // Keep a generic comparison.
}
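
Note that ReduceJSComparison never needs greater-than operators: a > b is rewritten as b < a (and a >= b as b <= a) by swapping the inputs. This identity holds even for NaN, where both sides of each pair are false. A quick numeric check:

#include <cmath>
#include <iostream>

int main() {
  double a = 2.0, b = 1.0;
  std::cout << ((a > b) == (b < a)) << "\n";    // 1
  std::cout << ((a >= b) == (b <= a)) << "\n";  // 1

  double nan = std::nan("");
  // With NaN every ordered comparison is false, so the identity still holds.
  std::cout << ((nan > b) == (b < nan)) << "\n";    // 1 (both false)
  std::cout << ((nan >= b) == (b <= nan)) << "\n";  // 1 (both false)
}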


Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
  JSBinopReduction r(this, node);

  if (r.BothInputsAre(Type::Number())) {
    return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
  }
  if (r.BothInputsAre(Type::String())) {
    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
  }
  if (r.BothInputsAre(Type::Receiver())) {
    return r.ChangeToPureOperator(
        simplified()->ReferenceEqual(Type::Receiver()), invert);
  }
  // TODO(turbofan): js-typed-lowering of Equal(undefined)
  // TODO(turbofan): js-typed-lowering of Equal(null)
  // TODO(turbofan): js-typed-lowering of Equal(boolean)
  return NoChange();
}


Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
  JSBinopReduction r(this, node);
  if (r.left() == r.right()) {
    // x === x is always true unless x could be NaN.
    if (!r.left_type()->Maybe(Type::NaN())) {
      return ReplaceEagerly(node, invert ? jsgraph()->FalseConstant()
                                         : jsgraph()->TrueConstant());
    }
  }
  if (!r.left_type()->Maybe(r.right_type())) {
    // Type intersection is empty; === is always false unless both
    // inputs could be strings (one internalized and one not).
    if (r.OneInputCannotBe(Type::String())) {
      return ReplaceEagerly(node, invert ? jsgraph()->TrueConstant()
                                         : jsgraph()->FalseConstant());
    }
  }
  if (r.OneInputIs(Type::Undefined())) {
    return r.ChangeToPureOperator(
        simplified()->ReferenceEqual(Type::Undefined()), invert);
  }
  if (r.OneInputIs(Type::Null())) {
    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Null()),
                                  invert);
  }
  if (r.OneInputIs(Type::Boolean())) {
    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
                                  invert);
  }
  if (r.OneInputIs(Type::Object())) {
    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Object()),
                                  invert);
  }
  if (r.OneInputIs(Type::Receiver())) {
    return r.ChangeToPureOperator(
        simplified()->ReferenceEqual(Type::Receiver()), invert);
  }
  if (r.BothInputsAre(Type::String())) {
    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
  }
  if (r.BothInputsAre(Type::Number())) {
    return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
  }
  // TODO(turbofan): js-typed-lowering of StrictEqual(mixed types)
  return NoChange();
}


Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
  if (input->opcode() == IrOpcode::kJSToNumber) {
    // Recursively try to reduce the input first.
    Reduction result = ReduceJSToNumberInput(input->InputAt(0));
    if (result.Changed()) {
      RelaxEffects(input);
      return result;
    }
    return Changed(input);  // JSToNumber(JSToNumber(x)) => JSToNumber(x)
  }
  Type* input_type = NodeProperties::GetBounds(input).upper;
  if (input_type->Is(Type::Number())) {
    // JSToNumber(number) => x
    return Changed(input);
  }
  if (input_type->Is(Type::Undefined())) {
    // JSToNumber(undefined) => #NaN
    return ReplaceWith(jsgraph()->NaNConstant());
  }
  if (input_type->Is(Type::Null())) {
    // JSToNumber(null) => #0
    return ReplaceWith(jsgraph()->ZeroConstant());
  }
  // TODO(turbofan): js-typed-lowering of ToNumber(boolean)
  // TODO(turbofan): js-typed-lowering of ToNumber(string)
  return NoChange();
}


Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
  if (input->opcode() == IrOpcode::kJSToString) {
    // Recursively try to reduce the input first.
    Reduction result = ReduceJSToStringInput(input->InputAt(0));
    if (result.Changed()) {
      RelaxEffects(input);
      return result;
    }
    return Changed(input);  // JSToString(JSToString(x)) => JSToString(x)
  }
  Type* input_type = NodeProperties::GetBounds(input).upper;
  if (input_type->Is(Type::String())) {
    return Changed(input);  // JSToString(string) => x
  }
  if (input_type->Is(Type::Undefined())) {
    return ReplaceWith(jsgraph()->HeapConstant(
        graph()->zone()->isolate()->factory()->undefined_string()));
  }
  if (input_type->Is(Type::Null())) {
    return ReplaceWith(jsgraph()->HeapConstant(
        graph()->zone()->isolate()->factory()->null_string()));
  }
  // TODO(turbofan): js-typed-lowering of ToString(boolean)
  // TODO(turbofan): js-typed-lowering of ToString(number)
  return NoChange();
}


Reduction JSTypedLowering::ReduceJSToBooleanInput(Node* input) {
  if (input->opcode() == IrOpcode::kJSToBoolean) {
    // Recursively try to reduce the input first.
    Reduction result = ReduceJSToBooleanInput(input->InputAt(0));
    if (result.Changed()) {
      RelaxEffects(input);
      return result;
    }
    return Changed(input);  // JSToBoolean(JSToBoolean(x)) => JSToBoolean(x)
  }
  Type* input_type = NodeProperties::GetBounds(input).upper;
  if (input_type->Is(Type::Boolean())) {
    return Changed(input);  // JSToBoolean(boolean) => x
  }
  if (input_type->Is(Type::Undefined())) {
    // JSToBoolean(undefined) => #false
    return ReplaceWith(jsgraph()->FalseConstant());
  }
  if (input_type->Is(Type::Null())) {
    // JSToBoolean(null) => #false
    return ReplaceWith(jsgraph()->FalseConstant());
  }
  if (input_type->Is(Type::DetectableReceiver())) {
    // JSToBoolean(detectable) => #true
    return ReplaceWith(jsgraph()->TrueConstant());
  }
  if (input_type->Is(Type::Undetectable())) {
    // JSToBoolean(undetectable) => #false
    return ReplaceWith(jsgraph()->FalseConstant());
  }
  if (input_type->Is(Type::Number())) {
    // JSToBoolean(number) => BooleanNot(NumberEqual(x, #0))
    Node* cmp = graph()->NewNode(simplified()->NumberEqual(), input,
                                 jsgraph()->ZeroConstant());
    Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
    ReplaceEagerly(input, inv);
    // TODO(titzer): Ugly. ReplaceEagerly smashes all uses. Smash it back here.
    cmp->ReplaceInput(0, input);
    return Changed(inv);
  }
  // TODO(turbofan): js-typed-lowering of ToBoolean(string)
  return NoChange();
}
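
For reference on the last clause: ES5 9.2 defines ToBoolean on numbers as false exactly for +0, -0, and NaN. A standalone version of that spec clause (how the simplified NumberEqual operator treats a NaN input is part of its own semantics and not shown here):

#include <cmath>
#include <iostream>

// ES5 9.2 ToBoolean for the Number type: false for +0, -0, and NaN.
bool ToBooleanNumber(double d) {
  return d != 0.0 && !std::isnan(d);
}

int main() {
  std::cout << ToBooleanNumber(3.5) << "\n";           // 1
  std::cout << ToBooleanNumber(0.0) << "\n";           // 0
  std::cout << ToBooleanNumber(-0.0) << "\n";          // 0 (-0.0 == 0.0)
  std::cout << ToBooleanNumber(std::nan("")) << "\n";  // 0
}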


static Reduction ReplaceWithReduction(Node* node, Reduction reduction) {
  if (reduction.Changed()) {
    ReplaceUses(node, reduction.replacement(), NULL);
    return reduction;
  }
  return Reducer::NoChange();
}


Reduction JSTypedLowering::Reduce(Node* node) {
  switch (node->opcode()) {
    case IrOpcode::kJSEqual:
      return ReduceJSEqual(node, false);
    case IrOpcode::kJSNotEqual:
      return ReduceJSEqual(node, true);
    case IrOpcode::kJSStrictEqual:
      return ReduceJSStrictEqual(node, false);
    case IrOpcode::kJSStrictNotEqual:
      return ReduceJSStrictEqual(node, true);
    case IrOpcode::kJSLessThan:         // fall through
    case IrOpcode::kJSGreaterThan:      // fall through
    case IrOpcode::kJSLessThanOrEqual:  // fall through
    case IrOpcode::kJSGreaterThanOrEqual:
      return ReduceJSComparison(node);
    case IrOpcode::kJSBitwiseOr:
      return ReduceI32Binop(node, true, true, machine()->Word32Or());
    case IrOpcode::kJSBitwiseXor:
      return ReduceI32Binop(node, true, true, machine()->Word32Xor());
    case IrOpcode::kJSBitwiseAnd:
      return ReduceI32Binop(node, true, true, machine()->Word32And());
    case IrOpcode::kJSShiftLeft:
      return ReduceI32Shift(node, true, machine()->Word32Shl());
    case IrOpcode::kJSShiftRight:
      return ReduceI32Shift(node, true, machine()->Word32Sar());
    case IrOpcode::kJSShiftRightLogical:
      return ReduceI32Shift(node, false, machine()->Word32Shr());
    case IrOpcode::kJSAdd:
      return ReduceJSAdd(node);
    case IrOpcode::kJSSubtract:
      return ReduceNumberBinop(node, simplified()->NumberSubtract());
    case IrOpcode::kJSMultiply:
      return ReduceNumberBinop(node, simplified()->NumberMultiply());
    case IrOpcode::kJSDivide:
      return ReduceNumberBinop(node, simplified()->NumberDivide());
    case IrOpcode::kJSModulus:
      return ReduceNumberBinop(node, simplified()->NumberModulus());
    case IrOpcode::kJSUnaryNot: {
      Reduction result = ReduceJSToBooleanInput(node->InputAt(0));
      Node* value;
      if (result.Changed()) {
        // !x => BooleanNot(x)
        value =
            graph()->NewNode(simplified()->BooleanNot(), result.replacement());
        ReplaceUses(node, value, NULL);
        return Changed(value);
      } else {
        // !x => BooleanNot(JSToBoolean(x))
        value = graph()->NewNode(simplified()->BooleanNot(), node);
        node->set_op(javascript()->ToBoolean());
        ReplaceUses(node, value, node);
        // Note: ReplaceUses() smashes all uses, so smash it back here.
        value->ReplaceInput(0, node);
        return ReplaceWith(value);
      }
    }
    case IrOpcode::kJSToBoolean:
      return ReplaceWithReduction(node,
                                  ReduceJSToBooleanInput(node->InputAt(0)));
    case IrOpcode::kJSToNumber:
      return ReplaceWithReduction(node,
                                  ReduceJSToNumberInput(node->InputAt(0)));
    case IrOpcode::kJSToString:
      return ReplaceWithReduction(node,
                                  ReduceJSToStringInput(node->InputAt(0)));
    default:
      break;
  }
  return NoChange();
}
}
}
}  // namespace v8::internal::compiler
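
The bitwise and shift reductions above funnel their inputs through NumberToInt32/NumberToUint32. A self-contained reference implementation of ES5 9.5/9.6 (modulo-2^32 wrapping), useful for checking what those conversions must produce; this models the spec, not V8's internal operators:

#include <cmath>
#include <cstdint>
#include <iostream>

// ES5 9.6 ToUint32: wrap the truncated value into [0, 2^32).
uint32_t ToUint32(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;
  const double two32 = 4294967296.0;
  double m = std::fmod(std::trunc(d), two32);
  if (m < 0) m += two32;
  return static_cast<uint32_t>(m);
}

// ES5 9.5 ToInt32: same wrap, then shift the range to [-2^31, 2^31).
int32_t ToInt32(double d) {
  double m = static_cast<double>(ToUint32(d));
  if (m >= 2147483648.0) m -= 4294967296.0;
  return static_cast<int32_t>(m);
}

int main() {
  std::cout << ToInt32(4294967296.0 + 5.0) << "\n";  // 5: wraps mod 2^32
  std::cout << ToInt32(2147483648.0) << "\n";        // -2147483648
  std::cout << ToUint32(-1.0) << "\n";               // 4294967295
  std::cout << ToInt32(3.9) << "\n";                 // 3: truncates toward 0
}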

69  src/compiler/js-typed-lowering.h  Normal file
@ -0,0 +1,69 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_OPERATOR_REDUCERS_H_
#define V8_COMPILER_OPERATOR_REDUCERS_H_

#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/lowering-builder.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"

namespace v8 {
namespace internal {
namespace compiler {

class JSBinopReduction;

// Lowers JS-level operators to simplified operators based on types.
class JSTypedLowering : public LoweringBuilder {
 public:
  explicit JSTypedLowering(JSGraph* jsgraph,
                           SourcePositionTable* source_positions)
      : LoweringBuilder(jsgraph->graph(), source_positions),
        jsgraph_(jsgraph),
        simplified_(jsgraph->zone()),
        machine_(jsgraph->zone()) {}
  virtual ~JSTypedLowering() {}

  Reduction Reduce(Node* node);
  virtual void Lower(Node* node) { Reduce(node); }

  JSGraph* jsgraph() { return jsgraph_; }
  Graph* graph() { return jsgraph_->graph(); }

 private:
  friend class JSBinopReduction;
  JSGraph* jsgraph_;
  SimplifiedOperatorBuilder simplified_;
  MachineOperatorBuilder machine_;

  Reduction ReplaceEagerly(Node* old, Node* node);
  Reduction NoChange() { return Reducer::NoChange(); }
  Reduction ReplaceWith(Node* node) { return Reducer::Replace(node); }
  Reduction Changed(Node* node) { return Reducer::Changed(node); }
  Reduction ReduceJSAdd(Node* node);
  Reduction ReduceJSComparison(Node* node);
  Reduction ReduceJSEqual(Node* node, bool invert);
  Reduction ReduceJSStrictEqual(Node* node, bool invert);
  Reduction ReduceJSToNumberInput(Node* input);
  Reduction ReduceJSToStringInput(Node* input);
  Reduction ReduceJSToBooleanInput(Node* input);
  Reduction ReduceNumberBinop(Node* node, Operator* numberOp);
  Reduction ReduceI32Binop(Node* node, bool left_signed, bool right_signed,
                           Operator* intOp);
  Reduction ReduceI32Shift(Node* node, bool left_signed, Operator* shift_op);

  JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
  CommonOperatorBuilder* common() { return jsgraph_->common(); }
  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
  MachineOperatorBuilder* machine() { return &machine_; }
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_OPERATOR_REDUCERS_H_

206  src/compiler/linkage-impl.h  Normal file
@ -0,0 +1,206 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_LINKAGE_IMPL_H_
#define V8_COMPILER_LINKAGE_IMPL_H_

namespace v8 {
namespace internal {
namespace compiler {

class LinkageHelper {
 public:
  static LinkageLocation TaggedStackSlot(int index) {
    ASSERT(index < 0);
    return LinkageLocation(kMachineTagged, index);
  }

  static LinkageLocation TaggedRegisterLocation(Register reg) {
    return LinkageLocation(kMachineTagged, Register::ToAllocationIndex(reg));
  }

  static inline LinkageLocation WordRegisterLocation(Register reg) {
    return LinkageLocation(MachineOperatorBuilder::pointer_rep(),
                           Register::ToAllocationIndex(reg));
  }

  static LinkageLocation UnconstrainedRegister(MachineRepresentation rep) {
    return LinkageLocation(rep, LinkageLocation::ANY_REGISTER);
  }

  static const RegList kNoCalleeSaved = 0;

  // TODO(turbofan): cache call descriptors for JSFunction calls.
  template <typename LinkageTraits>
  static CallDescriptor* GetJSCallDescriptor(Zone* zone, int parameter_count) {
    const int jsfunction_count = 1;
    const int context_count = 1;
    int input_count = jsfunction_count + parameter_count + context_count;

    const int return_count = 1;
    LinkageLocation* locations =
        zone->NewArray<LinkageLocation>(return_count + input_count);

    int index = 0;
    locations[index++] =
        TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
    locations[index++] =
        TaggedRegisterLocation(LinkageTraits::JSCallFunctionReg());

    for (int i = 0; i < parameter_count; i++) {
      // All parameters to JS calls go on the stack.
      int spill_slot_index = i - parameter_count;
      locations[index++] = TaggedStackSlot(spill_slot_index);
    }
    locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg());

    // TODO(titzer): refactor TurboFan graph to consider context a value input.
    return new (zone)
        CallDescriptor(CallDescriptor::kCallJSFunction,  // kind
                       return_count,                     // return_count
                       parameter_count,                  // parameter_count
                       input_count - context_count,      // input_count
                       locations,                        // locations
                       Operator::kNoProperties,          // properties
                       kNoCalleeSaved,                   // callee-saved
                       CallDescriptor::kCanDeoptimize);  // deoptimization
  }


  // TODO(turbofan): cache call descriptors for runtime calls.
  template <typename LinkageTraits>
  static CallDescriptor* GetRuntimeCallDescriptor(
      Zone* zone, Runtime::FunctionId function_id, int parameter_count,
      Operator::Property properties,
      CallDescriptor::DeoptimizationSupport can_deoptimize) {
    const int code_count = 1;
    const int function_count = 1;
    const int num_args_count = 1;
    const int context_count = 1;
    const int input_count = code_count + parameter_count + function_count +
                            num_args_count + context_count;

    const Runtime::Function* function = Runtime::FunctionForId(function_id);
    const int return_count = function->result_size;
    LinkageLocation* locations =
        zone->NewArray<LinkageLocation>(return_count + input_count);

    int index = 0;
    if (return_count > 0) {
      locations[index++] =
          TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
    }
    if (return_count > 1) {
      locations[index++] =
          TaggedRegisterLocation(LinkageTraits::ReturnValue2Reg());
    }

    ASSERT_LE(return_count, 2);

    locations[index++] = UnconstrainedRegister(kMachineTagged);  // CEntryStub

    for (int i = 0; i < parameter_count; i++) {
      // All parameters to runtime calls go on the stack.
      int spill_slot_index = i - parameter_count;
      locations[index++] = TaggedStackSlot(spill_slot_index);
    }
    locations[index++] =
        TaggedRegisterLocation(LinkageTraits::RuntimeCallFunctionReg());
    locations[index++] =
        WordRegisterLocation(LinkageTraits::RuntimeCallArgCountReg());
    locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg());

    // TODO(titzer): refactor TurboFan graph to consider context a value input.
    return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject,  // kind
                                     return_count,     // return_count
                                     parameter_count,  // parameter_count
                                     input_count,      // input_count
                                     locations,        // locations
                                     properties,       // properties
                                     kNoCalleeSaved,   // callee-saved
                                     can_deoptimize,   // deoptimization
                                     function->name);
  }
|
||||
|
||||
|
||||
// TODO(turbofan): cache call descriptors for code stub calls.
|
||||
template <typename LinkageTraits>
|
||||
static CallDescriptor* GetStubCallDescriptor(
|
||||
Zone* zone, CodeStubInterfaceDescriptor* descriptor,
|
||||
int stack_parameter_count) {
|
||||
int register_parameter_count = descriptor->GetEnvironmentParameterCount();
|
||||
int parameter_count = register_parameter_count + stack_parameter_count;
|
||||
const int code_count = 1;
|
||||
const int context_count = 1;
|
||||
int input_count = code_count + parameter_count + context_count;
|
||||
|
||||
const int return_count = 1;
|
||||
LinkageLocation* locations =
|
||||
zone->NewArray<LinkageLocation>(return_count + input_count);
|
||||
|
||||
int index = 0;
|
||||
locations[index++] =
|
||||
TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
|
||||
locations[index++] = UnconstrainedRegister(kMachineTagged); // code
|
||||
for (int i = 0; i < parameter_count; i++) {
|
||||
if (i < register_parameter_count) {
|
||||
// The first parameters to code stub calls go in registers.
|
||||
Register reg = descriptor->GetEnvironmentParameterRegister(i);
|
||||
locations[index++] = TaggedRegisterLocation(reg);
|
||||
} else {
|
||||
// The rest of the parameters go on the stack.
|
||||
int stack_slot = i - register_parameter_count - stack_parameter_count;
|
||||
locations[index++] = TaggedStackSlot(stack_slot);
|
||||
}
|
||||
}
|
||||
locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg());
|
||||
|
||||
// TODO(titzer): refactor TurboFan graph to consider context a value input.
|
||||
return new (zone)
|
||||
CallDescriptor(CallDescriptor::kCallCodeObject, // kind
|
||||
return_count, // return_count
|
||||
parameter_count, // parameter_count
|
||||
input_count, // input_count
|
||||
locations, // locations
|
||||
Operator::kNoProperties, // properties
|
||||
kNoCalleeSaved, // callee-saved registers
|
||||
CallDescriptor::kCannotDeoptimize, // deoptimization
|
||||
CodeStub::MajorName(descriptor->MajorKey(), false));
|
||||
// TODO(jarin) should deoptimize!
|
||||
}
|
||||
|
||||
|
||||
template <typename LinkageTraits>
|
||||
static CallDescriptor* GetSimplifiedCDescriptor(
|
||||
Zone* zone, int num_params, MachineRepresentation return_type,
|
||||
const MachineRepresentation* param_types) {
|
||||
LinkageLocation* locations =
|
||||
zone->NewArray<LinkageLocation>(num_params + 2);
|
||||
int index = 0;
|
||||
locations[index++] =
|
||||
TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
|
||||
locations[index++] = LinkageHelper::UnconstrainedRegister(
|
||||
MachineOperatorBuilder::pointer_rep());
|
||||
// TODO(dcarney): test with lots of parameters.
|
||||
int i = 0;
|
||||
for (; i < LinkageTraits::CRegisterParametersLength() && i < num_params;
|
||||
i++) {
|
||||
locations[index++] = LinkageLocation(
|
||||
param_types[i],
|
||||
Register::ToAllocationIndex(LinkageTraits::CRegisterParameter(i)));
|
||||
}
|
||||
for (; i < num_params; i++) {
|
||||
locations[index++] = LinkageLocation(param_types[i], -1 - i);
|
||||
}
|
||||
return new (zone) CallDescriptor(
|
||||
CallDescriptor::kCallAddress, 1, num_params, num_params + 1, locations,
|
||||
Operator::kNoProperties, LinkageTraits::CCalleeSaveRegisters(),
|
||||
CallDescriptor::kCannotDeoptimize); // TODO(jarin) should deoptimize!
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
} // namespace v8::internal::compiler
|
||||
|
||||
#endif // V8_COMPILER_LINKAGE_IMPL_H_
|
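As a worked illustration of the layout GetJSCallDescriptor builds (an editorial sketch, not part of the commit; the actual registers depend on the architecture's LinkageTraits):

// Sketch: locations laid out by GetJSCallDescriptor(zone, 2).
// Return value first, then the callee, the stack parameters (negative spill
// slots, pushed by the caller), and finally the context:
//
//   locations[0] = ReturnValueReg      (tagged register)
//   locations[1] = JSCallFunctionReg   (tagged register)
//   locations[2] = stack slot -2       (parameter 0: 0 - parameter_count)
//   locations[3] = stack slot -1       (parameter 1: 1 - parameter_count)
//   locations[4] = ContextReg          (tagged register)
//
// return_count = 1, parameter_count = 2, and the descriptor's input_count is
// 3 (= 1 + 2 + 1 - context_count), since the context is carried separately
// until the TODO above is resolved.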
src/compiler/linkage.cc (new file, 140 lines)
@@ -0,0 +1,140 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/linkage.h"

#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/compiler/node.h"
#include "src/compiler/pipeline.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {


OStream& operator<<(OStream& os, const CallDescriptor::Kind& k) {
  switch (k) {
    case CallDescriptor::kCallCodeObject:
      os << "Code";
      break;
    case CallDescriptor::kCallJSFunction:
      os << "JS";
      break;
    case CallDescriptor::kCallAddress:
      os << "Addr";
      break;
  }
  return os;
}


OStream& operator<<(OStream& os, const CallDescriptor& d) {
  // TODO(svenpanne) Output properties etc. and be less cryptic.
  return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
            << "p" << d.ParameterCount() << "i" << d.InputCount()
            << (d.CanLazilyDeoptimize() ? "deopt" : "");
}


Linkage::Linkage(CompilationInfo* info) : info_(info) {
  if (info->function() != NULL) {
    // If we already have the function literal, use the number of parameters
    // plus the receiver.
    incoming_ = GetJSCallDescriptor(1 + info->function()->parameter_count());
  } else if (!info->closure().is_null()) {
    // If we are compiling a JS function, use a JS call descriptor,
    // plus the receiver.
    SharedFunctionInfo* shared = info->closure()->shared();
    incoming_ = GetJSCallDescriptor(1 + shared->formal_parameter_count());
  } else if (info->code_stub() != NULL) {
    // Use the code stub interface descriptor.
    HydrogenCodeStub* stub = info->code_stub();
    CodeStubInterfaceDescriptor* descriptor =
        info_->isolate()->code_stub_interface_descriptor(stub->MajorKey());
    incoming_ = GetStubCallDescriptor(descriptor);
  } else {
    incoming_ = NULL;  // TODO(titzer): ?
  }
}


FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame, int extra) {
  if (frame->GetSpillSlotCount() > 0 || incoming_->IsJSFunctionCall() ||
      incoming_->kind() == CallDescriptor::kCallAddress) {
    int offset;
    int register_save_area_size = frame->GetRegisterSaveAreaSize();
    if (spill_slot >= 0) {
      // Local or spill slot. Skip the frame pointer, function, and
      // context in the fixed part of the frame.
      offset =
          -(spill_slot + 1) * kPointerSize - register_save_area_size + extra;
    } else {
      // Incoming parameter. Skip the return address.
      offset = -(spill_slot + 1) * kPointerSize + kFPOnStackSize +
               kPCOnStackSize + extra;
    }
    return FrameOffset::FromFramePointer(offset);
  } else {
    // No frame. Retrieve all parameters relative to stack pointer.
    ASSERT(spill_slot < 0);  // Must be a parameter.
    int register_save_area_size = frame->GetRegisterSaveAreaSize();
    int offset = register_save_area_size - (spill_slot + 1) * kPointerSize +
                 kPCOnStackSize + extra;
    return FrameOffset::FromStackPointer(offset);
  }
}


CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count) {
  return GetJSCallDescriptor(parameter_count, this->info_->zone());
}


CallDescriptor* Linkage::GetRuntimeCallDescriptor(
    Runtime::FunctionId function, int parameter_count,
    Operator::Property properties,
    CallDescriptor::DeoptimizationSupport can_deoptimize) {
  return GetRuntimeCallDescriptor(function, parameter_count, properties,
                                  can_deoptimize, this->info_->zone());
}


//==============================================================================
// Provide unimplemented methods on unsupported architectures, to at least link.
//==============================================================================
#if !V8_TURBOFAN_TARGET
CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
  UNIMPLEMENTED();
  return NULL;
}


CallDescriptor* Linkage::GetRuntimeCallDescriptor(
    Runtime::FunctionId function, int parameter_count,
    Operator::Property properties,
    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
  UNIMPLEMENTED();
  return NULL;
}


CallDescriptor* Linkage::GetStubCallDescriptor(
    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
  UNIMPLEMENTED();
  return NULL;
}


CallDescriptor* Linkage::GetSimplifiedCDescriptor(
    Zone* zone, int num_params, MachineRepresentation return_type,
    MachineRepresentation* param_types) {
  UNIMPLEMENTED();
  return NULL;
}
#endif  // !V8_TURBOFAN_TARGET
}
}
}  // namespace v8::internal::compiler
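A worked example of the GetFrameOffset arithmetic above (editorial sketch; it assumes a 64-bit target where kPointerSize, kFPOnStackSize and kPCOnStackSize are all 8, and an empty register save area):

// With a frame, spill slot 0 lives just below the saved-register area:
//   offset = -(0 + 1) * 8 - 0 + 0 = -8        =>  [fp - 8]
// An incoming parameter in slot -2 (first of two stack parameters) skips the
// saved fp and the return address:
//   offset = -(-2 + 1) * 8 + 8 + 8 + 0 = 24   =>  [fp + 24]
// Without a frame, the same parameter is addressed from the stack pointer:
//   offset = 0 - (-2 + 1) * 8 + 8 + 0 = 16    =>  [sp + 16]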
src/compiler/linkage.h (new file, 188 lines)
@@ -0,0 +1,188 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_LINKAGE_H_
#define V8_COMPILER_LINKAGE_H_

#include "src/v8.h"

#include "src/code-stubs.h"
#include "src/compiler/frame.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/zone.h"

namespace v8 {
namespace internal {
namespace compiler {

// Describes the location for a parameter or a return value to a call.
// TODO(titzer): replace with Radium locations when they are ready.
class LinkageLocation {
 public:
  LinkageLocation(MachineRepresentation rep, int location)
      : rep_(rep), location_(location) {}

  inline MachineRepresentation representation() const { return rep_; }

  static const int16_t ANY_REGISTER = 32767;

 private:
  friend class CallDescriptor;
  friend class OperandGenerator;
  MachineRepresentation rep_;
  int16_t location_;  // >= 0 implies register, otherwise stack slot.
};


class CallDescriptor : public ZoneObject {
 public:
  // Describes whether the first parameter is a code object, a JSFunction,
  // or an address--all of which require different machine sequences to call.
  enum Kind { kCallCodeObject, kCallJSFunction, kCallAddress };

  enum DeoptimizationSupport { kCanDeoptimize, kCannotDeoptimize };

  CallDescriptor(Kind kind, int8_t return_count, int16_t parameter_count,
                 int16_t input_count, LinkageLocation* locations,
                 Operator::Property properties, RegList callee_saved_registers,
                 DeoptimizationSupport deoptimization_support,
                 const char* debug_name = "")
      : kind_(kind),
        return_count_(return_count),
        parameter_count_(parameter_count),
        input_count_(input_count),
        locations_(locations),
        properties_(properties),
        callee_saved_registers_(callee_saved_registers),
        deoptimization_support_(deoptimization_support),
        debug_name_(debug_name) {}

  // Returns the kind of this call.
  Kind kind() const { return kind_; }

  // Returns {true} if this descriptor is a call to a JSFunction.
  bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; }

  // The number of return values from this call, usually 0 or 1.
  int ReturnCount() const { return return_count_; }

  // The number of JavaScript parameters to this call, including receiver,
  // but not the context.
  int ParameterCount() const { return parameter_count_; }

  int InputCount() const { return input_count_; }

  bool CanLazilyDeoptimize() const {
    return deoptimization_support_ == kCanDeoptimize;
  }

  LinkageLocation GetReturnLocation(int index) {
    ASSERT(index < return_count_);
    return locations_[0 + index];  // return locations start at 0.
  }

  LinkageLocation GetInputLocation(int index) {
    ASSERT(index < input_count_ + 1);  // input_count + 1 is the context.
    return locations_[return_count_ + index];  // inputs start after returns.
  }

  // Operator properties describe how this call can be optimized, if at all.
  Operator::Property properties() const { return properties_; }

  // Get the callee-saved registers, if any, across this call.
  RegList CalleeSavedRegisters() { return callee_saved_registers_; }

  const char* debug_name() const { return debug_name_; }

 private:
  friend class Linkage;

  Kind kind_;
  int8_t return_count_;
  int16_t parameter_count_;
  int16_t input_count_;
  LinkageLocation* locations_;
  Operator::Property properties_;
  RegList callee_saved_registers_;
  DeoptimizationSupport deoptimization_support_;
  const char* debug_name_;
};

OStream& operator<<(OStream& os, const CallDescriptor& d);
OStream& operator<<(OStream& os, const CallDescriptor::Kind& k);

// Defines the linkage for a compilation, including the calling conventions
// for incoming parameters and return value(s) as well as the outgoing calling
// convention for any kind of call. Linkage is generally architecture-specific.
//
// Can be used to translate {arg_index} (i.e. index of the call node input) as
// well as {param_index} (i.e. as stored in parameter nodes) into an operator
// representing the architecture-specific location. The following call node
// layouts are supported (where {n} is the number of value inputs):
//
//                  #0          #1     #2     #3     [...]             #n
// Call[CodeStub]   code,       arg 1, arg 2, arg 3, [...],            context
// Call[JSFunction] function,   rcvr,  arg 1, arg 2, [...],            context
// Call[Runtime]    CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
class Linkage : public ZoneObject {
 public:
  explicit Linkage(CompilationInfo* info);
  explicit Linkage(CompilationInfo* info, CallDescriptor* incoming)
      : info_(info), incoming_(incoming) {}

  // The call descriptor for this compilation unit describes the locations
  // of incoming parameters and the outgoing return value(s).
  CallDescriptor* GetIncomingDescriptor() { return incoming_; }
  CallDescriptor* GetJSCallDescriptor(int parameter_count);
  static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone);
  CallDescriptor* GetRuntimeCallDescriptor(
      Runtime::FunctionId function, int parameter_count,
      Operator::Property properties,
      CallDescriptor::DeoptimizationSupport can_deoptimize =
          CallDescriptor::kCannotDeoptimize);
  static CallDescriptor* GetRuntimeCallDescriptor(
      Runtime::FunctionId function, int parameter_count,
      Operator::Property properties,
      CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone);

  CallDescriptor* GetStubCallDescriptor(CodeStubInterfaceDescriptor* descriptor,
                                        int stack_parameter_count = 0);

  // Creates a call descriptor for simplified C calls that is appropriate
  // for the host platform. This simplified calling convention only supports
  // integers and pointers of one word size each, i.e. no floating point,
  // structs, pointers to members, etc.
  static CallDescriptor* GetSimplifiedCDescriptor(
      Zone* zone, int num_params, MachineRepresentation return_type,
      const MachineRepresentation* param_types);

  // Get the location of an (incoming) parameter to this function.
  LinkageLocation GetParameterLocation(int index) {
    return incoming_->GetInputLocation(index + 1);
  }

  // Get the location where this function should place its return value.
  LinkageLocation GetReturnLocation() {
    return incoming_->GetReturnLocation(0);
  }

  // Get the frame offset for a given spill slot. The location depends on the
  // calling convention and the specific frame layout, and may thus be
  // architecture-specific. Negative spill slots indicate arguments on the
  // caller's frame. The {extra} parameter indicates an additional offset from
  // the frame offset, e.g. to index into part of a double slot.
  FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0);

  CompilationInfo* info() const { return info_; }

 private:
  CompilationInfo* info_;
  CallDescriptor* incoming_;
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_LINKAGE_H_
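The indexing contract between the accessors above is worth spelling out (an editorial sketch using only the declarations in this header):

// locations_ is one flat array: return locations first, then input locations.
//   GetReturnLocation(i) == locations_[i]                 for i < return_count_
//   GetInputLocation(j)  == locations_[return_count_ + j]
// GetParameterLocation(index) forwards to GetInputLocation(index + 1),
// skipping input #0 (the callee), so for a Call[JSFunction] layout:
//   GetParameterLocation(0) -> location of the receiver
//   GetParameterLocation(1) -> location of the first declared argument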
src/compiler/lowering-builder.cc (new file, 41 lines)
@@ -0,0 +1,41 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/graph-inl.h"
#include "src/compiler/lowering-builder.h"
#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-properties-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

class LoweringBuilder::NodeVisitor : public NullNodeVisitor {
 public:
  explicit NodeVisitor(LoweringBuilder* lowering) : lowering_(lowering) {}

  GenericGraphVisit::Control Post(Node* node) {
    SourcePositionTable::Scope pos(lowering_->source_positions_, node);
    lowering_->Lower(node);
    return GenericGraphVisit::CONTINUE;
  }

 private:
  LoweringBuilder* lowering_;
};


LoweringBuilder::LoweringBuilder(Graph* graph,
                                 SourcePositionTable* source_positions)
    : graph_(graph), source_positions_(source_positions) {}


void LoweringBuilder::LowerAllNodes() {
  NodeVisitor visitor(this);
  graph()->VisitNodeInputsFromEnd(&visitor);
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
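Because the visitor hooks Post() rather than Pre(), lowering fires on the way back up the input walk; a small editorial sketch of the presumed order:

// Graph:  end <- add <- (a, b)
// VisitNodeInputsFromEnd descends into inputs before Post() runs, so Lower()
// is called roughly in the order a, b, add, end -- each node is lowered only
// after the nodes it consumes have been visited.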
src/compiler/lowering-builder.h (new file, 38 lines)
@@ -0,0 +1,38 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_LOWERING_BUILDER_H_
#define V8_COMPILER_LOWERING_BUILDER_H_

#include "src/v8.h"

#include "src/compiler/graph.h"


namespace v8 {
namespace internal {
namespace compiler {

// TODO(dcarney): rename this class.
class LoweringBuilder {
 public:
  explicit LoweringBuilder(Graph* graph, SourcePositionTable* source_positions);
  virtual ~LoweringBuilder() {}

  void LowerAllNodes();
  virtual void Lower(Node* node) = 0;  // Exposed for testing.

  Graph* graph() const { return graph_; }

 private:
  class NodeVisitor;
  Graph* graph_;
  SourcePositionTable* source_positions_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_LOWERING_BUILDER_H_
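Since Lower() is pure virtual, the class is only usable through a subclass; a minimal editorial sketch (the NopLowering name is hypothetical, and a real lowering would rewrite nodes rather than skip them):

class NopLowering : public LoweringBuilder {
 public:
  NopLowering(Graph* graph, SourcePositionTable* source_positions)
      : LoweringBuilder(graph, source_positions) {}
  virtual void Lower(Node* node) {
    // A real implementation would switch on node->opcode() here and replace
    // high-level operators with machine-level equivalents; this sketch
    // deliberately leaves every node untouched.
  }
};

// Usage: lowers every node reachable from the graph's end node.
//   NopLowering lowering(graph, source_positions);
//   lowering.LowerAllNodes();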
src/compiler/machine-node-factory.h (new file, 367 lines)
@@ -0,0 +1,367 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_MACHINE_NODE_FACTORY_H_
#define V8_COMPILER_MACHINE_NODE_FACTORY_H_

#ifdef USE_SIMULATOR
#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 0
#else
#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 1
#endif

#include "src/v8.h"

#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"

namespace v8 {
namespace internal {
namespace compiler {

class MachineCallDescriptorBuilder : public ZoneObject {
 public:
  MachineCallDescriptorBuilder(MachineRepresentation return_type,
                               int parameter_count,
                               const MachineRepresentation* parameter_types)
      : return_type_(return_type),
        parameter_count_(parameter_count),
        parameter_types_(parameter_types) {}

  int parameter_count() const { return parameter_count_; }
  const MachineRepresentation* parameter_types() const {
    return parameter_types_;
  }

  CallDescriptor* BuildCallDescriptor(Zone* zone) {
    return Linkage::GetSimplifiedCDescriptor(zone, parameter_count_,
                                             return_type_, parameter_types_);
  }

 private:
  const MachineRepresentation return_type_;
  const int parameter_count_;
  const MachineRepresentation* const parameter_types_;
};


#define ZONE() static_cast<NodeFactory*>(this)->zone()
#define COMMON() static_cast<NodeFactory*>(this)->common()
#define MACHINE() static_cast<NodeFactory*>(this)->machine()
#define NEW_NODE_0(op) static_cast<NodeFactory*>(this)->NewNode(op)
#define NEW_NODE_1(op, a) static_cast<NodeFactory*>(this)->NewNode(op, a)
#define NEW_NODE_2(op, a, b) static_cast<NodeFactory*>(this)->NewNode(op, a, b)
#define NEW_NODE_3(op, a, b, c) \
  static_cast<NodeFactory*>(this)->NewNode(op, a, b, c)

template <typename NodeFactory>
class MachineNodeFactory {
 public:
  // Constants.
  Node* PointerConstant(void* value) {
    return IntPtrConstant(reinterpret_cast<intptr_t>(value));
  }
  Node* IntPtrConstant(intptr_t value) {
    // TODO(dcarney): mark generated code as unserializable if value != 0.
    return kPointerSize == 8 ? Int64Constant(value) : Int32Constant(value);
  }
  Node* Int32Constant(int32_t value) {
    return NEW_NODE_0(COMMON()->Int32Constant(value));
  }
  Node* Int64Constant(int64_t value) {
    return NEW_NODE_0(COMMON()->Int64Constant(value));
  }
  Node* NumberConstant(double value) {
    return NEW_NODE_0(COMMON()->NumberConstant(value));
  }
  Node* Float64Constant(double value) {
    return NEW_NODE_0(COMMON()->Float64Constant(value));
  }
  Node* HeapConstant(Handle<Object> object) {
    PrintableUnique<Object> val =
        PrintableUnique<Object>::CreateUninitialized(ZONE(), object);
    return NEW_NODE_0(COMMON()->HeapConstant(val));
  }

  // Memory Operations.
  Node* Load(MachineRepresentation rep, Node* base) {
    return Load(rep, base, Int32Constant(0));
  }
  Node* Load(MachineRepresentation rep, Node* base, Node* index) {
    return NEW_NODE_2(MACHINE()->Load(rep), base, index);
  }
  void Store(MachineRepresentation rep, Node* base, Node* value) {
    Store(rep, base, Int32Constant(0), value);
  }
  void Store(MachineRepresentation rep, Node* base, Node* index, Node* value) {
    NEW_NODE_3(MACHINE()->Store(rep), base, index, value);
  }
  // Arithmetic Operations.
  Node* WordAnd(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->WordAnd(), a, b);
  }
  Node* WordOr(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->WordOr(), a, b);
  }
  Node* WordXor(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->WordXor(), a, b);
  }
  Node* WordShl(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->WordShl(), a, b);
  }
  Node* WordShr(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->WordShr(), a, b);
  }
  Node* WordSar(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->WordSar(), a, b);
  }
  Node* WordEqual(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->WordEqual(), a, b);
  }
  Node* WordNotEqual(Node* a, Node* b) {
    return WordBinaryNot(WordEqual(a, b));
  }
  Node* WordNot(Node* a) {
    if (MACHINE()->is32()) {
      return Word32Not(a);
    } else {
      return Word64Not(a);
    }
  }
  Node* WordBinaryNot(Node* a) {
    if (MACHINE()->is32()) {
      return Word32BinaryNot(a);
    } else {
      return Word64BinaryNot(a);
    }
  }

  Node* Word32And(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word32And(), a, b);
  }
  Node* Word32Or(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word32Or(), a, b);
  }
  Node* Word32Xor(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word32Xor(), a, b);
  }
  Node* Word32Shl(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word32Shl(), a, b);
  }
  Node* Word32Shr(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word32Shr(), a, b);
  }
  Node* Word32Sar(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word32Sar(), a, b);
  }
  Node* Word32Equal(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word32Equal(), a, b);
  }
  Node* Word32NotEqual(Node* a, Node* b) {
    return Word32BinaryNot(Word32Equal(a, b));
  }
  Node* Word32Not(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
  Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }

  Node* Word64And(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word64And(), a, b);
  }
  Node* Word64Or(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word64Or(), a, b);
  }
  Node* Word64Xor(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word64Xor(), a, b);
  }
  Node* Word64Shl(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word64Shl(), a, b);
  }
  Node* Word64Shr(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word64Shr(), a, b);
  }
  Node* Word64Sar(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word64Sar(), a, b);
  }
  Node* Word64Equal(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Word64Equal(), a, b);
  }
  Node* Word64NotEqual(Node* a, Node* b) {
    return Word64BinaryNot(Word64Equal(a, b));
  }
  Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }
  Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); }

  Node* Int32Add(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int32Add(), a, b);
  }
  Node* Int32Sub(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int32Sub(), a, b);
  }
  Node* Int32Mul(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int32Mul(), a, b);
  }
  Node* Int32Div(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int32Div(), a, b);
  }
  Node* Int32UDiv(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int32UDiv(), a, b);
  }
  Node* Int32Mod(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int32Mod(), a, b);
  }
  Node* Int32UMod(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int32UMod(), a, b);
  }
  Node* Int32LessThan(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int32LessThan(), a, b);
  }
  Node* Int32LessThanOrEqual(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int32LessThanOrEqual(), a, b);
  }
  Node* Uint32LessThan(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Uint32LessThan(), a, b);
  }
  Node* Uint32LessThanOrEqual(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Uint32LessThanOrEqual(), a, b);
  }
  Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
  Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
    return Int32LessThanOrEqual(b, a);
  }
  Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }

  Node* Int64Add(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int64Add(), a, b);
  }
  Node* Int64Sub(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int64Sub(), a, b);
  }
  Node* Int64Mul(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int64Mul(), a, b);
  }
  Node* Int64Div(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int64Div(), a, b);
  }
  Node* Int64UDiv(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int64UDiv(), a, b);
  }
  Node* Int64Mod(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int64Mod(), a, b);
  }
  Node* Int64UMod(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int64UMod(), a, b);
  }
  Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
  Node* Int64LessThan(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int64LessThan(), a, b);
  }
  Node* Int64LessThanOrEqual(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Int64LessThanOrEqual(), a, b);
  }
  Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
  Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
    return Int64LessThanOrEqual(b, a);
  }

  Node* ConvertIntPtrToInt32(Node* a) {
    return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a)
                             : a;
  }
  Node* ConvertInt32ToIntPtr(Node* a) {
    return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ConvertInt32ToInt64(), a)
                             : a;
  }

#define INTPTR_BINOP(prefix, name)                     \
  Node* IntPtr##name(Node* a, Node* b) {               \
    return kPointerSize == 8 ? prefix##64##name(a, b)  \
                             : prefix##32##name(a, b); \
  }

  INTPTR_BINOP(Int, Add);
  INTPTR_BINOP(Int, Sub);
  INTPTR_BINOP(Int, LessThan);
  INTPTR_BINOP(Int, LessThanOrEqual);
  INTPTR_BINOP(Word, Equal);
  INTPTR_BINOP(Word, NotEqual);
  INTPTR_BINOP(Int, GreaterThanOrEqual);
  INTPTR_BINOP(Int, GreaterThan);

#undef INTPTR_BINOP

  Node* Float64Add(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Float64Add(), a, b);
  }
  Node* Float64Sub(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Float64Sub(), a, b);
  }
  Node* Float64Mul(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Float64Mul(), a, b);
  }
  Node* Float64Div(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Float64Div(), a, b);
  }
  Node* Float64Mod(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Float64Mod(), a, b);
  }
  Node* Float64Equal(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Float64Equal(), a, b);
  }
  Node* Float64NotEqual(Node* a, Node* b) {
    return WordBinaryNot(Float64Equal(a, b));
  }
  Node* Float64LessThan(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Float64LessThan(), a, b);
  }
  Node* Float64LessThanOrEqual(Node* a, Node* b) {
    return NEW_NODE_2(MACHINE()->Float64LessThanOrEqual(), a, b);
  }
  Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); }
  Node* Float64GreaterThanOrEqual(Node* a, Node* b) {
    return Float64LessThanOrEqual(b, a);
  }

  // Conversions.
  Node* ConvertInt32ToInt64(Node* a) {
    return NEW_NODE_1(MACHINE()->ConvertInt32ToInt64(), a);
  }
  Node* ConvertInt64ToInt32(Node* a) {
    return NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a);
  }
  Node* ConvertInt32ToFloat64(Node* a) {
    return NEW_NODE_1(MACHINE()->ConvertInt32ToFloat64(), a);
  }
  Node* ConvertFloat64ToInt32(Node* a) {
    return NEW_NODE_1(MACHINE()->ConvertFloat64ToInt32(), a);
  }

#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
  // Call to C. (Note: the guard must be #if, not #ifdef, since the macro is
  // defined as 0 or 1 above.)
  Node* CallC(Node* function_address, MachineRepresentation return_type,
              MachineRepresentation* arg_types, Node** args, int n_args) {
    CallDescriptor* descriptor = Linkage::GetSimplifiedCDescriptor(
        ZONE(), n_args, return_type, arg_types);
    Node** passed_args =
        static_cast<Node**>(alloca((n_args + 1) * sizeof(args[0])));
    passed_args[0] = function_address;
    for (int i = 0; i < n_args; ++i) {
      passed_args[i + 1] = args[i];
    }
    return NEW_NODE_2(COMMON()->Call(descriptor), n_args + 1, passed_args);
  }
#endif
};

#undef NEW_NODE_0
#undef NEW_NODE_1
#undef NEW_NODE_2
#undef NEW_NODE_3
#undef MACHINE
#undef COMMON
#undef ZONE

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_MACHINE_NODE_FACTORY_H_
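What the INTPTR_BINOP instantiations above expand to, sketched for one of them (editorial):

// INTPTR_BINOP(Int, Add) expands to a pointer-width dispatch:
//
//   Node* IntPtrAdd(Node* a, Node* b) {
//     return kPointerSize == 8 ? Int64Add(a, b) : Int32Add(a, b);
//   }
//
// so graph-building code can stay word-size agnostic, mirroring
// MachineOperatorBuilder::pointer_rep() on the operator side.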
src/compiler/machine-operator-reducer.cc (new file, 343 lines)
@@ -0,0 +1,343 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/machine-operator-reducer.h"

#include "src/compiler/common-node-cache.h"
#include "src/compiler/generic-node-inl.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-matchers.h"

namespace v8 {
namespace internal {
namespace compiler {

MachineOperatorReducer::MachineOperatorReducer(Graph* graph)
    : graph_(graph),
      cache_(new (graph->zone()) CommonNodeCache(graph->zone())),
      common_(graph->zone()),
      machine_(graph->zone()) {}


MachineOperatorReducer::MachineOperatorReducer(Graph* graph,
                                               CommonNodeCache* cache)
    : graph_(graph),
      cache_(cache),
      common_(graph->zone()),
      machine_(graph->zone()) {}


Node* MachineOperatorReducer::Int32Constant(int32_t value) {
  Node** loc = cache_->FindInt32Constant(value);
  if (*loc == NULL) {
    *loc = graph_->NewNode(common_.Int32Constant(value));
  }
  return *loc;
}


Node* MachineOperatorReducer::Float64Constant(volatile double value) {
  Node** loc = cache_->FindFloat64Constant(value);
  if (*loc == NULL) {
    *loc = graph_->NewNode(common_.Float64Constant(value));
  }
  return *loc;
}


// Perform constant folding and strength reduction on machine operators.
Reduction MachineOperatorReducer::Reduce(Node* node) {
  switch (node->opcode()) {
    case IrOpcode::kWord32And: {
      Int32BinopMatcher m(node);
      if (m.right().Is(0)) return Replace(m.right().node());  // x & 0 => 0
      if (m.right().Is(-1)) return Replace(m.left().node());  // x & -1 => x
      if (m.IsFoldable()) {                                   // K & K => K
        return ReplaceInt32(m.left().Value() & m.right().Value());
      }
      if (m.LeftEqualsRight()) return Replace(m.left().node());  // x & x => x
      break;
    }
    case IrOpcode::kWord32Or: {
      Int32BinopMatcher m(node);
      if (m.right().Is(0)) return Replace(m.left().node());    // x | 0 => x
      if (m.right().Is(-1)) return Replace(m.right().node());  // x | -1 => -1
      if (m.IsFoldable()) {                                    // K | K => K
        return ReplaceInt32(m.left().Value() | m.right().Value());
      }
      if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x
      break;
    }
    case IrOpcode::kWord32Xor: {
      Int32BinopMatcher m(node);
      if (m.right().Is(0)) return Replace(m.left().node());  // x ^ 0 => x
      if (m.IsFoldable()) {                                  // K ^ K => K
        return ReplaceInt32(m.left().Value() ^ m.right().Value());
      }
      if (m.LeftEqualsRight()) return ReplaceInt32(0);  // x ^ x => 0
      break;
    }
    case IrOpcode::kWord32Shl: {
      Int32BinopMatcher m(node);
      if (m.right().Is(0)) return Replace(m.left().node());  // x << 0 => x
      if (m.IsFoldable()) {                                  // K << K => K
        return ReplaceInt32(m.left().Value() << m.right().Value());
      }
      break;
    }
    case IrOpcode::kWord32Shr: {
      Uint32BinopMatcher m(node);
      if (m.right().Is(0)) return Replace(m.left().node());  // x >>> 0 => x
      if (m.IsFoldable()) {                                  // K >>> K => K
        return ReplaceInt32(m.left().Value() >> m.right().Value());
      }
      break;
    }
    case IrOpcode::kWord32Sar: {
      Int32BinopMatcher m(node);
      if (m.right().Is(0)) return Replace(m.left().node());  // x >> 0 => x
      if (m.IsFoldable()) {                                  // K >> K => K
        return ReplaceInt32(m.left().Value() >> m.right().Value());
      }
      break;
    }
    case IrOpcode::kWord32Equal: {
      Int32BinopMatcher m(node);
      if (m.IsFoldable()) {  // K == K => K
        return ReplaceBool(m.left().Value() == m.right().Value());
      }
      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y == 0 => x == y
        Int32BinopMatcher msub(m.left().node());
        node->ReplaceInput(0, msub.left().node());
        node->ReplaceInput(1, msub.right().node());
        return Changed(node);
      }
      // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x == x => true
      break;
    }
    case IrOpcode::kInt32Add: {
      Int32BinopMatcher m(node);
      if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
      if (m.IsFoldable()) {                                  // K + K => K
        return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) +
                            static_cast<uint32_t>(m.right().Value()));
      }
      break;
    }
    case IrOpcode::kInt32Sub: {
      Int32BinopMatcher m(node);
      if (m.right().Is(0)) return Replace(m.left().node());  // x - 0 => x
      if (m.IsFoldable()) {                                  // K - K => K
        return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) -
                            static_cast<uint32_t>(m.right().Value()));
      }
      if (m.LeftEqualsRight()) return ReplaceInt32(0);  // x - x => 0
      break;
    }
    case IrOpcode::kInt32Mul: {
      Int32BinopMatcher m(node);
      if (m.right().Is(0)) return Replace(m.right().node());  // x * 0 => 0
      if (m.right().Is(1)) return Replace(m.left().node());   // x * 1 => x
      if (m.IsFoldable()) {                                   // K * K => K
        return ReplaceInt32(m.left().Value() * m.right().Value());
      }
      if (m.right().Is(-1)) {  // x * -1 => 0 - x
        graph_->ChangeOperator(node, machine_.Int32Sub());
        node->ReplaceInput(0, Int32Constant(0));
        node->ReplaceInput(1, m.left().node());
        return Changed(node);
      }
      if (m.right().IsPowerOf2()) {  // x * 2^n => x << n
        graph_->ChangeOperator(node, machine_.Word32Shl());
        node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
        return Changed(node);
      }
      break;
    }
    case IrOpcode::kInt32Div: {
      Int32BinopMatcher m(node);
      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1 => x
      // TODO(turbofan): if (m.left().Is(0))
      // TODO(turbofan): if (m.right().IsPowerOf2())
      // TODO(turbofan): if (m.right().Is(0))
      // TODO(turbofan): if (m.LeftEqualsRight())
      if (m.IsFoldable() && !m.right().Is(0)) {  // K / K => K
        if (m.right().Is(-1)) return ReplaceInt32(-m.left().Value());
        return ReplaceInt32(m.left().Value() / m.right().Value());
      }
      if (m.right().Is(-1)) {  // x / -1 => 0 - x
        graph_->ChangeOperator(node, machine_.Int32Sub());
        node->ReplaceInput(0, Int32Constant(0));
        node->ReplaceInput(1, m.left().node());
        return Changed(node);
      }
      break;
    }
    case IrOpcode::kInt32UDiv: {
      Uint32BinopMatcher m(node);
      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1 => x
      // TODO(turbofan): if (m.left().Is(0))
      // TODO(turbofan): if (m.right().Is(0))
      // TODO(turbofan): if (m.LeftEqualsRight())
      if (m.IsFoldable() && !m.right().Is(0)) {  // K / K => K
        return ReplaceInt32(m.left().Value() / m.right().Value());
      }
      if (m.right().IsPowerOf2()) {  // x / 2^n => x >> n
        graph_->ChangeOperator(node, machine_.Word32Shr());
        node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
        return Changed(node);
      }
      break;
    }
    case IrOpcode::kInt32Mod: {
      Int32BinopMatcher m(node);
      if (m.right().Is(1)) return ReplaceInt32(0);   // x % 1 => 0
      if (m.right().Is(-1)) return ReplaceInt32(0);  // x % -1 => 0
      // TODO(turbofan): if (m.left().Is(0))
      // TODO(turbofan): if (m.right().IsPowerOf2())
      // TODO(turbofan): if (m.right().Is(0))
      // TODO(turbofan): if (m.LeftEqualsRight())
      if (m.IsFoldable() && !m.right().Is(0)) {  // K % K => K
        return ReplaceInt32(m.left().Value() % m.right().Value());
      }
      break;
    }
    case IrOpcode::kInt32UMod: {
      Uint32BinopMatcher m(node);
      if (m.right().Is(1)) return ReplaceInt32(0);  // x % 1 => 0
      // TODO(turbofan): if (m.left().Is(0))
      // TODO(turbofan): if (m.right().Is(0))
      // TODO(turbofan): if (m.LeftEqualsRight())
      if (m.IsFoldable() && !m.right().Is(0)) {  // K % K => K
        return ReplaceInt32(m.left().Value() % m.right().Value());
      }
      if (m.right().IsPowerOf2()) {  // x % 2^n => x & 2^n-1
        graph_->ChangeOperator(node, machine_.Word32And());
        node->ReplaceInput(1, Int32Constant(m.right().Value() - 1));
        return Changed(node);
      }
      break;
    }
    case IrOpcode::kInt32LessThan: {
      Int32BinopMatcher m(node);
      if (m.IsFoldable()) {  // K < K => K
        return ReplaceBool(m.left().Value() < m.right().Value());
      }
      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y < 0 => x < y
        Int32BinopMatcher msub(m.left().node());
        node->ReplaceInput(0, msub.left().node());
        node->ReplaceInput(1, msub.right().node());
        return Changed(node);
      }
      if (m.left().Is(0) && m.right().IsInt32Sub()) {  // 0 < x - y => y < x
        Int32BinopMatcher msub(m.right().node());
        node->ReplaceInput(0, msub.right().node());
        node->ReplaceInput(1, msub.left().node());
        return Changed(node);
      }
      if (m.LeftEqualsRight()) return ReplaceBool(false);  // x < x => false
      break;
    }
    case IrOpcode::kInt32LessThanOrEqual: {
      Int32BinopMatcher m(node);
      if (m.IsFoldable()) {  // K <= K => K
        return ReplaceBool(m.left().Value() <= m.right().Value());
      }
      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y <= 0 => x <= y
        Int32BinopMatcher msub(m.left().node());
        node->ReplaceInput(0, msub.left().node());
        node->ReplaceInput(1, msub.right().node());
        return Changed(node);
      }
      if (m.left().Is(0) && m.right().IsInt32Sub()) {  // 0 <= x - y => y <= x
        Int32BinopMatcher msub(m.right().node());
        node->ReplaceInput(0, msub.right().node());
        node->ReplaceInput(1, msub.left().node());
        return Changed(node);
      }
      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
      break;
    }
    case IrOpcode::kUint32LessThan: {
      Uint32BinopMatcher m(node);
      if (m.left().Is(kMaxUInt32)) return ReplaceBool(false);  // M < x => false
      if (m.right().Is(0)) return ReplaceBool(false);          // x < 0 => false
      if (m.IsFoldable()) {                                    // K < K => K
        return ReplaceBool(m.left().Value() < m.right().Value());
      }
      if (m.LeftEqualsRight()) return ReplaceBool(false);  // x < x => false
      break;
    }
    case IrOpcode::kUint32LessThanOrEqual: {
      Uint32BinopMatcher m(node);
      if (m.left().Is(0)) return ReplaceBool(true);            // 0 <= x => true
      if (m.right().Is(kMaxUInt32)) return ReplaceBool(true);  // x <= M => true
      if (m.IsFoldable()) {                                    // K <= K => K
        return ReplaceBool(m.left().Value() <= m.right().Value());
      }
      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
      break;
    }
    case IrOpcode::kFloat64Add: {
      Float64BinopMatcher m(node);
      if (m.IsFoldable()) {  // K + K => K
        return ReplaceFloat64(m.left().Value() + m.right().Value());
      }
      break;
    }
    case IrOpcode::kFloat64Sub: {
      Float64BinopMatcher m(node);
      if (m.IsFoldable()) {  // K - K => K
        return ReplaceFloat64(m.left().Value() - m.right().Value());
      }
      break;
    }
    case IrOpcode::kFloat64Mul: {
      Float64BinopMatcher m(node);
      if (m.right().Is(1)) return Replace(m.left().node());  // x * 1.0 => x
      if (m.right().IsNaN()) {                               // x * NaN => NaN
        return Replace(m.right().node());
      }
      if (m.IsFoldable()) {  // K * K => K
        return ReplaceFloat64(m.left().Value() * m.right().Value());
      }
      break;
    }
    case IrOpcode::kFloat64Div: {
      Float64BinopMatcher m(node);
      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1.0 => x
      if (m.right().IsNaN()) {                               // x / NaN => NaN
        return Replace(m.right().node());
      }
      if (m.left().IsNaN()) {  // NaN / x => NaN
        return Replace(m.left().node());
      }
      if (m.IsFoldable()) {  // K / K => K
        return ReplaceFloat64(m.left().Value() / m.right().Value());
      }
      break;
    }
    case IrOpcode::kFloat64Mod: {
      Float64BinopMatcher m(node);
      if (m.right().IsNaN()) {  // x % NaN => NaN
        return Replace(m.right().node());
      }
      if (m.left().IsNaN()) {  // NaN % x => NaN
        return Replace(m.left().node());
      }
      if (m.IsFoldable()) {  // K % K => K
        return ReplaceFloat64(modulo(m.left().Value(), m.right().Value()));
      }
      break;
    }
    // TODO(turbofan): strength-reduce and fold floating point operations.
    default:
      break;
  }
  return NoChange();
}
}
}
}  // namespace v8::internal::compiler
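A few of the strength reductions above, traced on concrete inputs (editorial sketch, derived directly from the cases in Reduce):

// Int32Mul:    x * 8   => Word32Shl(x, 3)      (m.right().IsPowerOf2())
// Int32Mul:    x * -1  => Int32Sub(0, x)
// Int32UDiv:   x / 16  => Word32Shr(x, 4)
// Int32UMod:   x % 32  => Word32And(x, 31)     (mask 2^n - 1)
// Word32Equal: (x - y) == 0 => x == y          (inputs rewired in place)
// Constant folding: Int32Add(Int32Constant(3), Int32Constant(4)) => 7,
// computed through uint32_t casts so that wrap-around stays well-defined.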
src/compiler/machine-operator-reducer.h (new file, 52 lines)
@@ -0,0 +1,52 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
#define V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_

#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/machine-operator.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
class CommonNodeCache;

// Performs constant folding and strength reduction on nodes that have
// machine operators.
class MachineOperatorReducer : public Reducer {
 public:
  explicit MachineOperatorReducer(Graph* graph);

  MachineOperatorReducer(Graph* graph, CommonNodeCache* cache);

  virtual Reduction Reduce(Node* node);

 private:
  Graph* graph_;
  CommonNodeCache* cache_;
  CommonOperatorBuilder common_;
  MachineOperatorBuilder machine_;

  Node* Int32Constant(int32_t value);
  Node* Float64Constant(volatile double value);

  Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }

  Reduction ReplaceInt32(int32_t value) {
    return Replace(Int32Constant(value));
  }

  Reduction ReplaceFloat64(volatile double value) {
    return Replace(Float64Constant(value));
  }
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
src/compiler/machine-operator.h (new file, 177 lines)
@@ -0,0 +1,177 @@
|
||||
// Copyright 2013 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_COMPILER_MACHINE_OPERATOR_H_
|
||||
#define V8_COMPILER_MACHINE_OPERATOR_H_
|
||||
|
||||
#include "src/compiler/opcodes.h"
|
||||
#include "src/compiler/operator.h"
|
||||
#include "src/zone.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
namespace compiler {
|
||||
|
||||
// An enumeration of the storage representations at the machine level.
|
||||
// - Words are uninterpreted bits of a given fixed size that can be used
|
||||
// to store integers and pointers. They are normally allocated to general
|
||||
// purpose registers by the backend and are not tracked for GC.
|
||||
// - Floats are bits of a given fixed size that are used to store floating
|
||||
// point numbers. They are normally allocated to the floating point
|
||||
// registers of the machine and are not tracked for the GC.
|
||||
// - Tagged values are the size of a reference into the heap and can store
|
||||
// small words or references into the heap using a language and potentially
|
||||
// machine-dependent tagging scheme. These values are tracked by the code
|
||||
// generator for precise GC.
|
||||
enum MachineRepresentation {
|
||||
kMachineWord8,
|
||||
kMachineWord16,
|
||||
kMachineWord32,
|
||||
kMachineWord64,
|
||||
kMachineFloat64,
|
||||
kMachineTagged,
|
||||
kMachineLast
|
||||
};
|
||||
|
||||
|
||||
// TODO(turbofan): other write barriers are possible based on type
|
||||
enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };
|
||||
|
||||
|
||||
// A Store needs a MachineRepresentation and a WriteBarrierKind
|
||||
// in order to emit the correct write barrier.
|
||||
struct StoreRepresentation {
|
||||
MachineRepresentation rep;
|
||||
WriteBarrierKind write_barrier_kind;
|
||||
};
|
||||
|
||||
|
||||
// Interface for building machine-level operators. These operators are
|
||||
// machine-level but machine-independent and thus define a language suitable
|
||||
// for generating code to run on architectures such as ia32, x64, arm, etc.
|
||||
class MachineOperatorBuilder {
|
||||
public:
|
||||
explicit MachineOperatorBuilder(Zone* zone,
|
||||
MachineRepresentation word = pointer_rep())
|
||||
: zone_(zone), word_(word) {
|
||||
CHECK(word == kMachineWord32 || word == kMachineWord64);
|
||||
}
|
||||
|
||||
#define SIMPLE(name, properties, inputs, outputs) \
|
||||
return new (zone_) \
|
||||
SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);
|
||||
|
||||
#define OP1(name, ptype, pname, properties, inputs, outputs) \
|
||||
return new (zone_) \
|
||||
Operator1<ptype>(IrOpcode::k##name, properties | Operator::kNoThrow, \
|
||||
inputs, outputs, #name, pname)
|
||||
|
||||
#define BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
|
||||
#define BINOP_C(name) \
|
||||
SIMPLE(name, Operator::kCommutative | Operator::kPure, 2, 1)
|
||||
#define BINOP_AC(name) \
|
||||
SIMPLE(name, \
|
||||
Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \
|
||||
1)
|
||||
#define UNOP(name) SIMPLE(name, Operator::kPure, 1, 1)
|
||||
|
||||
#define WORD_SIZE(x) return is64() ? Word64##x() : Word32##x()
|
||||
|
||||
Operator* Load(MachineRepresentation rep) { // load [base + index]
|
||||
OP1(Load, MachineRepresentation, rep, Operator::kNoWrite, 2, 1);
|
||||
}
|
||||
// store [base + index], value
|
||||
Operator* Store(MachineRepresentation rep,
|
||||
WriteBarrierKind kind = kNoWriteBarrier) {
|
||||
StoreRepresentation store_rep = {rep, kind};
|
||||
OP1(Store, StoreRepresentation, store_rep, Operator::kNoRead, 3, 0);
|
||||
}
|
||||
|
||||
Operator* WordAnd() { WORD_SIZE(And); }
|
||||
Operator* WordOr() { WORD_SIZE(Or); }
|
||||
Operator* WordXor() { WORD_SIZE(Xor); }
|
||||
Operator* WordShl() { WORD_SIZE(Shl); }
|
||||
Operator* WordShr() { WORD_SIZE(Shr); }
|
||||
  Operator* WordSar() { WORD_SIZE(Sar); }
  Operator* WordEqual() { WORD_SIZE(Equal); }

  Operator* Word32And() { BINOP_AC(Word32And); }
  Operator* Word32Or() { BINOP_AC(Word32Or); }
  Operator* Word32Xor() { BINOP_AC(Word32Xor); }
  Operator* Word32Shl() { BINOP(Word32Shl); }
  Operator* Word32Shr() { BINOP(Word32Shr); }
  Operator* Word32Sar() { BINOP(Word32Sar); }
  Operator* Word32Equal() { BINOP_C(Word32Equal); }

  Operator* Word64And() { BINOP_AC(Word64And); }
  Operator* Word64Or() { BINOP_AC(Word64Or); }
  Operator* Word64Xor() { BINOP_AC(Word64Xor); }
  Operator* Word64Shl() { BINOP(Word64Shl); }
  Operator* Word64Shr() { BINOP(Word64Shr); }
  Operator* Word64Sar() { BINOP(Word64Sar); }
  Operator* Word64Equal() { BINOP_C(Word64Equal); }

  Operator* Int32Add() { BINOP_AC(Int32Add); }
  Operator* Int32Sub() { BINOP(Int32Sub); }
  Operator* Int32Mul() { BINOP_AC(Int32Mul); }
  Operator* Int32Div() { BINOP(Int32Div); }
  Operator* Int32UDiv() { BINOP(Int32UDiv); }
  Operator* Int32Mod() { BINOP(Int32Mod); }
  Operator* Int32UMod() { BINOP(Int32UMod); }
  Operator* Int32LessThan() { BINOP(Int32LessThan); }
  Operator* Int32LessThanOrEqual() { BINOP(Int32LessThanOrEqual); }
  Operator* Uint32LessThan() { BINOP(Uint32LessThan); }
  Operator* Uint32LessThanOrEqual() { BINOP(Uint32LessThanOrEqual); }

  Operator* Int64Add() { BINOP_AC(Int64Add); }
  Operator* Int64Sub() { BINOP(Int64Sub); }
  Operator* Int64Mul() { BINOP_AC(Int64Mul); }
  Operator* Int64Div() { BINOP(Int64Div); }
  Operator* Int64UDiv() { BINOP(Int64UDiv); }
  Operator* Int64Mod() { BINOP(Int64Mod); }
  Operator* Int64UMod() { BINOP(Int64UMod); }
  Operator* Int64LessThan() { BINOP(Int64LessThan); }
  Operator* Int64LessThanOrEqual() { BINOP(Int64LessThanOrEqual); }

  Operator* ConvertInt32ToInt64() { UNOP(ConvertInt32ToInt64); }
  Operator* ConvertInt64ToInt32() { UNOP(ConvertInt64ToInt32); }
  Operator* ConvertInt32ToFloat64() { UNOP(ConvertInt32ToFloat64); }
  Operator* ConvertUint32ToFloat64() { UNOP(ConvertUint32ToFloat64); }
  // TODO(titzer): add rounding mode to floating point conversion.
  Operator* ConvertFloat64ToInt32() { UNOP(ConvertFloat64ToInt32); }
  Operator* ConvertFloat64ToUint32() { UNOP(ConvertFloat64ToUint32); }

  // TODO(titzer): do we need different rounding modes for float arithmetic?
  Operator* Float64Add() { BINOP_C(Float64Add); }
  Operator* Float64Sub() { BINOP(Float64Sub); }
  Operator* Float64Mul() { BINOP_C(Float64Mul); }
  Operator* Float64Div() { BINOP(Float64Div); }
  Operator* Float64Mod() { BINOP(Float64Mod); }
  Operator* Float64Equal() { BINOP_C(Float64Equal); }
  Operator* Float64LessThan() { BINOP(Float64LessThan); }
  Operator* Float64LessThanOrEqual() { BINOP(Float64LessThanOrEqual); }

  inline bool is32() const { return word_ == kMachineWord32; }
  inline bool is64() const { return word_ == kMachineWord64; }
  inline MachineRepresentation word() const { return word_; }

  static inline MachineRepresentation pointer_rep() {
    return kPointerSize == 8 ? kMachineWord64 : kMachineWord32;
  }

#undef WORD_SIZE
#undef UNOP
#undef BINOP
#undef OP1
#undef SIMPLE

 private:
  Zone* zone_;
  MachineRepresentation word_;
};
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_MACHINE_OPERATOR_H_
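The WORD_SIZE/UNOP/BINOP/BINOP_C/BINOP_AC macros used above are defined earlier in machine-operator.h and do not appear in this excerpt. Judging by the names, BINOP builds a plain two-input operator, BINOP_C one tagged commutative, and BINOP_AC one tagged both associative and commutative; marking Float64Add as only commutative would be deliberate, since floating-point addition is not associative and must not be reassociated. Below is a self-contained sketch of that presumed distinction; every name in it is hypothetical, not taken from V8.

// Illustrative sketch only: these names and the expansion strategy are
// guesses from the macro names, not V8's actual definitions.
#include <cstdio>

enum OperatorProperty {
  kNoProperties = 0,
  kCommutative = 1 << 0,  // a op b == b op a
  kAssociative = 1 << 1   // (a op b) op c == a op (b op c)
};

struct Operator {
  const char* mnemonic;
  int properties;  // bitwise OR of OperatorProperty flags
};

// BINOP: a plain two-input operator with no algebraic properties.
Operator MakeBinop(const char* name) { return Operator{name, kNoProperties}; }

// BINOP_C: commutative only, e.g. Float64Add.
Operator MakeBinopC(const char* name) { return Operator{name, kCommutative}; }

// BINOP_AC: commutative and associative, e.g. Word32And or Int32Add.
Operator MakeBinopAC(const char* name) {
  return Operator{name, kCommutative | kAssociative};
}

int main() {
  Operator word32_and = MakeBinopAC("Word32And");
  Operator float64_add = MakeBinopC("Float64Add");
  Operator int32_sub = MakeBinop("Int32Sub");
  std::printf("%s: %d, %s: %d, %s: %d\n", word32_and.mnemonic,
              word32_and.properties, float64_add.mnemonic,
              float64_add.properties, int32_sub.mnemonic,
              int32_sub.properties);
  return 0;
}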
43 src/compiler/node-aux-data-inl.h Normal file
@ -0,0 +1,43 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_NODE_AUX_DATA_INL_H_
#define V8_COMPILER_NODE_AUX_DATA_INL_H_

#include "src/compiler/graph.h"
#include "src/compiler/node.h"
#include "src/compiler/node-aux-data.h"

namespace v8 {
namespace internal {
namespace compiler {

template <class T>
NodeAuxData<T>::NodeAuxData(Graph* graph)
    : aux_data_(ZoneAllocator(graph->zone())) {}


template <class T>
void NodeAuxData<T>::Set(Node* node, const T& data) {
  int id = node->id();
  if (id >= static_cast<int>(aux_data_.size())) {
    aux_data_.resize(id + 1);
  }
  aux_data_[id] = data;
}


template <class T>
T NodeAuxData<T>::Get(Node* node) {
  int id = node->id();
  if (id >= static_cast<int>(aux_data_.size())) {
    return T();
  }
  return aux_data_[id];
}
}
}
}  // namespace v8::internal::compiler

#endif
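NodeAuxData keys a zone-allocated std::vector by the node's dense integer id: Set grows the vector on demand, and Get returns a default-constructed T for any node that was never set. Here is a minimal self-contained analogue of that pattern, with FakeNode standing in for the compiler's Node class (which is not shown in this diff).

// Self-contained analogue of the NodeAuxData pattern; FakeNode is a
// stand-in for v8::internal::compiler::Node.
#include <cassert>
#include <vector>

struct FakeNode {
  int id_;
  int id() const { return id_; }
};

template <class T>
class SimpleAuxData {
 public:
  void Set(FakeNode* node, const T& data) {
    int id = node->id();
    // Grow on demand so ids assigned after construction still work.
    if (id >= static_cast<int>(data_.size())) data_.resize(id + 1);
    data_[id] = data;
  }
  T Get(FakeNode* node) {
    int id = node->id();
    // Nodes that were never Set() yield a default-constructed T,
    // exactly as NodeAuxData::Get does above.
    if (id >= static_cast<int>(data_.size())) return T();
    return data_[id];
  }

 private:
  std::vector<T> data_;
};

int main() {
  SimpleAuxData<int> aux;
  FakeNode a{0}, b{7};
  aux.Set(&b, 42);
  assert(aux.Get(&b) == 42);
  assert(aux.Get(&a) == 0);  // default int for an unset node
  return 0;
}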
38 src/compiler/node-aux-data.h Normal file
@ -0,0 +1,38 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_NODE_AUX_DATA_H_
#define V8_COMPILER_NODE_AUX_DATA_H_

#include <vector>

#include "src/zone-allocator.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
class Graph;
class Node;

template <class T>
class NodeAuxData {
 public:
  inline explicit NodeAuxData(Graph* graph);

  inline void Set(Node* node, const T& data);
  inline T Get(Node* node);

 private:
  typedef zone_allocator<T> ZoneAllocator;
  typedef std::vector<T, ZoneAllocator> TZoneVector;

  TZoneVector aux_data_;
};
}
}
}  // namespace v8::internal::compiler

#endif
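The TZoneVector typedef above plugs V8's zone_allocator (declared in src/zone-allocator.h, which is not part of this diff) into std::vector, so the aux data lives in the compiler's zone and is released wholesale with it. The following is a minimal sketch of that allocator concept, assuming only the general bump-pointer zone idea rather than V8's actual implementation.

// Minimal sketch of a zone/arena allocator usable with std::vector.
// This illustrates the concept only; it is not V8's zone_allocator.
#include <cstddef>
#include <vector>

class Arena {
 public:
  explicit Arena(size_t bytes) : buf_(new char[bytes]), top_(buf_) {}
  ~Arena() { delete[] buf_; }  // frees every allocation at once
  void* Allocate(size_t n) {
    char* p = top_;
    top_ += (n + 7) & ~size_t(7);  // keep 8-byte alignment
    return p;  // no bounds checking: enough for this demo
  }

 private:
  char* buf_;
  char* top_;
};

template <typename T>
struct ArenaAllocator {
  typedef T value_type;
  explicit ArenaAllocator(Arena* arena) : arena_(arena) {}
  template <typename U>
  ArenaAllocator(const ArenaAllocator<U>& other) : arena_(other.arena_) {}
  T* allocate(size_t n) {
    return static_cast<T*>(arena_->Allocate(n * sizeof(T)));
  }
  void deallocate(T*, size_t) {}  // no-op: the arena is freed wholesale
  Arena* arena_;
};

template <typename T, typename U>
bool operator==(const ArenaAllocator<T>& a, const ArenaAllocator<U>& b) {
  return a.arena_ == b.arena_;
}
template <typename T, typename U>
bool operator!=(const ArenaAllocator<T>& a, const ArenaAllocator<U>& b) {
  return !(a == b);
}

int main() {
  Arena arena(1 << 16);
  std::vector<int, ArenaAllocator<int> > v{ArenaAllocator<int>(&arena)};
  for (int i = 0; i < 100; i++) v.push_back(i);
  return 0;  // vector memory is reclaimed when arena goes out of scope
}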
120 src/compiler/node-cache.cc Normal file
@ -0,0 +1,120 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/node-cache.h"

namespace v8 {
namespace internal {
namespace compiler {

#define INITIAL_SIZE 16
#define LINEAR_PROBE 5

template <typename Key>
int32_t NodeCacheHash(Key key) {
  UNIMPLEMENTED();
  return 0;
}

template <>
inline int32_t NodeCacheHash(int32_t key) {
  return ComputeIntegerHash(key, 0);
}


template <>
inline int32_t NodeCacheHash(int64_t key) {
  return ComputeLongHash(key);
}


template <>
inline int32_t NodeCacheHash(double key) {
  return ComputeLongHash(BitCast<int64_t>(key));
}


template <>
inline int32_t NodeCacheHash(void* key) {
  return ComputePointerHash(key);
}


template <typename Key>
bool NodeCache<Key>::Resize(Zone* zone) {
  if (size_ >= max_) return false;  // Don't grow past the maximum size.

  // Allocate a new block of entries 4x the size.
  Entry* old_entries = entries_;
  int old_size = size_ + LINEAR_PROBE;
  size_ = size_ * 4;
  int num_entries = size_ + LINEAR_PROBE;
  entries_ = zone->NewArray<Entry>(num_entries);
  memset(entries_, 0, sizeof(Entry) * num_entries);

  // Insert the old entries into the new block.
  for (int i = 0; i < old_size; i++) {
    Entry* old = &old_entries[i];
    if (old->value_ != NULL) {
      int hash = NodeCacheHash(old->key_);
      int start = hash & (size_ - 1);
      int end = start + LINEAR_PROBE;
      for (int j = start; j < end; j++) {
        Entry* entry = &entries_[j];
        if (entry->value_ == NULL) {
          entry->key_ = old->key_;
          entry->value_ = old->value_;
          break;
        }
      }
    }
  }
  return true;
}


template <typename Key>
Node** NodeCache<Key>::Find(Zone* zone, Key key) {
  int32_t hash = NodeCacheHash(key);
  if (entries_ == NULL) {
    // Allocate the initial entries and insert the first entry.
    int num_entries = INITIAL_SIZE + LINEAR_PROBE;
    entries_ = zone->NewArray<Entry>(num_entries);
    size_ = INITIAL_SIZE;
    memset(entries_, 0, sizeof(Entry) * num_entries);
    Entry* entry = &entries_[hash & (INITIAL_SIZE - 1)];
    entry->key_ = key;
    return &entry->value_;
  }

  while (true) {
    // Search up to N entries after (linear probing).
    int start = hash & (size_ - 1);
    int end = start + LINEAR_PROBE;
    for (int i = start; i < end; i++) {
      Entry* entry = &entries_[i];
      if (entry->key_ == key) return &entry->value_;
      if (entry->value_ == NULL) {
        entry->key_ = key;
        return &entry->value_;
      }
    }

    if (!Resize(zone)) break;  // Don't grow past the maximum size.
  }

  // If resized to maximum and still didn't find space, overwrite an entry.
  Entry* entry = &entries_[hash & (size_ - 1)];
  entry->key_ = key;
  entry->value_ = NULL;
  return &entry->value_;
}


template class NodeCache<int64_t>;
template class NodeCache<int32_t>;
template class NodeCache<void*>;
}
}
}  // namespace v8::internal::compiler
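Find() and Resize() above implement bounded linear probing: the table is allocated with size_ + LINEAR_PROBE entries so a probe window starting at hash & (size_ - 1) never needs to wrap, the table quadruples whenever a window fills up, and once max_ is reached an existing entry is simply overwritten, since losing a cache entry only costs a later re-canonicalization. Note also that double keys hash by bit pattern (BitCast<int64_t>), so +0.0 and -0.0 land in distinct entries, which is what a constant cache wants since they are distinct values. A standalone sketch of just the probe-window step, using illustrative types rather than V8's:

// Standalone sketch of the bounded linear-probe policy in NodeCache::Find.
#include <cstdint>
#include <cstdio>

static const int kLinearProbe = 5;  // same probe window as LINEAR_PROBE

struct Slot {
  int64_t key;
  void* value;  // null means the slot is free
};

// Returns the slot for |key| inside the probe window, or nullptr when the
// whole window is taken by other keys (the real cache then resizes, or
// overwrites an entry once it has hit its maximum size).
Slot* FindInWindow(Slot* slots, int size, int64_t key, int32_t hash) {
  int start = hash & (size - 1);  // size must be a power of two
  for (int i = start; i < start + kLinearProbe; i++) {
    // The table is allocated with size + kLinearProbe slots, so the window
    // never runs past the end and no wrap-around is needed.
    if (slots[i].key == key) return &slots[i];
    if (slots[i].value == nullptr) {  // free slot: claim it for this key
      slots[i].key = key;
      return &slots[i];
    }
  }
  return nullptr;  // window exhausted
}

int main() {
  const int kSize = 8;  // power of two, like INITIAL_SIZE
  Slot table[kSize + kLinearProbe] = {};  // zero-initialized: all free
  Slot* slot = FindInWindow(table, kSize, 42, /* toy hash */ 42);
  // On an empty table a window always has a free slot, so slot != nullptr.
  std::printf("claimed slot for key %lld\n",
              static_cast<long long>(slot->key));
  return 0;
}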
53 src/compiler/node-cache.h Normal file
@ -0,0 +1,53 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_NODE_CACHE_H_
#define V8_COMPILER_NODE_CACHE_H_

#include "src/v8.h"

#include "src/compiler/node.h"

namespace v8 {
namespace internal {
namespace compiler {

// A cache for nodes based on a key. Useful for implementing canonicalization of
// nodes such as constants, parameters, etc.
template <typename Key>
class NodeCache {
 public:
  explicit NodeCache(int max = 256) : entries_(NULL), size_(0), max_(max) {}

  // Search for node associated with {key} and return a pointer to a memory
  // location in this cache that stores an entry for the key. If the location
  // returned by this method contains a non-NULL node, the caller can use that
  // node. Otherwise it is the responsibility of the caller to fill the entry
  // with a new node.
  // Note that a previous cache entry may be overwritten if the cache becomes
  // too full or encounters too many hash collisions.
  Node** Find(Zone* zone, Key key);

 private:
  struct Entry {
    Key key_;
    Node* value_;
  };

  Entry* entries_;  // lazily-allocated hash entries.
  int32_t size_;
  int32_t max_;

  bool Resize(Zone* zone);
};

// Various default cache types.
typedef NodeCache<int64_t> Int64NodeCache;
typedef NodeCache<int32_t> Int32NodeCache;
typedef NodeCache<void*> PtrNodeCache;
}
}
}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_NODE_CACHE_H_
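The Find() comment above describes a fill-on-miss protocol: the cache returns the address of a slot, and the caller installs a node there if the slot is still NULL. Below is a self-contained illustration of that caller-side pattern, with MiniCache and FakeNode as stand-ins for NodeCache and Node; the real cache allocates from a Zone and uses open addressing rather than std::map.

// Self-contained illustration of the fill-on-miss protocol implied by
// the Find() doc comment. MiniCache stands in for NodeCache.
#include <cassert>
#include <map>

struct FakeNode { int value; };

class MiniCache {
 public:
  // Like NodeCache::Find (minus the Zone): returns the address of the
  // slot for |key|; the caller fills it with a new node if still null.
  FakeNode** Find(int key) { return &slots_[key]; }

 private:
  std::map<int, FakeNode*> slots_;  // value-initialized to nullptr
};

FakeNode* GetCachedConstant(MiniCache* cache, int value) {
  FakeNode** loc = cache->Find(value);
  if (*loc == nullptr) *loc = new FakeNode{value};  // miss: canonicalize
  return *loc;
}

int main() {
  MiniCache cache;
  FakeNode* a = GetCachedConstant(&cache, 7);
  FakeNode* b = GetCachedConstant(&cache, 7);
  assert(a == b);  // second lookup returns the same canonical node
  delete a;
  return 0;
}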
Some files were not shown because too many files have changed in this diff.