// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
|
|
|
|
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "src/assembler.h"
|
2014-09-24 07:08:27 +00:00
|
|
|
#include "src/bailout-reason.h"
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "src/globals.h"
|
2017-09-13 10:56:20 +00:00
|
|
|
#include "src/ia32/assembler-ia32.h"
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-05-25 10:05:56 +00:00
|
|
|
namespace v8 {
|
|
|
|
namespace internal {
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2015-08-07 10:45:34 +00:00
|
|
|
// Give alias names to registers for calling conventions.
|
2017-09-01 14:29:13 +00:00
|
|
|
constexpr Register kReturnRegister0 = eax;
|
|
|
|
constexpr Register kReturnRegister1 = edx;
|
|
|
|
constexpr Register kReturnRegister2 = edi;
|
|
|
|
constexpr Register kJSFunctionRegister = edi;
|
|
|
|
constexpr Register kContextRegister = esi;
|
|
|
|
constexpr Register kAllocateSizeRegister = edx;
|
|
|
|
constexpr Register kInterpreterAccumulatorRegister = eax;
|
|
|
|
constexpr Register kInterpreterBytecodeOffsetRegister = ecx;
|
|
|
|
constexpr Register kInterpreterBytecodeArrayRegister = edi;
|
|
|
|
constexpr Register kInterpreterDispatchTableRegister = esi;
|
|
|
|
constexpr Register kJavaScriptCallArgCountRegister = eax;
|
|
|
|
constexpr Register kJavaScriptCallNewTargetRegister = edx;
|
|
|
|
constexpr Register kRuntimeCallFunctionRegister = ebx;
|
|
|
|
constexpr Register kRuntimeCallArgCountRegister = eax;
|
2015-08-07 10:45:34 +00:00
|
|
|
|
2010-01-05 11:29:27 +00:00
|
|
|
// Convenience for platform-independent signatures. We do not normally
|
|
|
|
// distinguish memory operands from other operands on ia32.
|
|
|
|
typedef Operand MemOperand;
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
|
|
|
|
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
|
|
|
|
|
2015-11-26 14:12:04 +00:00
|
|
|
enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
|
2012-12-28 11:09:16 +00:00
|
|
|
|
2016-03-07 14:33:54 +00:00
|
|
|
enum class ReturnAddressState { kOnStack, kNotOnStack };
|
|
|
|
|
2014-07-14 15:03:38 +00:00
|
|
|
#ifdef DEBUG
|
2015-11-26 14:12:04 +00:00
|
|
|
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
|
|
|
|
Register reg4 = no_reg, Register reg5 = no_reg,
|
|
|
|
Register reg6 = no_reg, Register reg7 = no_reg,
|
2014-07-14 15:03:38 +00:00
|
|
|
Register reg8 = no_reg);
|
|
|
|
#endif
|
2011-09-19 18:36:47 +00:00
|
|
|
|
2017-07-07 12:12:17 +00:00
|
|
|
class TurboAssembler : public Assembler {
|
|
|
|
public:
|
|
|
|
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
|
2017-09-13 10:56:20 +00:00
|
|
|
CodeObjectRequired create_code_object);
|
2017-07-07 12:12:17 +00:00
|
|
|
|
|
|
|
void set_has_frame(bool value) { has_frame_ = value; }
|
|
|
|
bool has_frame() const { return has_frame_; }
|
|
|
|
|
|
|
|
Isolate* isolate() const { return isolate_; }
|
|
|
|
|
|
|
|
Handle<HeapObject> CodeObject() {
|
|
|
|
DCHECK(!code_object_.is_null());
|
|
|
|
return code_object_;
|
|
|
|
}
|
|
|
|
|
|
|
|
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
|
|
|
|
Label* condition_met,
|
|
|
|
Label::Distance condition_met_distance = Label::kFar);
|
|
|
|
|
|
|
|
// Activation support.
|
|
|
|
void EnterFrame(StackFrame::Type type);
|
|
|
|
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
|
|
|
|
// Out-of-line constant pool not implemented on ia32.
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
void LeaveFrame(StackFrame::Type type);
|
|
|
|
|
|
|
|
// Print a message to stdout and abort execution.
|
|
|
|
void Abort(BailoutReason reason);
|
|
|
|
|
|
|
|
// Calls Abort(msg) if the condition cc is not satisfied.
|
|
|
|
// Use --debug_code to enable.
|
|
|
|
void Assert(Condition cc, BailoutReason reason);
|
|
|
|
|
2017-07-17 13:39:49 +00:00
|
|
|
// Like Assert(), but without condition.
|
|
|
|
// Use --debug_code to enable.
|
|
|
|
void AssertUnreachable(BailoutReason reason);
|
|
|
|
|
2017-07-07 12:12:17 +00:00
|
|
|
// Like Assert(), but always enabled.
|
|
|
|
void Check(Condition cc, BailoutReason reason);
|
|
|
|
|
|
|
|
// Check that the stack is aligned.
|
|
|
|
void CheckStackAlignment();
|
|
|
|
|
|
|
|
// Nop, because ia32 does not have a root register.
|
|
|
|
void InitializeRootRegister() {}
|
|
|
|
|
|
|
|
// Move a constant into a destination using the most efficient encoding.
|
|
|
|
void Move(Register dst, const Immediate& x);
|
|
|
|
|
|
|
|
void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
|
|
|
|
|
|
|
|
// Move if the registers are not identical.
|
|
|
|
void Move(Register target, Register source);
|
|
|
|
|
|
|
|
void Move(const Operand& dst, const Immediate& x);
|
|
|
|
|
|
|
|
// Move an immediate into an XMM register.
|
|
|
|
void Move(XMMRegister dst, uint32_t src);
|
|
|
|
void Move(XMMRegister dst, uint64_t src);
|
|
|
|
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
|
|
|
|
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
|
|
|
|
|
|
|
|
void Move(Register dst, Handle<HeapObject> handle);
|
|
|
|
|
|
|
|
void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
|
|
|
|
void Call(Label* target) { call(target); }
|
|
|
|
|
2017-08-21 18:19:52 +00:00
|
|
|
void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
|
|
|
|
call(target, rmode);
|
|
|
|
}
|
|
|
|
|
2017-07-07 12:12:17 +00:00
|
|
|
inline bool AllowThisStubCall(CodeStub* stub);
|
|
|
|
void CallStubDelayed(CodeStub* stub);
|
|
|
|
|
2017-07-17 13:39:49 +00:00
|
|
|
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
|
|
|
|
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
|
|
|
|
|
2017-07-07 12:12:17 +00:00
|
|
|
// Jump the register contains a smi.
|
|
|
|
inline void JumpIfSmi(Register value, Label* smi_label,
|
|
|
|
Label::Distance distance = Label::kFar) {
|
|
|
|
test(value, Immediate(kSmiTagMask));
|
|
|
|
j(zero, smi_label, distance);
|
|
|
|
}
|
|
|
|
// Jump if the operand is a smi.
|
|
|
|
inline void JumpIfSmi(Operand value, Label* smi_label,
|
|
|
|
Label::Distance distance = Label::kFar) {
|
|
|
|
test(value, Immediate(kSmiTagMask));
|
|
|
|
j(zero, smi_label, distance);
|
|
|
|
}
|
|
|
|
|
|
|
|
void SmiUntag(Register reg) { sar(reg, kSmiTagSize); }
|
|
|
|
|
|
|
|
// Removes current frame and its arguments from the stack preserving
|
|
|
|
// the arguments and a return address pushed to the stack for the next call.
|
|
|
|
// |ra_state| defines whether return address is already pushed to stack or
|
|
|
|
// not. Both |callee_args_count| and |caller_args_count_reg| do not include
|
|
|
|
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
|
|
|
|
// is trashed. |number_of_temp_values_after_return_address| specifies
|
|
|
|
// the number of words pushed to the stack after the return address. This is
|
|
|
|
// to allow "allocation" of scratch registers that this function requires
|
|
|
|
// by saving their values on the stack.
|
|
|
|
void PrepareForTailCall(const ParameterCount& callee_args_count,
|
|
|
|
Register caller_args_count_reg, Register scratch0,
|
|
|
|
Register scratch1, ReturnAddressState ra_state,
|
|
|
|
int number_of_temp_values_after_return_address);
|
|
|
|
|
|
|
|
// Before calling a C-function from generated code, align arguments on stack.
|
|
|
|
// After aligning the frame, arguments must be stored in esp[0], esp[4],
|
|
|
|
// etc., not pushed. The argument count assumes all arguments are word sized.
|
|
|
|
// Some compilers/platforms require the stack to be aligned when calling
|
|
|
|
// C++ code.
|
|
|
|
// Needs a scratch register to do some arithmetic. This register will be
|
|
|
|
// trashed.
|
|
|
|
void PrepareCallCFunction(int num_arguments, Register scratch);
|
|
|
|
|
|
|
|
// Calls a C function and cleans up the space for arguments allocated
|
|
|
|
// by PrepareCallCFunction. The called function is not allowed to trigger a
|
|
|
|
// garbage collection, since that might move the code and invalidate the
|
|
|
|
// return address (unless this is somehow accounted for by the called
|
|
|
|
// function).
|
|
|
|
void CallCFunction(ExternalReference function, int num_arguments);
|
|
|
|
void CallCFunction(Register function, int num_arguments);
|
|
|
|
|
|
|
|
void ShlPair(Register high, Register low, uint8_t imm8);
|
|
|
|
void ShlPair_cl(Register high, Register low);
|
|
|
|
void ShrPair(Register high, Register low, uint8_t imm8);
|
|
|
|
void ShrPair_cl(Register high, Register src);
|
|
|
|
void SarPair(Register high, Register low, uint8_t imm8);
|
|
|
|
void SarPair_cl(Register high, Register low);
|
|
|
|
|
|
|
|
// Generates function and stub prologue code.
|
|
|
|
void StubPrologue(StackFrame::Type type);
|
Reland "[Compiler] Remove code aging support."
> This reverts commit 42d3d36bc3b4e76cbdf883432dcc3647526fbf58.
>
> Original change's description:
> > [Compiler] Remove code aging support.
> >
> > Code aging is no longer supported by any remaining compilers now
> > that full codegen has been removed. This CL removes all vestiges of
> > code aging.
> >
> > BUG=v8:6409
> >
> > Change-Id: I945ebcc20c7c55120550c8ee36188bfa042ea65e
> > Reviewed-on: https://chromium-review.googlesource.com/619153
> > Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> > Reviewed-by: Yang Guo <yangguo@chromium.org>
> > Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> > Reviewed-by: Marja Hölttä <marja@chromium.org>
> > Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#47501}
>
> TBR=ulan@chromium.org,rmcilroy@chromium.org,marja@chromium.org,yangguo@chromium.org,mstarzinger@chromium.org,rodolph.perfetta@arm.com
>
> Change-Id: I9d8b2985e2d472697908270d93a35eb7ef9c88a8
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: v8:6409
> Reviewed-on: https://chromium-review.googlesource.com/625998
> Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
> Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#47506}
TBR=ulan@chromium.org,rmcilroy@chromium.org,marja@chromium.org,yangguo@chromium.org,mstarzinger@chromium.org,rodolph.perfetta@arm.com
Change-Id: I68785c6be7686e874b3848103e3a34483eaeb519
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:6409
Reviewed-on: https://chromium-review.googlesource.com/625919
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47535}
2017-08-23 08:22:33 +00:00
|
|
|
void Prologue();
|
2017-07-07 12:12:17 +00:00
|
|
|
|
|
|
|
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
|
|
|
|
void Lzcnt(Register dst, const Operand& src);
|
|
|
|
|
|
|
|
void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
|
|
|
|
void Tzcnt(Register dst, const Operand& src);
|
|
|
|
|
|
|
|
void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
|
|
|
|
void Popcnt(Register dst, const Operand& src);
|
|
|
|
|
|
|
|
void Ret();
|
|
|
|
|
|
|
|
// Return and drop arguments from stack, where the number of arguments
|
|
|
|
// may be bigger than 2^16 - 1. Requires a scratch register.
|
|
|
|
void Ret(int bytes_dropped, Register scratch);
|
|
|
|
|
|
|
|
void Pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
|
|
|
|
Pshuflw(dst, Operand(src), shuffle);
|
|
|
|
}
|
|
|
|
void Pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
|
|
|
|
void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
|
|
|
|
Pshufd(dst, Operand(src), shuffle);
|
|
|
|
}
|
|
|
|
void Pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
|
|
|
|
|
|
|
|
// SSE/SSE2 instructions with AVX version.
|
|
|
|
#define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
|
|
|
|
void macro_name(dst_type dst, src_type src) { \
|
|
|
|
if (CpuFeatures::IsSupported(AVX)) { \
|
|
|
|
CpuFeatureScope scope(this, AVX); \
|
|
|
|
v##name(dst, src); \
|
|
|
|
} else { \
|
|
|
|
name(dst, src); \
|
|
|
|
} \
|
|
|
|
}
|
|
|
|
|
|
|
|
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register)
|
|
|
|
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, const Operand&)
|
|
|
|
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
|
|
|
|
AVX_OP2_WITH_TYPE(Movd, movd, const Operand&, XMMRegister)
|
|
|
|
|
|
|
|
#undef AVX_OP2_WITH_TYPE
|
|
|
|
|
2017-08-16 03:16:15 +00:00
|
|
|
// Only use these macros when non-destructive source of AVX version is not
|
|
|
|
// needed.
|
|
|
|
#define AVX_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \
|
|
|
|
void macro_name(dst_type dst, src_type src) { \
|
|
|
|
if (CpuFeatures::IsSupported(AVX)) { \
|
|
|
|
CpuFeatureScope scope(this, AVX); \
|
|
|
|
v##name(dst, dst, src); \
|
|
|
|
} else { \
|
|
|
|
name(dst, src); \
|
|
|
|
} \
|
|
|
|
}
|
|
|
|
#define AVX_OP3_XO(macro_name, name) \
|
|
|
|
AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
|
|
|
|
AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, const Operand&)
|
|
|
|
|
|
|
|
AVX_OP3_XO(Pcmpeqd, pcmpeqd)
|
|
|
|
AVX_OP3_XO(Psubd, psubd)
|
|
|
|
AVX_OP3_XO(Pxor, pxor)
|
|
|
|
|
|
|
|
#undef AVX_OP3_XO
|
|
|
|
#undef AVX_OP3_WITH_TYPE
|
|
|
|
|
2017-07-07 12:12:17 +00:00
|
|
|
// Non-SSE2 instructions.
|
|
|
|
void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
|
|
|
|
void Pshufb(XMMRegister dst, const Operand& src);
|
2017-08-16 03:16:15 +00:00
|
|
|
void Psignd(XMMRegister dst, XMMRegister src) { Psignd(dst, Operand(src)); }
|
|
|
|
void Psignd(XMMRegister dst, const Operand& src);
|
2017-07-07 12:12:17 +00:00
|
|
|
|
|
|
|
void Pextrb(Register dst, XMMRegister src, int8_t imm8);
|
|
|
|
void Pextrw(Register dst, XMMRegister src, int8_t imm8);
|
|
|
|
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
|
|
|
|
void Pinsrd(XMMRegister dst, Register src, int8_t imm8,
|
|
|
|
bool is_64_bits = false) {
|
|
|
|
Pinsrd(dst, Operand(src), imm8, is_64_bits);
|
|
|
|
}
|
|
|
|
void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
|
|
|
|
bool is_64_bits = false);
|
|
|
|
|
|
|
|
void LoadUint32(XMMRegister dst, Register src) {
|
|
|
|
LoadUint32(dst, Operand(src));
|
|
|
|
}
|
|
|
|
void LoadUint32(XMMRegister dst, const Operand& src);
|
|
|
|
|
|
|
|
// Expression support
|
|
|
|
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
|
|
|
|
// hinders register renaming and makes dependence chains longer. So we use
|
|
|
|
// xorps to clear the dst register before cvtsi2sd to solve this issue.
|
|
|
|
void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
|
|
|
|
void Cvtsi2sd(XMMRegister dst, const Operand& src);
|
|
|
|
|
|
|
|
void Cvtui2ss(XMMRegister dst, Register src, Register tmp);
|
|
|
|
|
|
|
|
void SlowTruncateToIDelayed(Zone* zone, Register result_reg,
|
|
|
|
Register input_reg,
|
|
|
|
int offset = HeapNumber::kValueOffset -
|
|
|
|
kHeapObjectTag);
|
|
|
|
|
|
|
|
void Push(Register src) { push(src); }
|
|
|
|
void Push(const Operand& src) { push(src); }
|
|
|
|
void Push(Immediate value) { push(value); }
|
|
|
|
void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
|
|
|
|
void Push(Smi* smi) { Push(Immediate(smi)); }
|
2017-09-13 12:21:06 +00:00
|
|
|
|
|
|
|
void SaveRegisters(RegList registers);
|
|
|
|
void RestoreRegisters(RegList registers);
|
|
|
|
|
|
|
|
void CallRecordWriteStub(Register object, Register address,
|
|
|
|
RememberedSetAction remembered_set_action,
|
|
|
|
SaveFPRegsMode fp_mode);
|
2017-07-07 12:12:17 +00:00
|
|
|
|
2017-09-04 12:05:33 +00:00
|
|
|
// Calculate how much stack space (in bytes) are required to store caller
|
|
|
|
// registers excluding those specified in the arguments.
|
|
|
|
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
|
|
|
|
Register exclusion1 = no_reg,
|
|
|
|
Register exclusion2 = no_reg,
|
|
|
|
Register exclusion3 = no_reg) const;
|
|
|
|
|
|
|
|
// PushCallerSaved and PopCallerSaved do not arrange the registers in any
|
|
|
|
// particular order so they are not useful for calls that can cause a GC.
|
|
|
|
// The caller can exclude up to 3 registers that do not need to be saved and
|
|
|
|
// restored.
|
|
|
|
|
|
|
|
// Push caller saved registers on the stack, and return the number of bytes
|
|
|
|
// stack pointer is adjusted.
|
|
|
|
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
|
2017-08-21 15:23:17 +00:00
|
|
|
Register exclusion2 = no_reg,
|
|
|
|
Register exclusion3 = no_reg);
|
2017-09-04 12:05:33 +00:00
|
|
|
// Restore caller saved registers from the stack, and return the number of
|
|
|
|
// bytes stack pointer is adjusted.
|
|
|
|
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
|
|
|
|
Register exclusion2 = no_reg,
|
|
|
|
Register exclusion3 = no_reg);
|
2017-08-21 15:23:17 +00:00
|
|
|
|
2017-07-07 12:12:17 +00:00
|
|
|
private:
|
|
|
|
bool has_frame_ = false;
|
|
|
|
Isolate* const isolate_;
|
|
|
|
// This handle will be patched with the code object on installation.
|
|
|
|
Handle<HeapObject> code_object_;
|
|
|
|
};
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// MacroAssembler implements a collection of frequently used macros.
|
2017-07-07 12:12:17 +00:00
|
|
|
class MacroAssembler : public TurboAssembler {
|
2008-07-03 15:10:15 +00:00
|
|
|
public:
|
2015-11-25 14:23:37 +00:00
|
|
|
MacroAssembler(Isolate* isolate, void* buffer, int size,
|
|
|
|
CodeObjectRequired create_code_object);
|
2013-11-08 17:35:58 +00:00
|
|
|
|
[builtins] Unify the various versions of [[Call]] with a Call builtin.
The new Call and CallFunction builtins supersede the current
CallFunctionStub (and CallIC magic) and will be the single bottleneck
for all calling, including the currently special Function.prototype.call
and Function.prototype.apply builtins, which had handwritten (and
not fully compliant) versions of CallFunctionStub, and also the
CallIC(s), which where also slightly different.
This also reduces the overhead for API function calls, which is still
unnecessary high, but let's do that step-by-step.
This also fixes a bunch of cases where the implicit ToObject for
sloppy receivers was done in the wrong context (in the caller
context instead of the callee context), which basically meant
that we allowed cross context access to %ObjectPrototype%.
MIPS and MIPS64 ports contributed by akos.palfi@imgtec.com.
R=mstarzinger@chromium.org, jarin@chromium.org, mvstanton@chromium.org
CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux_layout_dbg,v8_linux_nosnap_dbg
BUG=v8:4413
LOG=n
Committed: https://crrev.com/ef268a83be4dead004047c25b702319ea4be7277
Cr-Commit-Position: refs/heads/master@{#30627}
Review URL: https://codereview.chromium.org/1311013008
Cr-Commit-Position: refs/heads/master@{#30629}
2015-09-08 07:50:22 +00:00
|
|
|
// Load a register with a long value as efficiently as possible.
|
|
|
|
void Set(Register dst, int32_t x) {
|
|
|
|
if (x == 0) {
|
|
|
|
xor_(dst, dst);
|
|
|
|
} else {
|
|
|
|
mov(dst, Immediate(x));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
void Set(const Operand& dst, int32_t x) { mov(dst, Immediate(x)); }
|
|
|
|
|
2013-08-09 13:43:46 +00:00
|
|
|
// Operations on roots in the root-array.
|
|
|
|
void LoadRoot(Register destination, Heap::RootListIndex index);
|
|
|
|
void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
|
|
|
|
// These methods can only be used with constant roots (i.e. non-writable
|
|
|
|
// and not in new space).
|
|
|
|
void CompareRoot(Register with, Heap::RootListIndex index);
|
|
|
|
void CompareRoot(const Operand& with, Heap::RootListIndex index);
|
[builtins] Unify the various versions of [[Call]] with a Call builtin.
The new Call and CallFunction builtins supersede the current
CallFunctionStub (and CallIC magic) and will be the single bottleneck
for all calling, including the currently special Function.prototype.call
and Function.prototype.apply builtins, which had handwritten (and
not fully compliant) versions of CallFunctionStub, and also the
CallIC(s), which where also slightly different.
This also reduces the overhead for API function calls, which is still
unnecessary high, but let's do that step-by-step.
This also fixes a bunch of cases where the implicit ToObject for
sloppy receivers was done in the wrong context (in the caller
context instead of the callee context), which basically meant
that we allowed cross context access to %ObjectPrototype%.
MIPS and MIPS64 ports contributed by akos.palfi@imgtec.com.
R=mstarzinger@chromium.org, jarin@chromium.org, mvstanton@chromium.org
CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux_layout_dbg,v8_linux_nosnap_dbg
BUG=v8:4413
LOG=n
Committed: https://crrev.com/ef268a83be4dead004047c25b702319ea4be7277
Cr-Commit-Position: refs/heads/master@{#30627}
Review URL: https://codereview.chromium.org/1311013008
Cr-Commit-Position: refs/heads/master@{#30629}
2015-09-08 07:50:22 +00:00
|
|
|
void PushRoot(Heap::RootListIndex index);
|
|
|
|
|
|
|
|
// Compare the object in a register to a value and jump if they are equal.
|
|
|
|
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
|
2015-12-01 12:23:16 +00:00
|
|
|
Label::Distance if_equal_distance = Label::kFar) {
|
[builtins] Unify the various versions of [[Call]] with a Call builtin.
The new Call and CallFunction builtins supersede the current
CallFunctionStub (and CallIC magic) and will be the single bottleneck
for all calling, including the currently special Function.prototype.call
and Function.prototype.apply builtins, which had handwritten (and
not fully compliant) versions of CallFunctionStub, and also the
CallIC(s), which where also slightly different.
This also reduces the overhead for API function calls, which is still
unnecessary high, but let's do that step-by-step.
This also fixes a bunch of cases where the implicit ToObject for
sloppy receivers was done in the wrong context (in the caller
context instead of the callee context), which basically meant
that we allowed cross context access to %ObjectPrototype%.
MIPS and MIPS64 ports contributed by akos.palfi@imgtec.com.
R=mstarzinger@chromium.org, jarin@chromium.org, mvstanton@chromium.org
CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux_layout_dbg,v8_linux_nosnap_dbg
BUG=v8:4413
LOG=n
Committed: https://crrev.com/ef268a83be4dead004047c25b702319ea4be7277
Cr-Commit-Position: refs/heads/master@{#30627}
Review URL: https://codereview.chromium.org/1311013008
Cr-Commit-Position: refs/heads/master@{#30629}
2015-09-08 07:50:22 +00:00
|
|
|
CompareRoot(with, index);
|
|
|
|
j(equal, if_equal, if_equal_distance);
|
|
|
|
}
|
2015-11-30 13:23:04 +00:00
|
|
|
void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
|
|
|
|
Label* if_equal,
|
2015-12-01 12:23:16 +00:00
|
|
|
Label::Distance if_equal_distance = Label::kFar) {
|
2015-11-30 13:23:04 +00:00
|
|
|
CompareRoot(with, index);
|
|
|
|
j(equal, if_equal, if_equal_distance);
|
|
|
|
}
|
[builtins] Unify the various versions of [[Call]] with a Call builtin.
The new Call and CallFunction builtins supersede the current
CallFunctionStub (and CallIC magic) and will be the single bottleneck
for all calling, including the currently special Function.prototype.call
and Function.prototype.apply builtins, which had handwritten (and
not fully compliant) versions of CallFunctionStub, and also the
CallIC(s), which where also slightly different.
This also reduces the overhead for API function calls, which is still
unnecessary high, but let's do that step-by-step.
This also fixes a bunch of cases where the implicit ToObject for
sloppy receivers was done in the wrong context (in the caller
context instead of the callee context), which basically meant
that we allowed cross context access to %ObjectPrototype%.
MIPS and MIPS64 ports contributed by akos.palfi@imgtec.com.
R=mstarzinger@chromium.org, jarin@chromium.org, mvstanton@chromium.org
CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux_layout_dbg,v8_linux_nosnap_dbg
BUG=v8:4413
LOG=n
Committed: https://crrev.com/ef268a83be4dead004047c25b702319ea4be7277
Cr-Commit-Position: refs/heads/master@{#30627}
Review URL: https://codereview.chromium.org/1311013008
Cr-Commit-Position: refs/heads/master@{#30629}
2015-09-08 07:50:22 +00:00
|
|
|
|
|
|
|
// Compare the object in a register to a value and jump if they are not equal.
|
|
|
|
void JumpIfNotRoot(Register with, Heap::RootListIndex index,
|
|
|
|
Label* if_not_equal,
|
2015-12-01 12:23:16 +00:00
|
|
|
Label::Distance if_not_equal_distance = Label::kFar) {
|
[builtins] Unify the various versions of [[Call]] with a Call builtin.
The new Call and CallFunction builtins supersede the current
CallFunctionStub (and CallIC magic) and will be the single bottleneck
for all calling, including the currently special Function.prototype.call
and Function.prototype.apply builtins, which had handwritten (and
not fully compliant) versions of CallFunctionStub, and also the
CallIC(s), which where also slightly different.
This also reduces the overhead for API function calls, which is still
unnecessary high, but let's do that step-by-step.
This also fixes a bunch of cases where the implicit ToObject for
sloppy receivers was done in the wrong context (in the caller
context instead of the callee context), which basically meant
that we allowed cross context access to %ObjectPrototype%.
MIPS and MIPS64 ports contributed by akos.palfi@imgtec.com.
R=mstarzinger@chromium.org, jarin@chromium.org, mvstanton@chromium.org
CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux_layout_dbg,v8_linux_nosnap_dbg
BUG=v8:4413
LOG=n
Committed: https://crrev.com/ef268a83be4dead004047c25b702319ea4be7277
Cr-Commit-Position: refs/heads/master@{#30627}
Review URL: https://codereview.chromium.org/1311013008
Cr-Commit-Position: refs/heads/master@{#30629}
2015-09-08 07:50:22 +00:00
|
|
|
CompareRoot(with, index);
|
|
|
|
j(not_equal, if_not_equal, if_not_equal_distance);
|
|
|
|
}
|
2015-11-30 13:23:04 +00:00
|
|
|
void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
|
|
|
|
Label* if_not_equal,
|
2015-12-01 12:23:16 +00:00
|
|
|
Label::Distance if_not_equal_distance = Label::kFar) {
|
2015-11-30 13:23:04 +00:00
|
|
|
CompareRoot(with, index);
|
|
|
|
j(not_equal, if_not_equal, if_not_equal_distance);
|
|
|
|
}
|
2013-08-09 13:43:46 +00:00
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// GC Support
|
2011-09-19 18:36:47 +00:00
|
|
|
// Record in the remembered set the fact that we have a pointer to new space
|
|
|
|
// at the address pointed to by the addr register. Only works if addr is not
|
|
|
|
// in new space.
|
2011-09-20 13:32:27 +00:00
|
|
|
void RememberedSetHelper(Register object, // Used for debug code.
|
2015-11-26 14:12:04 +00:00
|
|
|
Register addr, Register scratch,
|
2017-10-09 14:03:27 +00:00
|
|
|
SaveFPRegsMode save_fp);
|
2011-09-19 18:36:47 +00:00
|
|
|
|
|
|
|
// Check if object is in new space. Jumps if the object is not in new space.
|
2011-09-20 13:32:27 +00:00
|
|
|
// The register scratch can be object itself, but scratch will be clobbered.
|
2015-11-26 14:12:04 +00:00
|
|
|
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch,
|
2011-09-19 18:36:47 +00:00
|
|
|
Label::Distance distance = Label::kFar) {
|
|
|
|
InNewSpace(object, scratch, zero, branch, distance);
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
// Check if object is in new space. Jumps if the object is in new space.
|
|
|
|
// The register scratch can be object itself, but it will be clobbered.
|
2015-11-26 14:12:04 +00:00
|
|
|
void JumpIfInNewSpace(Register object, Register scratch, Label* branch,
|
2011-09-19 18:36:47 +00:00
|
|
|
Label::Distance distance = Label::kFar) {
|
|
|
|
InNewSpace(object, scratch, not_zero, branch, distance);
|
|
|
|
}
|
2010-03-11 16:24:31 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
// Check if an object has a given incremental marking color. Also uses ecx!
|
2015-11-26 14:12:04 +00:00
|
|
|
void HasColor(Register object, Register scratch0, Register scratch1,
|
|
|
|
Label* has_color, Label::Distance has_color_distance,
|
|
|
|
int first_bit, int second_bit);
|
|
|
|
|
|
|
|
void JumpIfBlack(Register object, Register scratch0, Register scratch1,
|
2011-09-19 18:36:47 +00:00
|
|
|
Label* on_black,
|
|
|
|
Label::Distance on_black_distance = Label::kFar);
|
|
|
|
|
2015-12-23 12:51:59 +00:00
|
|
|
// Checks the color of an object. If the object is white we jump to the
|
|
|
|
// incremental marker.
|
|
|
|
void JumpIfWhite(Register value, Register scratch1, Register scratch2,
|
|
|
|
Label* value_is_white, Label::Distance distance);
|
2011-09-19 18:36:47 +00:00
|
|
|
|
|
|
|
// Notify the garbage collector that we wrote a pointer into an object.
|
|
|
|
// |object| is the object being stored into, |value| is the object being
|
|
|
|
// stored. value and scratch registers are clobbered by the operation.
|
|
|
|
// The offset is the offset from the start of the object, not the offset from
|
|
|
|
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
|
|
|
|
void RecordWriteField(
|
2015-11-26 14:12:04 +00:00
|
|
|
Register object, int offset, Register value, Register scratch,
|
2011-09-19 18:36:47 +00:00
|
|
|
SaveFPRegsMode save_fp,
|
|
|
|
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
|
2017-10-09 14:08:19 +00:00
|
|
|
SmiCheck smi_check = INLINE_SMI_CHECK);
|
2011-09-19 18:36:47 +00:00
|
|
|
|
|
|
|
// Notify the garbage collector that we wrote a pointer into a fixed array.
|
|
|
|
// |array| is the array being stored into, |value| is the
|
|
|
|
// object being stored. |index| is the array index represented as a
|
|
|
|
// Smi. All registers are clobbered by the operation RecordWriteArray
|
2010-06-30 12:27:49 +00:00
|
|
|
// filters out smis so it does not update the write barrier if the
|
|
|
|
// value is a smi.
|
2011-09-19 18:36:47 +00:00
|
|
|
void RecordWriteArray(
|
2015-11-26 14:12:04 +00:00
|
|
|
Register array, Register value, Register index, SaveFPRegsMode save_fp,
|
2011-09-19 18:36:47 +00:00
|
|
|
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
|
2017-10-09 14:08:19 +00:00
|
|
|
SmiCheck smi_check = INLINE_SMI_CHECK);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-06-30 12:27:49 +00:00
|
|
|
// For page containing |object| mark region covering |address|
|
|
|
|
// dirty. |object| is the object being stored into, |value| is the
|
2011-09-20 13:32:27 +00:00
|
|
|
// object being stored. The address and value registers are clobbered by the
|
2010-06-30 12:27:49 +00:00
|
|
|
// operation. RecordWrite filters out smis so it does not update the
|
|
|
|
// write barrier if the value is a smi.
|
2011-09-19 18:36:47 +00:00
|
|
|
void RecordWrite(
|
2015-11-26 14:12:04 +00:00
|
|
|
Register object, Register address, Register value, SaveFPRegsMode save_fp,
|
2011-09-19 18:36:47 +00:00
|
|
|
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
|
2017-10-09 14:08:19 +00:00
|
|
|
SmiCheck smi_check = INLINE_SMI_CHECK);
|
2010-06-30 12:27:49 +00:00
|
|
|
|
2017-02-22 08:45:09 +00:00
|
|
|
// Frame restart support
|
2017-01-27 07:31:03 +00:00
|
|
|
void MaybeDropFrames();
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
// esi.
void EnterExitFrame(int argc, bool save_doubles, StackFrame::Type frame_type);

// Enter an exit frame for calling out through the C++ API.
// NOTE(review): frame layout differs from EnterExitFrame — see the
// implementation for details.
void EnterApiExitFrame(int argc);
|
2008-09-23 08:19:26 +00:00
|
|
|
|
|
|
|
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
// argument in register esi (if pop_arguments == true).
void LeaveExitFrame(bool save_doubles, bool pop_arguments = true);

// Leave the current (API) exit frame. Expects the return value in
// register eax (untouched).
void LeaveApiExitFrame(bool restore_context);
|
2010-11-16 15:04:41 +00:00
|
|
|
|
[builtins] Unify the various versions of [[Call]] with a Call builtin.
The new Call and CallFunction builtins supersede the current
CallFunctionStub (and CallIC magic) and will be the single bottleneck
for all calling, including the currently special Function.prototype.call
and Function.prototype.apply builtins, which had handwritten (and
not fully compliant) versions of CallFunctionStub, and also the
CallIC(s), which where also slightly different.
This also reduces the overhead for API function calls, which is still
unnecessary high, but let's do that step-by-step.
This also fixes a bunch of cases where the implicit ToObject for
sloppy receivers was done in the wrong context (in the caller
context instead of the callee context), which basically meant
that we allowed cross context access to %ObjectPrototype%.
MIPS and MIPS64 ports contributed by akos.palfi@imgtec.com.
R=mstarzinger@chromium.org, jarin@chromium.org, mvstanton@chromium.org
CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux_layout_dbg,v8_linux_nosnap_dbg
BUG=v8:4413
LOG=n
Committed: https://crrev.com/ef268a83be4dead004047c25b702319ea4be7277
Cr-Commit-Position: refs/heads/master@{#30627}
Review URL: https://codereview.chromium.org/1311013008
Cr-Commit-Position: refs/heads/master@{#30629}
2015-09-08 07:50:22 +00:00
|
|
|
// Load the global proxy from the current context into |dst|.
void LoadGlobalProxy(Register dst);

// Load the global function with the given index into |function|.
void LoadGlobalFunction(int index, Register function);
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
// Push and pop the registers that can hold pointers.
// pushad/popad save/restore all eight ia32 general-purpose registers.
void PushSafepointRegisters() { pushad(); }
void PopSafepointRegisters() { popad(); }
|
|
|
|
|
2015-01-30 14:31:15 +00:00
|
|
|
// Load the value of the weak cell |cell| into |value|.
void GetWeakValue(Register value, Handle<WeakCell> cell);

// Load the value of the weak cell in the value register. Branch to the given
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// ---------------------------------------------------------------------------
// JavaScript invokes

// Invoke the JavaScript function code by either calling or jumping,
// depending on |flag|.
void InvokeFunctionCode(Register function, Register new_target,
                        const ParameterCount& expected,
                        const ParameterCount& actual, InvokeFlag flag);

// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
                    const ParameterCount& expected,
                    const ParameterCount& actual);
|
2015-12-04 13:25:58 +00:00
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function, Register new_target,
                    const ParameterCount& actual, InvokeFlag flag);

// As above, but with an explicit expected parameter count and no
// new.target register.
void InvokeFunction(Register function, const ParameterCount& expected,
                    const ParameterCount& actual, InvokeFlag flag);

// As above, but the function is given as a handle rather than a register.
void InvokeFunction(Handle<JSFunction> function,
                    const ParameterCount& expected,
                    const ParameterCount& actual, InvokeFlag flag);
|
2010-02-15 12:32:27 +00:00
|
|
|
|
2009-03-09 14:00:51 +00:00
|
|
|
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);

// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
|
|
|
|
|
2013-09-02 09:30:54 +00:00
|
|
|
// Convert the double in |input_reg| to an integer in |result_reg|,
// branching to |lost_precision|, |is_nan| or |minus_zero| when the value
// cannot be represented exactly (treatment of -0 controlled by
// |minus_zero_mode|).  |scratch| is clobbered.
void DoubleToI(Register result_reg, XMMRegister input_reg,
               XMMRegister scratch, MinusZeroMode minus_zero_mode,
               Label* lost_precision, Label* is_nan, Label* minus_zero,
               Label::Distance dst = Label::kFar);
|
2013-09-02 09:30:54 +00:00
|
|
|
|
2009-12-18 11:13:33 +00:00
|
|
|
// Smi tagging support.
void SmiTag(Register reg) {
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  // Doubling the value shifts in the (zero) tag bit.
  add(reg, reg);
}
|
|
|
|
|
2010-08-09 13:12:02 +00:00
|
|
|
// Modifies the register even if it does not contain a Smi!
void UntagSmi(Register reg, Label* is_smi) {
  STATIC_ASSERT(kSmiTagSize == 1);
  // The tag bit is shifted out into the carry flag.
  sar(reg, kSmiTagSize);
  STATIC_ASSERT(kSmiTag == 0);
  // Carry clear => the tag bit was 0 => the value was a smi.
  j(not_carry, is_smi);
}
|
|
|
|
|
2011-02-03 12:50:50 +00:00
|
|
|
// Jump if register contains a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label,
                         Label::Distance distance = Label::kFar) {
  test(value, Immediate(kSmiTagMask));
  j(not_zero, not_smi_label, distance);
}
|
2017-01-23 10:25:02 +00:00
|
|
|
// Jump if the operand is not a smi.
|
|
|
|
inline void JumpIfNotSmi(Operand value, Label* smi_label,
|
|
|
|
Label::Distance distance = Label::kFar) {
|
|
|
|
test(value, Immediate(kSmiTagMask));
|
|
|
|
j(not_zero, smi_label, distance);
|
|
|
|
}
|
2016-04-13 18:21:30 +00:00
|
|
|
|
2011-05-23 15:59:38 +00:00
|
|
|
// Load the instance descriptors of |map| into |descriptors|.
void LoadInstanceDescriptors(Register map, Register descriptors);

// Load the getter or setter (per |accessor|) at |accessor_index| of
// |holder| into |dst|.
void LoadAccessor(Register dst, Register holder, int accessor_index,
                  AccessorComponent accessor);
|
2012-08-28 14:20:50 +00:00
|
|
|
|
|
|
|
template<typename Field>
|
|
|
|
void DecodeField(Register reg) {
|
Sharing of descriptor arrays.
This CL adds multiple things:
Transition arrays do not directly point at their descriptor array anymore, but rather do so via an indirect pointer (a JSGlobalPropertyCell).
An ownership bit is added to maps indicating whether it owns its own descriptor array or not.
Maps owning a descriptor array can pass on ownership if a transition from that map is generated; but only if the descriptor array stays exactly the same; or if a descriptor is added.
Maps that don't have ownership get ownership back if their direct child to which ownership was passed is cleared in ClearNonLiveTransitions.
To detect which descriptors in an array are valid, each map knows its own NumberOfOwnDescriptors. Since the descriptors are sorted in order of addition, if we search and find a descriptor with index bigger than this number, it is not valid for the given map.
We currently still build up an enumeration cache (although this may disappear). The enumeration cache is always built for the entire descriptor array, even if not all descriptors are owned by the map. Once a descriptor array has an enumeration cache for a given map; this invariant will always be true, even if the descriptor array was extended. The extended array will inherit the enumeration cache from the smaller descriptor array. If a map with more descriptors needs an enumeration cache, it's EnumLength will still be set to invalid, so it will have to recompute the enumeration cache. This new cache will also be valid for smaller maps since they have their own enumlength; and use this to loop over the cache. If the EnumLength is still invalid, but there is already a cache present that is big enough; we just initialize the EnumLength field for the map.
When we apply ClearNonLiveTransitions and descriptor ownership is passed back to a parent map, the descriptor array is trimmed in-place and resorted. At the same time, the enumeration cache is trimmed in-place.
Only transition arrays contain descriptor arrays. If we transition to a map and pass ownership of the descriptor array along, the child map will not store the descriptor array it owns. Rather its parent will keep the pointer. So for every leaf-map, we find the descriptor array by following the back pointer, reading out the transition array, and fetching the descriptor array from the JSGlobalPropertyCell. If a map has a transition array, we fetch it from there. If a map has undefined as its back-pointer and has no transition array; it is considered to have an empty descriptor array.
When we modify properties, we cannot share the descriptor array. To accommodate this, the child map will get its own transition array; even if there are not necessarily any transitions leaving from the child map. This is necessary since it's the only way to store its own descriptor array.
Review URL: https://chromiumcodereview.appspot.com/10909007
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12492 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2012-09-12 16:43:57 +00:00
|
|
|
static const int shift = Field::kShift;
|
2014-05-09 16:18:58 +00:00
|
|
|
static const int mask = Field::kMask >> Field::kShift;
|
2014-05-27 14:55:29 +00:00
|
|
|
if (shift != 0) {
|
|
|
|
sar(reg, shift);
|
|
|
|
}
|
Sharing of descriptor arrays.
This CL adds multiple things:
Transition arrays do not directly point at their descriptor array anymore, but rather do so via an indirect pointer (a JSGlobalPropertyCell).
An ownership bit is added to maps indicating whether it owns its own descriptor array or not.
Maps owning a descriptor array can pass on ownership if a transition from that map is generated; but only if the descriptor array stays exactly the same; or if a descriptor is added.
Maps that don't have ownership get ownership back if their direct child to which ownership was passed is cleared in ClearNonLiveTransitions.
To detect which descriptors in an array are valid, each map knows its own NumberOfOwnDescriptors. Since the descriptors are sorted in order of addition, if we search and find a descriptor with index bigger than this number, it is not valid for the given map.
We currently still build up an enumeration cache (although this may disappear). The enumeration cache is always built for the entire descriptor array, even if not all descriptors are owned by the map. Once a descriptor array has an enumeration cache for a given map; this invariant will always be true, even if the descriptor array was extended. The extended array will inherit the enumeration cache from the smaller descriptor array. If a map with more descriptors needs an enumeration cache, it's EnumLength will still be set to invalid, so it will have to recompute the enumeration cache. This new cache will also be valid for smaller maps since they have their own enumlength; and use this to loop over the cache. If the EnumLength is still invalid, but there is already a cache present that is big enough; we just initialize the EnumLength field for the map.
When we apply ClearNonLiveTransitions and descriptor ownership is passed back to a parent map, the descriptor array is trimmed in-place and resorted. At the same time, the enumeration cache is trimmed in-place.
Only transition arrays contain descriptor arrays. If we transition to a map and pass ownership of the descriptor array along, the child map will not store the descriptor array it owns. Rather its parent will keep the pointer. So for every leaf-map, we find the descriptor array by following the back pointer, reading out the transition array, and fetching the descriptor array from the JSGlobalPropertyCell. If a map has a transition array, we fetch it from there. If a map has undefined as its back-pointer and has no transition array; it is considered to have an empty descriptor array.
When we modify properties, we cannot share the descriptor array. To accommodate this, the child map will get its own transition array; even if there are not necessarily any transitions leaving from the child map. This is necessary since it's the only way to store its own descriptor array.
Review URL: https://chromiumcodereview.appspot.com/10909007
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12492 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2012-09-12 16:43:57 +00:00
|
|
|
and_(reg, Immediate(mask));
|
2012-08-28 14:20:50 +00:00
|
|
|
}
|
2014-05-27 14:55:29 +00:00
|
|
|
|
2012-10-12 11:09:14 +00:00
|
|
|
// Abort execution if argument is not a smi, enabled via --debug-code.
void AssertSmi(Register object);

// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);

// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);
|
|
|
|
|
[builtins] Unify the various versions of [[Call]] with a Call builtin.
The new Call and CallFunction builtins supersede the current
CallFunctionStub (and CallIC magic) and will be the single bottleneck
for all calling, including the currently special Function.prototype.call
and Function.prototype.apply builtins, which had handwritten (and
not fully compliant) versions of CallFunctionStub, and also the
CallIC(s), which where also slightly different.
This also reduces the overhead for API function calls, which is still
unnecessary high, but let's do that step-by-step.
This also fixes a bunch of cases where the implicit ToObject for
sloppy receivers was done in the wrong context (in the caller
context instead of the callee context), which basically meant
that we allowed cross context access to %ObjectPrototype%.
MIPS and MIPS64 ports contributed by akos.palfi@imgtec.com.
R=mstarzinger@chromium.org, jarin@chromium.org, mvstanton@chromium.org
CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux_layout_dbg,v8_linux_nosnap_dbg
BUG=v8:4413
LOG=n
Committed: https://crrev.com/ef268a83be4dead004047c25b702319ea4be7277
Cr-Commit-Position: refs/heads/master@{#30627}
Review URL: https://codereview.chromium.org/1311013008
Cr-Commit-Position: refs/heads/master@{#30629}
2015-09-08 07:50:22 +00:00
|
|
|
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);

// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);

// Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
void AssertGeneratorObject(Register object);

// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// ---------------------------------------------------------------------------
// Exception handling

// Push a new stack handler and link it into the stack handler chain.
void PushStackHandler();

// Unlink the stack handler on top of the stack from the stack handler chain.
void PopStackHandler();
|
2009-12-10 14:06:08 +00:00
|
|
|
|
2017-08-23 02:08:34 +00:00
|
|
|
// ---------------------------------------------------------------------------
// Support functions.

// Machine code version of Map::GetConstructor().
// |temp| holds |result|'s map when done.
void GetMapConstructor(Register result, Register map, Register temp);
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// ---------------------------------------------------------------------------
// Runtime calls

// Call a code stub. Generate the code if necessary.
void CallStub(CodeStub* stub);

// Tail call a code stub (jump). Generate the code if necessary.
void TailCallStub(CodeStub* stub);
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// Call a runtime routine with |num_arguments| arguments.
void CallRuntime(const Runtime::Function* f, int num_arguments,
                 SaveFPRegsMode save_doubles = kDontSaveFPRegs);

// Convenience function: Same as above, but takes the fid instead; the
// argument count is read from the runtime function's descriptor.
void CallRuntime(Runtime::FunctionId fid,
                 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  CallRuntime(function, function->nargs, save_doubles);
}

// Convenience function: Same as above, but takes the fid instead and an
// explicit argument count.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-02-24 08:33:51 +00:00
|
|
|
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);

// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext,
                             bool builtin_exit_frame = false);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2008-09-23 08:19:26 +00:00
|
|
|
// ---------------------------------------------------------------------------
// Utilities

// Emit code that loads the |parameter_index|'th parameter from the stack
// into the register according to the CallInterfaceDescriptor definition.
// |sp_to_ra_offset_in_words| specifies the number of words pushed below the
// caller's sp (on ia32 it's at least the return address, hence default 1).
template <class Descriptor>
void LoadParameterFromStack(
    Register reg, typename Descriptor::ParameterIndices parameter_index,
    int sp_to_ra_offset_in_words = 1) {
  DCHECK(Descriptor::kPassLastArgsOnStack);
  // Only parameters that are actually passed on the stack may be loaded:
  // the last kStackArgumentsCount of the descriptor's parameters.
  DCHECK_LT(parameter_index, Descriptor::kParameterCount);
  DCHECK_LE(Descriptor::kParameterCount - Descriptor::kStackArgumentsCount,
            parameter_index);
  // The last parameter sits closest to esp (just above the return address);
  // earlier parameters are further away.
  int offset = (Descriptor::kParameterCount - parameter_index - 1 +
                sp_to_ra_offset_in_words) *
               kPointerSize;
  mov(reg, Operand(esp, offset));
}
|
|
|
|
|
2010-01-12 08:48:26 +00:00
|
|
|
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the esp register.
void Drop(int element_count);
|
|
|
|
|
[builtins] Unify the various versions of [[Call]] with a Call builtin.
The new Call and CallFunction builtins supersede the current
CallFunctionStub (and CallIC magic) and will be the single bottleneck
for all calling, including the currently special Function.prototype.call
and Function.prototype.apply builtins, which had handwritten (and
not fully compliant) versions of CallFunctionStub, and also the
CallIC(s), which where also slightly different.
This also reduces the overhead for API function calls, which is still
unnecessary high, but let's do that step-by-step.
This also fixes a bunch of cases where the implicit ToObject for
sloppy receivers was done in the wrong context (in the caller
context instead of the callee context), which basically meant
that we allowed cross context access to %ObjectPrototype%.
MIPS and MIPS64 ports contributed by akos.palfi@imgtec.com.
R=mstarzinger@chromium.org, jarin@chromium.org, mvstanton@chromium.org
CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux_layout_dbg,v8_linux_nosnap_dbg
BUG=v8:4413
LOG=n
Committed: https://crrev.com/ef268a83be4dead004047c25b702319ea4be7277
Cr-Commit-Position: refs/heads/master@{#30627}
Review URL: https://codereview.chromium.org/1311013008
Cr-Commit-Position: refs/heads/master@{#30629}
2015-09-08 07:50:22 +00:00
|
|
|
// Jump to |target| with the given relocation mode.
void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }

void Pop(Register dst) { pop(dst); }
void Pop(const Operand& dst) { pop(dst); }

// On ia32 the return address lives on the stack; these move it between the
// stack and a register.
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
|
2009-12-10 14:06:08 +00:00
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// ---------------------------------------------------------------------------
// StatsCounter support

void IncrementCounter(StatsCounter* counter, int value);
void DecrementCounter(StatsCounter* counter, int value);
|
|
|
|
|
2011-04-07 13:32:45 +00:00
|
|
|
// Map a register to its index in the safepoint register stack slot area
// (delegates to the private code-based overload).
static int SafepointRegisterStackIndex(Register reg) {
  return SafepointRegisterStackIndex(reg.code());
}

void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
private:
  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag,
                      Label::Distance done_distance);

  // Shared pieces of EnterExitFrame/EnterApiExitFrame.
  void EnterExitFramePrologue(StackFrame::Type frame_type);
  void EnterExitFrameEpilogue(int argc, bool save_doubles);

  // Shared piece of LeaveExitFrame/LeaveApiExitFrame.
  void LeaveExitFrameEpilogue(bool restore_context);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch, Condition cc,
                  Label* condition_met,
                  Label::Distance condition_met_distance = Label::kFar);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit.  Uses ecx as scratch and leaves addr_reg
  // unchanged.
  inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};
|
|
|
|
|
|
|
|
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to
// emit relocation information. If any of these constraints are violated it
// causes an assertion.
class CodePatcher {
 public:
  CodePatcher(Isolate* isolate, byte* address, int size);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;        // The address of the code being patched.
  int size_;             // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
|
|
|
|
|
|
|
|
// -----------------------------------------------------------------------------
// Static helper functions.

// Generate an Operand for loading a field from an object.
inline Operand FieldOperand(Register object, int offset) {
  // Heap object pointers are tagged; subtract the tag to reach the raw field.
  return Operand(object, offset - kHeapObjectTag);
}
|
|
|
|
|
2008-07-30 08:49:36 +00:00
|
|
|
// Generate an Operand for loading an indexed field from an object.
inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
                            int offset) {
  return Operand(object, index, scale, offset - kHeapObjectTag);
}
|
|
|
|
|
2015-11-26 14:12:04 +00:00
|
|
|
// Generate an Operand addressing element |index_as_smi| of |array|, shifted
// by |additional_offset| pointer-sized slots.  A smi index is the element
// index doubled (see SmiTag), so scaling by half a pointer size yields the
// element's byte offset.
inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
                                        int additional_offset = 0) {
  return FieldOperand(
      array, index_as_smi, times_half_pointer_size,
      FixedArray::kHeaderSize + additional_offset * kPointerSize);
}
|
|
|
|
|
2011-11-29 10:56:11 +00:00
|
|
|
// Generate an Operand for slot |index| of |context|.
inline Operand ContextOperand(Register context, int index) {
  return Operand(context, Context::SlotOffset(index));
}

// As above, but with the slot index in a register.
inline Operand ContextOperand(Register context, Register index) {
  return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
}

// Generate an Operand for the native context slot of the current context
// (esi is the context register on ia32).
inline Operand NativeContextOperand() {
  return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
}
|
|
|
|
|
2009-04-21 13:42:12 +00:00
|
|
|
#define ACCESS_MASM(masm) masm->
|
|
|
|
|
2015-09-30 13:46:56 +00:00
|
|
|
} // namespace internal
|
|
|
|
} // namespace v8
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-05-04 13:36:43 +00:00
|
|
|
#endif // V8_IA32_MACRO_ASSEMBLER_IA32_H_
|