[Interpreter] Add ForInPrepare runtime function which returns an ObjectTriple.

Adds a ForInPrepare runtime function which returns a triple of cache_type,
cache_array and cache_length. This requires adding support to CEntryStub for
calling runtime functions which return an ObjectTriple - a struct containing
three Object* pointers. Also cleans up the x64 CEntryStub to avoid replicated
code. Replaces the interpreter's use of the ad-hoc InterpreterForInPrepare
runtime function with ForInPrepare, in preparation for fixing deopt in
BytecodeGraphBuilder for ForIn (which will be done in a follow-up CL).

MIPS port contributed by Balazs Kilvady <balazs.kilvady@imgtec.com>.

BUG=v8:4280
LOG=N

Review URL: https://codereview.chromium.org/1576093004
Cr-Commit-Position: refs/heads/master@{#33334}
Parent: 80a648f557
Commit: 84f8a506e2
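Background for the CEntryStub and simulator changes below: an ObjectTriple is too wide to be returned in registers on the supported C ABIs, so the compiler hands such a function a hidden pointer to caller-allocated memory and writes the three fields through it; the stubs reserve that slot on the stack, pass its address as a hidden argument (the first integer argument register on most of these targets, x8 on arm64), and read the fields back into kReturnRegister0..2 afterwards. A minimal standalone sketch of that convention, using hypothetical names rather than V8 code:

#include <cstdio>

// Stand-in for v8::internal::ObjectTriple: three pointers, returned by value.
struct Triple {
  void* x;
  void* y;
  void* z;
};

// Returning Triple by value is lowered by the compiler to roughly
//   void MakeTripleSketch(Triple* hidden_result, void* x, void* y, void* z);
// i.e. the caller allocates the result slot and passes its address along,
// which is exactly the slot the CEntryStub changes reserve and later unpack.
Triple MakeTripleSketch(void* x, void* y, void* z) {
  Triple result = {x, y, z};
  return result;  // written through the hidden result pointer
}

int main() {
  int a = 0, b = 1, c = 2;
  Triple t = MakeTripleSketch(&a, &b, &c);
  std::printf("%p %p %p\n", t.x, t.y, t.z);
  return 0;
}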
@@ -285,6 +285,8 @@ static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
 #define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
 #define RUNTIME_FUNCTION_RETURN_PAIR(Name) \
     RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, Name)
+#define RUNTIME_FUNCTION_RETURN_TRIPLE(Name) \
+    RUNTIME_FUNCTION_RETURNS_TYPE(ObjectTriple, Name)
 
 }  // namespace internal
 }  // namespace v8
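The new macro follows the same declare-then-define trick as the existing ones: it forward-declares an implementation function, emits a raw entry point that returns the triple struct, and lets the body written at the use site become the implementation. A standalone toy version of that pattern (hypothetical names, not the actual V8 macros):

#include <cstdio>

struct Triple { void* x; void* y; void* z; };
struct Args { int length; void** values; };

// Toy equivalent of RUNTIME_FUNCTION_RETURN_TRIPLE: declare the impl,
// define the entry point that forwards to it, then open the impl definition
// so the block written after the macro use becomes its body.
#define RUNTIME_RETURN_TRIPLE(Name)                      \
  static Triple Impl_##Name(Args args);                  \
  Triple Name(int args_length, void** args_object) {     \
    Args args{args_length, args_object};                 \
    return Impl_##Name(args);                            \
  }                                                      \
  static Triple Impl_##Name(Args args)

// Used exactly like the V8 macro: the braced block is the implementation.
RUNTIME_RETURN_TRIPLE(Demo_ForInPrepare) {
  return Triple{args.values[0], nullptr, nullptr};
}

int main() {
  int receiver = 0;
  void* slots[] = {&receiver};
  Triple t = Demo_ForInPrepare(1, slots);
  std::printf("%p %p %p\n", t.x, t.y, t.z);
  return 0;
}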
@ -999,11 +999,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
// r1: pointer to the first argument (C callee-saved)
|
||||
// r5: pointer to builtin function (C callee-saved)
|
||||
|
||||
// Result returned in r0 or r0+r1 by default.
|
||||
|
||||
#if V8_HOST_ARCH_ARM
|
||||
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
|
||||
int frame_alignment_mask = frame_alignment - 1;
|
||||
#if V8_HOST_ARCH_ARM
|
||||
if (FLAG_debug_code) {
|
||||
if (frame_alignment > kPointerSize) {
|
||||
Label alignment_as_expected;
|
||||
@ -1018,8 +1016,25 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
#endif
|
||||
|
||||
// Call C built-in.
|
||||
// r0 = argc, r1 = argv
|
||||
int result_stack_size;
|
||||
if (result_size() <= 2) {
|
||||
// r0 = argc, r1 = argv, r2 = isolate
|
||||
__ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
|
||||
result_stack_size = 0;
|
||||
} else {
|
||||
DCHECK_EQ(3, result_size());
|
||||
// Allocate additional space for the result.
|
||||
result_stack_size =
|
||||
((result_size() * kPointerSize) + frame_alignment_mask) &
|
||||
~frame_alignment_mask;
|
||||
__ sub(sp, sp, Operand(result_stack_size));
|
||||
|
||||
// r0 = hidden result argument, r1 = argc, r2 = argv, r3 = isolate.
|
||||
__ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
|
||||
__ mov(r2, Operand(r1));
|
||||
__ mov(r1, Operand(r0));
|
||||
__ mov(r0, Operand(sp));
|
||||
}
|
||||
|
||||
// To let the GC traverse the return address of the exit frames, we need to
|
||||
// know where the return address is. The CEntryStub is unmovable, so
|
||||
@ -1032,11 +1047,19 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
// Prevent literal pool emission before return address.
|
||||
Assembler::BlockConstPoolScope block_const_pool(masm);
|
||||
__ add(lr, pc, Operand(4));
|
||||
__ str(lr, MemOperand(sp, 0));
|
||||
__ str(lr, MemOperand(sp, result_stack_size));
|
||||
__ Call(r5);
|
||||
}
|
||||
if (result_size() > 2) {
|
||||
DCHECK_EQ(3, result_size());
|
||||
// Read result values stored on stack.
|
||||
__ ldr(r2, MemOperand(r0, 2 * kPointerSize));
|
||||
__ ldr(r1, MemOperand(r0, 1 * kPointerSize));
|
||||
__ ldr(r0, MemOperand(r0, 0 * kPointerSize));
|
||||
}
|
||||
// Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
|
||||
|
||||
__ VFPEnsureFPSCRState(r2);
|
||||
__ VFPEnsureFPSCRState(r3);
|
||||
|
||||
// Check result for exception sentinel.
|
||||
Label exception_returned;
|
||||
@ -1049,9 +1072,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
Label okay;
|
||||
ExternalReference pending_exception_address(
|
||||
Isolate::kPendingExceptionAddress, isolate());
|
||||
__ mov(r2, Operand(pending_exception_address));
|
||||
__ ldr(r2, MemOperand(r2));
|
||||
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
|
||||
__ mov(r3, Operand(pending_exception_address));
|
||||
__ ldr(r3, MemOperand(r3));
|
||||
__ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
|
||||
// Cannot use check here as it attempts to generate call into runtime.
|
||||
__ b(eq, &okay);
|
||||
__ stop("Unexpected pending exception");
|
||||
|
@@ -16,6 +16,7 @@ namespace internal {
 // Give alias names to registers for calling conventions.
 const Register kReturnRegister0 = {Register::kCode_r0};
 const Register kReturnRegister1 = {Register::kCode_r1};
+const Register kReturnRegister2 = {Register::kCode_r2};
 const Register kJSFunctionRegister = {Register::kCode_r1};
 const Register kContextRegister = {Register::kCode_r7};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
@ -14,6 +14,7 @@
|
||||
#include "src/base/bits.h"
|
||||
#include "src/codegen.h"
|
||||
#include "src/disasm.h"
|
||||
#include "src/runtime/runtime-utils.h"
|
||||
|
||||
#if defined(USE_SIMULATOR)
|
||||
|
||||
@ -1717,6 +1718,10 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
|
||||
int32_t arg4,
|
||||
int32_t arg5);
|
||||
|
||||
typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int32_t arg0, int32_t arg1,
|
||||
int32_t arg2, int32_t arg3,
|
||||
int32_t arg4);
|
||||
|
||||
// These prototypes handle the four types of FP calls.
|
||||
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
|
||||
typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
|
||||
@ -1900,6 +1905,32 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
|
||||
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
|
||||
external);
|
||||
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
|
||||
} else if (redirection->type() ==
|
||||
ExternalReference::BUILTIN_CALL_TRIPLE) {
|
||||
// builtin call returning ObjectTriple.
|
||||
SimulatorRuntimeTripleCall target =
|
||||
reinterpret_cast<SimulatorRuntimeTripleCall>(external);
|
||||
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
|
||||
PrintF(
|
||||
"Call to host triple returning runtime function %p "
|
||||
"args %08x, %08x, %08x, %08x, %08x",
|
||||
FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
|
||||
if (!stack_aligned) {
|
||||
PrintF(" with unaligned stack %08x\n", get_register(sp));
|
||||
}
|
||||
PrintF("\n");
|
||||
}
|
||||
CHECK(stack_aligned);
|
||||
// arg0 is a hidden argument pointing to the return location, so don't
|
||||
// pass it to the target function.
|
||||
ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
|
||||
if (::v8::internal::FLAG_trace_sim) {
|
||||
PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
|
||||
}
|
||||
// Return is passed back in address pointed to by hidden first argument.
|
||||
ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
|
||||
*sim_result = result;
|
||||
set_register(r0, arg0);
|
||||
} else {
|
||||
// builtin call.
|
||||
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
|
||||
|
@ -1104,10 +1104,13 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
__ Sub(temp_argv, temp_argv, 1 * kPointerSize);
|
||||
}
|
||||
|
||||
// Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
|
||||
// registers.
|
||||
// Reserve three slots to preserve x21-x23 callee-saved registers. If the
|
||||
// result size is too large to be returned in registers then also reserve
|
||||
// space for the return value.
|
||||
int extra_stack_space = 3 + (result_size() <= 2 ? 0 : result_size());
|
||||
// Enter the exit frame.
|
||||
FrameScope scope(masm, StackFrame::MANUAL);
|
||||
__ EnterExitFrame(save_doubles(), x10, 3);
|
||||
__ EnterExitFrame(save_doubles(), x10, extra_stack_space);
|
||||
DCHECK(csp.Is(__ StackPointer()));
|
||||
|
||||
// Poke callee-saved registers into reserved space.
|
||||
@ -1115,6 +1118,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
__ Poke(argc, 2 * kPointerSize);
|
||||
__ Poke(target, 3 * kPointerSize);
|
||||
|
||||
if (result_size() > 2) {
|
||||
// Save the location of the return value into x8 for call.
|
||||
__ Add(x8, __ StackPointer(), Operand(4 * kPointerSize));
|
||||
}
|
||||
|
||||
// We normally only keep tagged values in callee-saved registers, as they
|
||||
// could be pushed onto the stack by called stubs and functions, and on the
|
||||
// stack they can confuse the GC. However, we're only calling C functions
|
||||
@ -1184,7 +1192,18 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
__ Blr(target);
|
||||
__ Bind(&return_location);
|
||||
|
||||
// x0 result The return code from the call.
|
||||
if (result_size() > 2) {
|
||||
DCHECK_EQ(3, result_size());
|
||||
// Read result values stored on stack.
|
||||
__ Ldr(x0, MemOperand(__ StackPointer(), 4 * kPointerSize));
|
||||
__ Ldr(x1, MemOperand(__ StackPointer(), 5 * kPointerSize));
|
||||
__ Ldr(x2, MemOperand(__ StackPointer(), 6 * kPointerSize));
|
||||
}
|
||||
// Result returned in x0, x1:x0 or x2:x1:x0 - do not destroy these registers!
|
||||
|
||||
// x0 result0 The return code from the call.
|
||||
// x1 result1 For calls which return ObjectPair or ObjectTriple.
|
||||
// x2 result2 For calls which return ObjectTriple.
|
||||
// x21 argv
|
||||
// x22 argc
|
||||
// x23 target
|
||||
|
@@ -37,6 +37,7 @@ namespace internal {
 // TODO(titzer): arm64 is a pain for aliasing; get rid of these macros
 #define kReturnRegister0 x0
 #define kReturnRegister1 x1
+#define kReturnRegister2 x2
 #define kJSFunctionRegister x1
 #define kContextRegister cp
 #define kInterpreterAccumulatorRegister x0
@ -15,6 +15,7 @@
|
||||
#include "src/disasm.h"
|
||||
#include "src/macro-assembler.h"
|
||||
#include "src/ostreams.h"
|
||||
#include "src/runtime/runtime-utils.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
@ -533,12 +534,6 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
|
||||
// uses the ObjectPair structure.
|
||||
// The simulator assumes all runtime calls return two 64-bits values. If they
|
||||
// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
|
||||
struct ObjectPair {
|
||||
int64_t res0;
|
||||
int64_t res1;
|
||||
};
|
||||
|
||||
|
||||
typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
|
||||
int64_t arg1,
|
||||
int64_t arg2,
|
||||
@ -548,6 +543,11 @@ typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
|
||||
int64_t arg6,
|
||||
int64_t arg7);
|
||||
|
||||
typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
|
||||
int64_t arg2, int64_t arg3,
|
||||
int64_t arg4, int64_t arg5,
|
||||
int64_t arg6, int64_t arg7);
|
||||
|
||||
typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
|
||||
typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
|
||||
typedef double (*SimulatorRuntimeFPCall)(double arg1);
|
||||
@ -590,7 +590,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
|
||||
break;
|
||||
|
||||
case ExternalReference::BUILTIN_CALL: {
|
||||
// Object* f(v8::internal::Arguments).
|
||||
// Object* f(v8::internal::Arguments) or
|
||||
// ObjectPair f(v8::internal::Arguments).
|
||||
TraceSim("Type: BUILTIN_CALL\n");
|
||||
SimulatorRuntimeCall target =
|
||||
reinterpret_cast<SimulatorRuntimeCall>(external);
|
||||
@ -607,13 +608,41 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
|
||||
xreg(4), xreg(5), xreg(6), xreg(7));
|
||||
ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
|
||||
xreg(4), xreg(5), xreg(6), xreg(7));
|
||||
TraceSim("Returned: {0x%" PRIx64 ", 0x%" PRIx64 "}\n",
|
||||
result.res0, result.res1);
|
||||
TraceSim("Returned: {%p, %p}\n", result.x, result.y);
|
||||
#ifdef DEBUG
|
||||
CorruptAllCallerSavedCPURegisters();
|
||||
#endif
|
||||
set_xreg(0, result.res0);
|
||||
set_xreg(1, result.res1);
|
||||
set_xreg(0, reinterpret_cast<int64_t>(result.x));
|
||||
set_xreg(1, reinterpret_cast<int64_t>(result.y));
|
||||
break;
|
||||
}
|
||||
|
||||
case ExternalReference::BUILTIN_CALL_TRIPLE: {
|
||||
// ObjectTriple f(v8::internal::Arguments).
|
||||
TraceSim("Type: BUILTIN_CALL TRIPLE\n");
|
||||
SimulatorRuntimeTripleCall target =
|
||||
reinterpret_cast<SimulatorRuntimeTripleCall>(external);
|
||||
|
||||
// We don't know how many arguments are being passed, but we can
|
||||
// pass 8 without touching the stack. They will be ignored by the
|
||||
// host function if they aren't used.
|
||||
TraceSim(
|
||||
"Arguments: "
|
||||
"0x%016" PRIx64 ", 0x%016" PRIx64 ", "
|
||||
"0x%016" PRIx64 ", 0x%016" PRIx64 ", "
|
||||
"0x%016" PRIx64 ", 0x%016" PRIx64 ", "
|
||||
"0x%016" PRIx64 ", 0x%016" PRIx64,
|
||||
xreg(0), xreg(1), xreg(2), xreg(3), xreg(4), xreg(5), xreg(6),
|
||||
xreg(7));
|
||||
// Return location passed in x8.
|
||||
ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(xreg(8));
|
||||
ObjectTriple result = target(xreg(0), xreg(1), xreg(2), xreg(3), xreg(4),
|
||||
xreg(5), xreg(6), xreg(7));
|
||||
TraceSim("Returned: {%p, %p, %p}\n", result.x, result.y, result.z);
|
||||
#ifdef DEBUG
|
||||
CorruptAllCallerSavedCPURegisters();
|
||||
#endif
|
||||
*sim_result = result;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@@ -1024,12 +1024,14 @@ ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
 
 
 ExternalReference::ExternalReference(Runtime::FunctionId id, Isolate* isolate)
-    : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}
+    : ExternalReference(Runtime::FunctionForId(id), isolate) {}
 
 
 ExternalReference::ExternalReference(const Runtime::Function* f,
                                      Isolate* isolate)
-    : address_(Redirect(isolate, f->entry)) {}
+    : address_(Redirect(isolate, f->entry, f->result_size == 3
+                                               ? BUILTIN_CALL_TRIPLE
+                                               : BUILTIN_CALL)) {}
 
 
 ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
@@ -814,9 +814,14 @@ class ExternalReference BASE_EMBEDDED {
   // Used in the simulator to support different native api calls.
   enum Type {
     // Builtin call.
-    // Object* f(v8::internal::Arguments).
+    // Object* f(v8::internal::Arguments) or
+    // ObjectPair f(v8::internal::Arguments).
     BUILTIN_CALL,  // default
 
+    // Builtin call that returns an ObjectTriple.
+    // ObjectTriple f(v8::internal::Arguments).
+    BUILTIN_CALL_TRIPLE,
+
     // Builtin that takes float arguments and returns an int.
     // int f(double, double).
     BUILTIN_COMPARE_CALL,
@@ -1746,10 +1746,8 @@ class CEntryStub : public PlatformCodeStub {
       : PlatformCodeStub(isolate) {
     minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs) |
                  ArgvMode::encode(argv_mode == kArgvInRegister);
-    DCHECK(result_size == 1 || result_size == 2);
-#if _WIN64 || V8_TARGET_ARCH_PPC
+    DCHECK(result_size == 1 || result_size == 2 || result_size == 3);
     minor_key_ = ResultSizeBits::update(minor_key_, result_size);
-#endif  // _WIN64
   }
 
   // The version of this stub that doesn't save doubles is generated ahead of
@@ -1761,9 +1759,7 @@ class CEntryStub : public PlatformCodeStub {
  private:
   bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
   bool argv_in_register() const { return ArgvMode::decode(minor_key_); }
-#if _WIN64 || V8_TARGET_ARCH_PPC
   int result_size() const { return ResultSizeBits::decode(minor_key_); }
-#endif  // _WIN64
 
   bool NeedsImmovableCode() override;
@@ -242,6 +242,9 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
   if (locations.return_count_ > 1) {
     locations.AddReturn(regloc(kReturnRegister1));
   }
+  if (locations.return_count_ > 2) {
+    locations.AddReturn(regloc(kReturnRegister2));
+  }
   for (size_t i = 0; i < return_count; i++) {
     types.AddReturn(MachineType::AnyTagged());
   }
@@ -448,6 +451,9 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
   if (locations.return_count_ > 1) {
     locations.AddReturn(regloc(kReturnRegister1));
   }
+  if (locations.return_count_ > 2) {
+    locations.AddReturn(regloc(kReturnRegister2));
+  }
   for (size_t i = 0; i < return_count; i++) {
     types.AddReturn(return_type);
   }
@ -2303,16 +2303,22 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
|
||||
ProfileEntryHookStub::MaybeCallEntryHook(masm);
|
||||
|
||||
// Reserve space on the stack for the three arguments passed to the call. If
|
||||
// result size is greater than can be returned in registers, also reserve
|
||||
// space for the hidden argument for the result location, and space for the
|
||||
// result itself.
|
||||
int arg_stack_space = result_size() < 3 ? 3 : 4 + result_size();
|
||||
|
||||
// Enter the exit frame that transitions from JavaScript to C++.
|
||||
if (argv_in_register()) {
|
||||
DCHECK(!save_doubles());
|
||||
__ EnterApiExitFrame(3);
|
||||
__ EnterApiExitFrame(arg_stack_space);
|
||||
|
||||
// Move argc and argv into the correct registers.
|
||||
__ mov(esi, ecx);
|
||||
__ mov(edi, eax);
|
||||
} else {
|
||||
__ EnterExitFrame(save_doubles());
|
||||
__ EnterExitFrame(arg_stack_space, save_doubles());
|
||||
}
|
||||
|
||||
// ebx: pointer to C function (C callee-saved)
|
||||
@ -2327,14 +2333,36 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
if (FLAG_debug_code) {
|
||||
__ CheckStackAlignment();
|
||||
}
|
||||
|
||||
// Call C function.
|
||||
if (result_size() <= 2) {
|
||||
__ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
|
||||
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
|
||||
__ mov(Operand(esp, 2 * kPointerSize),
|
||||
Immediate(ExternalReference::isolate_address(isolate())));
|
||||
} else {
|
||||
DCHECK_EQ(3, result_size());
|
||||
// Pass a pointer to the result location as the first argument.
|
||||
__ lea(eax, Operand(esp, 4 * kPointerSize));
|
||||
__ mov(Operand(esp, 0 * kPointerSize), eax);
|
||||
__ mov(Operand(esp, 1 * kPointerSize), edi); // argc.
|
||||
__ mov(Operand(esp, 2 * kPointerSize), esi); // argv.
|
||||
__ mov(Operand(esp, 3 * kPointerSize),
|
||||
Immediate(ExternalReference::isolate_address(isolate())));
|
||||
}
|
||||
__ call(ebx);
|
||||
// Result is in eax or edx:eax - do not destroy these registers!
|
||||
|
||||
if (result_size() > 2) {
|
||||
DCHECK_EQ(3, result_size());
|
||||
#ifndef _WIN32
|
||||
// Restore the "hidden" argument on the stack which was popped by caller.
|
||||
__ sub(esp, Immediate(kPointerSize));
|
||||
#endif
|
||||
// Read result values stored on stack. Result is stored above the arguments.
|
||||
__ mov(kReturnRegister0, Operand(esp, 4 * kPointerSize));
|
||||
__ mov(kReturnRegister1, Operand(esp, 5 * kPointerSize));
|
||||
__ mov(kReturnRegister2, Operand(esp, 6 * kPointerSize));
|
||||
}
|
||||
// Result is in eax, edx:eax or edi:edx:eax - do not destroy these registers!
|
||||
|
||||
// Check result for exception sentinel.
|
||||
Label exception_returned;
|
||||
|
@ -967,7 +967,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::EnterExitFrame(bool save_doubles) {
|
||||
void MacroAssembler::EnterExitFrame(int argc, bool save_doubles) {
|
||||
EnterExitFramePrologue();
|
||||
|
||||
// Set up argc and argv in callee-saved registers.
|
||||
@ -976,7 +976,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) {
|
||||
lea(esi, Operand(ebp, eax, times_4, offset));
|
||||
|
||||
// Reserve space for argc, argv and isolate.
|
||||
EnterExitFrameEpilogue(3, save_doubles);
|
||||
EnterExitFrameEpilogue(argc, save_doubles);
|
||||
}
|
||||
|
||||
|
||||
|
@@ -16,6 +16,7 @@ namespace internal {
 // Give alias names to registers for calling conventions.
 const Register kReturnRegister0 = {Register::kCode_eax};
 const Register kReturnRegister1 = {Register::kCode_edx};
+const Register kReturnRegister2 = {Register::kCode_edi};
 const Register kJSFunctionRegister = {Register::kCode_edi};
 const Register kContextRegister = {Register::kCode_esi};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
@@ -225,7 +226,7 @@ class MacroAssembler: public Assembler {
   // arguments in register eax and sets up the number of arguments in
   // register edi and the pointer to the first argument in register
   // esi.
-  void EnterExitFrame(bool save_doubles);
+  void EnterExitFrame(int argc, bool save_doubles);
 
   void EnterApiExitFrame(int argc);
@@ -1716,14 +1716,16 @@ void Interpreter::DoReturn(compiler::InterpreterAssembler* assembler) {
 // |cache_length| represent output parameters.
 void Interpreter::DoForInPrepare(compiler::InterpreterAssembler* assembler) {
   Node* object = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kInterpreterForInPrepare, object);
-  for (int i = 0; i < 3; i++) {
-    // Set output registers:
-    // 0 == cache_type, 1 == cache_array, 2 == cache_length
-    Node* cache_info = __ LoadFixedArrayElement(result, i);
+  Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, object);
+
+  // Set output registers:
+  // 0 == cache_type, 1 == cache_array, 2 == cache_length
+  for (int i = 0; i < 3; i++) {
+    Node* cache_info = __ Projection(i, result_triple);
     Node* cache_info_reg = __ BytecodeOperandReg(i);
     __ StoreRegister(cache_info, cache_info_reg);
   }
-  __ SetAccumulator(result);
+
   __ Dispatch();
 }
@ -1092,14 +1092,34 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
// a0 = argc
|
||||
__ mov(s0, a0);
|
||||
__ mov(s2, a1);
|
||||
// a1 = argv (set in the delay slot after find_ra below).
|
||||
|
||||
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
|
||||
// also need to reserve the 4 argument slots on the stack.
|
||||
|
||||
__ AssertStackIsAligned();
|
||||
|
||||
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
|
||||
int frame_alignment_mask = frame_alignment - 1;
|
||||
int result_stack_size;
|
||||
if (result_size() <= 2) {
|
||||
// a0 = argc, a1 = argv, a2 = isolate
|
||||
__ li(a2, Operand(ExternalReference::isolate_address(isolate())));
|
||||
__ mov(a1, s1);
|
||||
result_stack_size = 0;
|
||||
} else {
|
||||
DCHECK_EQ(3, result_size());
|
||||
// Allocate additional space for the result.
|
||||
result_stack_size =
|
||||
((result_size() * kPointerSize) + frame_alignment_mask) &
|
||||
~frame_alignment_mask;
|
||||
__ Subu(sp, sp, Operand(result_stack_size));
|
||||
|
||||
// a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
|
||||
__ li(a3, Operand(ExternalReference::isolate_address(isolate())));
|
||||
__ mov(a2, s1);
|
||||
__ mov(a1, a0);
|
||||
__ mov(a0, sp);
|
||||
}
|
||||
|
||||
// To let the GC traverse the return address of the exit frames, we need to
|
||||
// know where the return address is. The CEntryStub is unmovable, so
|
||||
@ -1111,29 +1131,37 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
// Use masm-> here instead of the double-underscore macro since extra
|
||||
// coverage code can interfere with the proper calculation of ra.
|
||||
Label find_ra;
|
||||
masm->bal(&find_ra); // bal exposes branch delay slot.
|
||||
masm->mov(a1, s1);
|
||||
masm->bind(&find_ra);
|
||||
__ bal(&find_ra); // bal exposes branch delay slot.
|
||||
__ nop();
|
||||
__ bind(&find_ra);
|
||||
|
||||
// Adjust the value in ra to point to the correct return location, 2nd
|
||||
// instruction past the real call into C code (the jalr(t9)), and push it.
|
||||
// This is the return address of the exit frame.
|
||||
const int kNumInstructionsToJump = 5;
|
||||
masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
|
||||
masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
|
||||
__ Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
|
||||
// This spot was reserved in EnterExitFrame.
|
||||
__ sw(ra, MemOperand(sp, result_stack_size));
|
||||
// Stack space reservation moved to the branch delay slot below.
|
||||
// Stack is still aligned.
|
||||
|
||||
// Call the C routine.
|
||||
masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
|
||||
masm->jalr(t9);
|
||||
__ mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
|
||||
__ jalr(t9);
|
||||
// Set up sp in the delay slot.
|
||||
masm->addiu(sp, sp, -kCArgsSlotsSize);
|
||||
__ addiu(sp, sp, -kCArgsSlotsSize);
|
||||
// Make sure the stored 'ra' points to this position.
|
||||
DCHECK_EQ(kNumInstructionsToJump,
|
||||
masm->InstructionsGeneratedSince(&find_ra));
|
||||
}
|
||||
|
||||
if (result_size() > 2) {
|
||||
DCHECK_EQ(3, result_size());
|
||||
// Read result values stored on stack.
|
||||
__ lw(a0, MemOperand(v0, 2 * kPointerSize));
|
||||
__ lw(v1, MemOperand(v0, 1 * kPointerSize));
|
||||
__ lw(v0, MemOperand(v0, 0 * kPointerSize));
|
||||
}
|
||||
// Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
|
||||
|
||||
// Check result for exception sentinel.
|
||||
Label exception_returned;
|
||||
|
@@ -15,6 +15,7 @@ namespace internal {
 // Give alias names to registers for calling conventions.
 const Register kReturnRegister0 = {Register::kCode_v0};
 const Register kReturnRegister1 = {Register::kCode_v1};
+const Register kReturnRegister2 = {Register::kCode_a0};
 const Register kJSFunctionRegister = {Register::kCode_a1};
 const Register kContextRegister = {Register::kCpRegister};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
@ -16,6 +16,7 @@
|
||||
#include "src/mips/constants-mips.h"
|
||||
#include "src/mips/simulator-mips.h"
|
||||
#include "src/ostreams.h"
|
||||
#include "src/runtime/runtime-utils.h"
|
||||
|
||||
|
||||
// Only build the simulator if not compiling for real MIPS hardware.
|
||||
@ -1970,6 +1971,10 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
|
||||
int32_t arg4,
|
||||
int32_t arg5);
|
||||
|
||||
typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int32_t arg0, int32_t arg1,
|
||||
int32_t arg2, int32_t arg3,
|
||||
int32_t arg4);
|
||||
|
||||
// These prototypes handle the four types of FP calls.
|
||||
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
|
||||
typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
|
||||
@ -2181,6 +2186,26 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
|
||||
SimulatorRuntimeProfilingGetterCall target =
|
||||
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
|
||||
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
|
||||
} else if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
|
||||
// builtin call returning ObjectTriple.
|
||||
SimulatorRuntimeTripleCall target =
|
||||
reinterpret_cast<SimulatorRuntimeTripleCall>(external);
|
||||
if (::v8::internal::FLAG_trace_sim) {
|
||||
PrintF(
|
||||
"Call to host triple returning runtime function %p "
|
||||
"args %08x, %08x, %08x, %08x, %08x\n",
|
||||
FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
|
||||
}
|
||||
// arg0 is a hidden argument pointing to the return location, so don't
|
||||
// pass it to the target function.
|
||||
ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
|
||||
if (::v8::internal::FLAG_trace_sim) {
|
||||
PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
|
||||
}
|
||||
// Return is passed back in address pointed to by hidden first argument.
|
||||
ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
|
||||
*sim_result = result;
|
||||
set_register(v0, arg0);
|
||||
} else {
|
||||
SimulatorRuntimeCall target =
|
||||
reinterpret_cast<SimulatorRuntimeCall>(external);
|
||||
|
@ -1090,14 +1090,34 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
// a0 = argc
|
||||
__ mov(s0, a0);
|
||||
__ mov(s2, a1);
|
||||
// a1 = argv (set in the delay slot after find_ra below).
|
||||
|
||||
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
|
||||
// also need to reserve the 4 argument slots on the stack.
|
||||
|
||||
__ AssertStackIsAligned();
|
||||
|
||||
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
|
||||
int frame_alignment_mask = frame_alignment - 1;
|
||||
int result_stack_size;
|
||||
if (result_size() <= 2) {
|
||||
// a0 = argc, a1 = argv, a2 = isolate
|
||||
__ li(a2, Operand(ExternalReference::isolate_address(isolate())));
|
||||
__ mov(a1, s1);
|
||||
result_stack_size = 0;
|
||||
} else {
|
||||
DCHECK_EQ(3, result_size());
|
||||
// Allocate additional space for the result.
|
||||
result_stack_size =
|
||||
((result_size() * kPointerSize) + frame_alignment_mask) &
|
||||
~frame_alignment_mask;
|
||||
__ Dsubu(sp, sp, Operand(result_stack_size));
|
||||
|
||||
// a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
|
||||
__ li(a3, Operand(ExternalReference::isolate_address(isolate())));
|
||||
__ mov(a2, s1);
|
||||
__ mov(a1, a0);
|
||||
__ mov(a0, sp);
|
||||
}
|
||||
|
||||
// To let the GC traverse the return address of the exit frames, we need to
|
||||
// know where the return address is. The CEntryStub is unmovable, so
|
||||
@ -1109,28 +1129,37 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
// Use masm-> here instead of the double-underscore macro since extra
|
||||
// coverage code can interfere with the proper calculation of ra.
|
||||
Label find_ra;
|
||||
masm->bal(&find_ra); // bal exposes branch delay slot.
|
||||
masm->mov(a1, s1);
|
||||
masm->bind(&find_ra);
|
||||
__ bal(&find_ra); // bal exposes branch delay slot.
|
||||
__ nop();
|
||||
__ bind(&find_ra);
|
||||
|
||||
// Adjust the value in ra to point to the correct return location, 2nd
|
||||
// instruction past the real call into C code (the jalr(t9)), and push it.
|
||||
// This is the return address of the exit frame.
|
||||
const int kNumInstructionsToJump = 5;
|
||||
masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size);
|
||||
masm->sd(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
|
||||
__ Daddu(ra, ra, kNumInstructionsToJump * kInt32Size);
|
||||
// This spot was reserved in EnterExitFrame.
|
||||
__ sd(ra, MemOperand(sp, result_stack_size));
|
||||
// Stack space reservation moved to the branch delay slot below.
|
||||
// Stack is still aligned.
|
||||
|
||||
// Call the C routine.
|
||||
masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
|
||||
masm->jalr(t9);
|
||||
__ mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
|
||||
__ jalr(t9);
|
||||
// Set up sp in the delay slot.
|
||||
masm->daddiu(sp, sp, -kCArgsSlotsSize);
|
||||
__ daddiu(sp, sp, -kCArgsSlotsSize);
|
||||
// Make sure the stored 'ra' points to this position.
|
||||
DCHECK_EQ(kNumInstructionsToJump,
|
||||
masm->InstructionsGeneratedSince(&find_ra));
|
||||
}
|
||||
if (result_size() > 2) {
|
||||
DCHECK_EQ(3, result_size());
|
||||
// Read result values stored on stack.
|
||||
__ ld(a0, MemOperand(v0, 2 * kPointerSize));
|
||||
__ ld(v1, MemOperand(v0, 1 * kPointerSize));
|
||||
__ ld(v0, MemOperand(v0, 0 * kPointerSize));
|
||||
}
|
||||
// Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
|
||||
|
||||
// Check result for exception sentinel.
|
||||
Label exception_returned;
|
||||
|
@@ -15,6 +15,7 @@ namespace internal {
 // Give alias names to registers for calling conventions.
 const Register kReturnRegister0 = {Register::kCode_v0};
 const Register kReturnRegister1 = {Register::kCode_v1};
+const Register kReturnRegister2 = {Register::kCode_a0};
 const Register kJSFunctionRegister = {Register::kCode_a1};
 const Register kContextRegister = {Register::kCpRegister};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
@ -16,6 +16,7 @@
|
||||
#include "src/mips64/constants-mips64.h"
|
||||
#include "src/mips64/simulator-mips64.h"
|
||||
#include "src/ostreams.h"
|
||||
#include "src/runtime/runtime-utils.h"
|
||||
|
||||
// Only build the simulator if not compiling for real MIPS hardware.
|
||||
#if defined(USE_SIMULATOR)
|
||||
@ -1964,11 +1965,6 @@ void Simulator::Format(Instruction* instr, const char* format) {
|
||||
// 64 bits of result. If they don't, the v1 result register contains a bogus
|
||||
// value, which is fine because it is caller-saved.
|
||||
|
||||
struct ObjectPair {
|
||||
Object* x;
|
||||
Object* y;
|
||||
};
|
||||
|
||||
typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
|
||||
int64_t arg1,
|
||||
int64_t arg2,
|
||||
@ -1976,6 +1972,9 @@ typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
|
||||
int64_t arg4,
|
||||
int64_t arg5);
|
||||
|
||||
typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
|
||||
int64_t arg2, int64_t arg3,
|
||||
int64_t arg4);
|
||||
|
||||
// These prototypes handle the four types of FP calls.
|
||||
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
|
||||
@ -2175,6 +2174,27 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
|
||||
SimulatorRuntimeProfilingGetterCall target =
|
||||
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
|
||||
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
|
||||
} else if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
|
||||
// builtin call returning ObjectTriple.
|
||||
SimulatorRuntimeTripleCall target =
|
||||
reinterpret_cast<SimulatorRuntimeTripleCall>(external);
|
||||
if (::v8::internal::FLAG_trace_sim) {
|
||||
PrintF(
|
||||
"Call to host triple returning runtime function %p "
|
||||
"args %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64
|
||||
", %016" PRIx64 "\n",
|
||||
FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
|
||||
}
|
||||
// arg0 is a hidden argument pointing to the return location, so don't
|
||||
// pass it to the target function.
|
||||
ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
|
||||
if (::v8::internal::FLAG_trace_sim) {
|
||||
PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
|
||||
}
|
||||
// Return is passed back in address pointed to by hidden first argument.
|
||||
ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
|
||||
*sim_result = result;
|
||||
set_register(v0, arg0);
|
||||
} else {
|
||||
SimulatorRuntimeCall target =
|
||||
reinterpret_cast<SimulatorRuntimeCall>(external);
|
||||
|
@@ -5,11 +5,57 @@
 #include "src/runtime/runtime-utils.h"
 
 #include "src/arguments.h"
+#include "src/factory.h"
+#include "src/isolate-inl.h"
 #include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
+RUNTIME_FUNCTION_RETURN_TRIPLE(Runtime_ForInPrepare) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+
+  if (!args[0]->IsJSReceiver()) {
+    return MakeTriple(isolate->ThrowIllegalOperation(), nullptr, nullptr);
+  }
+  Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
+
+  Object* property_names = Runtime_GetPropertyNamesFast(
+      1, Handle<Object>::cast(receiver).location(), isolate);
+  if (isolate->has_pending_exception()) {
+    return MakeTriple(property_names, nullptr, nullptr);
+  }
+
+  Handle<Object> cache_type(property_names, isolate);
+  Handle<FixedArray> cache_array;
+  int cache_length;
+
+  Handle<Map> receiver_map = handle(receiver->map(), isolate);
+  if (cache_type->IsMap()) {
+    Handle<Map> cache_type_map =
+        handle(Handle<Map>::cast(cache_type)->map(), isolate);
+    DCHECK(cache_type_map.is_identical_to(isolate->factory()->meta_map()));
+    int enum_length = cache_type_map->EnumLength();
+    DescriptorArray* descriptors = receiver_map->instance_descriptors();
+    if (enum_length > 0 && descriptors->HasEnumCache()) {
+      cache_array = handle(descriptors->GetEnumCache(), isolate);
+      cache_length = cache_array->length();
+    } else {
+      cache_array = isolate->factory()->empty_fixed_array();
+      cache_length = 0;
+    }
+  } else {
+    cache_array = Handle<FixedArray>::cast(cache_type);
+    cache_length = cache_array->length();
+    // Cache type of SMI one entails slow check.
+    cache_type = Handle<Object>(Smi::FromInt(1), isolate);
+  }
+
+  return MakeTriple(*cache_type, *cache_array, Smi::FromInt(cache_length));
+}
+
+
 RUNTIME_FUNCTION(Runtime_ForInDone) {
   SealHandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@ -147,56 +147,5 @@ RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
|
||||
shared, context, static_cast<PretenureFlag>(pretenured_flag));
|
||||
}
|
||||
|
||||
|
||||
RUNTIME_FUNCTION(Runtime_InterpreterForInPrepare) {
|
||||
HandleScope scope(isolate);
|
||||
DCHECK_EQ(1, args.length());
|
||||
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
|
||||
|
||||
Object* property_names = Runtime_GetPropertyNamesFast(
|
||||
1, Handle<Object>::cast(receiver).location(), isolate);
|
||||
if (isolate->has_pending_exception()) {
|
||||
return property_names;
|
||||
}
|
||||
|
||||
Handle<Object> cache_type(property_names, isolate);
|
||||
Handle<FixedArray> cache_array;
|
||||
int cache_length;
|
||||
|
||||
Handle<Map> receiver_map = handle(receiver->map(), isolate);
|
||||
if (cache_type->IsMap()) {
|
||||
Handle<Map> cache_type_map =
|
||||
handle(Handle<Map>::cast(cache_type)->map(), isolate);
|
||||
DCHECK(cache_type_map.is_identical_to(isolate->factory()->meta_map()));
|
||||
int enum_length = cache_type_map->EnumLength();
|
||||
DescriptorArray* descriptors = receiver_map->instance_descriptors();
|
||||
if (enum_length > 0 && descriptors->HasEnumCache()) {
|
||||
cache_array = handle(descriptors->GetEnumCache(), isolate);
|
||||
cache_length = cache_array->length();
|
||||
} else {
|
||||
cache_array = isolate->factory()->empty_fixed_array();
|
||||
cache_length = 0;
|
||||
}
|
||||
} else {
|
||||
cache_array = Handle<FixedArray>::cast(cache_type);
|
||||
cache_length = cache_array->length();
|
||||
|
||||
STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
|
||||
if (receiver_map->instance_type() == JS_PROXY_TYPE) {
|
||||
// Zero indicates proxy
|
||||
cache_type = Handle<Object>(Smi::FromInt(0), isolate);
|
||||
} else {
|
||||
// One entails slow check
|
||||
cache_type = Handle<Object>(Smi::FromInt(1), isolate);
|
||||
}
|
||||
}
|
||||
|
||||
Handle<FixedArray> result = isolate->factory()->NewFixedArray(3);
|
||||
result->set(0, *cache_type);
|
||||
result->set(1, *cache_array);
|
||||
result->set(2, Smi::FromInt(cache_length));
|
||||
return *result;
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
@@ -162,6 +162,22 @@ static inline ObjectPair MakePair(Object* x, Object* y) {
 }
 #endif
 
+
+// A mechanism to return a triple of Object pointers. In all calling
+// conventions, a struct of three pointers is returned in memory,
+// allocated by the caller, and passed as a pointer in a hidden first parameter.
+struct ObjectTriple {
+  Object* x;
+  Object* y;
+  Object* z;
+};
+
+static inline ObjectTriple MakeTriple(Object* x, Object* y, Object* z) {
+  ObjectTriple result = {x, y, z};
+  // ObjectTriple is assigned to a hidden first argument.
+  return result;
+}
+
 }  // namespace internal
 }  // namespace v8
@@ -27,6 +27,12 @@ FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
 FOR_EACH_INTRINSIC_RETURN_PAIR(P)
 #undef P
 
+#define T(name, number_of_args, result_size)                         \
+  ObjectTriple Runtime_##name(int args_length, Object** args_object, \
+                              Isolate* isolate);
+FOR_EACH_INTRINSIC_RETURN_TRIPLE(T)
+#undef T
+
 
 #define F(name, number_of_args, result_size) \
   {                                          \
@@ -220,8 +220,7 @@ namespace internal {
   F(InterpreterToBoolean, 1, 1)    \
   F(InterpreterLogicalNot, 1, 1)   \
   F(InterpreterTypeOf, 1, 1)       \
-  F(InterpreterNewClosure, 2, 1)   \
-  F(InterpreterForInPrepare, 1, 1)
+  F(InterpreterNewClosure, 2, 1)
 
 
 #define FOR_EACH_INTRINSIC_FUNCTION(F) \
@@ -1009,6 +1008,8 @@ namespace internal {
   F(LoadLookupSlot, 2, 2) \
   F(LoadLookupSlotNoReferenceError, 2, 2)
 
+#define FOR_EACH_INTRINSIC_RETURN_TRIPLE(F) \
+  F(ForInPrepare, 1, 3)
 
 // Most intrinsics are implemented in the runtime/ directory, but ICs are
 // implemented in ic.cc for now.
@@ -1075,6 +1076,7 @@ namespace internal {
 // FOR_EACH_INTRINSIC defines the list of all intrinsics, coming in 2 flavors,
 // either returning an object or a pair.
 #define FOR_EACH_INTRINSIC(F)         \
+  FOR_EACH_INTRINSIC_RETURN_TRIPLE(F) \
   FOR_EACH_INTRINSIC_RETURN_PAIR(F)   \
   FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
@ -2158,12 +2158,34 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
|
||||
ProfileEntryHookStub::MaybeCallEntryHook(masm);
|
||||
|
||||
// Enter the exit frame that transitions from JavaScript to C++.
|
||||
#ifdef _WIN64
|
||||
int arg_stack_space = (result_size() < 2 ? 2 : 4);
|
||||
#else // _WIN64
|
||||
int arg_stack_space = 0;
|
||||
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. It requires the
|
||||
// stack to be aligned to 16 bytes. It only allows a single-word to be
|
||||
// returned in register rax. Larger return sizes must be written to an address
|
||||
// passed as a hidden first argument.
|
||||
const Register kCCallArg0 = rcx;
|
||||
const Register kCCallArg1 = rdx;
|
||||
const Register kCCallArg2 = r8;
|
||||
const Register kCCallArg3 = r9;
|
||||
const int kArgExtraStackSpace = 2;
|
||||
const int kMaxRegisterResultSize = 1;
|
||||
#else
|
||||
// GCC / Clang passes arguments in rdi, rsi, rdx, rcx, r8, r9. Simple results
|
||||
// are returned in rax, and a struct of two pointers are returned in rax+rdx.
|
||||
// Larger return sizes must be written to an address passed as a hidden first
|
||||
// argument.
|
||||
const Register kCCallArg0 = rdi;
|
||||
const Register kCCallArg1 = rsi;
|
||||
const Register kCCallArg2 = rdx;
|
||||
const Register kCCallArg3 = rcx;
|
||||
const int kArgExtraStackSpace = 0;
|
||||
const int kMaxRegisterResultSize = 2;
|
||||
#endif // _WIN64
|
||||
|
||||
// Enter the exit frame that transitions from JavaScript to C++.
|
||||
int arg_stack_space =
|
||||
kArgExtraStackSpace +
|
||||
(result_size() <= kMaxRegisterResultSize ? 0 : result_size());
|
||||
if (argv_in_register()) {
|
||||
DCHECK(!save_doubles());
|
||||
__ EnterApiExitFrame(arg_stack_space);
|
||||
@ -2179,56 +2201,41 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
// r14: number of arguments including receiver (C callee-saved).
|
||||
// r15: argv pointer (C callee-saved).
|
||||
|
||||
// Simple results returned in rax (both AMD64 and Win64 calling conventions).
|
||||
// Complex results must be written to address passed as first argument.
|
||||
// AMD64 calling convention: a struct of two pointers in rax+rdx
|
||||
|
||||
// Check stack alignment.
|
||||
if (FLAG_debug_code) {
|
||||
__ CheckStackAlignment();
|
||||
}
|
||||
|
||||
// Call C function.
|
||||
#ifdef _WIN64
|
||||
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
|
||||
// Pass argv and argc as two parameters. The arguments object will
|
||||
// be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
|
||||
if (result_size() < 2) {
|
||||
// Call C function. The arguments object will be created by stubs declared by
|
||||
// DECLARE_RUNTIME_FUNCTION().
|
||||
if (result_size() <= kMaxRegisterResultSize) {
|
||||
// Pass a pointer to the Arguments object as the first argument.
|
||||
// Return result in single register (rax).
|
||||
__ movp(rcx, r14); // argc.
|
||||
__ movp(rdx, r15); // argv.
|
||||
__ Move(r8, ExternalReference::isolate_address(isolate()));
|
||||
// Return result in single register (rax), or a register pair (rax, rdx).
|
||||
__ movp(kCCallArg0, r14); // argc.
|
||||
__ movp(kCCallArg1, r15); // argv.
|
||||
__ Move(kCCallArg2, ExternalReference::isolate_address(isolate()));
|
||||
} else {
|
||||
DCHECK_EQ(2, result_size());
|
||||
DCHECK_LE(result_size(), 3);
|
||||
// Pass a pointer to the result location as the first argument.
|
||||
__ leap(rcx, StackSpaceOperand(2));
|
||||
__ leap(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
|
||||
// Pass a pointer to the Arguments object as the second argument.
|
||||
__ movp(rdx, r14); // argc.
|
||||
__ movp(r8, r15); // argv.
|
||||
__ Move(r9, ExternalReference::isolate_address(isolate()));
|
||||
__ movp(kCCallArg1, r14); // argc.
|
||||
__ movp(kCCallArg2, r15); // argv.
|
||||
__ Move(kCCallArg3, ExternalReference::isolate_address(isolate()));
|
||||
}
|
||||
|
||||
#else // _WIN64
|
||||
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
|
||||
__ movp(rdi, r14); // argc.
|
||||
__ movp(rsi, r15); // argv.
|
||||
__ Move(rdx, ExternalReference::isolate_address(isolate()));
|
||||
#endif // _WIN64
|
||||
__ call(rbx);
|
||||
// Result is in rax - do not destroy this register!
|
||||
|
||||
#ifdef _WIN64
|
||||
// If return value is on the stack, pop it to registers.
|
||||
if (result_size() > 1) {
|
||||
DCHECK_EQ(2, result_size());
|
||||
if (result_size() > kMaxRegisterResultSize) {
|
||||
// Read result values stored on stack. Result is stored
|
||||
// above the four argument mirror slots and the two
|
||||
// Arguments object slots.
|
||||
__ movq(rax, Operand(rsp, 6 * kRegisterSize));
|
||||
__ movq(rdx, Operand(rsp, 7 * kRegisterSize));
|
||||
// above the the two Arguments object slots on Win64.
|
||||
DCHECK_LE(result_size(), 3);
|
||||
__ movq(kReturnRegister0, StackSpaceOperand(kArgExtraStackSpace + 0));
|
||||
__ movq(kReturnRegister1, StackSpaceOperand(kArgExtraStackSpace + 1));
|
||||
if (result_size() > 2) {
|
||||
__ movq(kReturnRegister2, StackSpaceOperand(kArgExtraStackSpace + 2));
|
||||
}
|
||||
#endif // _WIN64
|
||||
}
|
||||
// Result is in rax, rdx:rax or r8:rdx:rax - do not destroy these registers!
|
||||
|
||||
// Check result for exception sentinel.
|
||||
Label exception_returned;
|
||||
|
@@ -18,6 +18,7 @@ namespace internal {
 // Give alias names to registers for calling conventions.
 const Register kReturnRegister0 = {Register::kCode_rax};
 const Register kReturnRegister1 = {Register::kCode_rdx};
+const Register kReturnRegister2 = {Register::kCode_r8};
 const Register kJSFunctionRegister = {Register::kCode_rdi};
 const Register kContextRegister = {Register::kCode_rsi};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};