Remove x87 port

Bug: v8:6550
Change-Id: I888f91db1fd842d1fef8a5fb749da229dfb6ab97
Reviewed-on: https://chromium-review.googlesource.com/575756
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Daniel Clifford <danno@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46746}
Authored by Jakob Kummerow on 2017-07-18 08:55:06 -07:00; committed by Commit Bot
commit e825c4318e (parent 973314f2e1)
82 changed files with 45 additions and 28602 deletions

View File

@ -112,9 +112,9 @@ declare_args() {
v8_experimental_extra_library_files =
[ "//test/cctest/test-experimental-extra.js" ]
v8_enable_gdbjit = ((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
v8_current_cpu == "x87") && (is_linux || is_mac)) ||
(v8_current_cpu == "ppc64" && is_linux)
v8_enable_gdbjit =
((v8_current_cpu == "x86" || v8_current_cpu == "x64") &&
(is_linux || is_mac)) || (v8_current_cpu == "ppc64" && is_linux)
# Temporary flag to allow embedders to update their microtasks scopes
# while rolling in a new version of V8.
@ -439,9 +439,6 @@ config("toolchain") {
ldflags += [ "/STACK:2097152" ]
}
}
if (v8_current_cpu == "x87") {
defines += [ "V8_TARGET_ARCH_X87" ]
}
if (is_android && v8_android_log_stdout) {
defines += [ "V8_ANDROID_LOG_STDOUT" ]
}
@ -1039,11 +1036,6 @@ v8_source_set("v8_builtins_generators") {
### gcmole(arch:s390) ###
"src/builtins/s390/builtins-s390.cc",
]
} else if (v8_current_cpu == "x87") {
sources += [
### gcmole(arch:x87) ###
"src/builtins/x87/builtins-x87.cc",
]
}
if (!v8_enable_i18n_support) {
@ -2313,37 +2305,6 @@ v8_source_set("v8_base") {
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
]
} else if (v8_current_cpu == "x87") {
sources += [ ### gcmole(arch:x87) ###
"src/compiler/x87/code-generator-x87.cc",
"src/compiler/x87/instruction-codes-x87.h",
"src/compiler/x87/instruction-scheduler-x87.cc",
"src/compiler/x87/instruction-selector-x87.cc",
"src/debug/x87/debug-x87.cc",
"src/full-codegen/x87/full-codegen-x87.cc",
"src/ic/x87/access-compiler-x87.cc",
"src/ic/x87/handler-compiler-x87.cc",
"src/ic/x87/ic-x87.cc",
"src/regexp/x87/regexp-macro-assembler-x87.cc",
"src/regexp/x87/regexp-macro-assembler-x87.h",
"src/x87/assembler-x87-inl.h",
"src/x87/assembler-x87.cc",
"src/x87/assembler-x87.h",
"src/x87/code-stubs-x87.cc",
"src/x87/code-stubs-x87.h",
"src/x87/codegen-x87.cc",
"src/x87/codegen-x87.h",
"src/x87/cpu-x87.cc",
"src/x87/deoptimizer-x87.cc",
"src/x87/disasm-x87.cc",
"src/x87/frames-x87.cc",
"src/x87/frames-x87.h",
"src/x87/interface-descriptors-x87.cc",
"src/x87/macro-assembler-x87.cc",
"src/x87/macro-assembler-x87.h",
"src/x87/simulator-x87.cc",
"src/x87/simulator-x87.h",
]
}
configs = [ ":internal_config" ]

View File

@ -255,14 +255,13 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 s390 \
s390x
ARCHES32 = ia32 arm mips mipsel x87 ppc s390
ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el ppc ppc64 s390 s390x
ARCHES32 = ia32 arm mips mipsel ppc s390
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
android_mipsel android_x87
android_mipsel
# List of files that trigger Makefile regeneration:
GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \

View File

@ -262,14 +262,14 @@
# goma doesn't support PDB yet.
'fastbuild%': 1,
}],
['((v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \
['((v8_target_arch=="ia32" or v8_target_arch=="x64") and \
(OS=="linux" or OS=="mac")) or (v8_target_arch=="ppc64" and OS=="linux")', {
'v8_enable_gdbjit%': 1,
}, {
'v8_enable_gdbjit%': 0,
}],
['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
(v8_target_arch!="x87" and v8_target_arch!="x32")', {
v8_target_arch!="x32"', {
'clang%': 1,
}, {
'clang%': 0,
@ -1207,7 +1207,7 @@
'-L<(android_libcpp_libs)/arm64-v8a',
],
}],
['target_arch=="ia32" or target_arch=="x87"', {
['target_arch=="ia32"', {
# The x86 toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',

View File

@ -144,7 +144,7 @@
'host_cxx_is_biarch%': 0,
},
}],
['target_arch=="ia32" or target_arch=="x64" or target_arch=="x87" or \
['target_arch=="ia32" or target_arch=="x64" or \
target_arch=="ppc" or target_arch=="ppc64" or target_arch=="s390" or \
target_arch=="s390x" or clang==1', {
'variables': {
@ -342,12 +342,6 @@
'V8_TARGET_ARCH_IA32',
],
}], # v8_target_arch=="ia32"
['v8_target_arch=="x87"', {
'defines': [
'V8_TARGET_ARCH_X87',
],
'cflags': ['-march=i586'],
}], # v8_target_arch=="x87"
['v8_target_arch=="mips" or v8_target_arch=="mipsel" \
or v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
'target_conditions': [
@ -1006,9 +1000,8 @@
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="x87" or v8_target_arch=="mips" or \
v8_target_arch=="mipsel" or v8_target_arch=="ppc" or \
v8_target_arch=="s390")', {
v8_target_arch=="mips" or v8_target_arch=="mipsel" or \
v8_target_arch=="ppc" or v8_target_arch=="s390")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [

View File

@ -23,8 +23,6 @@
#include "src/mips64/assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/assembler-s390-inl.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/assembler-x87-inl.h"
#else
#error Unknown architecture.
#endif

View File

@ -85,8 +85,6 @@
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/regexp/s390/regexp-macro-assembler-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/regexp/x87/regexp-macro-assembler-x87.h" // NOLINT
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
@ -1324,8 +1322,6 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_S390
function = FUNCTION_ADDR(RegExpMacroAssemblerS390::CheckStackGuardState);
#elif V8_TARGET_ARCH_X87
function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
#else
UNREACHABLE();
#endif

View File

@ -76,9 +76,9 @@
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_S390
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
!V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_S390
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@ -129,8 +129,6 @@
#else
#define V8_TARGET_ARCH_32_BIT 1
#endif
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_ARCH_32_BIT 1
#else
#error Unknown target architecture pointer size
#endif
@ -181,8 +179,6 @@
#else
#define V8_TARGET_LITTLE_ENDIAN 1
#endif
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_LITTLE_ENDIAN 1
#elif __BIG_ENDIAN__ // FOR PPCGR on AIX
#define V8_TARGET_BIG_ENDIAN 1
#elif V8_TARGET_ARCH_PPC_LE
@ -199,8 +195,7 @@
#error Unknown target architecture endianness
#endif
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64) || \
defined(V8_TARGET_ARCH_X87)
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 1
#else
#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 0

View File

@ -1,2 +0,0 @@
weiliang.lin@intel.com
chunyang.dai@intel.com

File diff suppressed because it is too large.

View File

@ -514,8 +514,6 @@ class RuntimeCallHelper {
#include "src/mips64/code-stubs-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/code-stubs-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/code-stubs-x87.h"
#else
#error Unsupported target architecture.
#endif

View File

@ -59,8 +59,6 @@
#include "src/mips64/codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/s390/codegen-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/codegen-x87.h" // NOLINT
#else
#error Unsupported target architecture.
#endif

View File

@ -50,12 +50,6 @@ LinkageLocation regloc(Register reg, MachineType type) {
rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit()
#endif
#elif V8_TARGET_ARCH_X87
// ===========================================================================
// == x87 ====================================================================
// ===========================================================================
#define CALLEE_SAVE_REGISTERS esi.bit() | edi.bit() | ebx.bit()
#elif V8_TARGET_ARCH_ARM
// ===========================================================================
// == arm ====================================================================
@ -161,7 +155,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
msig->parameter_count());
// Check the types of the signature.
// Currently no floating point parameters or returns are allowed because
// on x87 and ia32, the FP top of stack is involved.
// on ia32, the FP top of stack is involved.
for (size_t i = 0; i < msig->return_count(); i++) {
MachineRepresentation rep = msig->GetReturn(i).representation();
CHECK_NE(MachineRepresentation::kFloat32, rep);
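For context on the comment above, a minimal standalone sketch (plain C++, not V8 code): with the default ia32 C calling convention, a floating-point return value comes back in the x87 top-of-stack register st(0), which is why the simplified C descriptor refuses FP parameters and returns. The function name here is hypothetical.

#include <cstdio>

// Hypothetical stand-in for a C function reached through a simplified C
// descriptor; on ia32 cdecl its result is left in st(0), a register file
// the TurboFan ia32 backend does not allocate.
extern "C" double half(double x) { return x * 0.5; }

int main() { printf("%g\n", half(3.0)); }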

View File

@ -23,8 +23,6 @@
#include "src/compiler/ppc/instruction-codes-ppc.h"
#elif V8_TARGET_ARCH_S390
#include "src/compiler/s390/instruction-codes-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/compiler/x87/instruction-codes-x87.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)

View File

@ -69,14 +69,6 @@ LinkageLocation stackloc(int i, MachineType type) {
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
#elif V8_TARGET_ARCH_X87
// ===========================================================================
// == x87 ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
#define GP_RETURN_REGISTERS eax, edx
#define FP_RETURN_REGISTERS stX_0
#elif V8_TARGET_ARCH_ARM
// ===========================================================================
// == arm ====================================================================

View File

@ -1,2 +0,0 @@
weiliang.lin@intel.com
chunyang.dai@intel.com

File diff suppressed because it is too large.

View File

@ -1,144 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
#define V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-codes.h"
namespace v8 {
namespace internal {
namespace compiler {
// X87-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(X87Add) \
V(X87And) \
V(X87Cmp) \
V(X87Cmp16) \
V(X87Cmp8) \
V(X87Test) \
V(X87Test16) \
V(X87Test8) \
V(X87Or) \
V(X87Xor) \
V(X87Sub) \
V(X87Imul) \
V(X87ImulHigh) \
V(X87UmulHigh) \
V(X87Idiv) \
V(X87Udiv) \
V(X87Not) \
V(X87Neg) \
V(X87Shl) \
V(X87Shr) \
V(X87Sar) \
V(X87AddPair) \
V(X87SubPair) \
V(X87MulPair) \
V(X87ShlPair) \
V(X87ShrPair) \
V(X87SarPair) \
V(X87Ror) \
V(X87Lzcnt) \
V(X87Popcnt) \
V(X87Float32Cmp) \
V(X87Float32Add) \
V(X87Float32Sub) \
V(X87Float32Mul) \
V(X87Float32Div) \
V(X87Float32Abs) \
V(X87Float32Neg) \
V(X87Float32Sqrt) \
V(X87Float32Round) \
V(X87LoadFloat64Constant) \
V(X87Float64Add) \
V(X87Float64Sub) \
V(X87Float64Mul) \
V(X87Float64Div) \
V(X87Float64Mod) \
V(X87Float32Max) \
V(X87Float64Max) \
V(X87Float32Min) \
V(X87Float64Min) \
V(X87Float64Abs) \
V(X87Float64Neg) \
V(X87Int32ToFloat32) \
V(X87Uint32ToFloat32) \
V(X87Int32ToFloat64) \
V(X87Float32ToFloat64) \
V(X87Uint32ToFloat64) \
V(X87Float64ToInt32) \
V(X87Float32ToInt32) \
V(X87Float32ToUint32) \
V(X87Float64ToFloat32) \
V(X87Float64ToUint32) \
V(X87Float64ExtractHighWord32) \
V(X87Float64ExtractLowWord32) \
V(X87Float64InsertHighWord32) \
V(X87Float64InsertLowWord32) \
V(X87Float64Sqrt) \
V(X87Float64Round) \
V(X87Float64Cmp) \
V(X87Float64SilenceNaN) \
V(X87Movsxbl) \
V(X87Movzxbl) \
V(X87Movb) \
V(X87Movsxwl) \
V(X87Movzxwl) \
V(X87Movw) \
V(X87Movl) \
V(X87Movss) \
V(X87Movsd) \
V(X87Lea) \
V(X87BitcastFI) \
V(X87BitcastIF) \
V(X87Push) \
V(X87PushFloat64) \
V(X87PushFloat32) \
V(X87Poke) \
V(X87StackCheck) \
V(X87Xchgb) \
V(X87Xchgw) \
V(X87Xchgl)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (int32_t)
#define TARGET_ADDRESSING_MODE_LIST(V) \
V(MR) /* [%r1 ] */ \
V(MRI) /* [%r1 + K] */ \
V(MR1) /* [%r1 + %r2*1 ] */ \
V(MR2) /* [%r1 + %r2*2 ] */ \
V(MR4) /* [%r1 + %r2*4 ] */ \
V(MR8) /* [%r1 + %r2*8 ] */ \
V(MR1I) /* [%r1 + %r2*1 + K] */ \
V(MR2I) /* [%r1 + %r2*2 + K] */ \
V(MR4I) /* [%r1 + %r2*4 + K] */ \
V(MR8I) /* [%r1 + %r2*8 + K] */ \
V(M1) /* [ %r2*1 ] */ \
V(M2) /* [ %r2*2 ] */ \
V(M4) /* [ %r2*4 ] */ \
V(M8) /* [ %r2*8 ] */ \
V(M1I) /* [ %r2*1 + K] */ \
V(M2I) /* [ %r2*2 + K] */ \
V(M4I) /* [ %r2*4 + K] */ \
V(M8I) /* [ %r2*8 + K] */ \
V(MI) /* [ K] */
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
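A standalone sketch of how lists like the ones above are typically consumed: after instruction selection, the arch opcode and addressing mode are packed into a single instruction-code word. Field widths and enum values here are assumed for illustration, not V8's actual layout.

#include <cstdint>
#include <cstdio>

enum ArchOpcode : uint32_t { kX87Movl = 42 };
enum AddressingMode : uint32_t { kMode_MR = 0, kMode_MRI = 1 };

// Pack opcode and mode into one word (9-bit opcode field assumed).
constexpr uint32_t Encode(ArchOpcode op, AddressingMode mode) {
  return op | (mode << 9);
}

int main() {
  uint32_t code = Encode(kX87Movl, kMode_MRI);
  printf("opcode=%u mode=%u\n", code & 0x1FFu, code >> 9);  // opcode=42 mode=1
}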

View File

@ -1,26 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return false; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
UNIMPLEMENTED();
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
UNIMPLEMENTED();
}
} // namespace compiler
} // namespace internal
} // namespace v8

File diff suppressed because it is too large.

View File

@ -1,2 +0,0 @@
weiliang.lin@intel.com
chunyang.dai@intel.com

View File

@ -1,157 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_X87
#include "src/debug/debug.h"
#include "src/codegen.h"
#include "src/debug/liveedit.h"
#include "src/x87/frames-x87.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void EmitDebugBreakSlot(MacroAssembler* masm) {
Label check_codesize;
__ bind(&check_codesize);
__ Nop(Assembler::kDebugBreakSlotLength);
DCHECK_EQ(Assembler::kDebugBreakSlotLength,
masm->SizeOfCodeGeneratedSince(&check_codesize));
}
void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nop's to make space for a call instruction.
masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
EmitDebugBreakSlot(patcher.masm());
}
void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code) {
DCHECK(code->is_debug_stub());
static const int kSize = Assembler::kDebugBreakSlotLength;
CodePatcher patcher(isolate, pc, kSize);
// Add a label for checking the size of the code used for returning.
Label check_codesize;
patcher.masm()->bind(&check_codesize);
patcher.masm()->call(code->entry(), RelocInfo::NONE32);
// Check that the size of the code generated is as expected.
DCHECK_EQ(kSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
}
bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
return !Assembler::IsNop(pc);
}
void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode) {
__ RecordComment("Debug break");
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
__ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue)));
}
__ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
// Push arguments for DebugBreak call.
if (mode == SAVE_RESULT_REGISTER) {
// Break on return.
__ push(eax);
} else {
// Non-return breaks.
__ Push(masm->isolate()->factory()->the_hole_value());
}
__ Move(eax, Immediate(1));
__ mov(ebx,
Immediate(ExternalReference(
Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
if (FLAG_debug_code) {
for (int i = 0; i < kNumJSCallerSaved; ++i) {
Register reg = {JSCallerSavedCode(i)};
// Do not clobber eax if mode is SAVE_RESULT_REGISTER. It will
// contain return value of the function.
if (!(reg.is(eax) && (mode == SAVE_RESULT_REGISTER))) {
__ Move(reg, Immediate(kDebugZapValue));
}
}
}
__ pop(ebx);
// We divide the stored value by 2 (untagging) and multiply it by the word size.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
__ lea(esp, Operand(esp, ebx, times_half_pointer_size, 0));
// Get rid of the internal frame.
}
// This call did not replace a call, so there will be an unwanted
// return address left on the stack. Here we get rid of that.
__ add(esp, Immediate(kPointerSize));
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
ExternalReference::debug_after_break_target_address(masm->isolate());
__ jmp(Operand::StaticVariable(after_break_target));
}
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// We do not know our frame height, but set esp based on ebp.
__ lea(esp, Operand(ebp, FrameDropperFrameConstants::kFunctionOffset));
__ pop(edi); // Function.
__ add(esp, Immediate(-FrameDropperFrameConstants::kCodeOffset)); // INTERNAL frame marker and code
__ pop(ebp);
ParameterCount dummy(0);
__ CheckDebugHook(edi, no_reg, dummy, dummy);
// Load context from the function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Clear new.target register as a safety measure.
__ mov(edx, masm->isolate()->factory()->undefined_value());
// Get function code.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
__ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
__ jmp(ebx);
}
const bool LiveEdit::kFrameDropperSupported = true;
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X87
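A standalone sketch of the smi arithmetic used in GenerateDebugBreakStub above (the 32-bit pointer size is assumed): a smi stores the integer shifted left by one, so scaling the tagged value by half the pointer size untags it and converts it to a byte offset in one step.

#include <cstdint>
#include <cstdio>

int main() {
  const int32_t kPointerSize = 4;            // ia32/x87: 32-bit pointers
  int32_t slots = 10;
  int32_t smi = slots << 1;                  // Smi::FromInt(10): tag bit is 0
  int32_t bytes = smi * (kPointerSize / 2);  // untag (/2) and scale (*4) at once
  printf("%d\n", bytes);                     // 40 bytes == 10 stack slots
}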

View File

@ -26,8 +26,6 @@
#include "src/mips64/frames-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/s390/frames-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/frames-x87.h" // NOLINT
#else
#error Unsupported target architecture.
#endif

View File

@ -45,7 +45,7 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
static const int kMaxBackEdgeWeight = 127;
// Platform-specific code size multiplier.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
static const int kCodeSizeMultiplier = 105;
#elif V8_TARGET_ARCH_X64
static const int kCodeSizeMultiplier = 165;

View File

@ -1,2 +0,0 @@
weiliang.lin@intel.com
chunyang.dai@intel.com

File diff suppressed because it is too large.

View File

@ -199,7 +199,7 @@ class DebugSectionBase : public ZoneObject {
struct MachOSectionHeader {
char sectname[16];
char segname[16];
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
uint32_t addr;
uint32_t size;
#else
@ -507,7 +507,7 @@ class MachO BASE_EMBEDDED {
uint32_t cmd;
uint32_t cmdsize;
char segname[16];
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
uint32_t vmaddr;
uint32_t vmsize;
uint32_t fileoff;
@ -533,7 +533,7 @@ class MachO BASE_EMBEDDED {
Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
DCHECK(w->position() == 0);
Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
header->magic = 0xFEEDFACEu;
header->cputype = 7; // i386
header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
@ -558,7 +558,7 @@ class MachO BASE_EMBEDDED {
uintptr_t code_size) {
Writer::Slot<MachOSegmentCommand> cmd =
w->CreateSlotHere<MachOSegmentCommand>();
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
cmd->cmd = LC_SEGMENT_32;
#else
cmd->cmd = LC_SEGMENT_64;
@ -646,7 +646,7 @@ class ELF BASE_EMBEDDED {
void WriteHeader(Writer* w) {
DCHECK(w->position() == 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@ -668,7 +668,7 @@ class ELF BASE_EMBEDDED {
#endif
memcpy(header->ident, ident, 16);
header->type = 1;
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
header->machine = 3;
#elif V8_TARGET_ARCH_X64
// Processor identification value for x64 is 62 as defined in
@ -783,8 +783,8 @@ class ELFSymbol BASE_EMBEDDED {
Binding binding() const {
return static_cast<Binding>(info >> 4);
}
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT) || \
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT) || \
(V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT))
struct SerializedLayout {
SerializedLayout(uint32_t name,
@ -1146,7 +1146,7 @@ class DebugInfoSection : public DebugSection {
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
uintptr_t fb_block_start = w->position();
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32
#elif V8_TARGET_ARCH_X64
w->Write<uint8_t>(DW_OP_reg6); // and here on x64.

View File

@ -167,7 +167,7 @@ const int kRegisterSize = kPointerSize;
const int kPCOnStackSize = kRegisterSize;
const int kFPOnStackSize = kRegisterSize;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
const int kElidedFrameSlots = kPCOnStackSize / kPointerSize;
#else
const int kElidedFrameSlots = 0;
@ -912,16 +912,10 @@ enum AllocationSiteMode {
};
// The mips architecture prior to revision 5 has inverted encoding for sNaN.
// The x87 FPU converts sNaN to qNaN automatically when loading an sNaN from
// memory. As a workaround for some test cases, use a MIPS sNaN that is an
// otherwise-unused qNaN in the x87 port.
#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6) && \
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
(V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) && \
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
(V8_TARGET_ARCH_X87)
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR)))
const uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
const uint32_t kHoleNanLower32 = 0xFFFF7FFF;
#else
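A standalone sketch of the sNaN/qNaN mechanics behind the comment and constants above: the quiet bit is mantissa bit 51 of an IEEE-754 double, and the x87 FPU sets it when loading a signaling NaN, so a hole marker has to be chosen to survive (or sidestep) that quieting.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

int main() {
  double qnan = std::numeric_limits<double>::quiet_NaN();
  uint64_t bits;
  std::memcpy(&bits, &qnan, sizeof bits);
  printf("quiet NaN:     %016llx\n", static_cast<unsigned long long>(bits));
  // Clearing bit 51 (while keeping the mantissa nonzero) yields a
  // signaling-NaN encoding, which the x87 would quiet on load.
  uint64_t snan = (bits & ~(1ULL << 51)) | 1ULL;
  printf("signaling NaN: %016llx\n", static_cast<unsigned long long>(snan));
}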

View File

@ -1,2 +0,0 @@
weiliang.lin@intel.com
chunyang.dai@intel.com

View File

@ -1,40 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_X87
#include "src/ic/access-compiler.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Handle<Code> code) {
__ jmp(code, RelocInfo::CODE_TARGET);
}
void PropertyAccessCompiler::InitializePlatformSpecific(
AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
// Load calling convention.
// receiver, name, scratch1, scratch2, scratch3.
Register load_registers[] = {receiver, name, ebx, eax, edi};
// Store calling convention.
// receiver, name, scratch1, scratch2.
Register store_registers[] = {receiver, name, ebx, edi};
data->Initialize(arraysize(load_registers), load_registers,
arraysize(store_registers), store_registers);
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X87

View File

@ -1,450 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_X87
#include "src/ic/handler-compiler.h"
#include "src/api-arguments.h"
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(
MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
// Restore context register.
__ pop(esi);
}
__ ret(0);
}
void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
LoadWithVectorDescriptor::kVector);
STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
StoreWithVectorDescriptor::kVector);
STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
StoreTransitionDescriptor::kVector);
__ push(slot);
__ push(vector);
}
void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
__ pop(vector);
__ pop(slot);
}
void PropertyHandlerCompiler::DiscardVectorAndSlot() {
MacroAssembler* masm = this->masm();
// Remove vector and slot.
__ add(esp, Immediate(2 * kPointerSize));
}
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
DCHECK(name->IsUniqueName());
DCHECK(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
__ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
// Bail out if the receiver has a named interceptor or requires access checks.
__ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
Immediate(kInterceptorOrAccessCheckNeededMask));
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
__ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE);
__ j(below, miss_label);
// Load properties array.
Register properties = scratch0;
__ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ cmp(FieldOperand(properties, HeapObject::kMapOffset),
Immediate(masm->isolate()->factory()->hash_table_map()));
__ j(not_equal, miss_label);
Label done;
NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
properties, name, scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
// Generate call to api function.
// This function uses push() to generate smaller, faster code than
// the version above. It is an optimization that should be removed
// when api call ICs are generated in hydrogen.
void PropertyHandlerCompiler::GenerateApiAccessorCall(
MacroAssembler* masm, const CallOptimization& optimization,
Handle<Map> receiver_map, Register receiver, Register scratch,
bool is_store, Register store_parameter, Register accessor_holder,
int accessor_index) {
DCHECK(!accessor_holder.is(scratch));
// Copy return value.
__ pop(scratch);
if (is_store) {
// Discard stack arguments.
__ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
kPointerSize));
}
// Write the receiver and arguments to stack frame.
__ push(receiver);
if (is_store) {
DCHECK(!AreAliased(receiver, scratch, store_parameter));
__ push(store_parameter);
}
__ push(scratch);
// Stack now matches JSFunction abi.
DCHECK(optimization.is_simple_api_call());
// Abi for CallApiCallbackStub.
Register callee = edi;
Register data = ebx;
Register holder = ecx;
Register api_function_address = edx;
scratch = no_reg;
// Put callee in place.
__ LoadAccessor(callee, accessor_holder, accessor_index,
is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
__ mov(holder, FieldOperand(receiver, HeapObject::kMapOffset));
__ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
break;
}
Isolate* isolate = masm->isolate();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
bool call_data_undefined = false;
// Put call data in place.
if (api_call_info->data()->IsUndefined(isolate)) {
call_data_undefined = true;
__ mov(data, Immediate(isolate->factory()->undefined_value()));
} else {
if (optimization.is_constant_call()) {
__ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
__ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
__ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
} else {
__ mov(data, FieldOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
}
__ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ mov(api_function_address, Immediate(function_address));
// Jump to stub.
CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
!optimization.is_constant_call());
__ TailCallStub(&stub);
}
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
global, name, PropertyCellType::kInvalidated);
Isolate* isolate = masm->isolate();
DCHECK(cell->value()->IsTheHole(isolate));
Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
Immediate(isolate->factory()->the_hole_value()));
__ j(not_equal, miss);
}
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- esp[12] : value
// -- esp[8] : slot
// -- esp[4] : vector
// -- esp[0] : return address
// -----------------------------------
__ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Save context register
__ push(esi);
// Save value register, so we can restore it later.
__ push(value());
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
if (map->IsJSGlobalObjectMap()) {
__ mov(scratch,
FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
receiver = scratch;
}
__ push(receiver);
__ push(value());
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
__ Set(eax, 1);
__ Call(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
}
// We have to return the passed value, not the return value of the setter.
__ pop(eax);
// Restore context register.
__ pop(esi);
}
if (accessor_index >= 0) {
__ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
} else {
// If we generate a global code snippet for deoptimization only, don't try
// to drop stack arguments for the StoreIC because they are not a part of
// expression stack and deoptimizer does not reconstruct them.
__ ret(0);
}
}
#undef __
#define __ ACCESS_MASM(masm())
void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ mov(this->name(), Immediate(name));
}
}
void PropertyHandlerCompiler::GenerateAccessCheck(
Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
Label* miss, bool compare_native_contexts_only) {
Label done;
// Load current native context.
__ mov(scratch1, NativeContextOperand());
// Load expected native context.
__ LoadWeakValue(scratch2, native_context_cell, miss);
__ cmp(scratch1, scratch2);
if (!compare_native_contexts_only) {
__ j(equal, &done);
// Compare security tokens of current and expected native contexts.
__ mov(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
__ mov(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
__ cmp(scratch1, scratch2);
}
__ j(not_equal, miss);
__ bind(&done);
}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
Register scratch2, Handle<Name> name, Label* miss) {
Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
!scratch2.is(scratch1));
Handle<Cell> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
if (!validity_cell.is_null()) {
DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
// Operand::ForCell(...) points to the cell's payload!
__ cmp(Operand::ForCell(validity_cell),
Immediate(Smi::FromInt(Map::kPrototypeChainValid)));
__ j(not_equal, miss);
}
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
Handle<WeakCell> weak_cell =
Map::GetOrCreatePrototypeWeakCell(current, isolate());
__ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
}
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
DCHECK(!current_map->IsJSGlobalProxyMap());
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
if (depth != 0) {
Handle<WeakCell> weak_cell =
Map::GetOrCreatePrototypeWeakCell(current, isolate());
__ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
return reg;
}
void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
Label success;
__ jmp(&success);
__ bind(miss);
if (IC::ShouldPushPopSlotAndVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
}
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
}
void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
Label success;
__ jmp(&success);
GenerateRestoreName(miss, name);
DCHECK(!IC::ShouldPushPopSlotAndVector(kind()));
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
}
void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
// Zap register aliases of the arguments passed on the stack to ensure they
// are properly loaded by the handler (debug-only).
STATIC_ASSERT(Descriptor::kPassLastArgsOnStack);
STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
__ mov(Descriptor::ValueRegister(), Immediate(kDebugZapValue));
__ mov(Descriptor::SlotRegister(), Immediate(kDebugZapValue));
__ mov(Descriptor::VectorRegister(), Immediate(kDebugZapValue));
}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
LanguageMode language_mode) {
Register holder_reg = Frontend(name);
__ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
__ pop(scratch1()); // remove the return address
// Discard stack arguments.
__ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
kPointerSize));
__ push(receiver());
__ push(holder_reg);
// If the callback cannot leak, then push the callback directly,
// otherwise wrap it in a weak cell.
if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
__ Push(callback);
} else {
Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
__ Push(cell);
}
__ Push(name);
__ push(value());
__ push(Immediate(Smi::FromInt(language_mode)));
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), name);
}
Register NamedStoreHandlerCompiler::value() {
return StoreDescriptor::ValueRegister();
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X87

View File

@ -1,84 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
case Token::EQ:
return equal;
case Token::LT:
return less;
case Token::GT:
return greater;
case Token::LTE:
return less_equal;
case Token::GTE:
return greater_equal;
default:
UNREACHABLE();
}
}
bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test al, nothing
// was inlined.
return *test_instruction_address == Assembler::kTestAlByte;
}
void PatchInlinedSmiCode(Isolate* isolate, Address address,
InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test al, nothing
// was inlined.
if (*test_instruction_address != Assembler::kTestAlByte) {
DCHECK(*test_instruction_address == Assembler::kNopByte);
return;
}
Address delta_address = test_instruction_address + 1;
// The delta to the start of the map check instruction and the
// condition code used at the patched jump.
uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
LOG(isolate, PatchIC(address, test_instruction_address, delta));
}
// Patch with a short conditional jump. Enabling means switching from a short
// jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
// reverse operation of that.
Address jmp_address = test_instruction_address - delta;
DCHECK((check == ENABLE_INLINED_SMI_CHECK)
? (*jmp_address == Assembler::kJncShortOpcode ||
*jmp_address == Assembler::kJcShortOpcode)
: (*jmp_address == Assembler::kJnzShortOpcode ||
*jmp_address == Assembler::kJzShortOpcode));
Condition cc =
(check == ENABLE_INLINED_SMI_CHECK)
? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
: (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X87
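A standalone sketch of the one-byte patch PatchInlinedSmiCode performs above: x86 short conditional jumps encode as (0x70 | condition) followed by an 8-bit displacement, so flipping between the carry- and zero-based conditions rewrites a single opcode byte in place.

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t code[] = {0x73, 0x10};   // jnc +16: inlined smi check disabled
  const uint8_t kJccShortPrefix = 0x70;
  const uint8_t not_zero = 0x05;   // x86 condition code for "not zero"
  code[0] = static_cast<uint8_t>(kJccShortPrefix | not_zero);
  printf("patched opcode: 0x%02x\n", code[0]);  // 0x75: now jnz +16
}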

View File

@ -392,7 +392,7 @@ class StoreDescriptor : public CallInterfaceDescriptor {
static const Register ValueRegister();
static const Register SlotRegister();
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
static const bool kPassLastArgsOnStack = true;
#else
static const bool kPassLastArgsOnStack = false;

View File

@ -1367,9 +1367,8 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_PPC
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
return true;
#else
#error "Unknown Architecture"

View File

@ -370,8 +370,6 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "ppc";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
#elif V8_TARGET_ARCH_X87
const char arch[] = "x87";
#elif V8_TARGET_ARCH_ARM64
const char arch[] = "arm64";
#elif V8_TARGET_ARCH_S390

View File

@ -52,8 +52,6 @@ enum AllocationFlags {
#elif V8_TARGET_ARCH_S390
#include "src/s390/constants-s390.h"
#include "src/s390/macro-assembler-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/macro-assembler-x87.h"
#else
#error Unsupported target architecture.
#endif

View File

@ -48,8 +48,6 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
#elif V8_TARGET_ARCH_X87
#include "src/regexp/x87/regexp-macro-assembler-x87.h"
#else
#error Unsupported target architecture.
#endif
@ -6762,9 +6760,6 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#elif V8_TARGET_ARCH_MIPS64
RegExpMacroAssemblerMIPS macro_assembler(isolate, zone, mode,
(data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_X87
RegExpMacroAssemblerX87 macro_assembler(isolate, zone, mode,
(data->capture_count + 1) * 2);
#else
#error "Unsupported architecture"
#endif

View File

@ -1,2 +0,0 @@
weiliang.lin@intel.com
chunyang.dai@intel.com

File diff suppressed because it is too large.

View File

@ -1,204 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
#define V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
#include "src/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/x87/assembler-x87.h"
namespace v8 {
namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerX87(Isolate* isolate, Zone* zone, Mode mode,
int registers_to_save);
virtual ~RegExpMacroAssemblerX87();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
virtual void CheckCharacter(uint32_t c, Label* on_equal);
virtual void CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_not_equal);
virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
uc16 minus,
uc16 mask,
Label* on_not_equal);
virtual void CheckCharacterInRange(uc16 from,
uc16 to,
Label* on_in_range);
virtual void CheckCharacterNotInRange(uc16 from,
uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
virtual void Fail();
virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
virtual void LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds = true,
int characters = 1);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
virtual void PushCurrentPosition();
virtual void PushRegister(int register_index,
StackCheckFlag check_stack_limit);
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
static int CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame);
private:
// Offsets from ebp of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - function parameters and return address.
static const int kReturn_eip = kFramePointer + kPointerSize;
static const int kFrameAlign = kReturn_eip + kPointerSize;
// Parameters.
static const int kInputString = kFrameAlign;
static const int kStartIndex = kInputString + kPointerSize;
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kBackup_esi = kFramePointer - kPointerSize;
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
// Check whether preemption has been requested.
void CheckPreemption();
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
// The ebp-relative location of a regexp register.
Operand register_location(int register_index);
// The register containing the current character after LoadCurrentCharacter.
inline Register current_character() { return edx; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
inline Register backtrack_stackpointer() { return ecx; }
// Byte size of chars in the string to match (decided by the Mode argument)
inline int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is NULL, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to);
// Call and return internally in the generated code in a way that
// is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
inline void SafeCall(Label* to);
inline void SafeReturn();
inline void SafeCallTarget(Label* name);
// Pushes the value of a register on the backtrack stack. Decrements the
// stack pointer (ecx) by a word size and stores the register's value there.
inline void Push(Register source);
// Pushes a value on the backtrack stack. Decrements the stack pointer (ecx)
// by a word size and stores the value there.
inline void Push(Immediate value);
// Pops a value from the backtrack stack. Reads the word at the stack pointer
// (ecx) and increments it by a word size.
inline void Pop(Register target);
Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
// Which mode to generate code for (LATIN1 or UC16).
Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
int num_saved_registers_;
// Labels used internally.
Label entry_label_;
Label start_label_;
Label success_label_;
Label backtrack_label_;
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
};
#endif // V8_INTERPRETED_REGEXP
} // namespace internal
} // namespace v8
#endif // V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_

View File

@ -74,9 +74,6 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
#if V8_TARGET_ARCH_IA32
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X87
kMaxAllocatableGeneralRegisterCount,
compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X64
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,

View File

@ -28,8 +28,7 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
static const int kMaxFPRegisters = 32;
// Default RegisterConfigurations for the target architecture.
// TODO(X87): This distinction in RegisterConfigurations is temporary
// until x87 TF supports all of the registers that Crankshaft does.
// TODO(mstarzinger): Crankshaft is gone.
static const RegisterConfiguration* Crankshaft();
static const RegisterConfiguration* Turbofan();

View File

@ -21,8 +21,6 @@
#include "src/mips64/simulator-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/simulator-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/simulator-x87.h"
#else
#error Unsupported target architecture.
#endif

View File

@ -154,8 +154,7 @@ static void ReadDiyFp(Vector<const char> buffer,
static bool DoubleStrtod(Vector<const char> trimmed,
int exponent,
double* result) {
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 || defined(USE_SIMULATOR)) && \
!defined(_MSC_VER)
#if (V8_TARGET_ARCH_IA32 || defined(USE_SIMULATOR)) && !defined(_MSC_VER)
// On x86 the floating-point stack can be 64 or 80 bits wide. If it is
// 80 bits wide (as is the case on Linux) then double-rounding occurs and the
// result is not accurate.
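A standalone sketch of the double-rounding hazard the comment above describes: computing in 80-bit extended precision and then storing to double rounds twice, which can disagree in the last ulp with a single rounding straight to double. Whether the two paths actually differ depends on the operands and the host FPU; the sketch only makes the two paths explicit.

#include <cstdio>

int main() {
  long double wide = 4.35L / 100.0L;         // rounded once, to extended precision
  double twice = static_cast<double>(wide);  // rounded a second time, to double
  double once = 4.35 / 100.0;                // rounded once, straight to double
  printf("%.17g\n%.17g\n", twice, once);     // may differ in the last bit
}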

View File

@ -356,8 +356,7 @@ void StringBuilder::AddFormattedList(const char* format, va_list list) {
}
}
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
}
@ -411,7 +410,7 @@ static bool g_memcopy_functions_initialized = false;
void init_memcopy_functions(Isolate* isolate) {
if (g_memcopy_functions_initialized) return;
g_memcopy_functions_initialized = true;
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
MemMoveFunction generated_memmove = CreateMemMoveFunction(isolate);
if (generated_memmove != NULL) {
memmove_function = generated_memmove;

View File

@ -431,7 +431,7 @@ inline uint32_t ComputePointerHash(void* ptr) {
// Initializes the codegen support that depends on CPU features.
void init_memcopy_functions(Isolate* isolate);
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
#if defined(V8_TARGET_ARCH_IA32)
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
const int kMinComplexMemCopy = 64;
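A standalone sketch of the dispatch idea behind kMinComplexMemCopy (FastCopy is a hypothetical stand-in for the generated routine, not V8's): below the threshold, plain memcpy's lower fixed overhead wins.

#include <cstddef>
#include <cstring>

const int kMinComplexMemCopy = 64;  // value from the header above

void FastCopy(void* dst, const void* src, size_t n) {
  std::memcpy(dst, src, n);  // placeholder for an optimized, generated copy
}

void MemCopy(void* dst, const void* src, size_t n) {
  if (n < static_cast<size_t>(kMinComplexMemCopy)) {
    std::memcpy(dst, src, n);  // small copy: skip the dispatch overhead
  } else {
    FastCopy(dst, src, n);
  }
}

int main() {
  char src[8] = "1234567", dst[8];
  MemCopy(dst, src, sizeof src);
  return dst[0] == '1' ? 0 : 1;
}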

View File

@ -279,11 +279,6 @@
'builtins/s390/builtins-s390.cc',
],
}],
['v8_target_arch=="x87"', {
'sources': [ ### gcmole(arch:x87) ###
'builtins/x87/builtins-x87.cc',
],
}],
['v8_enable_i18n_support==0', {
'sources!': [
'builtins/builtins-intl-gen.cc',
@ -1592,38 +1587,6 @@
'regexp/ia32/regexp-macro-assembler-ia32.h',
],
}],
['v8_target_arch=="x87"', {
'sources': [ ### gcmole(arch:x87) ###
'x87/assembler-x87-inl.h',
'x87/assembler-x87.cc',
'x87/assembler-x87.h',
'x87/code-stubs-x87.cc',
'x87/code-stubs-x87.h',
'x87/codegen-x87.cc',
'x87/codegen-x87.h',
'x87/cpu-x87.cc',
'x87/deoptimizer-x87.cc',
'x87/disasm-x87.cc',
'x87/frames-x87.cc',
'x87/frames-x87.h',
'x87/interface-descriptors-x87.cc',
'x87/macro-assembler-x87.cc',
'x87/macro-assembler-x87.h',
'x87/simulator-x87.cc',
'x87/simulator-x87.h',
'compiler/x87/code-generator-x87.cc',
'compiler/x87/instruction-codes-x87.h',
'compiler/x87/instruction-scheduler-x87.cc',
'compiler/x87/instruction-selector-x87.cc',
'debug/x87/debug-x87.cc',
'full-codegen/x87/full-codegen-x87.cc',
'ic/x87/access-compiler-x87.cc',
'ic/x87/handler-compiler-x87.cc',
'ic/x87/ic-x87.cc',
'regexp/x87/regexp-macro-assembler-x87.cc',
'regexp/x87/regexp-macro-assembler-x87.h',
],
}],
['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
'sources': [ ### gcmole(arch:mipsel) ###
'mips/assembler-mips.cc',

View File

@ -1,2 +0,0 @@
weiliang.lin@intel.com
chunyang.dai@intel.com

View File

@ -1,546 +0,0 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
// A light-weight IA32 Assembler.
#ifndef V8_X87_ASSEMBLER_X87_INL_H_
#define V8_X87_ASSEMBLER_X87_INL_H_
#include "src/x87/assembler-x87.h"
#include "src/assembler.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
static const byte kCallOpcode = 0xE8;
static const int kNoCodeAgeSequenceLength = 5;
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
} else if (IsCodeAgeSequence(rmode_)) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
}
} else if (IsDebugBreakSlot(rmode_) && IsPatchedDebugBreakSlotSequence()) {
// Special handling of a debug break slot when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(
pc_ + Assembler::kPatchDebugBreakSlotAddressOffset);
*p -= delta; // Relocate entry.
} else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // Relocate entry.
}
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(pc_);
}
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return HeapObject::cast(Memory::Object_at(pc_));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<HeapObject>::cast(Memory::Object_Handle_at(pc_));
}
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(target->GetIsolate(), pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
}
}
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
return Memory::Address_at(pc_);
}
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return Memory::Address_at(pc_);
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return reinterpret_cast<Address>(pc_);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
}
void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target) {
set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
}
}
Handle<Cell> RelocInfo::target_cell_handle() {
DCHECK(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
Cell* RelocInfo::target_cell() {
DCHECK(rmode_ == RelocInfo::CELL);
return Cell::FromValueAddress(Memory::Address_at(pc_));
}
void RelocInfo::set_target_cell(Cell* cell,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(cell->IsCell());
DCHECK(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(cell->GetIsolate(), pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
cell);
}
}
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
DCHECK(*pc_ == kCallOpcode);
return Handle<Code>::cast(Memory::Object_Handle_at(pc_ + 1));
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
DCHECK(*pc_ == kCallOpcode);
return Code::GetCodeFromTargetAddress(
Assembler::target_address_at(pc_ + 1, host_));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(*pc_ == kCallOpcode);
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(stub->GetIsolate(), pc_ + 1, host_,
stub->instruction_start(),
icache_flush_mode);
}
Address RelocInfo::debug_call_address() {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
return Assembler::target_address_at(location, host_);
}
void RelocInfo::set_debug_call_address(Isolate* isolate, Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
Assembler::set_target_address_at(isolate, location, host_, target);
if (host() != NULL) {
Code* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target_code);
}
}
void RelocInfo::WipeOut(Isolate* isolate) {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
Assembler::set_target_address_at(isolate, pc_, host_,
pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
}
template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
Assembler::FlushICache(isolate, pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(host(), this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
}
}
template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
Assembler::FlushICache(heap->isolate(), pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
Immediate::Immediate(int x) {
x_ = x;
rmode_ = RelocInfo::NONE32;
}
Immediate::Immediate(Address x, RelocInfo::Mode rmode) {
x_ = reinterpret_cast<int32_t>(x);
rmode_ = rmode;
}
Immediate::Immediate(const ExternalReference& ext) {
x_ = reinterpret_cast<int32_t>(ext.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Immediate::Immediate(Label* internal_offset) {
x_ = reinterpret_cast<int32_t>(internal_offset);
rmode_ = RelocInfo::INTERNAL_REFERENCE;
}
Immediate::Immediate(Handle<Object> handle) {
AllowDeferredHandleDereference using_raw_address;
// Verify all Objects referred to by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
x_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
// no relocation needed
x_ = reinterpret_cast<intptr_t>(obj);
rmode_ = RelocInfo::NONE32;
}
}
Immediate::Immediate(Smi* value) {
x_ = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE32;
}
Immediate::Immediate(Address addr) {
x_ = reinterpret_cast<int32_t>(addr);
rmode_ = RelocInfo::NONE32;
}
void Assembler::emit(uint32_t x) {
*reinterpret_cast<uint32_t*>(pc_) = x;
pc_ += sizeof(uint32_t);
}
void Assembler::emit_q(uint64_t x) {
*reinterpret_cast<uint64_t*>(pc_) = x;
pc_ += sizeof(uint64_t);
}
void Assembler::emit(Handle<Object> handle) {
AllowDeferredHandleDereference heap_object_check;
// Verify all Objects referred to by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
emit(reinterpret_cast<intptr_t>(handle.location()),
RelocInfo::EMBEDDED_OBJECT);
} else {
// no relocation needed
emit(reinterpret_cast<intptr_t>(obj));
}
}
void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
} else if (!RelocInfo::IsNone(rmode)
&& rmode != RelocInfo::CODE_AGE_SEQUENCE) {
RecordRelocInfo(rmode);
}
emit(x);
}
void Assembler::emit(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId id) {
AllowDeferredHandleDereference embedding_raw_address;
emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
}
void Assembler::emit(const Immediate& x) {
if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
Label* label = reinterpret_cast<Label*>(x.x_);
emit_code_relative_offset(label);
return;
}
if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
emit(x.x_);
}
void Assembler::emit_code_relative_offset(Label* label) {
if (label->is_bound()) {
int32_t pos;
pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
emit(pos);
} else {
emit_disp(label, Displacement::CODE_RELATIVE);
}
}
void Assembler::emit_b(Immediate x) {
DCHECK(x.is_int8() || x.is_uint8());
uint8_t value = static_cast<uint8_t>(x.x_);
*pc_++ = value;
}
void Assembler::emit_w(const Immediate& x) {
DCHECK(RelocInfo::IsNone(x.rmode_));
uint16_t value = static_cast<uint16_t>(x.x_);
reinterpret_cast<uint16_t*>(pc_)[0] = value;
pc_ += sizeof(uint16_t);
}
Address Assembler::target_address_at(Address pc, Address constant_pool) {
return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, p, sizeof(int32_t));
}
}
Address Assembler::target_address_at(Address pc, Code* code) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
Displacement Assembler::disp_at(Label* L) {
return Displacement(long_at(L->pos()));
}
void Assembler::disp_at_put(Label* L, Displacement disp) {
long_at_put(L->pos(), disp.data());
}
void Assembler::emit_disp(Label* L, Displacement::Type type) {
Displacement disp(L, type);
L->link_to(pc_offset());
emit(static_cast<int>(disp.data()));
}
void Assembler::emit_near_disp(Label* L) {
byte disp = 0x00;
if (L->is_near_linked()) {
int offset = L->near_link_pos() - pc_offset();
DCHECK(is_int8(offset));
disp = static_cast<byte>(offset & 0xFF);
}
L->link_to(pc_offset(), Label::kNear);
*pc_++ = disp;
}
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
void Operand::set_modrm(int mod, Register rm) {
DCHECK((mod & -4) == 0);
buf_[0] = mod << 6 | rm.code();
len_ = 1;
}
void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
DCHECK(len_ == 1);
DCHECK((scale & -4) == 0);
// Use SIB with no index register only for base esp.
DCHECK(!index.is(esp) || base.is(esp));
buf_[1] = scale << 6 | index.code() << 3 | base.code();
len_ = 2;
}
void Operand::set_disp8(int8_t disp) {
DCHECK(len_ == 1 || len_ == 2);
*reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
}
void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
DCHECK(len_ == 1 || len_ == 2);
int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
*p = disp;
len_ += sizeof(int32_t);
rmode_ = rmode;
}
Operand::Operand(Register reg) {
// reg
set_modrm(3, reg);
}
Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
// [disp/r]
set_modrm(0, ebp);
set_dispr(disp, rmode);
}
Operand::Operand(Immediate imm) {
// [disp/r]
set_modrm(0, ebp);
set_dispr(imm.x_, imm.rmode_);
}
} // namespace internal
} // namespace v8
#endif // V8_X87_ASSEMBLER_X87_INL_H_
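
The target_address_at / set_target_address_at pair above is the standard IA-32 rel32 encoding: the stored 32-bit displacement is relative to the first byte after the displacement field. The same arithmetic as a self-contained sketch (Rel32Target and SetRel32Target are hypothetical helper names):

#include <cstdint>
#include <cstring>

uint8_t* Rel32Target(uint8_t* pc) {
  int32_t disp;
  memcpy(&disp, pc, sizeof(disp));  // displacement stored at pc
  return pc + sizeof(int32_t) + disp;
}

void SetRel32Target(uint8_t* pc, uint8_t* target) {
  int32_t disp = static_cast<int32_t>(target - (pc + sizeof(int32_t)));
  memcpy(pc, &disp, sizeof(disp));
  // A real patcher would also flush the instruction cache when required.
}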

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,351 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_X87_CODE_STUBS_X87_H_
#define V8_X87_CODE_STUBS_X87_H_
namespace v8 {
namespace internal {
void ArrayNativeCode(MacroAssembler* masm,
bool construct_call,
Label* call_generic_code);
class StringHelper : public AllStatic {
public:
// Compares two flat one byte strings and returns result in eax.
static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
Register left, Register right,
Register scratch1,
Register scratch2,
Register scratch3);
// Compares two flat one byte strings for equality and returns result in eax.
static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left, Register right,
Register scratch1,
Register scratch2);
private:
static void GenerateOneByteCharsCompareLoop(
MacroAssembler* masm, Register left, Register right, Register length,
Register scratch, Label* chars_not_equal,
Label::Distance chars_not_equal_near = Label::kFar);
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, Register dictionary,
Register result, Register index, LookupMode mode)
: PlatformCodeStub(isolate) {
minor_key_ = DictionaryBits::encode(dictionary.code()) |
ResultBits::encode(result.code()) |
IndexBits::encode(index.code()) | LookupModeBits::encode(mode);
}
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register properties,
Handle<Name> name,
Register r0);
bool SometimesSetsUpAFrame() override { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
static const int kCapacityOffset =
NameDictionary::kHeaderSize +
NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
Register dictionary() const {
return Register::from_code(DictionaryBits::decode(minor_key_));
}
Register result() const {
return Register::from_code(ResultBits::decode(minor_key_));
}
Register index() const {
return Register::from_code(IndexBits::decode(minor_key_));
}
LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
class DictionaryBits: public BitField<int, 0, 3> {};
class ResultBits: public BitField<int, 3, 3> {};
class IndexBits: public BitField<int, 6, 3> {};
class LookupModeBits: public BitField<LookupMode, 9, 1> {};
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Isolate* isolate, Register object, Register value,
Register address, RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
minor_key_ = ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
RememberedSetActionBits::encode(remembered_set_action) |
SaveFPRegsModeBits::encode(fp_mode);
}
RecordWriteStub(uint32_t key, Isolate* isolate)
: PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
bool SometimesSetsUpAFrame() override { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
static Mode GetMode(Code* stub) {
byte first_instruction = stub->instruction_start()[0];
byte second_instruction = stub->instruction_start()[2];
if (first_instruction == kTwoByteJumpInstruction) {
return INCREMENTAL;
}
DCHECK(first_instruction == kTwoByteNopInstruction);
if (second_instruction == kFiveByteJumpInstruction) {
return INCREMENTAL_COMPACTION;
}
DCHECK(second_instruction == kFiveByteNopInstruction);
return STORE_BUFFER_ONLY;
}
static void Patch(Code* stub, Mode mode) {
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
stub->instruction_start()[0] = kTwoByteNopInstruction;
stub->instruction_start()[2] = kFiveByteNopInstruction;
break;
case INCREMENTAL:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
stub->instruction_start()[0] = kTwoByteJumpInstruction;
break;
case INCREMENTAL_COMPACTION:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
stub->instruction_start()[0] = kTwoByteNopInstruction;
stub->instruction_start()[2] = kFiveByteJumpInstruction;
break;
}
DCHECK(GetMode(stub) == mode);
Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), 7);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
private:
// This is a helper class for freeing up 3 scratch registers, where the third
// is always ecx (needed for shift operations). The input is two registers
// that must be preserved and one scratch register provided by the caller.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch0)
: object_orig_(object),
address_orig_(address),
scratch0_orig_(scratch0),
object_(object),
address_(address),
scratch0_(scratch0) {
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
if (scratch0.is(ecx)) {
scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
}
if (object.is(ecx)) {
object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
}
if (address.is(ecx)) {
address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
}
DCHECK(!AreAliased(scratch0_, object_, address_, ecx));
}
void Save(MacroAssembler* masm) {
DCHECK(!address_orig_.is(object_));
DCHECK(object_.is(object_orig_) || address_.is(address_orig_));
DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
DCHECK(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
DCHECK(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
// We don't have to save scratch0_orig_ because it was given to us as
// a scratch register. But if we had to switch to a different reg then
// we should save the new scratch0_.
if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
if (!ecx.is(scratch0_orig_) &&
!ecx.is(object_orig_) &&
!ecx.is(address_orig_)) {
masm->push(ecx);
}
masm->push(scratch1_);
if (!address_.is(address_orig_)) {
masm->push(address_);
masm->mov(address_, address_orig_);
}
if (!object_.is(object_orig_)) {
masm->push(object_);
masm->mov(object_, object_orig_);
}
}
void Restore(MacroAssembler* masm) {
// These will have been preserved the entire time, so we just need to move
// them back. Only in one case is the orig_ reg different from the plain
// one, since only one of them can alias with ecx.
if (!object_.is(object_orig_)) {
masm->mov(object_orig_, object_);
masm->pop(object_);
}
if (!address_.is(address_orig_)) {
masm->mov(address_orig_, address_);
masm->pop(address_);
}
masm->pop(scratch1_);
if (!ecx.is(scratch0_orig_) &&
!ecx.is(object_orig_) &&
!ecx.is(address_orig_)) {
masm->pop(ecx);
}
if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved. The caller saved
// registers are eax, ecx and edx. The three scratch registers (incl. ecx)
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->PushCallerSaved(mode, ecx, scratch0_, scratch1_);
}
inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) {
masm->PopCallerSaved(mode, ecx, scratch0_, scratch1_);
}
inline Register object() { return object_; }
inline Register address() { return address_; }
inline Register scratch0() { return scratch0_; }
inline Register scratch1() { return scratch1_; }
private:
Register object_orig_;
Register address_orig_;
Register scratch0_orig_;
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
// Third scratch register is always ecx.
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
for (int i = 0; i < Register::kNumRegisters; i++) {
if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(i)) {
Register candidate = Register::from_code(i);
if (candidate.is(ecx)) continue;
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
if (candidate.is(r3)) continue;
return candidate;
}
}
UNREACHABLE();
}
friend class RecordWriteStub;
};
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
inline Major MajorKey() const final { return RecordWrite; }
void Generate(MacroAssembler* masm) override;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) override {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
}
Register value() const {
return Register::from_code(ValueBits::decode(minor_key_));
}
Register address() const {
return Register::from_code(AddressBits::decode(minor_key_));
}
RememberedSetAction remembered_set_action() const {
return RememberedSetActionBits::decode(minor_key_);
}
SaveFPRegsMode save_fp_regs_mode() const {
return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 3> {};
class ValueBits: public BitField<int, 3, 3> {};
class AddressBits: public BitField<int, 6, 3> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 10, 1> {};
RegisterAllocation regs_;
DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
} // namespace internal
} // namespace v8
#endif // V8_X87_CODE_STUBS_X87_H_
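
GetMode and Patch above treat the stub's leading bytes as a three-state flag that is flipped in place. A host-side model of that protocol, assuming a plain byte buffer instead of a Code object and assert in place of the stub's DCHECKs:

#include <cassert>
#include <cstdint>

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

const uint8_t kTwoByteNop = 0x3c;    // cmpb al, #imm8
const uint8_t kTwoByteJump = 0xeb;   // jmp #imm8
const uint8_t kFiveByteNop = 0x3d;   // cmpl eax, #imm32
const uint8_t kFiveByteJump = 0xe9;  // jmp #imm32

Mode GetMode(const uint8_t* code) {
  if (code[0] == kTwoByteJump) return INCREMENTAL;
  assert(code[0] == kTwoByteNop);
  return code[2] == kFiveByteJump ? INCREMENTAL_COMPACTION
                                  : STORE_BUFFER_ONLY;
}

void Patch(uint8_t* code, Mode mode) {
  switch (mode) {
    case STORE_BUFFER_ONLY:
      code[0] = kTwoByteNop;
      code[2] = kFiveByteNop;
      break;
    case INCREMENTAL:
      code[0] = kTwoByteJump;
      break;
    case INCREMENTAL_COMPACTION:
      code[0] = kTwoByteNop;
      code[2] = kFiveByteJump;
      break;
  }
}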


@ -1,381 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/x87/codegen-x87.h"
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
#include "src/heap/heap.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
DCHECK(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
DCHECK(masm->has_frame());
masm->set_has_frame(false);
}
#define __ masm.
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
// Load double input into registers.
__ fld_d(MemOperand(esp, 4));
__ X87SetFPUCW(0x027F);
__ fsqrt();
__ X87SetFPUCW(0x037F);
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)
enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
__ pop(esi);
__ pop(edi);
__ ret(0);
}
#undef __
#define __ masm.
class LabelConverter {
public:
explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
int32_t address(Label* l) const {
return reinterpret_cast<int32_t>(buffer_) + l->pos();
}
private:
byte* buffer_;
};
MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
LabelConverter conv(buffer);
// Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
// 32-bit C declaration function calls pass arguments on stack.
// Stack layout:
// esp[12]: Third argument, size.
// esp[8]: Second argument, source pointer.
// esp[4]: First argument, destination pointer.
// esp[0]: return address
const int kDestinationOffset = 1 * kPointerSize;
const int kSourceOffset = 2 * kPointerSize;
const int kSizeOffset = 3 * kPointerSize;
int stack_offset = 0; // Update if we change the stack height.
Label backward, backward_much_overlap;
Label forward_much_overlap, small_size, medium_size, pop_and_return;
__ push(edi);
__ push(esi);
stack_offset += 2 * kPointerSize;
Register dst = edi;
Register src = esi;
Register count = ecx;
__ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
__ mov(src, Operand(esp, stack_offset + kSourceOffset));
__ mov(count, Operand(esp, stack_offset + kSizeOffset));
__ cmp(dst, src);
__ j(equal, &pop_and_return);
// No SSE2.
Label forward;
__ cmp(count, 0);
__ j(equal, &pop_and_return);
__ cmp(dst, src);
__ j(above, &backward);
__ jmp(&forward);
{
// Simple forward copier.
Label forward_loop_1byte, forward_loop_4byte;
__ bind(&forward_loop_4byte);
__ mov(eax, Operand(src, 0));
__ sub(count, Immediate(4));
__ add(src, Immediate(4));
__ mov(Operand(dst, 0), eax);
__ add(dst, Immediate(4));
__ bind(&forward); // Entry point.
__ cmp(count, 3);
__ j(above, &forward_loop_4byte);
__ bind(&forward_loop_1byte);
__ cmp(count, 0);
__ j(below_equal, &pop_and_return);
__ mov_b(eax, Operand(src, 0));
__ dec(count);
__ inc(src);
__ mov_b(Operand(dst, 0), eax);
__ inc(dst);
__ jmp(&forward_loop_1byte);
}
{
// Simple backward copier.
Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
__ bind(&backward);
__ add(src, count);
__ add(dst, count);
__ cmp(count, 3);
__ j(below_equal, &entry_shortcut);
__ bind(&backward_loop_4byte);
__ sub(src, Immediate(4));
__ sub(count, Immediate(4));
__ mov(eax, Operand(src, 0));
__ sub(dst, Immediate(4));
__ mov(Operand(dst, 0), eax);
__ cmp(count, 3);
__ j(above, &backward_loop_4byte);
__ bind(&backward_loop_1byte);
__ cmp(count, 0);
__ j(below_equal, &pop_and_return);
__ bind(&entry_shortcut);
__ dec(src);
__ dec(count);
__ mov_b(eax, Operand(src, 0));
__ dec(dst);
__ mov_b(Operand(dst, 0), eax);
__ jmp(&backward_loop_1byte);
}
__ bind(&pop_and_return);
MemMoveEmitPopAndReturn(&masm);
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);
}
#undef __
// -------------------------------------------------------------------------
// Code generators
#define __ ACCESS_MASM(masm)
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Factory* factory,
Register string,
Register index,
Register result,
Label* call_runtime) {
Label indirect_string_loaded;
__ bind(&indirect_string_loaded);
// Fetch the instance type of the receiver into result register.
__ mov(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
__ test(result, Immediate(kIsIndirectStringMask));
__ j(zero, &check_sequential, Label::kNear);
// Dispatch on the indirect string shape: slice or cons.
Label cons_string, thin_string;
__ and_(result, Immediate(kStringRepresentationMask));
__ cmp(result, Immediate(kConsStringTag));
__ j(equal, &cons_string, Label::kNear);
__ cmp(result, Immediate(kThinStringTag));
__ j(equal, &thin_string, Label::kNear);
// Handle slices.
__ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
__ SmiUntag(result);
__ add(index, result);
__ mov(string, FieldOperand(string, SlicedString::kParentOffset));
__ jmp(&indirect_string_loaded);
// Handle thin strings.
__ bind(&thin_string);
__ mov(string, FieldOperand(string, ThinString::kActualOffset));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ bind(&cons_string);
__ cmp(FieldOperand(string, ConsString::kSecondOffset),
Immediate(factory->empty_string()));
__ j(not_equal, call_runtime);
__ mov(string, FieldOperand(string, ConsString::kFirstOffset));
__ jmp(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
// reduced to the underlying sequential or external string).
Label seq_string;
__ bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ test(result, Immediate(kStringRepresentationMask));
__ j(zero, &seq_string, Label::kNear);
// Handle external strings.
Label one_byte_external, done;
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ test(result, Immediate(kIsIndirectStringMask));
__ Assert(zero, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_ASSERT(kShortExternalStringTag != 0);
__ test_b(result, Immediate(kShortExternalStringMask));
__ j(not_zero, call_runtime);
// Check encoding.
STATIC_ASSERT(kTwoByteStringTag == 0);
__ test_b(result, Immediate(kStringEncodingMask));
__ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
__ j(not_equal, &one_byte_external, Label::kNear);
// Two-byte string.
__ movzx_w(result, Operand(result, index, times_2, 0));
__ jmp(&done, Label::kNear);
__ bind(&one_byte_external);
// One-byte string.
__ movzx_b(result, Operand(result, index, times_1, 0));
__ jmp(&done, Label::kNear);
// Dispatch on the encoding: one-byte or two-byte.
Label one_byte;
__ bind(&seq_string);
STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(result, Immediate(kStringEncodingMask));
__ j(not_zero, &one_byte, Label::kNear);
// Two-byte string.
// Load the two-byte character code into the result register.
__ movzx_w(result, FieldOperand(string,
index,
times_2,
SeqTwoByteString::kHeaderSize));
__ jmp(&done, Label::kNear);
// One-byte string.
// Load the byte into the result register.
__ bind(&one_byte);
__ movzx_b(result, FieldOperand(string,
index,
times_1,
SeqOneByteString::kHeaderSize));
__ bind(&done);
}
#undef __
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
CodePatcher patcher(isolate, young_sequence_.start(),
young_sequence_.length());
patcher.masm()->push(ebp);
patcher.masm()->mov(ebp, esp);
patcher.masm()->push(esi);
patcher.masm()->push(edi);
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return *candidate == kCallOpcode;
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
sequence++; // Skip the kCallOpcode byte
Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
Assembler::kCallTargetAddressOffset;
Code* stub = GetCodeFromTargetAddress(target_address);
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X87
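
The generated copier above picks a copy direction from the dst/src comparison so that overlapping ranges are handled correctly. The same control flow in plain C++ (a sketch of the logic, not the generated code):

#include <cstddef>
#include <cstdint>

void SimpleMemMove(void* dest, const void* src, size_t size) {
  uint8_t* d = static_cast<uint8_t*>(dest);
  const uint8_t* s = static_cast<const uint8_t*>(src);
  if (d == s || size == 0) return;  // cmp dst, src; j(equal, &pop_and_return)
  if (d < s) {
    for (size_t i = 0; i < size; i++) d[i] = s[i];  // forward copier
  } else {
    for (size_t i = size; i-- > 0;) d[i] = s[i];    // backward copier
  }
}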


@ -1,33 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_X87_CODEGEN_X87_H_
#define V8_X87_CODEGEN_X87_H_
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
// indexed character into |result|. We expect |index| as untagged input and
// |result| as untagged output.
static void Generate(MacroAssembler* masm,
Factory* factory,
Register string,
Register index,
Register result,
Label* call_runtime);
private:
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
} // namespace internal
} // namespace v8
#endif // V8_X87_CODEGEN_X87_H_


@ -1,43 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// CPU specific code for ia32 independent of OS goes here.
#ifdef __GNUC__
#include "src/third_party/valgrind/valgrind.h"
#endif
#if V8_TARGET_ARCH_X87
#include "src/assembler.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
// No need to flush the instruction cache on Intel. On Intel, instruction
// cache flushing is only necessary when multiple cores are running the same
// code simultaneously. V8 (and JavaScript) is single-threaded, and when code
// is patched on an Intel CPU the core performing the patching will have its
// own instruction cache updated automatically.
// If flushing of the instruction cache becomes necessary, Windows has the
// API function FlushInstructionCache.
// By default, valgrind only checks the stack for writes that might need to
// invalidate already cached translated code. This leads to random
// instability when code patches or moves are sometimes unnoticed. One
// solution is to run valgrind with --smc-check=all, but this comes at a big
// performance cost. We can notify valgrind to invalidate its cache.
#ifdef VALGRIND_DISCARD_TRANSLATIONS
unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
USE(res);
#endif
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X87


@ -1,412 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
#include "src/x87/frames-x87.h"
namespace v8 {
namespace internal {
const int Deoptimizer::table_entry_size_ = 10;
int Deoptimizer::patch_size() {
return Assembler::kCallInstructionLength;
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
Isolate* isolate = code->GetIsolate();
HandleScope scope(isolate);
// Compute the size of relocation information needed for the code
// patching in Deoptimizer::PatchCodeForDeoptimization below.
int min_reloc_size = 0;
int prev_pc_offset = 0;
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
int pc_offset = deopt_data->Pc(i)->value();
if (pc_offset == -1) continue;
pc_offset = pc_offset + 1; // We will encode the pc offset after the call.
DCHECK_GE(pc_offset, prev_pc_offset);
int pc_delta = pc_offset - prev_pc_offset;
// We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
// if encodable with small pc delta encoding and up to 6 bytes
// otherwise.
if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
min_reloc_size += 2;
} else {
min_reloc_size += 6;
}
prev_pc_offset = pc_offset;
}
// If the relocation information is not big enough, we create a new
// relocation info object that is padded with comments to make it
// big enough for lazy deoptimization.
int reloc_length = code->relocation_info()->length();
if (min_reloc_size > reloc_length) {
int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
// Padding needed.
int min_padding = min_reloc_size - reloc_length;
// Number of comments needed to take up at least that much space.
int additional_comments =
(min_padding + comment_reloc_size - 1) / comment_reloc_size;
// Actual padding size.
int padding = additional_comments * comment_reloc_size;
// Allocate new relocation info and copy old relocation to the end
// of the new relocation info array because relocation info is
// written and read backwards.
Factory* factory = isolate->factory();
Handle<ByteArray> new_reloc =
factory->NewByteArray(reloc_length + padding, TENURED);
MemCopy(new_reloc->GetDataStartAddress() + padding,
code->relocation_info()->GetDataStartAddress(), reloc_length);
// Create a relocation writer to write the comments in the padding
// space. Use position 0 for everything to ensure short encoding.
RelocInfoWriter reloc_info_writer(
new_reloc->GetDataStartAddress() + padding, 0);
intptr_t comment_string
= reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
RelocInfo rinfo(isolate, 0, RelocInfo::COMMENT, comment_string, NULL);
for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
byte* pos_before = reloc_info_writer.pos();
#endif
reloc_info_writer.Write(&rinfo);
DCHECK(RelocInfo::kMinRelocCommentSize ==
pos_before - reloc_info_writer.pos());
}
// Replace relocation information on the code object.
code->set_relocation_info(*new_reloc);
}
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
// Fail hard and early if we enter this code object again.
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->int3();
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 1);
osr_patcher.masm()->int3();
}
// We will overwrite the code's relocation info in-place. Relocation info
// is written backward. The relocation info is the payload of a byte
// array. Later on we will slide this to the start of the byte array and
// create a filler object in the remaining space.
ByteArray* reloc_info = code->relocation_info();
Address reloc_end_address = reloc_info->address() + reloc_info->Size();
RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
// Since the call is a relative encoding, write new
// reloc info. We do not need any of the existing reloc info because the
// existing code will not be used again (we zap it in debug builds).
//
// Emit call to lazy deoptimization at all lazy deopt points.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Patch lazy deoptimization entry.
Address call_address = code_start_address + deopt_data->Pc(i)->value();
CodePatcher patcher(isolate, call_address, patch_size());
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
// We use RUNTIME_ENTRY for deoptimization bailouts.
RelocInfo rinfo(isolate, call_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY,
reinterpret_cast<intptr_t>(deopt_entry), NULL);
reloc_info_writer.Write(&rinfo);
DCHECK_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
// Move the relocation info to the beginning of the byte array.
const int new_reloc_length = reloc_end_address - reloc_info_writer.pos();
MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_length);
// Right trim the relocation info to free up remaining space.
const int delta = reloc_info->length() - new_reloc_length;
if (delta > 0) {
isolate->heap()->RightTrimFixedArray(reloc_info, delta);
}
}
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize * X87Register::kMaxNumRegisters;
// Reserve space for x87 fp registers.
__ sub(esp, Immediate(kDoubleRegsSize));
__ pushad();
ExternalReference c_entry_fp_address(IsolateAddressId::kCEntryFPAddress,
isolate());
__ mov(Operand::StaticVariable(c_entry_fp_address), ebp);
// GP registers are safe to use now.
// Save used x87 fp registers in correct position of previous reserve space.
Label loop, done;
// Get the layout of the x87 stack.
__ sub(esp, Immediate(kPointerSize));
__ fistp_s(MemOperand(esp, 0));
__ pop(eax);
// Preserve stack layout in edi
__ mov(edi, eax);
// Get the x87 stack depth, the first 3 bits.
__ mov(ecx, eax);
__ and_(ecx, 0x7);
__ j(zero, &done, Label::kNear);
__ bind(&loop);
__ shr(eax, 0x3);
__ mov(ebx, eax);
__ and_(ebx, 0x7); // Extract the st_x index into ebx.
// Pop TOS to the correct position. The disp(0x20) is due to pushad.
// The st_i should be saved to (esp + ebx * kDoubleSize + 0x20).
__ fstp_d(Operand(esp, ebx, times_8, 0x20));
__ dec(ecx); // Decrease stack depth.
__ j(not_zero, &loop, Label::kNear);
__ bind(&done);
const int kSavedRegistersAreaSize =
kNumberOfRegisters * kPointerSize + kDoubleRegsSize;
// Get the bailout id from the stack.
__ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register edx.
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
__ sub(edx, ebp);
__ neg(edx);
__ push(edi);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6, eax);
__ mov(eax, Immediate(0));
Label context_check;
__ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(edi, &context_check);
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type.
__ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kPointerSize),
Immediate(ExternalReference::isolate_address(isolate())));
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
__ pop(edi);
// Preserve deoptimizer object in register eax and get the input
// frame descriptor pointer.
__ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ pop(Operand(ebx, offset));
}
int double_regs_offset = FrameDescription::double_registers_offset();
const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
// Fill in the double input registers.
for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset = code * kDoubleSize;
__ fld_d(Operand(esp, src_offset));
__ fstp_d(Operand(ebx, dst_offset));
}
// Clear all FPU exceptions.
// TODO(ulan): Find out why the TOP register is not zero here in some cases,
// and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
// Remove the bailout id, return address and the double registers.
__ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
__ add(ecx, esp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
__ add(edx, Immediate(sizeof(uint32_t)));
__ bind(&pop_loop_header);
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
__ push(edi);
__ push(eax);
__ PrepareCallCFunction(1, ebx);
__ mov(Operand(esp, 0 * kPointerSize), eax);
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ pop(eax);
__ pop(edi);
__ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
// Outer loop state: eax = current FrameDescription**, edx = one past the
// last FrameDescription**.
__ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
__ mov(eax, Operand(eax, Deoptimizer::output_offset()));
__ lea(edx, Operand(eax, edx, times_4, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: ebx = current FrameDescription*, ecx = loop index.
__ mov(ebx, Operand(eax, 0));
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(ecx, Immediate(sizeof(uint32_t)));
__ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
__ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
__ add(eax, Immediate(kPointerSize));
__ bind(&outer_loop_header);
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
// In case of a failed STUB, we have to restore the x87 stack.
// x87 stack layout is in edi.
Label loop2, done2;
// Get the x87 stack depth, the first 3 bits.
__ mov(ecx, edi);
__ and_(ecx, 0x7);
__ j(zero, &done2, Label::kNear);
__ lea(ecx, Operand(ecx, ecx, times_2, 0));
__ bind(&loop2);
__ mov(eax, edi);
__ shr_cl(eax);
__ and_(eax, 0x7);
__ fld_d(Operand(ebx, eax, times_8, double_regs_offset));
__ sub(ecx, Immediate(0x3));
__ j(not_zero, &loop2, Label::kNear);
__ bind(&done2);
// Push state, pc, and continuation from the last output frame.
__ push(Operand(ebx, FrameDescription::state_offset()));
__ push(Operand(ebx, FrameDescription::pc_offset()));
__ push(Operand(ebx, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ push(Operand(ebx, offset));
}
// Restore the registers from the stack.
__ popad();
// Return to the continuation point.
__ ret(0);
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Create a sequence of deoptimization entries.
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ push_imm32(i);
__ jmp(&done);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
}
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No embedded constant pool support.
UNREACHABLE();
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X87
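
The prologue above packs the FPU stack layout into one word: bits 0-2 hold the stack depth, and each following 3-bit field names the physical register backing the next stack slot. A decoding sketch under that reading (DecodeX87Layout is a hypothetical helper):

#include <cstdint>
#include <cstdio>

void DecodeX87Layout(uint32_t layout) {
  uint32_t depth = layout & 0x7;  // stack depth lives in the low 3 bits
  for (uint32_t i = 0; i < depth; i++) {
    layout >>= 3;                 // same shift-then-mask as the loop above
    printf("st(%u) -> st_%u\n", i, layout & 0x7);
  }
}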

File diff suppressed because it is too large


@ -1,27 +0,0 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_X87
#include "src/assembler.h"
#include "src/frames.h"
#include "src/x87/assembler-x87-inl.h"
#include "src/x87/assembler-x87.h"
#include "src/x87/frames-x87.h"
namespace v8 {
namespace internal {
Register JavaScriptFrame::fp_register() { return ebp; }
Register JavaScriptFrame::context_register() { return esi; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X87


@ -1,78 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_X87_FRAMES_X87_H_
#define V8_X87_FRAMES_X87_H_
namespace v8 {
namespace internal {
// Register lists
// Note that the bit values must match those used in actual instruction encoding
const int kNumRegs = 8;
// Caller-saved registers
const RegList kJSCallerSaved =
1 << 0 | // eax
1 << 1 | // ecx
1 << 2 | // edx
1 << 3 | // ebx - used as a caller-saved register in JavaScript code
1 << 7; // edi - callee function
const int kNumJSCallerSaved = 5;
// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 8;
// ----------------------------------------------------
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -6 * kPointerSize;
static const int kNewTargetArgOffset = +2 * kPointerSize;
static const int kFunctionArgOffset = +3 * kPointerSize;
static const int kReceiverArgOffset = +4 * kPointerSize;
static const int kArgcOffset = +5 * kPointerSize;
static const int kArgvOffset = +6 * kPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
public:
static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
static const int kCallerFPOffset = 0 * kPointerSize;
static const int kCallerPCOffset = +1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = +2 * kPointerSize;
static const int kConstantPoolOffset = 0; // Not used
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
};
} // namespace internal
} // namespace v8
#endif // V8_X87_FRAMES_X87_H_
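
A RegList such as kJSCallerSaved above is a plain bit set keyed by the instruction-encoding numbers noted in the comments. A compile-time sketch of how such a list is queried (demo names, not V8's helpers):

#include <cstdint>

constexpr uint32_t kJSCallerSavedDemo =
    1 << 0 |  // eax
    1 << 1 |  // ecx
    1 << 2 |  // edx
    1 << 3 |  // ebx
    1 << 7;   // edi

constexpr bool IsJSCallerSaved(int reg_code) {
  return (kJSCallerSavedDemo >> reg_code) & 1;
}

static_assert(IsJSCallerSaved(0), "eax (code 0) is caller-saved");
static_assert(!IsJSCallerSaved(4), "esp (code 4) is not");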


@ -1,386 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_X87
#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
CallInterfaceDescriptorData* data, int register_parameter_count) {
const Register default_stub_registers[] = {eax, ebx, ecx, edx, edi};
CHECK_LE(static_cast<size_t>(register_parameter_count),
arraysize(default_stub_registers));
data->InitializePlatformSpecific(register_parameter_count,
default_stub_registers);
}
const Register FastNewFunctionContextDescriptor::FunctionRegister() {
return edi;
}
const Register FastNewFunctionContextDescriptor::SlotsRegister() { return eax; }
const Register LoadDescriptor::ReceiverRegister() { return edx; }
const Register LoadDescriptor::NameRegister() { return ecx; }
const Register LoadDescriptor::SlotRegister() { return eax; }
const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
const Register LoadICProtoArrayDescriptor::HandlerRegister() { return edi; }
const Register StoreDescriptor::ReceiverRegister() { return edx; }
const Register StoreDescriptor::NameRegister() { return ecx; }
const Register StoreDescriptor::ValueRegister() { return eax; }
const Register StoreDescriptor::SlotRegister() { return edi; }
const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
const Register StoreTransitionDescriptor::VectorRegister() { return ebx; }
const Register StoreTransitionDescriptor::MapRegister() { return edi; }
const Register StringCompareDescriptor::LeftRegister() { return edx; }
const Register StringCompareDescriptor::RightRegister() { return eax; }
const Register StringConcatDescriptor::ArgumentsCountRegister() { return eax; }
const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
const Register MathPowTaggedDescriptor::exponent() { return eax; }
const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// SharedFunctionInfo, vector, slot index.
Register registers[] = {ebx, ecx, edx};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void FastNewRestParameterDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void FastCloneRegExpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi, eax, ecx, edx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax, ebx, ecx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax, ebx, ecx, edx};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx, edx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx, edx, edi};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi, eax, edx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi, eax, edx, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
// ecx : new target (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {eax, edi, ecx, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
// edi : the target to call
Register registers[] = {edi, eax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// ecx : start index (to support rest parameters)
// edi : the target to call
Register registers[] = {edi, ecx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
// edx : the new target
// edi : the target to call
// ebx : allocation site or undefined
Register registers[] = {edi, edx, eax, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
// edx : the new target
// edi : the target to call
Register registers[] = {edi, edx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// eax -- number of arguments
// edi -- function
// ebx -- allocation site with elements kind
Register registers[] = {edi, ebx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// eax -- number of arguments
// edi -- function
// ebx -- allocation site with elements kind
Register registers[] = {edi, ebx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// eax -- number of arguments
// edi -- function
// ebx -- allocation site with elements kind
Register registers[] = {edi, ebx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax holds the stack parameter count (the argument count)
Register registers[] = {eax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ecx, edx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// edx -- lhs
// eax -- rhs
// edi -- slot id
// ebx -- vector
Register registers[] = {edx, eax, edi, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CountOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // JSFunction
edx, // the new target
eax, // actual number of arguments
ebx, // expected number of arguments
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // callee
ebx, // call_data
ecx, // holder
edx, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // argument count (not including receiver)
ebx, // address of first argument
edi // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // argument count (not including receiver)
edx, // new target
edi, // constructor
ebx, // allocation site feedback
ecx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // argument count (not including receiver)
edx, // the target of the call; checked to be the Array function
ebx, // allocation site feedback
ecx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // argument count (argc)
ecx, // address of first argument (argv)
ebx // the runtime function to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // the value to pass to the generator
ebx, // the JSGeneratorObject to resume
edx // the resume mode (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X87

File diff suppressed because it is too large

View File

@ -1,906 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_X87_MACRO_ASSEMBLER_X87_H_
#define V8_X87_MACRO_ASSEMBLER_X87_H_
#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_eax};
const Register kReturnRegister1 = {Register::kCode_edx};
const Register kReturnRegister2 = {Register::kCode_edi};
const Register kJSFunctionRegister = {Register::kCode_edi};
const Register kContextRegister = {Register::kCode_esi};
const Register kAllocateSizeRegister = {Register::kCode_edx};
const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
const Register kInterpreterDispatchTableRegister = {Register::kCode_esi};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};
// Spill slots used by interpreter dispatch calling convention.
const int kInterpreterDispatchTableSpillSlot = -1;
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
kPointersToHereMaybeInteresting,
kPointersToHereAreAlwaysInteresting
};
enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
enum class ReturnAddressState { kOnStack, kNotOnStack };
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg4 = no_reg, Register reg5 = no_reg,
Register reg6 = no_reg, Register reg7 = no_reg,
Register reg8 = no_reg);
#endif
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
Isolate* isolate() const { return isolate_; }
void Load(Register dst, const Operand& src, Representation r);
void Store(Register src, const Operand& dst, Representation r);
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int32_t x) {
if (x == 0) {
xor_(dst, dst);
} else {
mov(dst, Immediate(x));
}
}
void Set(const Operand& dst, int32_t x) { mov(dst, Immediate(x)); }
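// Usage sketch (hypothetical call site): Set(eax, 0) assembles to the
// two-byte "xor eax, eax" rather than the five-byte "mov eax, 0", which is
// why the register variant special-cases zero.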
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
// These methods can only be used with constant roots (i.e. non-writable
// and not in new space).
void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
Label* if_equal,
Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(Register with, Heap::RootListIndex index,
Label* if_not_equal,
Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
Label* if_not_equal,
Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
// These functions do not arrange the registers in any particular order so
// they are not useful for calls that can cause a GC. The caller can
// exclude up to 3 registers that do not need to be saved and restored.
void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// ---------------------------------------------------------------------------
// GC Support
enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
void CheckPageFlagForMap(
Handle<Map> map, int mask, Condition cc, Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, zero, branch, distance);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfInNewSpace(Register object, Register scratch, Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, not_zero, branch, distance);
}
// Check if an object has a given incremental marking color. Also uses ecx!
void HasColor(Register object, Register scratch0, Register scratch1,
Label* has_color, Label::Distance has_color_distance,
int first_bit, int second_bit);
void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black,
Label::Distance on_black_distance = Label::kFar);
// Checks the color of an object. If the object is white we jump to the
// incremental marker.
void JumpIfWhite(Register value, Register scratch1, Register scratch2,
Label* value_is_white, Label::Distance distance);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// Operand(reg, off).
void RecordWriteContextSlot(
Register context, int offset, Register value, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting) {
RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp,
remembered_set_action, smi_check,
pointers_to_here_check_for_value);
}
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
// object being stored. The address and value registers are clobbered by the
// operation. RecordWrite filters out smis so it does not update the
// write barrier if the value is a smi.
void RecordWrite(
Register object, Register address, Register value, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
// For page containing |object| mark the region covering the object's map
// dirty. |object| is the object being stored into, |map| is the Map object
// that was stored.
void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
Register scratch2, SaveFPRegsMode save_fp);
// ---------------------------------------------------------------------------
// Debugger Support
void DebugBreak();
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
void Prologue(bool code_pre_aging);
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
// esi.
void EnterExitFrame(int argc, bool save_doubles, StackFrame::Type frame_type);
void EnterApiExitFrame(int argc);
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
// argument in register esi (if pop_arguments == true).
void LeaveExitFrame(bool save_doubles, bool pop_arguments = true);
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
void LeaveApiExitFrame(bool restore_context);
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst);
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
// Load the initial map from the global function. The registers
// function and map can be the same.
void LoadGlobalFunctionInitialMap(Register function, Register map);
// Push and pop the registers that can hold pointers.
void PushSafepointRegisters() { pushad(); }
void PopSafepointRegisters() { popad(); }
// Store the value in register/immediate src in the safepoint
// register stack slot for register dst.
void StoreToSafepointRegisterSlot(Register dst, Register src);
void StoreToSafepointRegisterSlot(Register dst, Immediate src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
// Nop, because x87 does not have a root register.
void InitializeRootRegister() {}
void LoadHeapObject(Register result, Handle<HeapObject> object);
void CmpHeapObject(Register reg, Handle<HeapObject> object);
void PushHeapObject(Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
Move(result, Immediate(object));
}
}
void CmpObject(Register reg, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
CmpHeapObject(reg, Handle<HeapObject>::cast(object));
} else {
cmp(reg, Immediate(object));
}
}
void GetWeakValue(Register value, Handle<WeakCell> cell);
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// ---------------------------------------------------------------------------
// JavaScript invokes
// Removes current frame and its arguments from the stack preserving
// the arguments and a return address pushed to the stack for the next call.
// |ra_state| defines whether return address is already pushed to stack or
// not. Both |callee_args_count| and |caller_args_count_reg| do not include
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
// is trashed. |number_of_temp_values_after_return_address| specifies
// the number of words pushed to the stack after the return address. This is
// to allow "allocation" of scratch registers that this function requires
// by saving their values on the stack.
void PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg, Register scratch0,
Register scratch1, ReturnAddressState ra_state,
int number_of_temp_values_after_return_address);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function, Register new_target,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void ShlPair(Register high, Register low, uint8_t imm8);
void ShlPair_cl(Register high, Register low);
void ShrPair(Register high, Register low, uint8_t imm8);
void ShrPair_cl(Register high, Register src);
void SarPair(Register high, Register low, uint8_t imm8);
void SarPair_cl(Register high, Register low);
// Expression support
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
void SafeMove(Register dst, const Immediate& x);
void SafePush(const Immediate& x);
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
// Compare an object's map with the specified map.
void CompareMap(Register obj, Handle<Map> map);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specified map.
void CheckMap(Register obj, Handle<Map> map, Label* fail,
SmiCheckType smi_check_type);
// Check if the object in register heap_object is a string. Afterwards the
// register map contains the object map and the register instance_type
// contains the instance_type. The registers map and instance_type can be the
// same in which case it contains the instance type afterwards. Either of the
// registers map and instance_type can be the same as heap_object.
Condition IsObjectStringType(Register heap_object, Register map,
Register instance_type);
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, and jz).
void FCmp();
void FXamMinusZero();
void FXamSign();
void X87CheckIA();
void X87SetRC(int rc);
void X87SetFPUCW(int cw);
void ClampUint8(Register reg);
void ClampTOSToUint8(Register result_reg);
void SlowTruncateToI(Register result_reg, Register input_reg,
int offset = HeapNumber::kValueOffset - kHeapObjectTag);
void TruncateHeapNumberToI(Register result_reg, Register input_reg);
void TruncateX87TOSToI(Register result_reg);
void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode,
Label* lost_precision, Label* is_nan, Label* minus_zero,
Label::Distance dst = Label::kFar);
// Smi tagging support.
void SmiTag(Register reg) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
add(reg, reg);
}
void SmiUntag(Register reg) {
sar(reg, kSmiTagSize);
}
// Modifies the register even if it does not contain a Smi!
void SmiUntag(Register reg, Label* is_smi) {
STATIC_ASSERT(kSmiTagSize == 1);
sar(reg, kSmiTagSize);
STATIC_ASSERT(kSmiTag == 0);
j(not_carry, is_smi);
}
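// Worked example (illustrative): with kSmiTag == 0 and kSmiTagSize == 1,
// SmiTag turns the int32 value 5 into the smi 10 via a single
// "add reg, reg". SmiUntag's arithmetic shift restores it; in the label
// variant the shifted-out tag bit lands in the carry flag, so not_carry
// means the value really was a smi.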
void LoadUint32NoSSE2(Register src) {
LoadUint32NoSSE2(Operand(src));
}
void LoadUint32NoSSE2(const Operand& src);
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(zero, smi_label, distance);
}
// Jump if the operand is a smi.
inline void JumpIfSmi(Operand value, Label* smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(zero, smi_label, distance);
}
// Jump if the register contains a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(not_zero, not_smi_label, distance);
}
// Jump if the operand is not a smi.
inline void JumpIfNotSmi(Operand value, Label* not_smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(not_zero, not_smi_label, distance);
}
// Jump if the value cannot be represented by a smi.
inline void JumpIfNotValidSmiValue(Register value, Register scratch,
Label* on_invalid,
Label::Distance distance = Label::kFar) {
mov(scratch, value);
add(scratch, Immediate(0x40000000U));
j(sign, on_invalid, distance);
}
// Jump if the unsigned integer value cannot be represented by a smi.
inline void JumpIfUIntNotValidSmiValue(
Register value, Label* on_invalid,
Label::Distance distance = Label::kFar) {
cmp(value, Immediate(0x40000000U));
j(above_equal, on_invalid, distance);
}
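// Why these checks work (illustrative): a smi payload on this 32-bit port
// is 31 bits, so the representable range is [-0x40000000, 0x3FFFFFFF].
// Adding 0x40000000 maps that range onto [0, 0x7FFFFFFF], so any value
// outside it sets the sign bit, which "j(sign, ...)" catches; for unsigned
// values a direct compare against 0x40000000 suffices.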
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
void LoadAccessor(Register dst, Register holder, int accessor_index,
AccessorComponent accessor);
template<typename Field>
void DecodeField(Register reg) {
static const int shift = Field::kShift;
static const int mask = Field::kMask >> Field::kShift;
if (shift != 0) {
sar(reg, shift);
}
and_(reg, Immediate(mask));
}
template<typename Field>
void DecodeFieldToSmi(Register reg) {
static const int shift = Field::kShift;
static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
STATIC_ASSERT(kSmiTag == 0);
if (shift < kSmiTagSize) {
shl(reg, kSmiTagSize - shift);
} else if (shift > kSmiTagSize) {
sar(reg, shift - kSmiTagSize);
}
and_(reg, Immediate(mask));
}
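// Worked example (illustrative): for a hypothetical Field with kShift == 2
// and kMask == 0b11100, DecodeField emits "sar reg, 2" followed by
// "and reg, 0b111", leaving only the three field bits in reg.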
// Abort execution if argument is not a smi, enabled via --debug-code.
void AssertSmi(Register object);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
// Abort execution if argument is not a JSGeneratorObject,
// enabled via --debug-code.
void AssertGeneratorObject(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
// ---------------------------------------------------------------------------
// Exception handling
// Push a new stack handler and link it into stack handler chain.
void PushStackHandler();
// Unlink the stack handler on top of the stack from the stack handler chain.
void PopStackHandler();
// ---------------------------------------------------------------------------
// Inline caching support
void GetNumberHash(Register r0, Register scratch);
// ---------------------------------------------------------------------------
// Allocation support
// Allocate an object in new space or old space. If the given space
// is exhausted control continues at the gc_required label. The allocated
// object is returned in result and end of the new object is returned in
// result_end. The register scratch can be passed as no_reg in which case
// an additional object reference will be added to the reloc info. The
// returned pointers in result and result_end have not yet been tagged as
// heap objects. If result_contains_top_on_entry is true the content of
// result is known to be the allocation top on entry (could be result_end
// from a previous call). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
void Allocate(int object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
void Allocate(int header_size, ScaleFactor element_size,
Register element_count, RegisterValueType element_count_type,
Register result, Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags);
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or
// jumps to gc_required if new space is full.
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
Label* gc_required, MutableMode mode = IMMUTABLE);
// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
Register scratch, Label* gc_required);
// Initialize fields with filler values. Fields starting at |current_address|,
// up to but not including |end_address|, are overwritten with the value in
// |filler|. At the end of the loop, |current_address| takes the value of
// |end_address|.
void InitializeFieldsWithFiller(Register current_address,
Register end_address, Register filler);
// ---------------------------------------------------------------------------
// Support functions.
// Check a boolean-bit of a Smi field.
void BooleanBitTest(Register object, int field_offset, int bit_index);
// Machine code version of Map::GetConstructor().
// |temp| holds |result|'s map when done.
void GetMapConstructor(Register result, Register map, Register temp);
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub. Generate the code if necessary.
void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
// Tail call a code stub (jump). Generate the code if necessary.
void TailCallStub(CodeStub* stub);
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(ExternalReference ref, int num_arguments);
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
// etc., not pushed. The argument count assumes all arguments are word sized.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
void PrepareCallCFunction(int num_arguments, Register scratch);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame = false);
// ---------------------------------------------------------------------------
// Utilities
void Ret();
// Return and drop arguments from stack, where the number of arguments
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
// Emit code that loads the |parameter_index|'th parameter from the stack into
// the register, according to the CallInterfaceDescriptor definition.
// |sp_to_ra_offset_in_words| specifies the number of words pushed below the
// caller's sp (on x87 it is at least the return address).
template <class Descriptor>
void LoadParameterFromStack(
Register reg, typename Descriptor::ParameterIndices parameter_index,
int sp_to_ra_offset_in_words = 1) {
DCHECK(Descriptor::kPassLastArgsOnStack);
DCHECK_LT(parameter_index, Descriptor::kParameterCount);
DCHECK_LE(Descriptor::kParameterCount - Descriptor::kStackArgumentsCount,
parameter_index);
int offset = (Descriptor::kParameterCount - parameter_index - 1 +
sp_to_ra_offset_in_words) *
kPointerSize;
mov(reg, Operand(esp, offset));
}
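// Worked example (illustrative): for a hypothetical descriptor with
// kParameterCount == 3 whose last argument is passed on the stack, loading
// parameter_index == 2 with the default sp_to_ra_offset_in_words == 1
// computes offset == (3 - 2 - 1 + 1) * kPointerSize == 4, i.e. the stack
// word just above the return address: Operand(esp, 4).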
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the esp register.
void Drop(int element_count);
void Call(Label* target) { call(target); }
void Call(Handle<Code> target, RelocInfo::Mode rmode,
TypeFeedbackId id = TypeFeedbackId::None()) {
call(target, rmode, id);
}
void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
void Push(Register src) { push(src); }
void Push(const Operand& src) { push(src); }
void Push(Immediate value) { push(value); }
void Pop(Register dst) { pop(dst); }
void Pop(const Operand& dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
void Lzcnt(Register dst, const Operand& src);
void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
void Tzcnt(Register dst, const Operand& src);
void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
void Popcnt(Register dst, const Operand& src);
// Move if the registers are not identical.
void Move(Register target, Register source);
// Move a constant into a destination using the most efficient encoding.
void Move(Register dst, const Immediate& x);
void Move(const Operand& dst, const Immediate& x);
void Move(Register dst, Handle<Object> handle) { LoadObject(dst, handle); }
void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
// Push a handle value.
void Push(Handle<Object> handle) { push(Immediate(handle)); }
void Push(Smi* smi) { Push(Immediate(smi)); }
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
// Insert code to verify that the x87 stack has the specified depth (0-7)
void VerifyX87StackDepth(uint32_t depth);
// Emit code for a truncating division by a constant. The dividend register is
// unchanged, the result is in edx, and eax gets clobbered.
void TruncatingDiv(Register dividend, int32_t divisor);
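// Usage sketch (hypothetical call site): TruncatingDiv(ecx, 10) leaves
// ecx / 10, truncated toward zero, in edx and preserves ecx; eax must be
// treated as clobbered by the multiply-based sequence.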
// ---------------------------------------------------------------------------
// StatsCounter support
void SetCounter(StatsCounter* counter, int value);
void IncrementCounter(StatsCounter* counter, int value);
void DecrementCounter(StatsCounter* counter, int value);
void IncrementCounter(Condition cc, StatsCounter* counter, int value);
void DecrementCounter(Condition cc, StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
// Debugging
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, BailoutReason reason);
// Like Assert(), but always enabled.
void Check(Condition cc, BailoutReason reason);
// Print a message to stdout and abort execution.
void Abort(BailoutReason reason);
// Check that the stack is aligned.
void CheckStackAlignment();
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
// String utilities.
// Checks if both objects are sequential one-byte strings, and jumps to label
// if either is not.
void JumpIfNotBothSequentialOneByteStrings(
Register object1, Register object2, Register scratch1, Register scratch2,
Label* on_not_flat_one_byte_strings);
// Checks if the given register or operand is a unique name.
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
Label::Distance distance = Label::kFar) {
JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance);
}
void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
Label::Distance distance = Label::kFar);
void EmitSeqStringSetCharCheck(Register string, Register index,
Register value, uint32_t encoding_mask);
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
// Expects object in eax and returns map with validated enum cache
// in eax. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
// If allocation info is present, conditional code is set to equal.
void TestJSArrayForAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* no_memento_found);
private:
bool generating_stub_;
bool has_frame_;
Isolate* isolate_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, Label* done,
bool* definitely_mismatches, InvokeFlag flag,
Label::Distance done_distance,
const CallWrapper& call_wrapper);
void EnterExitFramePrologue(StackFrame::Type frame_type);
void EnterExitFrameEpilogue(int argc, bool save_doubles);
void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result, Register scratch,
AllocationFlags flags);
void UpdateAllocationTopHelper(Register result_end, Register scratch,
AllocationFlags flags);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object, Register scratch, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
// Helper for finding the mark bits for an address. Afterwards, the
// bitmap register points at the word with the mark bits, and the mask
// register holds the position of the first bit. Uses ecx as scratch and
// leaves addr_reg unchanged.
inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg);
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code);
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
};
// The code patcher is used to patch (typically) small parts of code, e.g. for
// debugging and other types of instrumentation. When using the code patcher,
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints is violated, it causes
// an assertion to fail.
class CodePatcher {
public:
CodePatcher(Isolate* isolate, byte* address, int size);
~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
private:
byte* address_; // The address of the code being patched.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
// -----------------------------------------------------------------------------
// Static helper functions.
// Generate an Operand for loading a field from an object.
inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
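// Worked example (illustrative): heap object pointers carry a tag of
// kHeapObjectTag == 1, so FieldOperand(eax, 4) is Operand(eax, 3), the
// untagged address of the field at offset 4.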
// Generate an Operand for loading an indexed field from an object.
inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
int additional_offset = 0) {
int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}
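// Note (illustrative): index_as_smi holds the element index already
// shifted left by kSmiTagSize == 1, so scaling by times_half_pointer_size
// (x2) rather than times_pointer_size (x4) yields the intended byte offset
// of index * kPointerSize.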
inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
inline Operand ContextOperand(Register context, Register index) {
return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
}
inline Operand NativeContextOperand() {
return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
}
#define ACCESS_MASM(masm) masm->
} // namespace internal
} // namespace v8
#endif // V8_X87_MACRO_ASSEMBLER_X87_H_

View File

@ -1,7 +0,0 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/x87/simulator-x87.h"
// Since there is no simulator for the ia32 architecture, this file is empty.

View File

@ -1,52 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_X87_SIMULATOR_X87_H_
#define V8_X87_SIMULATOR_X87_H_
#include "src/allocation.h"
namespace v8 {
namespace internal {
// Since there is no simulator for the ia32 architecture, the only thing we
// can do is call the entry directly.
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*regexp_matcher)(String*, int, const byte*,
const byte*, int*, int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should expect nine int/pointer-sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on ia32 uses the C stack, we
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
uintptr_t c_limit) {
USE(isolate);
return c_limit;
}
static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
uintptr_t try_catch_address) {
USE(isolate);
return try_catch_address;
}
static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
};
} // namespace internal
} // namespace v8
#endif // V8_X87_SIMULATOR_X87_H_

View File

@ -287,17 +287,6 @@ v8_executable("cctest") {
"test-macro-assembler-x64.cc",
"test-run-wasm-relocation-x64.cc",
]
} else if (v8_current_cpu == "x87") {
sources += [ ### gcmole(arch:x87) ###
"test-assembler-x87.cc",
"test-code-stubs-x87.cc",
"test-code-stubs.cc",
"test-code-stubs.h",
"test-disasm-x87.cc",
"test-log-stack-tracer.cc",
"test-macro-assembler-x87.cc",
"test-run-wasm-relocation-x87.cc",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc) ###
"test-assembler-ppc.cc",

View File

@ -11,5 +11,3 @@ per-file *-s390*=joransiu@ca.ibm.com
per-file *-s390*=jyan@ca.ibm.com
per-file *-s390*=mbrandy@us.ibm.com
per-file *-s390*=michael_dawson@ca.ibm.com
per-file *-x87*=chunyang.dai@intel.com
per-file *-x87*=weiliang.lin@intel.com

View File

@ -308,16 +308,6 @@
'test-disasm-mips64.cc',
'test-macro-assembler-mips64.cc',
],
'cctest_sources_x87': [ ### gcmole(arch:x87) ###
'test-assembler-x87.cc',
'test-code-stubs.cc',
'test-code-stubs.h',
'test-code-stubs-x87.cc',
'test-disasm-x87.cc',
'test-macro-assembler-x87.cc',
'test-log-stack-tracer.cc',
'test-run-wasm-relocation-x87.cc',
],
},
'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
'targets': [
@ -402,11 +392,6 @@
'<@(cctest_sources_mips64el)',
],
}],
['v8_target_arch=="x87"', {
'sources': [
'<@(cctest_sources_x87)',
],
}],
[ 'OS=="linux" or OS=="qnx"', {
'sources': [
'test-platform-linux.cc',

View File

@ -291,31 +291,6 @@
'test-run-wasm-simd/*': [SKIP],
}], # 'arch == mips or arch == mipsel or arch == mips64 or arch == mips64el'
##############################################################################
['arch == x87', {
'test-run-machops/RunFloat64InsertLowWord32': [SKIP],
'test-run-native-calls/MixedParams_0': [SKIP],
'test-run-native-calls/MixedParams_1': [SKIP],
'test-run-native-calls/MixedParams_2': [SKIP],
'test-run-native-calls/MixedParams_3': [SKIP],
'test-run-machops/RunFloat64MulAndFloat64Add1': [SKIP],
'test-run-machops/RunFloat64MulAndFloat64Add2': [SKIP],
'test-run-machops/RunFloat64MulAndFloat64Sub1': [SKIP],
'test-run-machops/RunFloat64MulAndFloat64Sub2': [SKIP],
'test-run-machops/RunFloat64Sin': [SKIP],
'test-run-machops/RunFloat64Cos': [SKIP],
'test-run-machops/RunFloat64Expm1': [SKIP],
'test-run-machops/RunFloat64Tan': [SKIP],
'test-cpu-profiler/Inlining': [SKIP],
'test-gap-resolver/FuzzResolver': [SKIP],
'test-run-wasm/RunWasmCompiled_MultiReturnSelect_f32': [SKIP],
'test-run-wasm/RunWasmCompiled_MultiReturnSelect_f64': [SKIP],
'test-run-wasm/RunWasmCompiled_SignallingNanSurvivesI32ReinterpretF32': [SKIP],
'test-run-wasm-64/RunWasmCompiled_SignallingNanSurvivesI64ReinterpretF64': [SKIP],
'test-run-wasm/RunWasmInterpreted_SignallingNanSurvivesI32ReinterpretF32': [SKIP],
'test-run-wasm-64/RunWasmInterpreted_SignallingNanSurvivesI64ReinterpretF64': [SKIP],
}], # 'arch == x87'
##############################################################################
['arch == android_arm or arch == android_ia32', {

View File

@ -1,451 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include "src/v8.h"
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
#include "src/disassembler.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
typedef int (*F0)();
typedef int (*F1)(int x);
typedef int (*F2)(int x, int y);
#define __ assm.
TEST(AssemblerIa320) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
Assembler assm(isolate, buffer, sizeof buffer);
__ mov(eax, Operand(esp, 4));
__ add(eax, Operand(esp, 8));
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
#endif
F2 f = FUNCTION_CAST<F2>(code->entry());
int res = f(3, 4);
::printf("f() = %d\n", res);
CHECK_EQ(7, res);
}
TEST(AssemblerIa321) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
Assembler assm(isolate, buffer, sizeof buffer);
Label L, C;
__ mov(edx, Operand(esp, 4));
__ xor_(eax, eax); // clear eax
__ jmp(&C);
__ bind(&L);
__ add(eax, edx);
__ sub(edx, Immediate(1));
__ bind(&C);
__ test(edx, edx);
__ j(not_zero, &L);
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
int res = f(100);
::printf("f() = %d\n", res);
CHECK_EQ(5050, res);
}
TEST(AssemblerIa322) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
Assembler assm(isolate, buffer, sizeof buffer);
Label L, C;
__ mov(edx, Operand(esp, 4));
__ mov(eax, 1);
__ jmp(&C);
__ bind(&L);
__ imul(eax, edx);
__ sub(edx, Immediate(1));
__ bind(&C);
__ test(edx, edx);
__ j(not_zero, &L);
__ ret(0);
// some relocated stuff here, not executed
__ mov(eax, isolate->factory()->true_value());
__ jmp(NULL, RelocInfo::RUNTIME_ENTRY);
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
int res = f(10);
::printf("f() = %d\n", res);
CHECK_EQ(3628800, res);
}
typedef int (*F3)(float x);
typedef int (*F4)(double x);
static int baz = 42;
TEST(AssemblerIa325) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
Assembler assm(isolate, buffer, sizeof buffer);
__ mov(eax, Operand(reinterpret_cast<intptr_t>(&baz), RelocInfo::NONE32));
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
CHECK_EQ(42, res);
}
typedef int (*F7)(double x, double y);
TEST(AssemblerIa329) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
MacroAssembler assm(isolate, buffer, sizeof(buffer),
v8::internal::CodeObjectRequired::kYes);
enum { kEqual = 0, kGreater = 1, kLess = 2, kNaN = 3, kUndefined = 4 };
Label equal_l, less_l, greater_l, nan_l;
__ fld_d(Operand(esp, 3 * kPointerSize));
__ fld_d(Operand(esp, 1 * kPointerSize));
__ FCmp();
__ j(parity_even, &nan_l);
__ j(equal, &equal_l);
__ j(below, &less_l);
__ j(above, &greater_l);
__ mov(eax, kUndefined);
__ ret(0);
__ bind(&equal_l);
__ mov(eax, kEqual);
__ ret(0);
__ bind(&greater_l);
__ mov(eax, kGreater);
__ ret(0);
__ bind(&less_l);
__ mov(eax, kLess);
__ ret(0);
__ bind(&nan_l);
__ mov(eax, kNaN);
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
#endif
F7 f = FUNCTION_CAST<F7>(code->entry());
CHECK_EQ(kLess, f(1.1, 2.2));
CHECK_EQ(kEqual, f(2.2, 2.2));
CHECK_EQ(kGreater, f(3.3, 2.2));
CHECK_EQ(kNaN, f(std::numeric_limits<double>::quiet_NaN(), 1.1));
}
TEST(AssemblerIa3210) {
// Test chaining of label usages within instructions (issue 1644).
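// While the label is unbound, each jump referring to it stores the offset
// of the previous reference in its own displacement field, forming a
// linked list that bind() walks to patch every site at once.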
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
Label target;
__ j(equal, &target);
__ j(not_equal, &target);
__ bind(&target);
__ nop();
}
TEST(AssemblerMultiByteNop) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
Assembler assm(isolate, buffer, sizeof(buffer));
__ push(ebx);
__ push(ecx);
__ push(edx);
__ push(edi);
__ push(esi);
__ mov(eax, 1);
__ mov(ebx, 2);
__ mov(ecx, 3);
__ mov(edx, 4);
__ mov(edi, 5);
__ mov(esi, 6);
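// Nop(n) must emit exactly n bytes, using multi-byte nop encodings; the
// register comparisons below verify the padding has no side effects.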
for (int i = 0; i < 16; i++) {
int before = assm.pc_offset();
__ Nop(i);
CHECK_EQ(assm.pc_offset() - before, i);
}
Label fail;
__ cmp(eax, 1);
__ j(not_equal, &fail);
__ cmp(ebx, 2);
__ j(not_equal, &fail);
__ cmp(ecx, 3);
__ j(not_equal, &fail);
__ cmp(edx, 4);
__ j(not_equal, &fail);
__ cmp(edi, 5);
__ j(not_equal, &fail);
__ cmp(esi, 6);
__ j(not_equal, &fail);
__ mov(eax, 42);
__ pop(esi);
__ pop(edi);
__ pop(edx);
__ pop(ecx);
__ pop(ebx);
__ ret(0);
__ bind(&fail);
__ mov(eax, 13);
__ pop(esi);
__ pop(edi);
__ pop(edx);
__ pop(ecx);
__ pop(ebx);
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
CHECK(code->IsCode());
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
CHECK_EQ(42, res);
}
TEST(AssemblerIa32JumpTables1) {
// Test jump tables with forward jumps.
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
Assembler assm(isolate, nullptr, 0);
const int kNumCases = 512;
int values[kNumCases];
isolate->random_number_generator()->NextBytes(values, sizeof(values));
Label labels[kNumCases];
Label done, table;
__ mov(eax, Operand(esp, 4));
__ jmp(Operand::JumpTable(eax, times_4, &table));
__ ud2();
__ bind(&table);
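// Each dd(&label) emits the label's address as 32-bit data with an
// internal-reference relocation entry (so it survives code movement),
// letting the indexed jmp above dispatch through the table.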
for (int i = 0; i < kNumCases; ++i) {
__ dd(&labels[i]);
}
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
__ mov(eax, Immediate(values[i]));
__ jmp(&done);
}
__ bind(&done);
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
int res = f(i);
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
}
TEST(AssemblerIa32JumpTables2) {
// Test jump tables with backward jumps.
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
Assembler assm(isolate, nullptr, 0);
const int kNumCases = 512;
int values[kNumCases];
isolate->random_number_generator()->NextBytes(values, sizeof(values));
Label labels[kNumCases];
Label done, table;
__ mov(eax, Operand(esp, 4));
__ jmp(Operand::JumpTable(eax, times_4, &table));
__ ud2();
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
__ mov(eax, Immediate(values[i]));
__ jmp(&done);
}
__ bind(&table);
for (int i = 0; i < kNumCases; ++i) {
__ dd(&labels[i]);
}
__ bind(&done);
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
int res = f(i);
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
}
TEST(Regress621926) {
// Bug description:
// The opcodes for cmpw r/m16, r16 and cmpw r16, r/m16 were swapped.
// This was causing non-commutative comparisons to produce the wrong result.
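// For reference: CMP r/m16, r16 encodes as 66 39 /r, while
// CMP r16, r/m16 encodes as 66 3B /r, so swapping the two silently
// reverses the operand order of every 16-bit comparison.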
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
Assembler assm(isolate, nullptr, 0);
uint16_t a = 42;
Label fail;
__ push(ebx);
__ mov(ebx, Immediate(reinterpret_cast<intptr_t>(&a)));
__ mov(eax, Immediate(41));
__ cmpw(eax, Operand(ebx, 0));
__ j(above_equal, &fail);
__ cmpw(Operand(ebx, 0), eax);
__ j(below_equal, &fail);
__ mov(eax, 1);
__ pop(ebx);
__ ret(0);
__ bind(&fail);
__ mov(eax, 0);
__ pop(ebx);
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
#endif
F0 f = FUNCTION_CAST<F0>(code->entry());
CHECK_EQ(1, f());
}
#undef __

@ -1,155 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include <limits>
#include "src/v8.h"
#include "src/base/platform/platform.h"
#include "src/code-stubs.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
using namespace v8::internal;
#define __ assm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register source_reg,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
v8::internal::CodeObjectRequired::kYes);
int offset =
source_reg.is(esp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
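// (When the source is esp the double lives directly on the stack;
// otherwise the stub presumably expects a tagged HeapNumber pointer, so
// the offset mirrors reading its value field, minus the tag adjustment.)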
DoubleToIStub stub(isolate, source_reg, destination_reg, offset, true);
byte* start = stub.GetCode()->instruction_start();
__ push(ebx);
__ push(ecx);
__ push(edx);
__ push(esi);
__ push(edi);
if (!source_reg.is(esp)) {
__ lea(source_reg, MemOperand(esp, 6 * kPointerSize - offset));
}
int param_offset = 7 * kPointerSize;
// Save registers to make sure they don't get clobbered.
int reg_num = 0;
for (; reg_num < Register::kNumRegisters; ++reg_num) {
if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
reg_num)) {
Register reg = Register::from_code(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ push(reg);
param_offset += kPointerSize;
}
}
}
// Re-push the double argument
__ push(MemOperand(esp, param_offset));
__ push(MemOperand(esp, param_offset));
// Call through to the actual stub
__ call(start, RelocInfo::EXTERNAL_REFERENCE);
__ add(esp, Immediate(kDoubleSize));
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 0; --reg_num) {
if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
reg_num)) {
Register reg = Register::from_code(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ cmp(reg, MemOperand(esp, 0));
__ Assert(equal, kRegisterWasClobbered);
__ add(esp, Immediate(kPointerSize));
}
}
}
__ mov(eax, destination_reg);
__ pop(edi);
__ pop(esi);
__ pop(edx);
__ pop(ecx);
__ pop(ebx);
__ ret(kDoubleSize);
CodeDesc desc;
assm.GetCode(&desc);
return reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer));
}
#undef __
static Isolate* GetIsolateFrom(LocalContext* context) {
return reinterpret_cast<Isolate*>((*context)->GetIsolate());
}
TEST(ConvertDToI) {
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
HandleScope scope(isolate);
#if DEBUG
// Verify that the tests actually work with the C version. In release
// builds the compiler optimizes it away because it's all constant, but
// gets it wrong, triggering an assert on gcc.
RunAllTruncationTests(&ConvertDToICVersion);
#endif
Register source_registers[] = {esp, eax, ebx, ecx, edx, edi, esi};
Register dest_registers[] = {eax, ebx, ecx, edx, edi, esi};
for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
RunAllTruncationTests(
MakeConvertDToIFuncTrampoline(isolate,
source_registers[s],
dest_registers[d]));
}
}
}

@ -1,443 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include "src/v8.h"
#include "src/code-factory.h"
#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
#include "src/x87/frames-x87.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
#define __ assm.
static void DummyStaticFunction(Object* result) {
}
TEST(DisasmIa320) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
Assembler assm(isolate, buffer, sizeof buffer);
DummyStaticFunction(NULL);  // Keep the compiler from flagging it as unused.
// Short immediate instructions
__ adc(eax, 12345678);
__ add(eax, Immediate(12345678));
__ or_(eax, 12345678);
__ sub(eax, Immediate(12345678));
__ xor_(eax, 12345678);
__ and_(eax, 12345678);
Handle<FixedArray> foo = isolate->factory()->NewFixedArray(10, TENURED);
__ cmp(eax, foo);
// ---- This one caused a crash.
__ mov(ebx, Operand(esp, ecx, times_2, 0));  // [esp+ecx*2]
// ---- All instructions that I can think of
__ add(edx, ebx);
__ add(edx, Operand(12, RelocInfo::NONE32));
__ add(edx, Operand(ebx, 0));
__ add(edx, Operand(ebx, 16));
__ add(edx, Operand(ebx, 1999));
__ add(edx, Operand(ebx, -4));
__ add(edx, Operand(ebx, -1999));
__ add(edx, Operand(esp, 0));
__ add(edx, Operand(esp, 16));
__ add(edx, Operand(esp, 1999));
__ add(edx, Operand(esp, -4));
__ add(edx, Operand(esp, -1999));
__ nop();
__ add(esi, Operand(ecx, times_4, 0));
__ add(esi, Operand(ecx, times_4, 24));
__ add(esi, Operand(ecx, times_4, -4));
__ add(esi, Operand(ecx, times_4, -1999));
__ nop();
__ add(edi, Operand(ebp, ecx, times_4, 0));
__ add(edi, Operand(ebp, ecx, times_4, 12));
__ add(edi, Operand(ebp, ecx, times_4, -8));
__ add(edi, Operand(ebp, ecx, times_4, -3999));
__ add(Operand(ebp, ecx, times_4, 12), Immediate(12));
__ nop();
__ add(ebx, Immediate(12));
__ nop();
__ adc(edx, Operand(ebx));
__ adc(ecx, 12);
__ adc(ecx, 1000);
__ nop();
__ and_(edx, 3);
__ and_(edx, Operand(esp, 4));
__ cmp(edx, 3);
__ cmp(edx, Operand(esp, 4));
__ cmp(Operand(ebp, ecx, times_4, 0), Immediate(1000));
Handle<FixedArray> foo2 = isolate->factory()->NewFixedArray(10, TENURED);
__ cmp(ebx, foo2);
__ cmpb(ebx, Operand(ebp, ecx, times_2, 0));
__ cmpb(Operand(ebp, ecx, times_2, 0), ebx);
__ or_(edx, 3);
__ xor_(edx, 3);
__ nop();
__ cpuid();
__ movsx_b(edx, ecx);
__ movsx_w(edx, ecx);
__ movzx_b(edx, ecx);
__ movzx_w(edx, ecx);
__ nop();
__ imul(edx, ecx);
__ shld(edx, ecx, 10);
__ shld_cl(edx, ecx);
__ shrd(edx, ecx, 10);
__ shrd_cl(edx, ecx);
__ bts(edx, ecx);
__ bts(Operand(ebx, ecx, times_4, 0), ecx);
__ nop();
__ pushad();
__ popad();
__ pushfd();
__ popfd();
__ push(Immediate(12));
__ push(Immediate(23456));
__ push(ecx);
__ push(esi);
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(Operand(ebx, ecx, times_4, 0));
__ push(Operand(ebx, ecx, times_4, 0));
__ push(Operand(ebx, ecx, times_4, 10000));
__ pop(edx);
__ pop(eax);
__ pop(Operand(ebx, ecx, times_4, 0));
__ nop();
__ add(edx, Operand(esp, 16));
__ add(edx, ecx);
__ mov_b(edx, ecx);
__ mov_b(ecx, 6);
__ mov_b(Operand(ebx, ecx, times_4, 10000), 6);
__ mov_b(Operand(esp, 16), edx);
__ mov_w(edx, Operand(esp, 16));
__ mov_w(Operand(esp, 16), edx);
__ nop();
__ movsx_w(edx, Operand(esp, 12));
__ movsx_b(edx, Operand(esp, 12));
__ movzx_w(edx, Operand(esp, 12));
__ movzx_b(edx, Operand(esp, 12));
__ nop();
__ mov(edx, 1234567);
__ mov(edx, Operand(esp, 12));
__ mov(Operand(ebx, ecx, times_4, 10000), Immediate(12345));
__ mov(Operand(ebx, ecx, times_4, 10000), edx);
__ nop();
__ dec_b(edx);
__ dec_b(Operand(eax, 10));
__ dec_b(Operand(ebx, ecx, times_4, 10000));
__ dec(edx);
__ cdq();
__ nop();
__ idiv(edx);
__ idiv(Operand(edx, ecx, times_1, 1));
__ idiv(Operand(esp, 12));
__ div(edx);
__ div(Operand(edx, ecx, times_1, 1));
__ div(Operand(esp, 12));
__ mul(edx);
__ neg(edx);
__ not_(edx);
__ test(Operand(ebx, ecx, times_4, 10000), Immediate(123456));
__ imul(edx, Operand(ebx, ecx, times_4, 10000));
__ imul(edx, ecx, 12);
__ imul(edx, Operand(edx, eax, times_2, 42), 8);
__ imul(edx, ecx, 1000);
__ imul(edx, Operand(ebx, ecx, times_4, 1), 9000);
__ inc(edx);
__ inc(Operand(ebx, ecx, times_4, 10000));
__ push(Operand(ebx, ecx, times_4, 10000));
__ pop(Operand(ebx, ecx, times_4, 10000));
__ call(Operand(ebx, ecx, times_4, 10000));
__ jmp(Operand(ebx, ecx, times_4, 10000));
__ lea(edx, Operand(ebx, ecx, times_4, 10000));
__ or_(edx, 12345);
__ or_(edx, Operand(ebx, ecx, times_4, 10000));
__ nop();
__ rcl(edx, 1);
__ rcl(edx, 7);
__ rcr(edx, 1);
__ rcr(edx, 7);
__ ror(edx, 1);
__ ror(edx, 6);
__ ror_cl(edx);
__ ror(Operand(ebx, ecx, times_4, 10000), 1);
__ ror(Operand(ebx, ecx, times_4, 10000), 6);
__ ror_cl(Operand(ebx, ecx, times_4, 10000));
__ sar(edx, 1);
__ sar(edx, 6);
__ sar_cl(edx);
__ sar(Operand(ebx, ecx, times_4, 10000), 1);
__ sar(Operand(ebx, ecx, times_4, 10000), 6);
__ sar_cl(Operand(ebx, ecx, times_4, 10000));
__ sbb(edx, Operand(ebx, ecx, times_4, 10000));
__ shl(edx, 1);
__ shl(edx, 6);
__ shl_cl(edx);
__ shl(Operand(ebx, ecx, times_4, 10000), 1);
__ shl(Operand(ebx, ecx, times_4, 10000), 6);
__ shl_cl(Operand(ebx, ecx, times_4, 10000));
__ shrd_cl(Operand(ebx, ecx, times_4, 10000), edx);
__ shr(edx, 1);
__ shr(edx, 7);
__ shr_cl(edx);
__ shr(Operand(ebx, ecx, times_4, 10000), 1);
__ shr(Operand(ebx, ecx, times_4, 10000), 6);
__ shr_cl(Operand(ebx, ecx, times_4, 10000));
// Immediates
__ adc(edx, 12345);
__ add(ebx, Immediate(12));
__ add(Operand(edx, ecx, times_4, 10000), Immediate(12));
__ and_(ebx, 12345);
__ cmp(ebx, 12345);
__ cmp(ebx, Immediate(12));
__ cmp(Operand(edx, ecx, times_4, 10000), Immediate(12));
__ cmpb(eax, Immediate(100));
__ or_(ebx, 12345);
__ sub(ebx, Immediate(12));
__ sub(Operand(edx, ecx, times_4, 10000), Immediate(12));
__ xor_(ebx, 12345);
__ imul(edx, ecx, 12);
__ imul(edx, ecx, 1000);
__ cld();
__ rep_movs();
__ rep_stos();
__ stos();
__ sub(edx, Operand(ebx, ecx, times_4, 10000));
__ sub(edx, ebx);
__ test(edx, Immediate(12345));
__ test(edx, Operand(ebx, ecx, times_8, 10000));
__ test(Operand(esi, edi, times_1, -20000000), Immediate(300000000));
__ test_b(edx, Operand(ecx, ebx, times_2, 1000));
__ test_b(Operand(eax, -20), Immediate(0x9A));
__ nop();
__ xor_(edx, 12345);
__ xor_(edx, Operand(ebx, ecx, times_8, 10000));
__ bts(Operand(ebx, ecx, times_8, 10000), edx);
__ hlt();
__ int3();
__ ret(0);
__ ret(8);
// Calls
Label L1, L2;
__ bind(&L1);
__ nop();
__ call(&L1);
__ call(&L2);
__ nop();
__ bind(&L2);
__ call(Operand(ebx, ecx, times_4, 10000));
__ nop();
Handle<Code> ic = isolate->builtins()->LoadIC();
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
__ call(FUNCTION_ADDR(DummyStaticFunction), RelocInfo::RUNTIME_ENTRY);
__ nop();
__ jmp(&L1);
__ jmp(Operand(ebx, ecx, times_4, 10000));
ExternalReference after_break_target =
ExternalReference::debug_after_break_target_address(isolate);
__ jmp(Operand::StaticVariable(after_break_target));
__ jmp(ic, RelocInfo::CODE_TARGET);
__ nop();
Label Ljcc;
__ nop();
// long jumps
__ j(overflow, &Ljcc);
__ j(no_overflow, &Ljcc);
__ j(below, &Ljcc);
__ j(above_equal, &Ljcc);
__ j(equal, &Ljcc);
__ j(not_equal, &Ljcc);
__ j(below_equal, &Ljcc);
__ j(above, &Ljcc);
__ j(sign, &Ljcc);
__ j(not_sign, &Ljcc);
__ j(parity_even, &Ljcc);
__ j(parity_odd, &Ljcc);
__ j(less, &Ljcc);
__ j(greater_equal, &Ljcc);
__ j(less_equal, &Ljcc);
__ j(greater, &Ljcc);
__ nop();
__ bind(&Ljcc);
// short jumps
__ j(overflow, &Ljcc);
__ j(no_overflow, &Ljcc);
__ j(below, &Ljcc);
__ j(above_equal, &Ljcc);
__ j(equal, &Ljcc);
__ j(not_equal, &Ljcc);
__ j(below_equal, &Ljcc);
__ j(above, &Ljcc);
__ j(sign, &Ljcc);
__ j(not_sign, &Ljcc);
__ j(parity_even, &Ljcc);
__ j(parity_odd, &Ljcc);
__ j(less, &Ljcc);
__ j(greater_equal, &Ljcc);
__ j(less_equal, &Ljcc);
__ j(greater, &Ljcc);
// 0xD9 instructions
__ nop();
__ fld(1);
__ fld1();
__ fldz();
__ fldpi();
__ fabs();
__ fchs();
__ fprem();
__ fprem1();
__ fincstp();
__ ftst();
__ fxam();
__ fxch(3);
__ fld_s(Operand(ebx, ecx, times_4, 10000));
__ fstp_s(Operand(ebx, ecx, times_4, 10000));
__ ffree(3);
__ fld_d(Operand(ebx, ecx, times_4, 10000));
__ fstp_d(Operand(ebx, ecx, times_4, 10000));
__ nop();
__ fild_s(Operand(ebx, ecx, times_4, 10000));
__ fistp_s(Operand(ebx, ecx, times_4, 10000));
__ fild_d(Operand(ebx, ecx, times_4, 10000));
__ fistp_d(Operand(ebx, ecx, times_4, 10000));
__ fnstsw_ax();
__ nop();
__ fadd(3);
__ fsub(3);
__ fmul(3);
__ fdiv(3);
__ faddp(3);
__ fsubp(3);
__ fmulp(3);
__ fdivp(3);
__ fcompp();
__ fwait();
__ frndint();
__ fninit();
__ nop();
__ fldcw(Operand(ebx, ecx, times_4, 10000));
__ fnstcw(Operand(ebx, ecx, times_4, 10000));
__ fadd_d(Operand(ebx, ecx, times_4, 10000));
__ fnsave(Operand(ebx, ecx, times_4, 10000));
__ frstor(Operand(ebx, ecx, times_4, 10000));
// xchg.
{
__ xchg_b(eax, Operand(eax, 8));
__ xchg_w(eax, Operand(ebx, 8));
__ xchg(eax, eax);
__ xchg(eax, ebx);
__ xchg(ebx, ebx);
__ xchg(ebx, Operand(esp, 12));
}
// cmpxchg.
{
__ cmpxchg_b(Operand(esp, 12), eax);
__ cmpxchg_w(Operand(ebx, ecx, times_4, 10000), eax);
__ cmpxchg(Operand(ebx, ecx, times_4, 10000), eax);
}
// lock prefix.
{
__ lock();
__ cmpxchg(Operand(esp, 12), ebx);
__ lock();
__ xchg_w(eax, Operand(ecx, 8));
}
// Nop instructions
for (int i = 0; i < 16; i++) {
__ Nop(i);
}
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
USE(code);
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
byte* begin = code->instruction_start();
byte* end = begin + code->instruction_size();
disasm::Disassembler::Disassemble(stdout, begin, end);
#endif
}
#undef __

@ -1,148 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "src/base/platform/platform.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
using namespace v8::internal;
#if __GNUC__
#define STDCALL __attribute__((stdcall))
#else
#define STDCALL __stdcall
#endif
typedef int STDCALL F0Type();
typedef F0Type* F0;
#define __ masm->
TEST(LoadAndStoreWithRepresentation) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
__ push(ebx);
__ push(edx);
__ sub(esp, Immediate(1 * kPointerSize));
Label exit;
// Test 1.
__ mov(eax, Immediate(1)); // Test number.
__ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
__ mov(ebx, Immediate(-1));
__ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::UInteger8());
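// Storing -1 through an 8-bit representation keeps only the low byte,
// so the full-width reload below must observe 255.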
__ mov(ebx, Operand(esp, 0 * kPointerSize));
__ mov(edx, Immediate(255));
__ cmp(ebx, edx);
__ j(not_equal, &exit);
__ Load(ebx, Operand(esp, 0 * kPointerSize), Representation::UInteger8());
__ cmp(ebx, edx);
__ j(not_equal, &exit);
// Test 2.
__ mov(eax, Immediate(2)); // Test number.
__ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
__ mov(ebx, Immediate(-1));
__ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::Integer8());
__ mov(ebx, Operand(esp, 0 * kPointerSize));
__ mov(edx, Immediate(255));
__ cmp(ebx, edx);
__ j(not_equal, &exit);
__ Load(ebx, Operand(esp, 0 * kPointerSize), Representation::Integer8());
__ mov(edx, Immediate(-1));
__ cmp(ebx, edx);
__ j(not_equal, &exit);
// Test 3.
__ mov(eax, Immediate(3)); // Test number.
__ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
__ mov(ebx, Immediate(-1));
__ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::Integer16());
__ mov(ebx, Operand(esp, 0 * kPointerSize));
__ mov(edx, Immediate(65535));
__ cmp(ebx, edx);
__ j(not_equal, &exit);
__ Load(edx, Operand(esp, 0 * kPointerSize), Representation::Integer16());
__ mov(ebx, Immediate(-1));
__ cmp(ebx, edx);
__ j(not_equal, &exit);
// Test 4.
__ mov(eax, Immediate(4)); // Test number.
__ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
__ mov(ebx, Immediate(-1));
__ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::UInteger16());
__ mov(ebx, Operand(esp, 0 * kPointerSize));
__ mov(edx, Immediate(65535));
__ cmp(ebx, edx);
__ j(not_equal, &exit);
__ Load(edx, Operand(esp, 0 * kPointerSize), Representation::UInteger16());
__ cmp(ebx, edx);
__ j(not_equal, &exit);
// Test 5.
__ mov(eax, Immediate(5));
__ Move(edx, Immediate(0)); // Test Move()
__ cmp(edx, Immediate(0));
__ j(not_equal, &exit);
__ Move(ecx, Immediate(-1));
__ cmp(ecx, Immediate(-1));
__ j(not_equal, &exit);
__ Move(ebx, Immediate(0x77));
__ cmp(ebx, Immediate(0x77));
__ j(not_equal, &exit);
__ xor_(eax, eax); // Success.
__ bind(&exit);
__ add(esp, Immediate(1 * kPointerSize));
__ pop(edx);
__ pop(ebx);
__ ret(0);
CodeDesc desc;
masm->GetCode(&desc);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(0, result);
}
#undef __

@ -86,11 +86,6 @@
#include "src/ia32/macro-assembler-ia32.h"
#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
#endif
#if V8_TARGET_ARCH_X87
#include "src/regexp/x87/regexp-macro-assembler-x87.h"
#include "src/x87/assembler-x87.h"
#include "src/x87/macro-assembler-x87.h"
#endif
#endif // V8_INTERPRETED_REGEXP
#include "test/cctest/cctest.h"

@ -1,143 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdlib.h>
#include "src/v8.h"
#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/x87/frames-x87.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/c-signature.h"
#include "test/cctest/compiler/call-tester.h"
using namespace v8::internal;
using namespace v8::internal::compiler;
#define __ assm.
static int32_t DummyStaticFunction(Object* result) { return 1; }
TEST(WasmRelocationX87MemoryReference) {
Isolate* isolate = CcTest::i_isolate();
Zone zone(isolate->allocator(), ZONE_NAME);
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
Assembler assm(isolate, buffer, sizeof buffer);
DummyStaticFunction(NULL);
int32_t imm = 1234567;
__ mov(eax, Immediate(reinterpret_cast<Address>(imm),
RelocInfo::WASM_MEMORY_REFERENCE));
__ nop();
__ ret(0);
CSignature0<int32_t> csig;
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
USE(code);
CodeRunner<int32_t> runnable(isolate, code, &csig);
int32_t ret_value = runnable.Call();
CHECK_EQ(ret_value, imm);
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
byte* begin = code->instruction_start();
byte* end = begin + code->instruction_size();
disasm::Disassembler::Disassemble(stdout, begin, end);
#endif
int offset = 1234;
// Relocating references by offset
int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
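// Walk the relocation table and patch each WASM_MEMORY_REFERENCE
// immediate in place. Skipping the icache flush is presumably safe here
// because ia32 keeps instruction and data caches coherent.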
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
DCHECK(RelocInfo::IsWasmMemoryReference(it.rinfo()->rmode()));
it.rinfo()->update_wasm_memory_reference(
it.rinfo()->wasm_memory_reference(),
it.rinfo()->wasm_memory_reference() + offset, SKIP_ICACHE_FLUSH);
}
// Check that the immediate was updated correctly.
ret_value = runnable.Call();
CHECK_EQ(ret_value, imm + offset);
#ifdef OBJECT_PRINT
code->Print(os);
begin = code->instruction_start();
end = begin + code->instruction_size();
disasm::Disassembler::Disassemble(stdout, begin, end);
#endif
}
TEST(WasmRelocationX87MemorySizeReference) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Zone zone(isolate->allocator(), ZONE_NAME);
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
Assembler assm(isolate, buffer, sizeof buffer);
DummyStaticFunction(NULL);
int32_t size = 80;
Label fail;
__ mov(eax, Immediate(reinterpret_cast<Address>(size),
RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
__ cmp(eax, Immediate(reinterpret_cast<Address>(size),
RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
__ j(not_equal, &fail);
__ ret(0);
__ bind(&fail);
__ mov(eax, 0xdeadbeef);
__ ret(0);
CSignature0<int32_t> csig;
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
USE(code);
CodeRunner<int32_t> runnable(isolate, code, &csig);
int32_t ret_value = runnable.Call();
CHECK_NE(ret_value, bit_cast<int32_t>(0xdeadbeef));
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
byte* begin = code->instruction_start();
byte* end = begin + code->instruction_size();
disasm::Disassembler::Disassemble(stdout, begin, end);
#endif
size_t offset = 10;
int mode_mask = (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
DCHECK(RelocInfo::IsWasmMemorySizeReference(mode));
it.rinfo()->update_wasm_memory_size(
it.rinfo()->wasm_memory_size_reference(),
it.rinfo()->wasm_memory_size_reference() + offset, SKIP_ICACHE_FLUSH);
}
ret_value = runnable.Call();
CHECK_NE(ret_value, bit_cast<int32_t>(0xdeadbeef));
#ifdef OBJECT_PRINT
code->Print(os);
begin = code->instruction_start();
end = begin + code->instruction_size();
disasm::Disassembler::Disassemble(stdout, begin, end);
#endif
}
#undef __

@ -124,9 +124,8 @@
'compiler/alloc-number-debug': [PASS, ['mode == release', SKIP]],
'regress/regress-634-debug': [PASS, ['mode == release', SKIP]],
# BUG(v8:2989). PASS/FAIL on linux32 because crankshaft is turned off for
# nosse2. Also for arm novfp3.
'regress/regress-2989': [FAIL, NO_VARIANTS, ['system == linux and arch == x87 or arch == arm and simulator == True', PASS]],
# BUG(v8:2989).
'regress/regress-2989': [FAIL, NO_VARIANTS],
# This test variant makes only sense on arm.
'math-floor-of-div-nosudiv': [PASS, SLOW, ['arch not in [arm, arm64, android_arm, android_arm64]', SKIP]],
@ -520,12 +519,6 @@
'math-floor-of-div-minus-zero': [SKIP],
}], # 'arch == mips64el or arch == mips64'
['arch == x87', {
# The result produced by Gcc on linux platform is extended 80-bit double
# precision and not the expected standard 64-bit double precision.
'number-tostring-big-integer': [SKIP],
}], # 'arch == x87'
##############################################################################
['system == windows', {
# TODO(mstarzinger): Too slow with turbo fan.

@ -7,15 +7,4 @@
# BUG(5677): Real timers are flaky
'RuntimeCallStatsTest.*': [SKIP],
}], # ALWAYS
['arch == x87', {
'Ieee754.Expm1': [SKIP],
'Ieee754.Cos': [SKIP],
'Ieee754.Tan': [SKIP],
'Ieee754.Acosh': [SKIP],
'Ieee754.Asinh': [SKIP],
'MoveOptimizerTest.RemovesRedundantExplicit': [SKIP],
'RegisterAllocatorTest.CanAllocateFPRegisters': [SKIP],
}], # 'arch == x87'
]

@ -72,10 +72,6 @@
# Too slow.
'dfg-int-overflow-in-loop': [SKIP],
}], # 'arch == s390 or arch == s390x'
['arch == x87', {
# Too slow.
'dfg-negative-array-index': [SKIP],
}], # 'arch == x87'
##############################################################################
['asan == True', {

@ -20,7 +20,7 @@ import subprocess
import sys
# All arches that this script understands.
ARCHES = ["ia32", "x64", "arm", "arm64", "mips", "mips64", "ppc", "s390", "x87"]
ARCHES = ["ia32", "x64", "arm", "arm64", "mips", "mips64", "ppc", "s390"]
def PrintHelpAndExit():
print(__doc__)

@ -33,7 +33,7 @@ BUILD_TARGETS_ALL = ["all"]
# All arches that this script understands.
ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
"s390", "s390x", "x87"]
"s390", "s390x"]
# Arches that get built/run when you don't specify any.
DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
# Modes that this script understands.

@ -187,7 +187,6 @@ SUPPORTED_ARCHS = ["android_arm",
"android_x64",
"arm",
"ia32",
"x87",
"mips",
"mipsel",
"mips64",
@ -211,7 +210,6 @@ SLOW_ARCHS = ["android_arm",
"mips64el",
"s390",
"s390x",
"x87",
"arm64"]

@ -59,10 +59,10 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little",
"android_arm", "android_arm64", "android_ia32", "android_x87",
"android_x64", "arm", "arm64", "ia32", "mips", "mipsel", "mips64",
"mips64el", "x64", "x87", "ppc", "ppc64", "s390", "s390x", "macos",
"windows", "linux", "aix"]:
"android_arm", "android_arm64", "android_ia32", "android_x64",
"arm", "arm64", "ia32", "mips", "mipsel", "mips64", "mips64el",
"x64", "ppc", "ppc64", "s390", "s390x", "macos", "windows",
"linux", "aix"]:
VARIABLES[var] = var
# Allow using variants as keywords.

@ -82,7 +82,6 @@ GN_UNSUPPORTED_FEATURES = [
'solaris',
'vtune',
'v8-version.h',
'x87',
]
ALL_GN_PREFIXES = [