Introduce x87 port
Support x87-only platform (ia32 without SSE).

R=danno@chromium.org
Review URL: https://codereview.chromium.org/293743005
Patch from Weiliang Lin <weiliang.lin@intel.com>.

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21469 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 8d334ed1dc
commit 9c485e182b
AUTHORS | 3 +++
@@ -13,6 +13,7 @@ Bloomberg Finance L.P.
 NVIDIA Corporation
 BlackBerry Limited
 Opera Software ASA
+Intel Corporation

 Akinori MUSHA <knu@FreeBSD.org>
 Alexander Botero-Lowry <alexbl@FreeBSD.org>
@@ -24,6 +25,7 @@ Baptiste Afsa <baptiste.afsa@arm.com>
 Bert Belder <bertbelder@gmail.com>
 Burcu Dogan <burcujdogan@gmail.com>
 Craig Schlenter <craig.schlenter@gmail.com>
+Chunyang Dai <chunyang.dai@intel.com>
 Daniel Andersson <kodandersson@gmail.com>
 Daniel James <dnljms@gmail.com>
 Derek J Conrod <dconrod@codeaurora.org>
@@ -64,6 +66,7 @@ Subrato K De <subratokde@codeaurora.org>
 Tobias Burnus <burnus@net-b.de>
 Vincent Belliard <vincent.belliard@arm.com>
 Vlad Burlik <vladbph@gmail.com>
+Weiliang Lin <weiliang.lin@intel.com>
 Xi Qian <xi.qian@intel.com>
 Yuqiang Xian <yuqiang.xian@intel.com>
 Zaheer Ahmad <zahmad@codeaurora.org>
Makefile | 4 ++--
@@ -217,11 +217,11 @@ endif

 # Architectures and modes to be compiled. Consider these to be internal
 # variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm arm64 mips mipsel
+ARCHES = ia32 x64 arm arm64 mips mipsel x87
 DEFAULT_ARCHES = ia32 x64 arm
 MODES = release debug optdebug
 DEFAULT_MODES = release debug
-ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x87
 NACL_ARCHES = nacl_ia32 nacl_x64

 # List of files that trigger Makefile regeneration:
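The Makefile turns each ARCHES x MODES combination into a build target, so the two lines added above are what make the port reachable from the command line. A usage sketch (target names follow the existing arch.mode pattern; they are implied by the variables rather than spelled out in this hunk):

    # x87 builds via the standard arch.mode targets
    make x87.release
    make x87.debug
    # Android cross-build, once the NDK toolchain is configured
    make android_x87.release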
Makefile.android
@@ -26,7 +26,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 # Those definitions should be consistent with the main Makefile
-ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x87
 MODES = release debug

 # Generates all combinations of ANDROID ARCHES and MODES,
@@ -73,7 +73,14 @@ else
     TOOLCHAIN_PREFIX = i686-linux-android
     TOOLCHAIN_VER = 4.6
   else
-    $(error Target architecture "${ARCH}" is not supported)
+    ifeq ($(ARCH), android_x87)
+      DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
+      TOOLCHAIN_ARCH = x86
+      TOOLCHAIN_PREFIX = i686-linux-android
+      TOOLCHAIN_VER = 4.6
+    else
+      $(error Target architecture "${ARCH}" is not supported)
+    endif
   endif
 endif
 endif
build/android.gypi
@@ -179,7 +179,7 @@
             '-L<(android_stlport_libs)/mips',
           ],
         }],
-        ['target_arch=="ia32"', {
+        ['target_arch=="ia32" or target_arch=="x87"', {
           'ldflags': [
             '-L<(android_stlport_libs)/x86',
           ],
@@ -196,7 +196,7 @@
         }],
       ],
     }],
-    ['target_arch=="ia32"', {
+    ['target_arch=="ia32" or target_arch=="x87"', {
      # The x86 toolchain currently has problems with stack-protector.
      'cflags!': [
        '-fstack-protector',
build/standalone.gypi
@@ -115,7 +115,7 @@
     }, {
       'os_posix%': 1,
     }],
-    ['(v8_target_arch=="ia32" or v8_target_arch=="x64") and \
+    ['(v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \
       (OS=="linux" or OS=="mac")', {
       'v8_enable_gdbjit%': 1,
     }, {
build/toolchain.gypi
@@ -288,6 +288,13 @@
         'V8_TARGET_ARCH_IA32',
       ],
     }],  # v8_target_arch=="ia32"
+    ['v8_target_arch=="x87"', {
+      'defines': [
+        'V8_TARGET_ARCH_X87',
+      ],
+      'cflags': ['-mfpmath=387'],
+      'ldflags': ['-mfpmath=387'],
+    }],  # v8_target_arch=="x87"
     ['v8_target_arch=="mips"', {
       'defines': [
         'V8_TARGET_ARCH_MIPS',
@@ -440,7 +447,7 @@
     }],
     ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
        or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
-      (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
+      (v8_target_arch=="arm" or v8_target_arch=="ia32" or v8_target_arch=="x87" or \
       v8_target_arch=="mips" or v8_target_arch=="mipsel")', {
       # Check whether the host compiler and target compiler support the
       # '-m32' option and set it if so.
src/assembler.cc
@@ -66,6 +66,8 @@
 #include "arm/assembler-arm-inl.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/assembler-mips-inl.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/assembler-x87-inl.h"
 #else
 #error "Unknown architecture."
 #endif
@@ -82,6 +84,8 @@
 #include "arm/regexp-macro-assembler-arm.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/regexp-macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/regexp-macro-assembler-x87.h"
 #else  // Unknown architecture.
 #error "Unknown architecture."
 #endif  // Target architecture.
@@ -1345,6 +1349,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
   function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
 #elif V8_TARGET_ARCH_MIPS
   function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
+#elif V8_TARGET_ARCH_X87
+  function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
 #else
   UNREACHABLE();
 #endif
src/code-stubs.h
@@ -446,6 +446,8 @@ class RuntimeCallHelper {
 #include "arm/code-stubs-arm.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/code-stubs-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/code-stubs-x87.h"
 #else
 #error Unsupported target architecture.
 #endif
src/codegen.h
@@ -55,6 +55,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 #include "arm/codegen-arm.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/codegen-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/codegen-x87.h"
 #else
 #error Unsupported target architecture.
 #endif
src/frames-inl.h
@@ -19,6 +19,8 @@
 #include "arm/frames-arm.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/frames-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/frames-x87.h"
 #else
 #error Unsupported target architecture.
 #endif
src/frames.cc
@@ -450,7 +450,7 @@ StackFrame::Type StackFrame::GetCallerState(State* state) const {


 Address StackFrame::UnpaddedFP() const {
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
   if (!is_optimized()) return fp();
   int32_t alignment_state = Memory::int32_at(
     fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
src/full-codegen.h
@@ -99,7 +99,7 @@ class FullCodeGenerator: public AstVisitor {
   static const int kMaxBackEdgeWeight = 127;

   // Platform-specific code size multiplier.
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
   static const int kCodeSizeMultiplier = 105;
   static const int kBootCodeSizeMultiplier = 100;
 #elif V8_TARGET_ARCH_X64
src/gdb-jit.cc
@@ -194,7 +194,7 @@ class DebugSectionBase : public ZoneObject {
 struct MachOSectionHeader {
   char sectname[16];
   char segname[16];
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
   uint32_t addr;
   uint32_t size;
 #else
@@ -511,7 +511,7 @@ class MachO BASE_EMBEDDED {
     uint32_t cmd;
     uint32_t cmdsize;
     char segname[16];
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
     uint32_t vmaddr;
     uint32_t vmsize;
     uint32_t fileoff;
@@ -537,7 +537,7 @@ class MachO BASE_EMBEDDED {
   Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
     ASSERT(w->position() == 0);
     Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
     header->magic = 0xFEEDFACEu;
     header->cputype = 7;  // i386
     header->cpusubtype = 3;  // CPU_SUBTYPE_I386_ALL
@@ -562,7 +562,7 @@ class MachO BASE_EMBEDDED {
                                                 uintptr_t code_size) {
     Writer::Slot<MachOSegmentCommand> cmd =
         w->CreateSlotHere<MachOSegmentCommand>();
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
     cmd->cmd = LC_SEGMENT_32;
 #else
     cmd->cmd = LC_SEGMENT_64;
@@ -649,7 +649,7 @@ class ELF BASE_EMBEDDED {
   void WriteHeader(Writer* w) {
     ASSERT(w->position() == 0);
     Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87
     const uint8_t ident[16] =
         { 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
 #elif V8_TARGET_ARCH_X64
@@ -660,7 +660,7 @@ class ELF BASE_EMBEDDED {
 #endif
     OS::MemCopy(header->ident, ident, 16);
     header->type = 1;
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
     header->machine = 3;
 #elif V8_TARGET_ARCH_X64
     // Processor identification value for x64 is 62 as defined in
@@ -762,7 +762,7 @@ class ELFSymbol BASE_EMBEDDED {
   Binding binding() const {
     return static_cast<Binding>(info >> 4);
   }
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87
   struct SerializedLayout {
     SerializedLayout(uint32_t name,
                      uintptr_t value,
@@ -1084,7 +1084,7 @@ class DebugInfoSection : public DebugSection {
     w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
     Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
     uintptr_t fb_block_start = w->position();
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
     w->Write<uint8_t>(DW_OP_reg5);  // The frame pointer's here on ia32
 #elif V8_TARGET_ARCH_X64
     w->Write<uint8_t>(DW_OP_reg6);  // and here on x64.
src/globals.h
@@ -77,7 +77,7 @@ namespace internal {
 // Target architecture detection. This may be set externally. If not, detect
 // in the same way as the host architecture, that is, target the native
 // environment as presented by the compiler.
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
     !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
 #if defined(_M_X64) || defined(__x86_64__)
 #define V8_TARGET_ARCH_X64 1
@@ -141,6 +141,8 @@ namespace internal {
 #else
 #define V8_TARGET_LITTLE_ENDIAN 1
 #endif
+#elif V8_TARGET_ARCH_X87
+#define V8_TARGET_LITTLE_ENDIAN 1
 #else
 #error Unknown target architecture endianness
 #endif
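One consequence of the guard above is worth spelling out: x87 is never auto-detected. An x87 build uses the same 32-bit x86 compiler as ia32, so the fallback detection would land on V8_TARGET_ARCH_IA32; the macro only gets set externally, which is what the 'defines': ['V8_TARGET_ARCH_X87'] entry in the toolchain.gypi hunk above does. A minimal sketch of that contract (the #error text is illustrative, not from the commit):

    // x87 is an explicit choice layered on a 32-bit x86 build; gyp passes
    // -DV8_TARGET_ARCH_X87, nothing in the detection block ever sets it.
    #if V8_TARGET_ARCH_X87 && !defined(__i386__) && !defined(_M_IX86)
    #error "V8_TARGET_ARCH_X87 requires a 32-bit x86 toolchain (-m32)"
    #endif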
@@ -19,6 +19,8 @@
 #include "arm/lithium-arm.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/lithium-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/lithium-x87.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -51,6 +51,8 @@
 #include "arm/lithium-codegen-arm.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/lithium-codegen-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/lithium-codegen-x87.h"
 #else
 #error Unsupported target architecture.
 #endif
src/jsregexp.cc
@@ -32,6 +32,8 @@
 #include "arm/regexp-macro-assembler-arm.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/regexp-macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/regexp-macro-assembler-x87.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -6075,6 +6077,9 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
 #elif V8_TARGET_ARCH_MIPS
   RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
                                            zone);
+#elif V8_TARGET_ARCH_X87
+  RegExpMacroAssemblerX87 macro_assembler(mode, (data->capture_count + 1) * 2,
+                                          zone);
 #else
 #error "Unsupported architecture"
 #endif
@@ -17,6 +17,8 @@
 #include "arm/lithium-arm.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/lithium-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/lithium-x87.h"
 #else
 #error "Unknown architecture."
 #endif
@@ -18,6 +18,8 @@
 #include "arm/lithium-arm.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/lithium-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/lithium-x87.h"
 #else
 #error "Unknown architecture."
 #endif
@@ -21,6 +21,9 @@
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/lithium-mips.h"
 #include "mips/lithium-codegen-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/lithium-x87.h"
+#include "x87/lithium-codegen-x87.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -22,6 +22,9 @@
 #elif V8_TARGET_ARCH_ARM64
 #include "arm64/lithium-arm64.h"
 #include "arm64/lithium-codegen-arm64.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/lithium-x87.h"
+#include "x87/lithium-codegen-x87.h"
 #else
 #error "Unknown architecture."
 #endif
src/log.cc
@@ -322,6 +322,7 @@ class PerfJitLogger : public CodeEventLogger {
   static const uint32_t kElfMachX64 = 62;
   static const uint32_t kElfMachARM = 40;
   static const uint32_t kElfMachMIPS = 10;
+  static const uint32_t kElfMachX87 = 3;

   struct jitheader {
     uint32_t magic;
@@ -361,6 +362,8 @@ class PerfJitLogger : public CodeEventLogger {
     return kElfMachARM;
 #elif V8_TARGET_ARCH_MIPS
     return kElfMachMIPS;
+#elif V8_TARGET_ARCH_X87
+    return kElfMachX87;
 #else
     UNIMPLEMENTED();
     return 0;
@@ -557,6 +560,8 @@ void LowLevelLogger::LogCodeInfo() {
   const char arch[] = "arm";
 #elif V8_TARGET_ARCH_MIPS
   const char arch[] = "mips";
+#elif V8_TARGET_ARCH_X87
+  const char arch[] = "x87";
 #else
   const char arch[] = "unknown";
 #endif
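kElfMachX87 = 3 is the same value the gdb-jit hunk writes into header->machine: ELF has no separate machine id for x87, because x87 output is ordinary i386 machine code that merely avoids SSE. A reference sketch of the constants used above (the EM_* names are the ELF spec's, added here for orientation):

    // ELF e_machine values as used by the perf/gdb-jit loggers above.
    enum ElfMachine {
      kElfMachIA32OrX87 = 3,  // EM_386: x87 shares the ia32 encoding
      kElfMachMIPS = 10,      // EM_MIPS_RS3_LE (little-endian MIPS)
      kElfMachARM = 40,       // EM_ARM
      kElfMachX64 = 62        // EM_X86_64
    };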
src/macro-assembler.h
@@ -71,6 +71,12 @@ const int kInvalidProtoDepth = -1;
 #include "mips/assembler-mips-inl.h"
 #include "code.h"  // must be after assembler_*.h
 #include "mips/macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "assembler.h"
+#include "x87/assembler-x87.h"
+#include "x87/assembler-x87-inl.h"
+#include "code.h"  // must be after assembler_*.h
+#include "x87/macro-assembler-x87.h"
 #else
 #error Unsupported target architecture.
 #endif
src/platform-posix.cc
@@ -442,7 +442,7 @@ int OS::VSNPrintF(Vector<char> str,
 }


-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
 static void MemMoveWrapper(void* dest, const void* src, size_t size) {
   memmove(dest, src, size);
 }
@@ -491,7 +491,7 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(


 void OS::PostSetUp() {
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
   OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
   if (generated_memmove != NULL) {
     memmove_function = generated_memmove;
src/platform-win32.cc
@@ -107,7 +107,7 @@ intptr_t OS::MaxVirtualMemory() {
 }


-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
 static void MemMoveWrapper(void* dest, const void* src, size_t size) {
   memmove(dest, src, size);
 }
@@ -127,7 +127,7 @@ void OS::MemMove(void* dest, const void* src, size_t size) {
   (*memmove_function)(dest, src, size);
 }

-#endif  // V8_TARGET_ARCH_IA32
+#endif  // V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87


 class TimezoneCache {
@@ -453,7 +453,7 @@ char* Win32Time::LocalTimezone(TimezoneCache* cache) {


 void OS::PostSetUp() {
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
   OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
   if (generated_memmove != NULL) {
     memmove_function = generated_memmove;
src/win32-math.h
@@ -52,7 +52,7 @@ int strncasecmp(const char* s1, const char* s2, int n);
 #if (_MSC_VER < 1800)
 inline int lrint(double flt) {
   int intgr;
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
   __asm {
     fld flt
     fistp intgr
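The MSVC-only __asm fallback above is subtler than it looks: fistp converts using the FPU's current rounding mode (round-to-nearest by default), which is exactly the lrint() contract, whereas a plain C cast would truncate. For illustration, a GCC-syntax equivalent (an assumption for non-MSVC x86 builds; the original code is MSVC-only):

    // Hypothetical GCC counterpart of the MSVC fallback above.
    inline int lrint_x87(double flt) {
      int intgr;
      __asm__("fldl %1\n\t"    // push the double onto the x87 stack
              "fistpl %0"      // pop and store as int32 using the current
              : "=m"(intgr)    // (round-to-nearest) rounding mode
              : "m"(flt));
      return intgr;
    }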
src/platform.h
@@ -78,7 +78,7 @@ namespace internal {

 #ifndef V8_NO_FAST_TLS

-#if defined(_MSC_VER) && V8_HOST_ARCH_IA32
+#if defined(_MSC_VER) && (V8_HOST_ARCH_IA32)

 #define V8_FAST_TLS_SUPPORTED 1

@@ -292,7 +292,7 @@ class OS {
   // the platform doesn't care. Guaranteed to be a power of two.
   static int ActivationFrameAlignment();

-#if defined(V8_TARGET_ARCH_IA32)
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
   // Limit below which the extra overhead of the MemCopy function is likely
   // to outweigh the benefits of faster copying.
   static const int kMinComplexMemCopy = 64;
src/regexp-macro-assembler-tracer.cc
@@ -17,7 +17,7 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
   unsigned int type = assembler->Implementation();
   ASSERT(type < 6);
   const char* impl_names[] = {"IA32", "ARM", "ARM64",
-                              "MIPS", "X64", "Bytecode"};
+                              "MIPS", "X64", "X87", "Bytecode"};
   PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
 }

src/regexp-macro-assembler.h
@@ -33,6 +33,7 @@ class RegExpMacroAssembler {
     kARM64Implementation,
     kMIPSImplementation,
     kX64Implementation,
+    kX87Implementation,
     kBytecodeImplementation
   };

src/simulator.h
@@ -15,6 +15,8 @@
 #include "arm/simulator-arm.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/simulator-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/simulator-x87.h"
 #else
 #error Unsupported target architecture.
 #endif
src/strtod.cc
@@ -153,7 +153,8 @@ static void ReadDiyFp(Vector<const char> buffer,
 static bool DoubleStrtod(Vector<const char> trimmed,
                          int exponent,
                          double* result) {
-#if (V8_TARGET_ARCH_IA32 || defined(USE_SIMULATOR)) && !defined(_MSC_VER)
+#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 || defined(USE_SIMULATOR)) && \
+    !defined(_MSC_VER)
   // On x86 the floating-point stack can be 64 or 80 bits wide. If it is
   // 80 bits wide (as is the case on Linux) then double-rounding occurs and the
   // result is not accurate.
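The comment in that hunk names the core x87 hazard this port has to live with: on Linux the FPU computes with an 80-bit (64-bit-significand) stack, so a result can be rounded twice, once to extended precision and once when spilled to a 64-bit double, and occasionally differ from the correctly rounded answer. A self-contained illustration (the operand values are the editor's, chosen to trigger the effect; not from the commit):

    // Exact sum is 2^53 + 1 + 2^-52.
    // Rounded once to double: 2^53 + 2 (just above the halfway point).
    // Rounded to 80-bit extended first: exactly 2^53 + 1, and storing that
    // to double is a tie, which rounds to even, giving 2^53.
    #include <cstdio>

    int main() {
      volatile double big = 9007199254740992.0;    // 2^53
      volatile double small = 1.0000000000000002;  // 1 + 2^-52
      volatile double sum = big + small;
      // An SSE2 build prints ...994; an x87 build with the extended-precision
      // FPU stack (the Linux default) may print ...992 via double rounding.
      std::printf("%.17g\n", sum);
      return 0;
    }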
src/x87/assembler-x87-inl.h | 561 (new file)
@@ -0,0 +1,561 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

// A light-weight IA32 Assembler.

#ifndef V8_X87_ASSEMBLER_X87_INL_H_
#define V8_X87_ASSEMBLER_X87_INL_H_

#include "x87/assembler-x87.h"

#include "cpu.h"
#include "debug.h"

namespace v8 {
namespace internal {

bool CpuFeatures::SupportsCrankshaft() { return false; }


static const byte kCallOpcode = 0xE8;
static const int kNoCodeAgeSequenceLength = 5;


// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
  bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH;
  if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
    int32_t* p = reinterpret_cast<int32_t*>(pc_);
    *p -= delta;  // Relocate entry.
    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
  } else if (rmode_ == CODE_AGE_SEQUENCE) {
    if (*pc_ == kCallOpcode) {
      int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
      *p -= delta;  // Relocate entry.
      if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
    }
  } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
    // Special handling of js_return when a break point is set (call
    // instruction has been inserted).
    int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
    *p -= delta;  // Relocate entry.
    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
  } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
    // Special handling of a debug break slot when a break point is set (call
    // instruction has been inserted).
    int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
    *p -= delta;  // Relocate entry.
    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
  } else if (IsInternalReference(rmode_)) {
    // absolute code pointer inside code object moves with the code object.
    int32_t* p = reinterpret_cast<int32_t*>(pc_);
    *p += delta;  // Relocate entry.
    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
  }
}


Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                              || rmode_ == EMBEDDED_OBJECT
                              || rmode_ == EXTERNAL_REFERENCE);
  return reinterpret_cast<Address>(pc_);
}


Address RelocInfo::constant_pool_entry_address() {
  UNREACHABLE();
  return NULL;
}


int RelocInfo::target_address_size() {
  return Assembler::kSpecialTargetSize;
}


void RelocInfo::set_target_address(Address target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
  Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
      IsCodeTarget(rmode_)) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Memory::Object_at(pc_);
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Memory::Object_Handle_at(pc_);
}


void RelocInfo::set_target_object(Object* target,
                                  WriteBarrierMode write_barrier_mode,
                                  ICacheFlushMode icache_flush_mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  ASSERT(!target->IsConsString());
  Memory::Object_at(pc_) = target;
  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    CPU::FlushICache(pc_, sizeof(Address));
  }
  if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}


Address RelocInfo::target_reference() {
  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
  return Memory::Address_at(pc_);
}


Address RelocInfo::target_runtime_entry(Assembler* origin) {
  ASSERT(IsRuntimeEntry(rmode_));
  return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
}


void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode write_barrier_mode,
                                         ICacheFlushMode icache_flush_mode) {
  ASSERT(IsRuntimeEntry(rmode_));
  if (target_address() != target) {
    set_target_address(target, write_barrier_mode, icache_flush_mode);
  }
}


Handle<Cell> RelocInfo::target_cell_handle() {
  ASSERT(rmode_ == RelocInfo::CELL);
  Address address = Memory::Address_at(pc_);
  return Handle<Cell>(reinterpret_cast<Cell**>(address));
}


Cell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::CELL);
  return Cell::FromValueAddress(Memory::Address_at(pc_));
}


void RelocInfo::set_target_cell(Cell* cell,
                                WriteBarrierMode write_barrier_mode,
                                ICacheFlushMode icache_flush_mode) {
  ASSERT(rmode_ == RelocInfo::CELL);
  Address address = cell->address() + Cell::kValueOffset;
  Memory::Address_at(pc_) = address;
  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    CPU::FlushICache(pc_, sizeof(Address));
  }
  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
    // TODO(1550) We are passing NULL as a slot because cell can never be on
    // evacuation candidate.
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), NULL, cell);
  }
}


Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(*pc_ == kCallOpcode);
  return Memory::Object_Handle_at(pc_ + 1);
}


Code* RelocInfo::code_age_stub() {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(*pc_ == kCallOpcode);
  return Code::GetCodeFromTargetAddress(
      Assembler::target_address_at(pc_ + 1, host_));
}


void RelocInfo::set_code_age_stub(Code* stub,
                                  ICacheFlushMode icache_flush_mode) {
  ASSERT(*pc_ == kCallOpcode);
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
                                   icache_flush_mode);
}


Address RelocInfo::call_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return Assembler::target_address_at(pc_ + 1, host_);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Assembler::set_target_address_at(pc_ + 1, host_, target);
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


Object* RelocInfo::call_object() {
  return *call_object_address();
}


void RelocInfo::set_call_object(Object* target) {
  *call_object_address() = target;
}


Object** RelocInfo::call_object_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return reinterpret_cast<Object**>(pc_ + 1);
}


void RelocInfo::WipeOut() {
  if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
    Memory::Address_at(pc_) = NULL;
  } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
    // Effectively write zero into the relocation.
    Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
  } else {
    UNREACHABLE();
  }
}


bool RelocInfo::IsPatchedReturnSequence() {
  return *pc_ == kCallOpcode;
}


bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  return !Assembler::IsNop(pc());
}


void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::CELL) {
    visitor->VisitCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
    visitor->VisitCodeAgeSequence(this);
  } else if (((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence())) &&
             isolate->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
  } else if (IsRuntimeEntry(mode)) {
    visitor->VisitRuntimeEntry(this);
  }
}


template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::CELL) {
    StaticVisitor::VisitCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
    StaticVisitor::VisitCodeAgeSequence(heap, this);
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
  } else if (IsRuntimeEntry(mode)) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}


Immediate::Immediate(int x) {
  x_ = x;
  rmode_ = RelocInfo::NONE32;
}


Immediate::Immediate(const ExternalReference& ext) {
  x_ = reinterpret_cast<int32_t>(ext.address());
  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}


Immediate::Immediate(Label* internal_offset) {
  x_ = reinterpret_cast<int32_t>(internal_offset);
  rmode_ = RelocInfo::INTERNAL_REFERENCE;
}


Immediate::Immediate(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    x_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    x_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


Immediate::Immediate(Smi* value) {
  x_ = reinterpret_cast<intptr_t>(value);
  rmode_ = RelocInfo::NONE32;
}


Immediate::Immediate(Address addr) {
  x_ = reinterpret_cast<int32_t>(addr);
  rmode_ = RelocInfo::NONE32;
}


void Assembler::emit(uint32_t x) {
  *reinterpret_cast<uint32_t*>(pc_) = x;
  pc_ += sizeof(uint32_t);
}


void Assembler::emit(Handle<Object> handle) {
  AllowDeferredHandleDereference heap_object_check;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!isolate()->heap()->InNewSpace(obj));
  if (obj->IsHeapObject()) {
    emit(reinterpret_cast<intptr_t>(handle.location()),
         RelocInfo::EMBEDDED_OBJECT);
  } else {
    // no relocation needed
    emit(reinterpret_cast<intptr_t>(obj));
  }
}


void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
  if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
    RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
  } else if (!RelocInfo::IsNone(rmode)
      && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
    RecordRelocInfo(rmode);
  }
  emit(x);
}


void Assembler::emit(Handle<Code> code,
                     RelocInfo::Mode rmode,
                     TypeFeedbackId id) {
  AllowDeferredHandleDereference embedding_raw_address;
  emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
}


void Assembler::emit(const Immediate& x) {
  if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
    Label* label = reinterpret_cast<Label*>(x.x_);
    emit_code_relative_offset(label);
    return;
  }
  if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
  emit(x.x_);
}


void Assembler::emit_code_relative_offset(Label* label) {
  if (label->is_bound()) {
    int32_t pos;
    pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
    emit(pos);
  } else {
    emit_disp(label, Displacement::CODE_RELATIVE);
  }
}


void Assembler::emit_w(const Immediate& x) {
  ASSERT(RelocInfo::IsNone(x.rmode_));
  uint16_t value = static_cast<uint16_t>(x.x_);
  reinterpret_cast<uint16_t*>(pc_)[0] = value;
  pc_ += sizeof(uint16_t);
}


Address Assembler::target_address_at(Address pc,
                                     ConstantPoolArray* constant_pool) {
  return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}


void Assembler::set_target_address_at(Address pc,
                                      ConstantPoolArray* constant_pool,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  int32_t* p = reinterpret_cast<int32_t*>(pc);
  *p = target - (pc + sizeof(int32_t));
  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    CPU::FlushICache(p, sizeof(int32_t));
  }
}


Address Assembler::target_address_from_return_address(Address pc) {
  return pc - kCallTargetAddressOffset;
}


Displacement Assembler::disp_at(Label* L) {
  return Displacement(long_at(L->pos()));
}


void Assembler::disp_at_put(Label* L, Displacement disp) {
  long_at_put(L->pos(), disp.data());
}


void Assembler::emit_disp(Label* L, Displacement::Type type) {
  Displacement disp(L, type);
  L->link_to(pc_offset());
  emit(static_cast<int>(disp.data()));
}


void Assembler::emit_near_disp(Label* L) {
  byte disp = 0x00;
  if (L->is_near_linked()) {
    int offset = L->near_link_pos() - pc_offset();
    ASSERT(is_int8(offset));
    disp = static_cast<byte>(offset & 0xFF);
  }
  L->link_to(pc_offset(), Label::kNear);
  *pc_++ = disp;
}


void Operand::set_modrm(int mod, Register rm) {
  ASSERT((mod & -4) == 0);
  buf_[0] = mod << 6 | rm.code();
  len_ = 1;
}


void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
  ASSERT(len_ == 1);
  ASSERT((scale & -4) == 0);
  // Use SIB with no index register only for base esp.
  ASSERT(!index.is(esp) || base.is(esp));
  buf_[1] = scale << 6 | index.code() << 3 | base.code();
  len_ = 2;
}


void Operand::set_disp8(int8_t disp) {
  ASSERT(len_ == 1 || len_ == 2);
  *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
}


void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
  ASSERT(len_ == 1 || len_ == 2);
  int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
  *p = disp;
  len_ += sizeof(int32_t);
  rmode_ = rmode;
}


Operand::Operand(Register reg) {
  // reg
  set_modrm(3, reg);
}


Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
  // [disp/r]
  set_modrm(0, ebp);
  set_dispr(disp, rmode);
}

} }  // namespace v8::internal

#endif  // V8_X87_ASSEMBLER_X87_INL_H_
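The address accessors in this file encode call and jump targets as rel32 displacements: the int32 stored at pc is relative to the end of the 4-byte field, so target_address_at() computes pc + 4 + disp and set_target_address_at() stores target - (pc + 4). A standalone round-trip check of that arithmetic (names mirror the V8 functions; the buffer and target are made up):

    #include <cassert>
    #include <cstdint>

    typedef unsigned char byte;

    // Mirrors Assembler::target_address_at: target = pc + 4 + *pc.
    static byte* target_address_at(byte* pc) {
      return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
    }

    // Mirrors Assembler::set_target_address_at, minus the icache flush.
    static void set_target_address_at(byte* pc, byte* target) {
      *reinterpret_cast<int32_t*>(pc) =
          static_cast<int32_t>(target - (pc + sizeof(int32_t)));
    }

    int main() {
      byte code[128] = {0};
      byte* target = code + 100;  // hypothetical destination
      set_target_address_at(code, target);
      assert(target_address_at(code) == target);
      return 0;
    }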
src/x87/assembler-x87.cc  | 2032 (new file; diff suppressed because it is too large)
src/x87/assembler-x87.h   | 1031 (new file; diff suppressed because it is too large)
src/x87/builtins-x87.cc   | 1452 (new file; diff suppressed because it is too large)
src/x87/code-stubs-x87.cc | 4781 (new file; diff suppressed because it is too large)
src/x87/code-stubs-x87.h | 413 (new file)
@@ -0,0 +1,413 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_X87_CODE_STUBS_X87_H_
#define V8_X87_CODE_STUBS_X87_H_

#include "macro-assembler.h"
#include "ic-inl.h"

namespace v8 {
namespace internal {


void ArrayNativeCode(MacroAssembler* masm,
                     bool construct_call,
                     Label* call_generic_code);


class StoreBufferOverflowStub: public PlatformCodeStub {
 public:
  explicit StoreBufferOverflowStub(Isolate* isolate)
      : PlatformCodeStub(isolate) { }

  void Generate(MacroAssembler* masm);

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  Major MajorKey() { return StoreBufferOverflow; }
  int MinorKey() { return 0; }
};


class StringHelper : public AllStatic {
 public:
  // Generate code for copying characters using the rep movs instruction.
  // Copies ecx characters from esi to edi. Copying of overlapping regions is
  // not supported.
  static void GenerateCopyCharactersREP(MacroAssembler* masm,
                                        Register dest,     // Must be edi.
                                        Register src,      // Must be esi.
                                        Register count,    // Must be ecx.
                                        Register scratch,  // Neither of above.
                                        bool ascii);

  // Generate string hash.
  static void GenerateHashInit(MacroAssembler* masm,
                               Register hash,
                               Register character,
                               Register scratch);
  static void GenerateHashAddCharacter(MacroAssembler* masm,
                                       Register hash,
                                       Register character,
                                       Register scratch);
  static void GenerateHashGetHash(MacroAssembler* masm,
                                  Register hash,
                                  Register scratch);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};


class SubStringStub: public PlatformCodeStub {
 public:
  explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}

 private:
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};


class StringCompareStub: public PlatformCodeStub {
 public:
  explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }

  // Compares two flat ASCII strings and returns result in eax.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                              Register left,
                                              Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3);

  // Compares two flat ASCII strings for equality and returns result
  // in eax.
  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register scratch1,
                                            Register scratch2);

 private:
  virtual Major MajorKey() { return StringCompare; }
  virtual int MinorKey() { return 0; }
  virtual void Generate(MacroAssembler* masm);

  static void GenerateAsciiCharsCompareLoop(
      MacroAssembler* masm,
      Register left,
      Register right,
      Register length,
      Register scratch,
      Label* chars_not_equal,
      Label::Distance chars_not_equal_near = Label::kFar);
};


class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  NameDictionaryLookupStub(Isolate* isolate,
                           Register dictionary,
                           Register result,
                           Register index,
                           LookupMode mode)
      : PlatformCodeStub(isolate),
        dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }

  void Generate(MacroAssembler* masm);

  static void GenerateNegativeLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register properties,
                                     Handle<Name> name,
                                     Register r0);

  static void GeneratePositiveLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register elements,
                                     Register name,
                                     Register r0,
                                     Register r1);

  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  Major MajorKey() { return NameDictionaryLookup; }

  int MinorKey() {
    return DictionaryBits::encode(dictionary_.code()) |
        ResultBits::encode(result_.code()) |
        IndexBits::encode(index_.code()) |
        LookupModeBits::encode(mode_);
  }

  class DictionaryBits: public BitField<int, 0, 3> {};
  class ResultBits: public BitField<int, 3, 3> {};
  class IndexBits: public BitField<int, 6, 3> {};
  class LookupModeBits: public BitField<LookupMode, 9, 1> {};

  Register dictionary_;
  Register result_;
  Register index_;
  LookupMode mode_;
};


class RecordWriteStub: public PlatformCodeStub {
 public:
  RecordWriteStub(Isolate* isolate,
                  Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action)
      : PlatformCodeStub(isolate),
        object_(object),
        value_(value),
        address_(address),
        remembered_set_action_(remembered_set_action),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
  }

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  virtual bool SometimesSetsUpAFrame() { return false; }

  static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
  static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.

  static const byte kFiveByteNopInstruction = 0x3d;  // Cmpl eax, #imm32.
  static const byte kFiveByteJumpInstruction = 0xe9;  // Jmp #imm32.

  static Mode GetMode(Code* stub) {
    byte first_instruction = stub->instruction_start()[0];
    byte second_instruction = stub->instruction_start()[2];

    if (first_instruction == kTwoByteJumpInstruction) {
      return INCREMENTAL;
    }

    ASSERT(first_instruction == kTwoByteNopInstruction);

    if (second_instruction == kFiveByteJumpInstruction) {
      return INCREMENTAL_COMPACTION;
    }

    ASSERT(second_instruction == kFiveByteNopInstruction);

    return STORE_BUFFER_ONLY;
  }

  static void Patch(Code* stub, Mode mode) {
    switch (mode) {
      case STORE_BUFFER_ONLY:
        ASSERT(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);
        stub->instruction_start()[0] = kTwoByteNopInstruction;
        stub->instruction_start()[2] = kFiveByteNopInstruction;
        break;
      case INCREMENTAL:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        stub->instruction_start()[0] = kTwoByteJumpInstruction;
        break;
      case INCREMENTAL_COMPACTION:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        stub->instruction_start()[0] = kTwoByteNopInstruction;
        stub->instruction_start()[2] = kFiveByteJumpInstruction;
        break;
    }
    ASSERT(GetMode(stub) == mode);
    CPU::FlushICache(stub->instruction_start(), 7);
  }

 private:
  // This is a helper class for freeing up 3 scratch registers, where the third
  // is always ecx (needed for shift operations). The input is two registers
  // that must be preserved and one scratch register provided by the caller.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch0)
        : object_orig_(object),
          address_orig_(address),
          scratch0_orig_(scratch0),
          object_(object),
          address_(address),
          scratch0_(scratch0) {
      ASSERT(!AreAliased(scratch0, object, address, no_reg));
      scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
      if (scratch0.is(ecx)) {
        scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
      }
      if (object.is(ecx)) {
        object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
      }
      if (address.is(ecx)) {
        address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
      }
      ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
    }

    void Save(MacroAssembler* masm) {
      ASSERT(!address_orig_.is(object_));
      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
      // We don't have to save scratch0_orig_ because it was given to us as
      // a scratch register. But if we had to switch to a different reg then
      // we should save the new scratch0_.
      if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
      if (!ecx.is(scratch0_orig_) &&
          !ecx.is(object_orig_) &&
          !ecx.is(address_orig_)) {
        masm->push(ecx);
      }
      masm->push(scratch1_);
      if (!address_.is(address_orig_)) {
        masm->push(address_);
        masm->mov(address_, address_orig_);
      }
      if (!object_.is(object_orig_)) {
        masm->push(object_);
        masm->mov(object_, object_orig_);
      }
    }

    void Restore(MacroAssembler* masm) {
      // These will have been preserved the entire time, so we just need to move
      // them back. Only in one case is the orig_ reg different from the plain
      // one, since only one of them can alias with ecx.
      if (!object_.is(object_orig_)) {
        masm->mov(object_orig_, object_);
        masm->pop(object_);
      }
      if (!address_.is(address_orig_)) {
        masm->mov(address_orig_, address_);
        masm->pop(address_);
      }
      masm->pop(scratch1_);
      if (!ecx.is(scratch0_orig_) &&
          !ecx.is(object_orig_) &&
          !ecx.is(address_orig_)) {
        masm->pop(ecx);
      }
      if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
    }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved. The caller saved
    // registers are eax, ecx and edx. The three scratch registers (incl. ecx)
    // will be restored by other means so we don't bother pushing them here.
    void SaveCallerSaveRegisters(MacroAssembler* masm) {
      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
    }

    inline void RestoreCallerSaveRegisters(MacroAssembler*masm) {
      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
    }

    inline Register object() { return object_; }
    inline Register address() { return address_; }
    inline Register scratch0() { return scratch0_; }
    inline Register scratch1() { return scratch1_; }

   private:
    Register object_orig_;
    Register address_orig_;
    Register scratch0_orig_;
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;
    // Third scratch register is always ecx.

    Register GetRegThatIsNotEcxOr(Register r1,
                                  Register r2,
                                  Register r3) {
      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
        Register candidate = Register::FromAllocationIndex(i);
        if (candidate.is(ecx)) continue;
        if (candidate.is(r1)) continue;
        if (candidate.is(r2)) continue;
        if (candidate.is(r3)) continue;
        return candidate;
      }
      UNREACHABLE();
      return no_reg;
    }
    friend class RecordWriteStub;
  };

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  void Generate(MacroAssembler* masm);
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  Major MajorKey() { return RecordWrite; }

  int MinorKey() {
    return ObjectBits::encode(object_.code()) |
        ValueBits::encode(value_.code()) |
        AddressBits::encode(address_.code()) |
        RememberedSetActionBits::encode(remembered_set_action_);
  }

  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  class ObjectBits: public BitField<int, 0, 3> {};
  class ValueBits: public BitField<int, 3, 3> {};
  class AddressBits: public BitField<int, 6, 3> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};

  Register object_;
  Register value_;
  Register address_;
  RememberedSetAction remembered_set_action_;
  RegisterAllocation regs_;
};


} }  // namespace v8::internal

#endif  // V8_X87_CODE_STUBS_X87_H_
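RecordWriteStub::GetMode() and Patch() above work because the stub is emitted starting with two instructions that do nothing useful but encode the mode: byte 0 is either a two-byte skip (0x3c, cmpb al, imm8) or a short jmp (0xeb), and byte 2 is either a five-byte skip (0x3d, cmpl eax, imm32) or a near jmp (0xe9). A tiny simulation of that state machine on a raw buffer (buffer contents are illustrative):

    #include <cassert>

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    // Mirrors RecordWriteStub::GetMode on a plain byte array.
    Mode GetMode(const unsigned char* code) {
      if (code[0] == 0xeb) return INCREMENTAL;             // jmp imm8
      assert(code[0] == 0x3c);                             // cmpb al, imm8
      if (code[2] == 0xe9) return INCREMENTAL_COMPACTION;  // jmp imm32
      assert(code[2] == 0x3d);                             // cmpl eax, imm32
      return STORE_BUFFER_ONLY;
    }

    int main() {
      unsigned char stub[7] = {0x3c, 0, 0x3d, 0, 0, 0, 0};
      assert(GetMode(stub) == STORE_BUFFER_ONLY);
      stub[0] = 0xeb;  // what Patch(stub, INCREMENTAL) writes
      assert(GetMode(stub) == INCREMENTAL);
      return 0;
    }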
src/x87/codegen-x87.cc | 632 (new file)
@@ -0,0 +1,632 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "v8.h"

#if V8_TARGET_ARCH_X87

#include "codegen.h"
#include "heap.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


UnaryMathFunction CreateExpFunction() {
  // No SSE2 support
  return &std::exp;
}


UnaryMathFunction CreateSqrtFunction() {
  // No SSE2 support
  return &std::sqrt;
}

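// (Illustrative aside, not part of the generated file: on ia32 with SSE2,
// CreateExpFunction() and CreateSqrtFunction() assemble dedicated stubs,
// e.g. a single sqrtsd instruction for sqrt. Without SSE those operations
// already go through the FPU; std::sqrt built with -mfpmath=387 compiles
// down to fsqrt, so returning the C library functions is the natural
// fallback rather than generating code.)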
// Helper functions for CreateMemMoveFunction.
|
||||
#undef __
|
||||
#define __ ACCESS_MASM(masm)
|
||||
|
||||
enum Direction { FORWARD, BACKWARD };
|
||||
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
|
||||
|
||||
|
||||
void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
|
||||
__ pop(esi);
|
||||
__ pop(edi);
|
||||
__ ret(0);
|
||||
}
|
||||
|
||||
|
||||
#undef __
|
||||
#define __ masm.
|
||||
|
||||
|
||||
class LabelConverter {
|
||||
public:
|
||||
explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
|
||||
int32_t address(Label* l) const {
|
||||
return reinterpret_cast<int32_t>(buffer_) + l->pos();
|
||||
}
|
||||
private:
|
||||
byte* buffer_;
|
||||
};
|
||||
|
||||
|
||||
OS::MemMoveFunction CreateMemMoveFunction() {
|
||||
size_t actual_size;
|
||||
// Allocate buffer in executable space.
|
||||
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
|
||||
if (buffer == NULL) return NULL;
|
||||
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
|
||||
LabelConverter conv(buffer);
|
||||
|
||||
// Generated code is put into a fixed, unmovable buffer, and not into
|
||||
// the V8 heap. We can't, and don't, refer to any relocatable addresses
|
||||
// (e.g. the JavaScript nan-object).
|
||||
|
||||
// 32-bit C declaration function calls pass arguments on stack.
|
||||
|
||||
// Stack layout:
|
||||
// esp[12]: Third argument, size.
|
||||
// esp[8]: Second argument, source pointer.
|
||||
// esp[4]: First argument, destination pointer.
|
||||
// esp[0]: return address
|
||||
|
||||
const int kDestinationOffset = 1 * kPointerSize;
|
||||
const int kSourceOffset = 2 * kPointerSize;
|
||||
const int kSizeOffset = 3 * kPointerSize;
|
||||
|
||||
int stack_offset = 0; // Update if we change the stack height.
|
||||
|
||||
Label backward, backward_much_overlap;
|
||||
Label forward_much_overlap, small_size, medium_size, pop_and_return;
|
||||
__ push(edi);
|
||||
__ push(esi);
|
||||
stack_offset += 2 * kPointerSize;
|
||||
Register dst = edi;
|
||||
Register src = esi;
|
||||
Register count = ecx;
|
||||
__ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
|
||||
__ mov(src, Operand(esp, stack_offset + kSourceOffset));
|
||||
__ mov(count, Operand(esp, stack_offset + kSizeOffset));
|
||||
|
||||
__ cmp(dst, src);
|
||||
__ j(equal, &pop_and_return);
|
||||
|
||||
// No SSE2.
|
||||
Label forward;
|
||||
__ cmp(count, 0);
|
||||
__ j(equal, &pop_and_return);
|
||||
__ cmp(dst, src);
|
||||
__ j(above, &backward);
|
||||
__ jmp(&forward);
|
||||
{
|
||||
// Simple forward copier.
|
||||
Label forward_loop_1byte, forward_loop_4byte;
|
||||
__ bind(&forward_loop_4byte);
|
||||
__ mov(eax, Operand(src, 0));
|
||||
__ sub(count, Immediate(4));
|
||||
__ add(src, Immediate(4));
|
||||
__ mov(Operand(dst, 0), eax);
|
||||
__ add(dst, Immediate(4));
|
||||
__ bind(&forward); // Entry point.
|
||||
__ cmp(count, 3);
|
||||
__ j(above, &forward_loop_4byte);
|
||||
__ bind(&forward_loop_1byte);
|
||||
__ cmp(count, 0);
|
||||
__ j(below_equal, &pop_and_return);
|
||||
__ mov_b(eax, Operand(src, 0));
|
||||
__ dec(count);
|
||||
__ inc(src);
|
||||
__ mov_b(Operand(dst, 0), eax);
|
||||
__ inc(dst);
|
||||
__ jmp(&forward_loop_1byte);
|
||||
}
|
||||
{
|
||||
// Simple backward copier.
|
||||
Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
|
||||
__ bind(&backward);
|
||||
__ add(src, count);
|
||||
__ add(dst, count);
|
||||
__ cmp(count, 3);
|
||||
__ j(below_equal, &entry_shortcut);
|
||||
|
||||
__ bind(&backward_loop_4byte);
|
||||
__ sub(src, Immediate(4));
|
||||
__ sub(count, Immediate(4));
|
||||
__ mov(eax, Operand(src, 0));
|
||||
__ sub(dst, Immediate(4));
|
||||
__ mov(Operand(dst, 0), eax);
|
||||
__ cmp(count, 3);
|
||||
__ j(above, &backward_loop_4byte);
|
||||
__ bind(&backward_loop_1byte);
|
||||
__ cmp(count, 0);
|
||||
__ j(below_equal, &pop_and_return);
|
||||
__ bind(&entry_shortcut);
|
||||
__ dec(src);
|
||||
__ dec(count);
|
||||
__ mov_b(eax, Operand(src, 0));
|
||||
__ dec(dst);
|
||||
__ mov_b(Operand(dst, 0), eax);
|
||||
__ jmp(&backward_loop_1byte);
|
||||
}
|
||||
|
||||
__ bind(&pop_and_return);
|
||||
MemMoveEmitPopAndReturn(&masm);
|
||||
|
||||
CodeDesc desc;
|
||||
masm.GetCode(&desc);
|
||||
ASSERT(!RelocInfo::RequiresRelocation(desc));
|
||||
CPU::FlushICache(buffer, actual_size);
|
||||
OS::ProtectCode(buffer, actual_size);
|
||||
// TODO(jkummerow): It would be nice to register this code creation event
|
||||
// with the PROFILE / GDBJIT system.
|
||||
return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
|
||||
}
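
// Editorial sketch (assumed usage, not part of the patch): the returned
// OS::MemMoveFunction is invoked like a plain cdecl memmove, which matches
// the stack layout the generated code reads at esp[4], esp[8] and esp[12].
static void ExampleMemMoveUse(OS::MemMoveFunction generated_memmove) {
  char src[8] = "abcdefg";
  char dst[8];
  generated_memmove(dst, src, sizeof(src));  // dest, source, size in bytes
}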

#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)


void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ebx    : target map
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
  }

  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ebx    : target map
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(ebx);

  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  // edx: receiver
  // edi: length of source FixedArray (smi-tagged)
  AllocationFlags flags =
      static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);

  // eax: destination FixedDoubleArray
  // edi: number of elements
  // edx: receiver
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ mov(ebx, eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      ebx,
                      edi,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));

  // Prepare for conversion loop.
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  // Restore registers before jumping into runtime.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(eax);
  __ jmp(fail);

  // Convert and copy elements.
  // esi: source FixedArray
  __ bind(&loop);
  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
  // ebx: current element from source
  // edi: index of current element
  __ JumpIfNotSmi(ebx, &convert_hole);

  // Normal smi, convert it to double and store.
  __ SmiUntag(ebx);
  __ push(ebx);
  __ fild_s(Operand(esp, 0));
  __ pop(ebx);
  __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
  __ jmp(&entry);

  // Found hole, store hole_nan_as_double instead.
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
  __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));

  __ bind(&entry);
  __ sub(edi, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(eax);

  // Restore esi.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // eax: value
  // ebx: target map
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ebx    : target map
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, success;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(edx);
  __ push(ebx);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));

  // Allocate new FixedArray.
  // ebx: length of source FixedDoubleArray (smi-tagged)
  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
  __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);

  // eax: destination FixedArray
  // ebx: number of elements
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

  __ jmp(&entry);

  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ bind(&only_change_map);
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&success);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(edx);
  __ pop(eax);
  __ jmp(fail);

  // Box doubles into heap numbers.
  // edi: source FixedDoubleArray
  // eax: destination FixedArray
  __ bind(&loop);
  // ebx: index of current element (smi-tagged)
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
  // edx: new heap number
  __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
  __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
  __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
  __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
  __ mov(esi, ebx);
  __ RecordWriteArray(eax,
                      edx,
                      esi,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());

  __ bind(&entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(edx);
  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      eax,
                      edi,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Restore registers.
  __ pop(eax);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&success);
}
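
// Editorial sketch of the hole test used by the generators above: a hole in
// a FixedDoubleArray is a NaN with a reserved upper word, so on little-endian
// x86 it can be recognized by comparing only the 32 bits at byte offset 4
// (hence the "kHeaderSize + sizeof(kHoleNanLower32)" displacement).
static bool ExampleIsHoleNan(double candidate) {
  uint32_t upper;
  OS::MemCopy(&upper, reinterpret_cast<const char*>(&candidate) + 4,
              sizeof(upper));
  return upper == kHoleNanUpper32;  // the lower word need not be inspected
}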

void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ test(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ test(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ SmiUntag(result);
  __ add(index, result);
  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory->empty_string()));
  __ j(not_equal, call_runtime);
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label ascii_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ test_b(result, kShortExternalStringMask);
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(result, kStringEncodingMask);
  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &ascii_external, Label::kNear);
  // Two-byte string.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&ascii_external);
  // Ascii string.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: ASCII or two-byte.
  Label ascii;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  __ movzx_w(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // Ascii string.
  // Load the byte into the result register.
  __ bind(&ascii);
  __ movzx_b(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}


#undef __


CodeAgingHelper::CodeAgingHelper() {
  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
  CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
  patcher.masm()->push(ebp);
  patcher.masm()->mov(ebp, esp);
  patcher.masm()->push(esi);
  patcher.masm()->push(edi);
}
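
// Editorial note: the "young" sequence patched above is just the standard
// function prologue (push ebp; mov ebp, esp; push esi; push edi). Aging
// replaces it with a call (0xE8 ...), which is why an aged sequence can be
// recognized from its first byte alone, as in this illustrative sketch:
static bool ExampleLooksAged(const byte* sequence) {
  return sequence[0] == kCallOpcode;  // prologue was replaced by a call
}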

#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte.
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
  }
}
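
// Editorial sketch of the rel32 decoding in GetCodeAgeAndParity above: an
// ia32/x87 call is E8 <rel32>, and its target is the address just past the
// 4-byte displacement plus that displacement. The helper name is
// illustrative only.
static Address ExampleDecodeCallTarget(byte* call_site) {
  byte* rel32_location = call_site + 1;  // skip the E8 (kCallOpcode) byte
  int rel32 = *reinterpret_cast<int*>(rel32_location);
  return rel32_location + 4 + rel32;  // 4 == size of the rel32 field
}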

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X87
33
src/x87/codegen-x87.h
Normal file
@ -0,0 +1,33 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_X87_CODEGEN_X87_H_
#define V8_X87_CODEGEN_X87_H_

#include "ast.h"
#include "ic-inl.h"

namespace v8 {
namespace internal {


class StringCharLoadGenerator : public AllStatic {
 public:
  // Generates the code for handling different string types and loading the
  // indexed character into |result|. We expect |index| as untagged input and
  // |result| as untagged output.
  static void Generate(MacroAssembler* masm,
                       Factory* factory,
                       Register string,
                       Register index,
                       Register result,
                       Label* call_runtime);

 private:
  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};

} }  // namespace v8::internal

#endif  // V8_X87_CODEGEN_X87_H_
44
src/x87/cpu-x87.cc
Normal file
@ -0,0 +1,44 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// CPU specific code for x87 independent of OS goes here.

#ifdef __GNUC__
#include "third_party/valgrind/valgrind.h"
#endif

#include "v8.h"

#if V8_TARGET_ARCH_X87

#include "cpu.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {

void CPU::FlushICache(void* start, size_t size) {
  // No need to flush the instruction cache on Intel. On Intel, instruction
  // cache flushing is only necessary when multiple cores are running the same
  // code simultaneously. V8 (and JavaScript) is single threaded, and when code
  // is patched on an Intel CPU the core performing the patching will have its
  // own instruction cache updated automatically.

  // If flushing of the instruction cache becomes necessary, Windows has the
  // API function FlushInstructionCache.

  // By default, valgrind only checks the stack for writes that might need to
  // invalidate already cached translated code. This leads to random
  // instability when code patches or moves are sometimes unnoticed. One
  // solution is to run valgrind with --smc-check=all, but this comes at a big
  // performance cost. We can notify valgrind to invalidate its cache.
#ifdef VALGRIND_DISCARD_TRANSLATIONS
  unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
  USE(res);
#endif
}
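
// Editorial sketch (hypothetical helper, not part of the patch): under
// Valgrind's default --smc-check=stack, a heap-resident code patch like the
// one below would go unnoticed without the VALGRIND_DISCARD_TRANSLATIONS
// notification issued above.
static void ExamplePatchAndFlush(byte* code, size_t size) {
  code[0] = 0xcc;                // patch: overwrite first byte with int3
  CPU::FlushICache(code, size);  // no-op on Intel, but informs Valgrind
}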

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X87
339
src/x87/debug-x87.cc
Normal file
@ -0,0 +1,339 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "v8.h"

#if V8_TARGET_ARCH_X87

#include "codegen.h"
#include "debug.h"


namespace v8 {
namespace internal {

bool BreakLocationIterator::IsDebugBreakAtReturn() {
  return Debug::IsDebugBreakAtReturn(rinfo());
}


// Patch the JS frame exit code with a debug break call. See
// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x87.cc
// for the precise return instructions sequence.
void BreakLocationIterator::SetDebugBreakAtReturn() {
  ASSERT(Assembler::kJSReturnSequenceLength >=
         Assembler::kCallInstructionLength);
  rinfo()->PatchCodeWithCall(
      debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
      Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}


// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
  rinfo()->PatchCode(original_rinfo()->pc(),
                     Assembler::kJSReturnSequenceLength);
}


// A debug break in the frame exit code is identified by the JS frame exit code
// having been patched with a call instruction.
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
  return rinfo->IsPatchedReturnSequence();
}


bool BreakLocationIterator::IsDebugBreakAtSlot() {
  ASSERT(IsDebugBreakSlot());
  // Check whether the debug break slot instructions have been patched.
  return rinfo()->IsPatchedDebugBreakSlotSequence();
}


void BreakLocationIterator::SetDebugBreakAtSlot() {
  ASSERT(IsDebugBreakSlot());
  Isolate* isolate = debug_info_->GetIsolate();
  rinfo()->PatchCodeWithCall(
      isolate->builtins()->Slot_DebugBreak()->entry(),
      Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
}


void BreakLocationIterator::ClearDebugBreakAtSlot() {
  ASSERT(IsDebugBreakSlot());
  rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
}


// All debug break stubs support padding for LiveEdit.
const bool Debug::FramePaddingLayout::kIsSupported = true;


#define __ ACCESS_MASM(masm)

static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                          RegList object_regs,
                                          RegList non_object_regs,
                                          bool convert_call_to_jmp) {
  // Enter an internal frame.
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Load padding words on stack.
    for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
      __ push(Immediate(Smi::FromInt(
          Debug::FramePaddingLayout::kPaddingValue)));
    }
    __ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)));

    // Store the registers containing live values on the expression stack to
    // make sure that these are correctly updated during GC. Non object values
    // are stored as a smi causing it to be untouched by GC.
    ASSERT((object_regs & ~kJSCallerSaved) == 0);
    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
    ASSERT((object_regs & non_object_regs) == 0);
    for (int i = 0; i < kNumJSCallerSaved; i++) {
      int r = JSCallerSavedCode(i);
      Register reg = { r };
      if ((object_regs & (1 << r)) != 0) {
        __ push(reg);
      }
      if ((non_object_regs & (1 << r)) != 0) {
        if (FLAG_debug_code) {
          __ test(reg, Immediate(0xc0000000));
          __ Assert(zero, kUnableToEncodeValueAsSmi);
        }
        __ SmiTag(reg);
        __ push(reg);
      }
    }

#ifdef DEBUG
    __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
    __ Move(eax, Immediate(0));  // No arguments.
    __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));

    CEntryStub ceb(masm->isolate(), 1);
    __ CallStub(&ceb);

    // Automatically find register that could be used after register restore.
    // We need one register for padding skip instructions.
    Register unused_reg = { -1 };

    // Restore the register values containing object pointers from the
    // expression stack.
    for (int i = kNumJSCallerSaved; --i >= 0;) {
      int r = JSCallerSavedCode(i);
      Register reg = { r };
      if (FLAG_debug_code) {
        __ Move(reg, Immediate(kDebugZapValue));
      }
      bool taken = reg.code() == esi.code();
      if ((object_regs & (1 << r)) != 0) {
        __ pop(reg);
        taken = true;
      }
      if ((non_object_regs & (1 << r)) != 0) {
        __ pop(reg);
        __ SmiUntag(reg);
        taken = true;
      }
      if (!taken) {
        unused_reg = reg;
      }
    }

    ASSERT(unused_reg.code() != -1);

    // Read current padding counter and skip corresponding number of words.
    __ pop(unused_reg);
    // We divide stored value by 2 (untagging) and multiply it by word's size.
    STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
    __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));

    // Get rid of the internal frame.
  }

  // If this call did not replace a call but patched other code then there will
  // be an unwanted return address left on the stack. Here we get rid of that.
  if (convert_call_to_jmp) {
    __ add(esp, Immediate(kPointerSize));
  }

  // Now that the break point has been handled, resume normal execution by
  // jumping to the target address intended by the caller and that was
  // overwritten by the address of DebugBreakXXX.
  ExternalReference after_break_target =
      ExternalReference::debug_after_break_target_address(masm->isolate());
  __ jmp(Operand::StaticVariable(after_break_target));
}
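
// Editorial sketch of the RegList convention the helper above relies on:
// bit i of the mask stands for the register whose code() is i, so
// eax.bit() == 1 << 0, and a register set is just a bitwise OR of .bit()
// values. The helper name is illustrative only.
static int ExampleCountRegs(RegList regs) {
  int count = 0;
  for (int i = 0; i < kNumJSCallerSaved; i++) {
    if ((regs & (1 << JSCallerSavedCode(i))) != 0) count++;
  }
  return count;  // e.g. ExampleCountRegs(eax.bit() | edx.bit()) == 2
}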

void Debug::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
  // Register state for CallICStub
  // ----------- S t a t e -------------
  //  -- edx    : type feedback slot (smi)
  //  -- edi    : function
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, edx.bit() | edi.bit(),
                                0, false);
}


void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
  // Register state for IC load call (from ic-x87.cc).
  // ----------- S t a t e -------------
  //  -- ecx    : name
  //  -- edx    : receiver
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
}


void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
  // Register state for IC store call (from ic-x87.cc).
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ecx    : name
  //  -- edx    : receiver
  // -----------------------------------
  Generate_DebugBreakCallHelper(
      masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
}


void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
  // Register state for keyed IC load call (from ic-x87.cc).
  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- edx    : receiver
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
}


void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
  // Register state for keyed IC store call (from ic-x87.cc).
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ecx    : key
  //  -- edx    : receiver
  // -----------------------------------
  Generate_DebugBreakCallHelper(
      masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
}


void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
  // Register state for CompareNil IC
  // ----------- S t a t e -------------
  //  -- eax    : value
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, eax.bit(), 0, false);
}


void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
  // Register state just before return from JS function (from codegen-x87.cc).
  // ----------- S t a t e -------------
  //  -- eax: return value
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
}


void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
  // Register state for CallFunctionStub (from code-stubs-x87.cc).
  // ----------- S t a t e -------------
  //  -- edi: function
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
}


void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
  // Register state for CallConstructStub (from code-stubs-x87.cc).
  // eax is the actual number of arguments, not encoded as a smi; see comment
  // above IC call.
  // ----------- S t a t e -------------
  //  -- eax: number of arguments (not smi)
  //  -- edi: constructor function
  // -----------------------------------
  // The number of arguments in eax is not smi encoded.
  Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
}


void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
  // Register state for CallConstructStub (from code-stubs-x87.cc).
  // eax is the actual number of arguments, not encoded as a smi; see comment
  // above IC call.
  // ----------- S t a t e -------------
  //  -- eax: number of arguments (not smi)
  //  -- ebx: feedback array
  //  -- edx: feedback slot (smi)
  //  -- edi: constructor function
  // -----------------------------------
  // The number of arguments in eax is not smi encoded.
  Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
                                eax.bit(), false);
}


void Debug::GenerateSlot(MacroAssembler* masm) {
  // Generate enough nop's to make space for a call instruction.
  Label check_codesize;
  __ bind(&check_codesize);
  __ RecordDebugBreakSlot();
  __ Nop(Assembler::kDebugBreakSlotLength);
  ASSERT_EQ(Assembler::kDebugBreakSlotLength,
            masm->SizeOfCodeGeneratedSince(&check_codesize));
}


void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
  // In the places where a debug break slot is inserted no registers can
  // contain object pointers.
  Generate_DebugBreakCallHelper(masm, 0, 0, true);
}


void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
  masm->ret(0);
}


void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
  ExternalReference restarter_frame_function_slot =
      ExternalReference::debug_restarter_frame_function_pointer_address(
          masm->isolate());
  __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));

  // We do not know our frame height, but set esp based on ebp.
  __ lea(esp, Operand(ebp, -1 * kPointerSize));

  __ pop(edi);  // Function.
  __ pop(ebp);

  // Load context from the function.
  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

  // Get function code.
  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));

  // Re-run JSFunction, edi is function, esi is context.
  __ jmp(edx);
}

const bool Debug::kFrameDropperSupported = true;

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X87
408
src/x87/deoptimizer-x87.cc
Normal file
@ -0,0 +1,408 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "v8.h"

#if V8_TARGET_ARCH_X87

#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"

namespace v8 {
namespace internal {

const int Deoptimizer::table_entry_size_ = 10;


int Deoptimizer::patch_size() {
  return Assembler::kCallInstructionLength;
}


void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
  Isolate* isolate = code->GetIsolate();
  HandleScope scope(isolate);

  // Compute the size of relocation information needed for the code
  // patching in Deoptimizer::DeoptimizeFunction.
  int min_reloc_size = 0;
  int prev_pc_offset = 0;
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    int pc_offset = deopt_data->Pc(i)->value();
    if (pc_offset == -1) continue;
    ASSERT_GE(pc_offset, prev_pc_offset);
    int pc_delta = pc_offset - prev_pc_offset;
    // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
    // if encodable with small pc delta encoding and up to 6 bytes
    // otherwise.
    if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
      min_reloc_size += 2;
    } else {
      min_reloc_size += 6;
    }
    prev_pc_offset = pc_offset;
  }

  // If the relocation information is not big enough we create a new
  // relocation info object that is padded with comments to make it
  // big enough for lazy deoptimization.
  int reloc_length = code->relocation_info()->length();
  if (min_reloc_size > reloc_length) {
    int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
    // Padding needed.
    int min_padding = min_reloc_size - reloc_length;
    // Number of comments needed to take up at least that much space.
    int additional_comments =
        (min_padding + comment_reloc_size - 1) / comment_reloc_size;
    // Actual padding size.
    int padding = additional_comments * comment_reloc_size;
    // Allocate new relocation info and copy old relocation to the end
    // of the new relocation info array because relocation info is
    // written and read backwards.
    Factory* factory = isolate->factory();
    Handle<ByteArray> new_reloc =
        factory->NewByteArray(reloc_length + padding, TENURED);
    OS::MemCopy(new_reloc->GetDataStartAddress() + padding,
                code->relocation_info()->GetDataStartAddress(),
                reloc_length);
    // Create a relocation writer to write the comments in the padding
    // space. Use position 0 for everything to ensure short encoding.
    RelocInfoWriter reloc_info_writer(
        new_reloc->GetDataStartAddress() + padding, 0);
    intptr_t comment_string
        = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
    for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
      byte* pos_before = reloc_info_writer.pos();
#endif
      reloc_info_writer.Write(&rinfo);
      ASSERT(RelocInfo::kMinRelocCommentSize ==
             pos_before - reloc_info_writer.pos());
    }
    // Replace relocation information on the code object.
    code->set_relocation_info(*new_reloc);
  }
}
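
// Editorial note: the "additional_comments" computation above is the usual
// integer ceiling division; restated standalone for clarity (name is
// illustrative only):
static int ExampleCeilDiv(int min_padding, int comment_reloc_size) {
  // equals ceil(min_padding / comment_reloc_size) without floating point
  return (min_padding + comment_reloc_size - 1) / comment_reloc_size;
}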

void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  Address code_start_address = code->instruction_start();

  if (FLAG_zap_code_space) {
    // Fail hard and early if we enter this code object again.
    byte* pointer = code->FindCodeAgeSequence();
    if (pointer != NULL) {
      pointer += kNoCodeAgeSequenceLength;
    } else {
      pointer = code->instruction_start();
    }
    CodePatcher patcher(pointer, 1);
    patcher.masm()->int3();

    DeoptimizationInputData* data =
        DeoptimizationInputData::cast(code->deoptimization_data());
    int osr_offset = data->OsrPcOffset()->value();
    if (osr_offset > 0) {
      CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
      osr_patcher.masm()->int3();
    }
  }

  // We will overwrite the code's relocation info in-place. Relocation info
  // is written backward. The relocation info is the payload of a byte
  // array. Later on we will slide this to the start of the byte array and
  // create a filler object in the remaining space.
  ByteArray* reloc_info = code->relocation_info();
  Address reloc_end_address = reloc_info->address() + reloc_info->Size();
  RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);

  // Since the call is a relative encoding, write new
  // reloc info. We do not need any of the existing reloc info because the
  // existing code will not be used again (we zap it in debug builds).
  //
  // Emit call to lazy deoptimization at all lazy deopt points.
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  SharedFunctionInfo* shared =
      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;
    // Patch lazy deoptimization entry.
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    CodePatcher patcher(call_address, patch_size());
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
    patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
    // We use RUNTIME_ENTRY for deoptimization bailouts.
    RelocInfo rinfo(call_address + 1,  // 1 after the call opcode.
                    RelocInfo::RUNTIME_ENTRY,
                    reinterpret_cast<intptr_t>(deopt_entry),
                    NULL);
    reloc_info_writer.Write(&rinfo);
    ASSERT_GE(reloc_info_writer.pos(),
              reloc_info->address() + ByteArray::kHeaderSize);
    ASSERT(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
    ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }

  // Move the relocation info to the beginning of the byte array.
  int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
  OS::MemMove(
      code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);

  // The relocation info is in place, update the size.
  reloc_info->set_length(new_reloc_size);

  // Handle the junk part after the new relocation info. We will create
  // a non-live object in the extra space at the end of the former reloc info.
  Address junk_address = reloc_info->address() + reloc_info->Size();
  ASSERT(junk_address <= reloc_end_address);
  isolate->heap()->CreateFillerObjectAt(junk_address,
                                        reloc_end_address - junk_address);
}


void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  // Set the register values. The values are not important as there are no
  // callee saved registers in JavaScript frames, so all registers are
  // spilled. Registers ebp and esp are set to the correct values though.

  for (int i = 0; i < Register::kNumRegisters; i++) {
    input_->SetRegister(i, i * 4);
  }
  input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
  }
}


void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
  intptr_t handler =
      reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
  int params = descriptor->GetHandlerParameterCount();
  output_frame->SetRegister(eax.code(), params);
  output_frame->SetRegister(ebx.code(), handler);
}


void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  // Do nothing for X87.
  return;
}


bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
  int parameter_count = function->shared()->formal_parameter_count() + 1;
  unsigned input_frame_size = input_->GetFrameSize();
  unsigned alignment_state_offset =
      input_frame_size - parameter_count * kPointerSize -
      StandardFrameConstants::kFixedFrameSize -
      kPointerSize;
  ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
         JavaScriptFrameConstants::kLocal0Offset);
  int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
  return (alignment_state == kAlignmentPaddingPushed);
}


#define __ masm()->

void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();

  // Save all general purpose registers before messing with them.
  const int kNumberOfRegisters = Register::kNumRegisters;
  __ pushad();

  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize;

  // Get the bailout id from the stack.
  __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object
  // and compute the fp-to-sp delta in register edx.
  __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
  __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));

  __ sub(edx, ebp);
  __ neg(edx);

  // Allocate a new deoptimizer object.
  __ PrepareCallCFunction(6, eax);
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(Operand(esp, 0 * kPointerSize), eax);  // Function.
  __ mov(Operand(esp, 1 * kPointerSize), Immediate(type()));  // Bailout type.
  __ mov(Operand(esp, 2 * kPointerSize), ebx);  // Bailout id.
  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Code address or 0.
  __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
  __ mov(Operand(esp, 5 * kPointerSize),
         Immediate(ExternalReference::isolate_address(isolate())));
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve deoptimizer object in register eax and get the input
  // frame descriptor pointer.
  __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));

  // Fill in the input registers.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ pop(Operand(ebx, offset));
  }

  // Clear all FPU exceptions.
  // TODO(ulan): Find out why the TOP register is not zero here in some cases,
  // and check that the generated code never deoptimizes with unbalanced stack.
  __ fnclex();

  // Remove the bailout id, return address and the double registers.
  __ add(esp, Immediate(2 * kPointerSize));

  // Compute a pointer to the unwinding limit in register ecx; that is
  // the first stack slot not part of the input frame.
  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
  __ add(ecx, esp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
  Label pop_loop_header;
  __ jmp(&pop_loop_header);
  Label pop_loop;
  __ bind(&pop_loop);
  __ pop(Operand(edx, 0));
  __ add(edx, Immediate(sizeof(uint32_t)));
  __ bind(&pop_loop_header);
  __ cmp(ecx, esp);
  __ j(not_equal, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ push(eax);
  __ PrepareCallCFunction(1, ebx);
  __ mov(Operand(esp, 0 * kPointerSize), eax);
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ pop(eax);

  // If frame was dynamically aligned, pop padding.
  Label no_padding;
  __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
         Immediate(0));
  __ j(equal, &no_padding);
  __ pop(ecx);
  if (FLAG_debug_code) {
    __ cmp(ecx, Immediate(kAlignmentZapValue));
    __ Assert(equal, kAlignmentMarkerExpected);
  }
  __ bind(&no_padding);

  // Replace the current frame with the output frames.
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  // Outer loop state: eax = current FrameDescription**, edx = one past the
  // last FrameDescription**.
  __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
  __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
  __ lea(edx, Operand(eax, edx, times_4, 0));
  __ jmp(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
  __ mov(ebx, Operand(eax, 0));
  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
  __ jmp(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ sub(ecx, Immediate(sizeof(uint32_t)));
  __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
  __ bind(&inner_loop_header);
  __ test(ecx, ecx);
  __ j(not_zero, &inner_push_loop);
  __ add(eax, Immediate(kPointerSize));
  __ bind(&outer_loop_header);
  __ cmp(eax, edx);
  __ j(below, &outer_push_loop);

  // Push state, pc, and continuation from the last output frame.
  __ push(Operand(ebx, FrameDescription::state_offset()));
  __ push(Operand(ebx, FrameDescription::pc_offset()));
  __ push(Operand(ebx, FrameDescription::continuation_offset()));


  // Push the registers from the last output frame.
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ push(Operand(ebx, offset));
  }

  // Restore the registers from the stack.
  __ popad();

  // Return to the continuation point.
  __ ret(0);
}


void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of deoptimization entries.
  Label done;
  for (int i = 0; i < count(); i++) {
    int start = masm()->pc_offset();
    USE(start);
    __ push_imm32(i);
    __ jmp(&done);
    ASSERT(masm()->pc_offset() - start == table_entry_size_);
  }
  __ bind(&done);
}
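
// Editorial sketch of where table_entry_size_ == 10 comes from (assuming the
// assembler emits the long forms here): push_imm32 is 0x68 plus a 4-byte
// immediate, and the jmp to a not-yet-bound label is 0xE9 plus a 4-byte
// rel32 displacement.
enum {
  kExamplePushImm32Size = 1 + 4,  // 0x68 id
  kExampleJmpRel32Size = 1 + 4,   // 0xE9 cd
  kExampleTableEntrySize = kExamplePushImm32Size + kExampleJmpRel32Size  // 10
};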

void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No out-of-line constant pool support.
  UNREACHABLE();
}


#undef __


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X87
1762
src/x87/disasm-x87.cc
Normal file
File diff suppressed because it is too large
42
src/x87/frames-x87.cc
Normal file
@ -0,0 +1,42 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "v8.h"

#if V8_TARGET_ARCH_X87

#include "assembler.h"
#include "assembler-x87.h"
#include "assembler-x87-inl.h"
#include "frames.h"

namespace v8 {
namespace internal {


Register JavaScriptFrame::fp_register() { return ebp; }
Register JavaScriptFrame::context_register() { return esi; }
Register JavaScriptFrame::constant_pool_pointer_register() {
  UNREACHABLE();
  return no_reg;
}


Register StubFailureTrampolineFrame::fp_register() { return ebp; }
Register StubFailureTrampolineFrame::context_register() { return esi; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
  UNREACHABLE();
  return no_reg;
}


Object*& ExitFrame::constant_pool_slot() const {
  UNREACHABLE();
  return Memory::Object_at(NULL);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X87
125
src/x87/frames-x87.h
Normal file
@ -0,0 +1,125 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_X87_FRAMES_X87_H_
#define V8_X87_FRAMES_X87_H_

namespace v8 {
namespace internal {


// Register lists
// Note that the bit values must match those used in actual instruction
// encoding.
const int kNumRegs = 8;


// Caller-saved registers
const RegList kJSCallerSaved =
    1 << 0 |  // eax
    1 << 1 |  // ecx
    1 << 2 |  // edx
    1 << 3 |  // ebx - used as a caller-saved register in JavaScript code
    1 << 7;   // edi - callee function

const int kNumJSCallerSaved = 5;
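
// Editorial check (illustration only, not part of the patch): the mask above
// sets bits 0-3 and 7, i.e. kJSCallerSaved == 0x8f, which names exactly five
// registers, matching kNumJSCallerSaved.
STATIC_ASSERT(kJSCallerSaved ==
              ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 7)));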

// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 8;

const int kNoAlignmentPadding = 0;
const int kAlignmentPaddingPushed = 2;
const int kAlignmentZapValue = 0x12345678;  // Not heap object tagged.

// ----------------------------------------------------


class EntryFrameConstants : public AllStatic {
 public:
  static const int kCallerFPOffset = -6 * kPointerSize;

  static const int kFunctionArgOffset = +3 * kPointerSize;
  static const int kReceiverArgOffset = +4 * kPointerSize;
  static const int kArgcOffset = +5 * kPointerSize;
  static const int kArgvOffset = +6 * kPointerSize;
};


class ExitFrameConstants : public AllStatic {
 public:
  static const int kFrameSize = 2 * kPointerSize;

  static const int kCodeOffset = -2 * kPointerSize;
  static const int kSPOffset = -1 * kPointerSize;

  static const int kCallerFPOffset = 0 * kPointerSize;
  static const int kCallerPCOffset = +1 * kPointerSize;

  // FP-relative displacement of the caller's SP. It points just
  // below the saved PC.
  static const int kCallerSPDisplacement = +2 * kPointerSize;

  static const int kConstantPoolOffset = 0;  // Not used
};


class JavaScriptFrameConstants : public AllStatic {
 public:
  // FP-relative.
  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
  static const int kLastParameterOffset = +2 * kPointerSize;
  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;

  // Caller SP-relative.
  static const int kParam0Offset = -2 * kPointerSize;
  static const int kReceiverOffset = -1 * kPointerSize;

  static const int kDynamicAlignmentStateOffset = kLocal0Offset;
};


class ArgumentsAdaptorFrameConstants : public AllStatic {
 public:
  // FP-relative.
  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;

  static const int kFrameSize =
      StandardFrameConstants::kFixedFrameSize + kPointerSize;
};


class ConstructFrameConstants : public AllStatic {
 public:
  // FP-relative.
  static const int kImplicitReceiverOffset = -5 * kPointerSize;
  static const int kConstructorOffset = kMinInt;
  static const int kLengthOffset = -4 * kPointerSize;
  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;

  static const int kFrameSize =
      StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
};


class InternalFrameConstants : public AllStatic {
 public:
  // FP-relative.
  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};


inline Object* JavaScriptFrame::function_slot_object() const {
  const int offset = JavaScriptFrameConstants::kFunctionOffset;
  return Memory::Object_at(fp() + offset);
}


inline void StackHandler::SetFp(Address slot, Address fp) {
  Memory::Address_at(slot) = fp;
}


} }  // namespace v8::internal

#endif  // V8_X87_FRAMES_X87_H_
 4795  src/x87/full-codegen-x87.cc     (new file; diff suppressed because it is too large)
 1286  src/x87/ic-x87.cc               (new file; diff suppressed because it is too large)
 5709  src/x87/lithium-codegen-x87.cc  (new file; diff suppressed because it is too large)
  504  src/x87/lithium-codegen-x87.h   (new file)
@@ -0,0 +1,504 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_X87_LITHIUM_CODEGEN_X87_H_
#define V8_X87_LITHIUM_CODEGEN_X87_H_

#include "x87/lithium-x87.h"

#include "checks.h"
#include "deoptimizer.h"
#include "x87/lithium-gap-resolver-x87.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class LGapNode;
class SafepointGenerator;

class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        dynamic_frame_alignment_(false),
        support_aligned_spilled_doubles_(false),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        x87_stack_(assembler),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  // Support for converting LOperands to assembler types.
  Operand ToOperand(LOperand* op) const;
  Register ToRegister(LOperand* op) const;
  X87Register ToX87Register(LOperand* op) const;

  bool IsInteger32(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;
  Immediate ToImmediate(LOperand* op, const Representation& r) const {
    return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
  }
  double ToDouble(LConstantOperand* op) const;

  // Support for non-sse2 (x87) floating point stack handling.
  // These functions maintain the mapping of physical stack registers to our
  // virtual registers between instructions.
  enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand };

  void X87Mov(X87Register reg, Operand src,
              X87OperandType operand = kX87DoubleOperand);
  void X87Mov(Operand src, X87Register reg,
              X87OperandType operand = kX87DoubleOperand);

  void X87PrepareBinaryOp(
      X87Register left, X87Register right, X87Register result);

  void X87LoadForUsage(X87Register reg);
  void X87LoadForUsage(X87Register reg1, X87Register reg2);
  void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
  void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }

  void X87Fxch(X87Register reg, int other_slot = 0) {
    x87_stack_.Fxch(reg, other_slot);
  }
  void X87Free(X87Register reg) {
    x87_stack_.Free(reg);
  }

  bool X87StackEmpty() {
    return x87_stack_.depth() == 0;
  }
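
  // Illustrative use of the mapping above (a sketch, not code from the
  // patch; |instr| and |offset| are hypothetical):
  //   X87Register left = ToX87Register(instr->left());
  //   X87Mov(left, Operand(ebp, offset));  // fld: |left| now lives at st(0)
  //   X87Fxch(left);                       // bring |left| back to st(0)
  //   X87Free(left);                       // drop it from the virtual stack
  // Each call keeps x87_stack_'s virtual-to-physical mapping in sync with
  // the FPU stack that the emitted code will actually have at run time.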

  Handle<Object> ToHandle(LConstantOperand* op) const;

  // The operand denoting the second word (the one with a higher address) of
  // a double stack slot.
  Operand HighOperand(LOperand* op);

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagIU(LInstruction* instr,
                             LOperand* value,
                             LOperand* temp,
                             IntegerSignedness signedness);

  void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                       Label* map_check);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                   Register object,
                                   Register index);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);
  void DoGap(LGap* instr);

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  void EnsureRelocSpaceForDeoptimization();

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  StrictMode strict_mode() const { return info()->strict_mode(); }

  Scope* scope() const { return scope_; }

  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Code generation passes. Returns true if code generation should
  // continue.
  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
  void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* fun,
                   int argc,
                   LInstruction* instr);

  void CallRuntime(Runtime::FunctionId id,
                   int argc,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, argc, instr);
  }

  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  void LoadContextFromDeferred(LOperand* context);

  enum EDIState {
    EDI_UNINITIALIZED,
    EDI_CONTAINS_TARGET
  };

  // Generate a direct call to a known function. Expects the function
  // to be in edi.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         EDIState edi_state);

  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  void DeoptimizeIf(Condition cc,
                    LEnvironment* environment,
                    Deoptimizer::BailoutType bailout_type);
  void DeoptimizeIf(Condition cc, LEnvironment* environment);

  bool DeoptEveryNTimes() {
    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
  }

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  Register ToRegister(int index) const;
  X87Register ToX87Register(int index) const;
  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
  int32_t ToInteger32(LConstantOperand* op) const;
  ExternalReference ToExternalReference(LConstantOperand* op) const;

  Operand BuildFastArrayOperand(LOperand* elements_pointer,
                                LOperand* key,
                                Representation key_representation,
                                ElementsKind elements_kind,
                                uint32_t base_offset);

  Operand BuildSeqStringOperand(Register string,
                                LOperand* index,
                                String::Encoding encoding);

  void EmitIntegerMathAbs(LMathAbs* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);

  void RecordAndWritePosition(int position) V8_OVERRIDE;

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);

  // EmitBranch expects to be the last instruction of a block.
  template<class InstrType>
  void EmitBranch(InstrType instr, Condition cc);
  template<class InstrType>
  void EmitFalseBranch(InstrType instr, Condition cc);
  void EmitNumberUntagDNoSSE2(
      Register input,
      Register temp,
      X87Register res_reg,
      bool allow_undefined_as_nan,
      bool deoptimize_on_minus_zero,
      LEnvironment* env,
      NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);

  // Emits optimized code for typeof x == "y". Modifies input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);

  // Emits optimized code for %_IsObject(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsObject(Register input,
                         Register temp1,
                         Label* is_not_object,
                         Label* is_object);

  // Emits optimized code for %_IsString(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string,
                         SmiCheck check_needed);

  // Emits optimized code for %_IsConstructCall().
  // Caller should branch on equal condition.
  void EmitIsConstructCall(Register temp);

  // Emits optimized code to deep-copy the contents of statically known
  // object graphs (e.g. object literal boilerplate).
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset,
                    AllocationSiteMode mode);

  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedArray(LStoreKeyed* instr);

  void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);

  // Emits code for pushing either a tagged constant, a (non-double)
  // register, or a stack slot operand.
  void EmitPushTaggedOperand(LOperand* operand);

  void X87Fld(Operand src, X87OperandType opts);

  void EmitFlushX87ForDeopt();
  void FlushX87StackIfNecessary(LInstruction* instr) {
    x87_stack_.FlushIfNecessary(instr, this);
  }
  friend class LGapResolver;

#ifdef _MSC_VER
  // On windows, you may not access the stack more than one page below
  // the most recently mapped page. To make the allocated area randomly
  // accessible, we write an arbitrary value to each page in range
  // esp + offset - page_size .. esp in turn.
  void MakeSureStackPagesMapped(int offset);
#endif
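
  // A sketch of the page-touching loop the declaration above implies
  // (assumed shape; the real body lives in the suppressed
  // lithium-codegen-x87.cc):
  //   const int kPageSize = 4 * KB;
  //   for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
  //     __ mov(Operand(esp, offset), eax);  // touch one page per store
  //   }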

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  bool dynamic_frame_alignment_;
  bool support_aligned_spilled_doubles_;
  int osr_pc_offset_;
  bool frame_is_built_;

  class X87Stack {
   public:
    explicit X87Stack(MacroAssembler* masm)
        : stack_depth_(0), is_mutable_(true), masm_(masm) { }
    explicit X87Stack(const X87Stack& other)
        : stack_depth_(other.stack_depth_), is_mutable_(false),
          masm_(other.masm()) {
      for (int i = 0; i < stack_depth_; i++) {
        stack_[i] = other.stack_[i];
      }
    }
    bool operator==(const X87Stack& other) const {
      if (stack_depth_ != other.stack_depth_) return false;
      for (int i = 0; i < stack_depth_; i++) {
        if (!stack_[i].is(other.stack_[i])) return false;
      }
      return true;
    }
    bool Contains(X87Register reg);
    void Fxch(X87Register reg, int other_slot = 0);
    void Free(X87Register reg);
    void PrepareToWrite(X87Register reg);
    void CommitWrite(X87Register reg);
    void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
    void LeavingBlock(int current_block_id, LGoto* goto_instr);
    int depth() const { return stack_depth_; }
    void pop() {
      ASSERT(is_mutable_);
      stack_depth_--;
    }
    void push(X87Register reg) {
      ASSERT(is_mutable_);
      ASSERT(stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
      stack_[stack_depth_] = reg;
      stack_depth_++;
    }

    MacroAssembler* masm() const { return masm_; }
    Isolate* isolate() const { return masm_->isolate(); }

   private:
    int ArrayIndex(X87Register reg);
    int st2idx(int pos);

    X87Register stack_[X87Register::kMaxNumAllocatableRegisters];
    int stack_depth_;
    bool is_mutable_;
    MacroAssembler* masm_;
  };
  X87Stack x87_stack_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiler from a set of parallel moves to a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->masm_->PushSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
      ASSERT(codegen_->info()->is_calling());
    }

    ~PushSafepointRegistersScope() {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      codegen_->masm_->PopSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode : public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_),
        x87_stack_(x87_stack) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() {}
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
  int instruction_index() const { return instruction_index_; }
  const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  Label done_;
  int instruction_index_;
  LCodeGen::X87Stack x87_stack_;
};

} }  // namespace v8::internal

#endif  // V8_X87_LITHIUM_CODEGEN_X87_H_
  445  src/x87/lithium-gap-resolver-x87.cc  (new file)
@@ -0,0 +1,445 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "v8.h"

#if V8_TARGET_ARCH_X87

#include "x87/lithium-gap-resolver-x87.h"
#include "x87/lithium-codegen-x87.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  ASSERT(HasBeenReset());
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved on the side.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on a (at most one) pending move, in which case
  // we have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}


void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    ASSERT(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    ASSERT(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}


bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;

  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] != 0) return false;
    if (destination_uses_[i] != 0) return false;
  }
  return true;
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
}


Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::FromAllocationIndex(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
      Register scratch = Register::FromAllocationIndex(i);
      __ push(scratch);
      spilled_register_ = i;
      return scratch;
    }
  }

  // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
  Register scratch = Register::FromAllocationIndex(0);
  __ push(scratch);
  spilled_register_ = 0;
  return scratch;
}
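

// Cost note on the strategy above (illustrative): a free register (case 2)
// costs nothing; a push/pop pair (cases 3 and 4) brackets the remaining
// moves.  Case 3 is preferred because a register unused by any remaining
// move never triggers the interim restore that EnsureRestored() performs
// when the spilled register reappears as a move operand.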


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = BitCast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      __ push(Immediate(upper));
      __ push(Immediate(lower));
      X87Register dst = cgen_->ToX87Register(destination);
      cgen_->X87Mov(dst, MemOperand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
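      // E.g. v == 1.0 has bit pattern 0x3FF0000000000000, so upper ==
      // 0x3FF00000 and lower == 0 are pushed (high word ends up at the
      // higher address) to form the 8-byte double that X87Mov loads
      // from (esp, 0).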
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    // Load from the register onto the stack, store in destination, which must
    // be a double stack slot in the non-SSE2 case.
    ASSERT(destination->IsDoubleStackSlot());
    Operand dst = cgen_->ToOperand(destination);
    X87Register src = cgen_->ToX87Register(source);
    cgen_->X87Mov(dst, src);
  } else if (source->IsDoubleStackSlot()) {
    // Load from the stack slot on top of the floating point stack, and then
    // store in destination. If destination is a double register, then it
    // represents the top of the stack and nothing needs to be done.
    if (destination->IsDoubleStackSlot()) {
      Register tmp = EnsureTempRegister();
      Operand src0 = cgen_->ToOperand(source);
      Operand src1 = cgen_->HighOperand(source);
      Operand dst0 = cgen_->ToOperand(destination);
      Operand dst1 = cgen_->HighOperand(destination);
      __ mov(tmp, src0);  // Then use tmp to copy source to destination.
      __ mov(dst0, tmp);
      __ mov(tmp, src1);
      __ mov(dst1, tmp);
    } else {
      Operand src = cgen_->ToOperand(source);
      X87Register dst = cgen_->ToX87Register(destination);
      cgen_->X87Mov(dst, src);
    }
  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory. Use a free register as a temp if possible. Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }
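    // (When no temp is free, the three xor instructions above exchange
    // reg and mem in place: after a ^= b, b ^= a, a ^= b the two values
    // have swapped, without needing a scratch register.)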

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory. Spill on demand to use a temporary. If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have their source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like destination.
    // Compute those counts now.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X87
   87  src/x87/lithium-gap-resolver-x87.h  (new file)
@@ -0,0 +1,87 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
#define V8_X87_LITHIUM_GAP_RESOLVER_X87_H_

#include "v8.h"

#include "lithium.h"

namespace v8 {
namespace internal {

class LCodeGen;
class LGapResolver;

class LGapResolver V8_FINAL BASE_EMBEDDED {
 public:
  explicit LGapResolver(LCodeGen* owner);

  // Resolve a set of parallel moves, emitting assembler instructions.
  void Resolve(LParallelMove* parallel_move);

 private:
  // Build the initial list of moves.
  void BuildInitialMoveList(LParallelMove* parallel_move);

  // Perform the move at the moves_ index in question (possibly requiring
  // other moves to satisfy dependencies).
  void PerformMove(int index);

  // Emit any code necessary at the end of a gap move.
  void Finish();

  // Add or delete a move from the move graph without emitting any code.
  // Used to build up the graph and remove trivial moves.
  void AddMove(LMoveOperands move);
  void RemoveMove(int index);

  // Report the count of uses of operand as a source in a not-yet-performed
  // move. Used to rebuild use counts.
  int CountSourceUses(LOperand* operand);

  // Emit a move and remove it from the move graph.
  void EmitMove(int index);

  // Execute a move by emitting a swap of two operands. The move from
  // source to destination is removed from the move graph.
  void EmitSwap(int index);

  // Ensure that the given operand is not spilled.
  void EnsureRestored(LOperand* operand);

  // Return a register that can be used as a temp register, spilling
  // something if necessary.
  Register EnsureTempRegister();

  // Return a known free register different from the given one (which could
  // be no_reg---returning any free register), or no_reg if there is no such
  // register.
  Register GetFreeRegisterNot(Register reg);

  // Verify that the state is the initial one, ready to resolve a single
  // parallel move.
  bool HasBeenReset();

  // Verify the move list before performing moves.
  void Verify();

  LCodeGen* cgen_;

  // List of moves not yet resolved.
  ZoneList<LMoveOperands> moves_;

  // Source and destination use counts for the general purpose registers.
  int source_uses_[Register::kMaxNumAllocatableRegisters];
  int destination_uses_[Register::kMaxNumAllocatableRegisters];

  // If we had to spill on demand, the currently spilled register's
  // allocation index.
  int spilled_register_;
};

} }  // namespace v8::internal

#endif  // V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
 2666  src/x87/lithium-x87.cc                 (new file; diff suppressed because it is too large)
 2874  src/x87/lithium-x87.h                  (new file; diff suppressed because it is too large)
 3353  src/x87/macro-assembler-x87.cc         (new file; diff suppressed because it is too large)
 1067  src/x87/macro-assembler-x87.h          (new file; diff suppressed because it is too large)
 1308  src/x87/regexp-macro-assembler-x87.cc  (new file; diff suppressed because it is too large)
  200  src/x87/regexp-macro-assembler-x87.h   (new file)
@@ -0,0 +1,200 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
#define V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_

#include "x87/assembler-x87.h"
#include "x87/assembler-x87-inl.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {

#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
 public:
  RegExpMacroAssemblerX87(Mode mode, int registers_to_save, Zone* zone);
  virtual ~RegExpMacroAssemblerX87();
  virtual int stack_limit_slack();
  virtual void AdvanceCurrentPosition(int by);
  virtual void AdvanceRegister(int reg, int by);
  virtual void Backtrack();
  virtual void Bind(Label* label);
  virtual void CheckAtStart(Label* on_at_start);
  virtual void CheckCharacter(uint32_t c, Label* on_equal);
  virtual void CheckCharacterAfterAnd(uint32_t c,
                                      uint32_t mask,
                                      Label* on_equal);
  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
  // A "greedy loop" is a loop that is both greedy and with a simple
  // body. It has a particularly simple implementation.
  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
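  // (For example, a simple greedy repetition such as /a*/ is compiled
  // this way.)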
  virtual void CheckNotAtStart(Label* on_not_at_start);
  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
                                               Label* on_no_match);
  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
  virtual void CheckNotCharacterAfterAnd(uint32_t c,
                                         uint32_t mask,
                                         Label* on_not_equal);
  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
                                              uc16 minus,
                                              uc16 mask,
                                              Label* on_not_equal);
  virtual void CheckCharacterInRange(uc16 from,
                                     uc16 to,
                                     Label* on_in_range);
  virtual void CheckCharacterNotInRange(uc16 from,
                                        uc16 to,
                                        Label* on_not_in_range);
  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);

  // Checks whether the given offset from the current position is before
  // the end of the string.
  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
  virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
  virtual void Fail();
  virtual Handle<HeapObject> GetCode(Handle<String> source);
  virtual void GoTo(Label* label);
  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
  virtual void IfRegisterEqPos(int reg, Label* if_eq);
  virtual IrregexpImplementation Implementation();
  virtual void LoadCurrentCharacter(int cp_offset,
                                    Label* on_end_of_input,
                                    bool check_bounds = true,
                                    int characters = 1);
  virtual void PopCurrentPosition();
  virtual void PopRegister(int register_index);
  virtual void PushBacktrack(Label* label);
  virtual void PushCurrentPosition();
  virtual void PushRegister(int register_index,
                            StackCheckFlag check_stack_limit);
  virtual void ReadCurrentPositionFromRegister(int reg);
  virtual void ReadStackPointerFromRegister(int reg);
  virtual void SetCurrentPositionFromEnd(int by);
  virtual void SetRegister(int register_index, int to);
  virtual bool Succeed();
  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
  virtual void ClearRegisters(int reg_from, int reg_to);
  virtual void WriteStackPointerToRegister(int reg);

  // Called from RegExp if the stack-guard is triggered.
  // If the code object is relocated, the return address is fixed before
  // returning.
  static int CheckStackGuardState(Address* return_address,
                                  Code* re_code,
                                  Address re_frame);

 private:
  // Offsets from ebp of function parameters and stored registers.
  static const int kFramePointer = 0;
  // Above the frame pointer - function parameters and return address.
  static const int kReturn_eip = kFramePointer + kPointerSize;
  static const int kFrameAlign = kReturn_eip + kPointerSize;
  // Parameters.
  static const int kInputString = kFrameAlign;
  static const int kStartIndex = kInputString + kPointerSize;
  static const int kInputStart = kStartIndex + kPointerSize;
  static const int kInputEnd = kInputStart + kPointerSize;
  static const int kRegisterOutput = kInputEnd + kPointerSize;
  // For the case of global regular expression, we have room to store at least
  // one set of capture results. For the case of non-global regexp, we ignore
  // this value.
  static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
  static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
  static const int kDirectCall = kStackHighEnd + kPointerSize;
  static const int kIsolate = kDirectCall + kPointerSize;
  // Below the frame pointer - local stack variables.
  // When adding local variables remember to push space for them in
  // the frame in GetCode.
  static const int kBackup_esi = kFramePointer - kPointerSize;
  static const int kBackup_edi = kBackup_esi - kPointerSize;
  static const int kBackup_ebx = kBackup_edi - kPointerSize;
  static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
  static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
  // First register address. Following registers are below it on the stack.
  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
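  // Given that layout, register_location() presumably resolves regexp
  // register i to Operand(ebp, kRegisterZero - i * kPointerSize), i.e.
  // each register sits one word further below the frame pointer (a sketch
  // inferred from the constants above; the definition lives in the
  // suppressed .cc file).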

  // Initial size of code buffer.
  static const size_t kRegExpCodeSize = 1024;

  // Load a number of characters at the given offset from the
  // current position, into the current-character register.
  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);

  // Check whether preemption has been requested.
  void CheckPreemption();

  // Check whether we are exceeding the stack limit on the backtrack stack.
  void CheckStackLimit();

  // Generate a call to CheckStackGuardState.
  void CallCheckStackGuardState(Register scratch);

  // The ebp-relative location of a regexp register.
  Operand register_location(int register_index);

  // The register containing the current character after LoadCurrentCharacter.
  inline Register current_character() { return edx; }

  // The register containing the backtrack stack top. Provides a meaningful
  // name to the register.
  inline Register backtrack_stackpointer() { return ecx; }

  // Byte size of chars in the string to match (decided by the Mode argument).
  inline int char_size() { return static_cast<int>(mode_); }
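  // (Mode is ASCII = 1 or UC16 = 2, so the cast yields 1 or 2 bytes.)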

  // Equivalent to a conditional branch to the label, unless the label
  // is NULL, in which case it is a conditional Backtrack.
  void BranchOrBacktrack(Condition condition, Label* to);

  // Call and return internally in the generated code in a way that
  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack).
  inline void SafeCall(Label* to);
  inline void SafeReturn();
  inline void SafeCallTarget(Label* name);

  // Pushes the value of a register on the backtrack stack. Decrements the
  // stack pointer (ecx) by a word size and stores the register's value there.
  inline void Push(Register source);

  // Pushes a value on the backtrack stack. Decrements the stack pointer (ecx)
  // by a word size and stores the value there.
  inline void Push(Immediate value);

  // Pops a value from the backtrack stack. Reads the word at the stack pointer
  // (ecx) and increments it by a word size.
  inline void Pop(Register target);

  Isolate* isolate() const { return masm_->isolate(); }

  MacroAssembler* masm_;

  // Which mode to generate code for (ASCII or UC16).
  Mode mode_;

  // One greater than maximal register index actually used.
  int num_registers_;

  // Number of registers to output at the end (the saved registers
  // are always 0..num_saved_registers_-1).
  int num_saved_registers_;

  // Labels used internally.
  Label entry_label_;
  Label start_label_;
  Label success_label_;
  Label backtrack_label_;
  Label exit_label_;
  Label check_preempt_label_;
  Label stack_overflow_label_;
};
#endif  // V8_INTERPRETED_REGEXP

}}  // namespace v8::internal

#endif  // V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
    6  src/x87/simulator-x87.cc  (new file)
@@ -0,0 +1,6 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.


// Since there is no simulator for the ia32 architecture this file is empty.
   48  src/x87/simulator-x87.h  (new file)
@@ -0,0 +1,48 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_X87_SIMULATOR_X87_H_
#define V8_X87_SIMULATOR_X87_H_

#include "allocation.h"

namespace v8 {
namespace internal {

// Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
  (entry(p0, p1, p2, p3, p4))


typedef int (*regexp_matcher)(String*, int, const byte*,
                              const byte*, int*, int, Address, int, Isolate*);

// Call the generated regexp code directly. The code at the entry address
// should expect nine int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
  (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
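
// Illustrative expansion (hypothetical call site, not from this patch):
//   int result = CALL_GENERATED_REGEXP_CODE(code_entry, subject, start_index,
//                                           input_start, input_end, output,
//                                           output_size, stack_base,
//                                           direct_call, isolate);
// simply casts code_entry to regexp_matcher and invokes it with the nine
// arguments in order.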


// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on ia32 uses the C stack, we
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
 public:
  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
                                            uintptr_t c_limit) {
    USE(isolate);
    return c_limit;
  }

  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
    return try_catch_address;
  }

  static inline void UnregisterCTryCatch() { }
};

} }  // namespace v8::internal

#endif  // V8_X87_SIMULATOR_X87_H_
|
1508
src/x87/stub-cache-x87.cc
Normal file
1508
src/x87/stub-cache-x87.cc
Normal file
File diff suppressed because it is too large
Load Diff
@@ -175,6 +175,17 @@
         'test-macro-assembler-mips.cc'
       ],
     }],
     ['v8_target_arch=="x87"', {
       'sources': [  ### gcmole(arch:x87) ###
         'test-assembler-x87.cc',
         'test-code-stubs.cc',
         'test-code-stubs-x87.cc',
         'test-cpu-x87.cc',
         'test-disasm-x87.cc',
         'test-macro-assembler-x87.cc',
         'test-log-stack-tracer.cc'
       ],
     }],
     [ 'OS=="linux" or OS=="qnx"', {
       'sources': [
         'test-platform-linux.cc',

  310  test/cctest/test-assembler-x87.cc  (new file)
@@ -0,0 +1,310 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "v8.h"

#include "disassembler.h"
#include "factory.h"
#include "macro-assembler.h"
#include "platform.h"
#include "serialize.h"
#include "cctest.h"

using namespace v8::internal;


typedef int (*F0)();
typedef int (*F1)(int x);
typedef int (*F2)(int x, int y);


#define __ assm.

TEST(AssemblerIa320) {
  CcTest::InitializeVM();
  Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
  HandleScope scope(isolate);

  v8::internal::byte buffer[256];
  Assembler assm(isolate, buffer, sizeof buffer);

  __ mov(eax, Operand(esp, 4));
  __ add(eax, Operand(esp, 8));
  __ ret(0);

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
  code->Print();
#endif
  F2 f = FUNCTION_CAST<F2>(code->entry());
  int res = f(3, 4);
  ::printf("f() = %d\n", res);
  CHECK_EQ(7, res);
}
|
||||
|
||||
|
||||
TEST(AssemblerIa321) {
|
||||
CcTest::InitializeVM();
|
||||
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
|
||||
HandleScope scope(isolate);
|
||||
|
||||
v8::internal::byte buffer[256];
|
||||
Assembler assm(isolate, buffer, sizeof buffer);
|
||||
Label L, C;
|
||||
|
||||
__ mov(edx, Operand(esp, 4));
|
||||
__ xor_(eax, eax); // clear eax
|
||||
__ jmp(&C);
|
||||
|
||||
__ bind(&L);
|
||||
__ add(eax, edx);
|
||||
__ sub(edx, Immediate(1));
|
||||
|
||||
__ bind(&C);
|
||||
__ test(edx, edx);
|
||||
__ j(not_zero, &L);
|
||||
__ ret(0);
|
||||
|
||||
CodeDesc desc;
|
||||
assm.GetCode(&desc);
|
||||
Handle<Code> code = isolate->factory()->NewCode(
|
||||
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
|
||||
#ifdef OBJECT_PRINT
|
||||
code->Print();
|
||||
#endif
|
||||
F1 f = FUNCTION_CAST<F1>(code->entry());
|
||||
int res = f(100);
|
||||
::printf("f() = %d\n", res);
|
||||
CHECK_EQ(5050, res);
|
||||
}
|
||||
|
||||
|
||||
TEST(AssemblerIa322) {
  CcTest::InitializeVM();
  Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
  HandleScope scope(isolate);

  v8::internal::byte buffer[256];
  Assembler assm(isolate, buffer, sizeof buffer);
  Label L, C;

  __ mov(edx, Operand(esp, 4));
  __ mov(eax, 1);
  __ jmp(&C);

  __ bind(&L);
  __ imul(eax, edx);
  __ sub(edx, Immediate(1));

  __ bind(&C);
  __ test(edx, edx);
  __ j(not_zero, &L);
  __ ret(0);
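  // Same loop shape as above, but multiplying instead of adding: f(n)
  // computes n!, so f(10) must yield 10! = 3628800.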

  // some relocated stuff here, not executed
  __ mov(eax, isolate->factory()->true_value());
  __ jmp(NULL, RelocInfo::RUNTIME_ENTRY);

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
  code->Print();
#endif
  F1 f = FUNCTION_CAST<F1>(code->entry());
  int res = f(10);
  ::printf("f() = %d\n", res);
  CHECK_EQ(3628800, res);
}


typedef int (*F3)(float x);

typedef int (*F4)(double x);

static int baz = 42;
TEST(AssemblerIa325) {
  CcTest::InitializeVM();
  Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
  HandleScope scope(isolate);

  v8::internal::byte buffer[256];
  Assembler assm(isolate, buffer, sizeof buffer);

  __ mov(eax, Operand(reinterpret_cast<intptr_t>(&baz), RelocInfo::NONE32));
  __ ret(0);

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F0 f = FUNCTION_CAST<F0>(code->entry());
  int res = f();
  CHECK_EQ(42, res);
}


typedef int (*F7)(double x, double y);

TEST(AssemblerIa329) {
  CcTest::InitializeVM();
  Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
  HandleScope scope(isolate);
  v8::internal::byte buffer[256];
  MacroAssembler assm(isolate, buffer, sizeof buffer);
  enum { kEqual = 0, kGreater = 1, kLess = 2, kNaN = 3, kUndefined = 4 };
  Label equal_l, less_l, greater_l, nan_l;
  __ fld_d(Operand(esp, 3 * kPointerSize));
  __ fld_d(Operand(esp, 1 * kPointerSize));
  __ FCmp();
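  // FCmp() compares the two operands on the x87 register stack and moves the
  // FPU condition codes into EFLAGS; an unordered result (a NaN operand) sets
  // the parity flag, which is why the NaN case is dispatched on first.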
  __ j(parity_even, &nan_l);
  __ j(equal, &equal_l);
  __ j(below, &less_l);
  __ j(above, &greater_l);

  __ mov(eax, kUndefined);
  __ ret(0);

  __ bind(&equal_l);
  __ mov(eax, kEqual);
  __ ret(0);

  __ bind(&greater_l);
  __ mov(eax, kGreater);
  __ ret(0);

  __ bind(&less_l);
  __ mov(eax, kLess);
  __ ret(0);

  __ bind(&nan_l);
  __ mov(eax, kNaN);
  __ ret(0);


  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
  code->Print();
#endif

  F7 f = FUNCTION_CAST<F7>(code->entry());
  CHECK_EQ(kLess, f(1.1, 2.2));
  CHECK_EQ(kEqual, f(2.2, 2.2));
  CHECK_EQ(kGreater, f(3.3, 2.2));
  CHECK_EQ(kNaN, f(OS::nan_value(), 1.1));
}


TEST(AssemblerIa3210) {
  // Test chaining of label usages within instructions (issue 1644).
  CcTest::InitializeVM();
  Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
  HandleScope scope(isolate);
  Assembler assm(isolate, NULL, 0);

  Label target;
  __ j(equal, &target);
  __ j(not_equal, &target);
  __ bind(&target);
  __ nop();
}


TEST(AssemblerMultiByteNop) {
  CcTest::InitializeVM();
  Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
  HandleScope scope(isolate);
  v8::internal::byte buffer[1024];
  Assembler assm(isolate, buffer, sizeof(buffer));
  __ push(ebx);
  __ push(ecx);
  __ push(edx);
  __ push(edi);
  __ push(esi);
  __ mov(eax, 1);
  __ mov(ebx, 2);
  __ mov(ecx, 3);
  __ mov(edx, 4);
  __ mov(edi, 5);
  __ mov(esi, 6);
  for (int i = 0; i < 16; i++) {
    int before = assm.pc_offset();
    __ Nop(i);
    CHECK_EQ(assm.pc_offset() - before, i);
  }
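  // Nop(i) must emit exactly i bytes of multi-byte NOP encodings; the
  // register comparisons below then verify that the filler sequences have no
  // architectural side effects.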

  Label fail;
  __ cmp(eax, 1);
  __ j(not_equal, &fail);
  __ cmp(ebx, 2);
  __ j(not_equal, &fail);
  __ cmp(ecx, 3);
  __ j(not_equal, &fail);
  __ cmp(edx, 4);
  __ j(not_equal, &fail);
  __ cmp(edi, 5);
  __ j(not_equal, &fail);
  __ cmp(esi, 6);
  __ j(not_equal, &fail);
  __ mov(eax, 42);
  __ pop(esi);
  __ pop(edi);
  __ pop(edx);
  __ pop(ecx);
  __ pop(ebx);
  __ ret(0);
  __ bind(&fail);
  __ mov(eax, 13);
  __ pop(esi);
  __ pop(edi);
  __ pop(edx);
  __ pop(ecx);
  __ pop(ebx);
  __ ret(0);

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  CHECK(code->IsCode());

  F0 f = FUNCTION_CAST<F0>(code->entry());
  int res = f();
  CHECK_EQ(42, res);
}


#undef __
149	test/cctest/test-code-stubs-x87.cc	Normal file
@ -0,0 +1,149 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// (Standard V8 BSD license header; identical to the block above.)

#include <stdlib.h>

#include <limits>

#include "v8.h"

#include "cctest.h"
#include "code-stubs.h"
#include "test-code-stubs.h"
#include "factory.h"
#include "macro-assembler.h"
#include "platform.h"

using namespace v8::internal;

#define __ assm.

ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
                                              Register source_reg,
                                              Register destination_reg) {
  // Allocate an executable page of memory.
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
                                                 &actual_size,
                                                 true));
  CHECK(buffer);
  HandleScope handles(isolate);
  MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
  int offset =
      source_reg.is(esp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
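  // The stub reads its double input from (source_reg + offset). For a
  // non-esp source register the bias appears to model a tagged HeapNumber
  // pointer, matching how generated code would invoke the stub.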
  DoubleToIStub stub(isolate, source_reg, destination_reg, offset, true);
  byte* start = stub.GetCode()->instruction_start();

  __ push(ebx);
  __ push(ecx);
  __ push(edx);
  __ push(esi);
  __ push(edi);

  if (!source_reg.is(esp)) {
    __ lea(source_reg, MemOperand(esp, 6 * kPointerSize - offset));
  }

  int param_offset = 7 * kPointerSize;
  // Save registers and make sure they don't get clobbered.
  int reg_num = 0;
  for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
    Register reg = Register::FromAllocationIndex(reg_num);
    if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
      __ push(reg);
      param_offset += kPointerSize;
    }
  }

  // Re-push the double argument.
  __ push(MemOperand(esp, param_offset));
  __ push(MemOperand(esp, param_offset));
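  // A double is two machine words on ia32, so it is copied with two word
  // pushes; the displacement stays the same while esp moves between them, so
  // the second push picks up the other half of the value.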

  // Call through to the actual stub
  __ call(start, RelocInfo::EXTERNAL_REFERENCE);

  __ add(esp, Immediate(kDoubleSize));

  // Make sure no registers have been unexpectedly clobbered
  for (--reg_num; reg_num >= 0; --reg_num) {
    Register reg = Register::FromAllocationIndex(reg_num);
    if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
      __ cmp(reg, MemOperand(esp, 0));
      __ Assert(equal, kRegisterWasClobbered);
      __ add(esp, Immediate(kPointerSize));
    }
  }

  __ mov(eax, destination_reg);

  __ pop(edi);
  __ pop(esi);
  __ pop(edx);
  __ pop(ecx);
  __ pop(ebx);

  __ ret(kDoubleSize);

  CodeDesc desc;
  assm.GetCode(&desc);
  return reinterpret_cast<ConvertDToIFunc>(
      reinterpret_cast<intptr_t>(buffer));
}

#undef __


static Isolate* GetIsolateFrom(LocalContext* context) {
  return reinterpret_cast<Isolate*>((*context)->GetIsolate());
}


TEST(ConvertDToI) {
  CcTest::InitializeVM();
  LocalContext context;
  Isolate* isolate = GetIsolateFrom(&context);
  HandleScope scope(isolate);

#if DEBUG
  // Verify that the tests actually work with the C version. In the release
  // code, the compiler optimizes it away because it's all constant, but does
  // it wrong, triggering an assert on gcc.
  RunAllTruncationTests(&ConvertDToICVersion);
#endif

  Register source_registers[] = {esp, eax, ebx, ecx, edx, edi, esi};
  Register dest_registers[] = {eax, ebx, ecx, edx, edi, esi};
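  // Exercise every (source, destination) register pairing so the stub cannot
  // silently depend on one particular assignment.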

  for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
    for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
      RunAllTruncationTests(
          MakeConvertDToIFuncTrampoline(isolate,
                                        source_registers[s],
                                        dest_registers[d]));
    }
  }
}
@ -28,7 +28,7 @@
#ifndef V8_TEST_CODE_STUBS_H_
#define V8_TEST_CODE_STUBS_H_

#if V8_TARGET_ARCH_IA32
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if __GNUC__
#define STDCALL __attribute__((stdcall))
#else
40	test/cctest/test-cpu-x87.cc	Normal file
@ -0,0 +1,40 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// (Standard V8 BSD license header; identical to the block above.)

#include "v8.h"
|
||||
|
||||
#include "cctest.h"
|
||||
#include "cpu.h"
|
||||
|
||||
using namespace v8::internal;
|
||||
|
||||
|
||||
TEST(RequiredFeaturesX64) {
|
||||
// Test for the features required by every x86 CPU in compat/legacy mode.
|
||||
CPU cpu;
|
||||
CHECK(cpu.has_sahf());
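  // SAHF is the one probed feature the x87 port leans on here: it moves the
  // FPU status flags into EFLAGS after fnstsw_ax, and is always available on
  // 32-bit x86 even though 64-bit CPUs must advertise it via CPUID.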
}
385	test/cctest/test-disasm-x87.cc	Normal file
@ -0,0 +1,385 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// (Standard V8 BSD license header; identical to the block above.)

#include <stdlib.h>

#include "v8.h"

#include "debug.h"
#include "disasm.h"
#include "disassembler.h"
#include "macro-assembler.h"
#include "serialize.h"
#include "stub-cache.h"
#include "cctest.h"

using namespace v8::internal;


#define __ assm.


static void DummyStaticFunction(Object* result) {
}


TEST(DisasmIa320) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  v8::internal::byte buffer[2048];
  Assembler assm(isolate, buffer, sizeof buffer);
  DummyStaticFunction(NULL);  // just bloody use it (DELETE; debugging)

  // Short immediate instructions
  __ adc(eax, 12345678);
  __ add(eax, Immediate(12345678));
  __ or_(eax, 12345678);
  __ sub(eax, Immediate(12345678));
  __ xor_(eax, 12345678);
  __ and_(eax, 12345678);
  Handle<FixedArray> foo = isolate->factory()->NewFixedArray(10, TENURED);
  __ cmp(eax, foo);

  // ---- This one caused crash
  __ mov(ebx, Operand(esp, ecx, times_2, 0));  // [esp+ecx*2]
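  // Nothing emitted in this test is ever executed; it checks that every
  // encoding the assembler can produce assembles cleanly and, when
  // OBJECT_PRINT is enabled, feeds the result through the disassembler at
  // the end.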

  // ---- All instructions that I can think of
  __ add(edx, ebx);
  __ add(edx, Operand(12, RelocInfo::NONE32));
  __ add(edx, Operand(ebx, 0));
  __ add(edx, Operand(ebx, 16));
  __ add(edx, Operand(ebx, 1999));
  __ add(edx, Operand(ebx, -4));
  __ add(edx, Operand(ebx, -1999));
  __ add(edx, Operand(esp, 0));
  __ add(edx, Operand(esp, 16));
  __ add(edx, Operand(esp, 1999));
  __ add(edx, Operand(esp, -4));
  __ add(edx, Operand(esp, -1999));
  __ nop();
  __ add(esi, Operand(ecx, times_4, 0));
  __ add(esi, Operand(ecx, times_4, 24));
  __ add(esi, Operand(ecx, times_4, -4));
  __ add(esi, Operand(ecx, times_4, -1999));
  __ nop();
  __ add(edi, Operand(ebp, ecx, times_4, 0));
  __ add(edi, Operand(ebp, ecx, times_4, 12));
  __ add(edi, Operand(ebp, ecx, times_4, -8));
  __ add(edi, Operand(ebp, ecx, times_4, -3999));
  __ add(Operand(ebp, ecx, times_4, 12), Immediate(12));

  __ nop();
  __ add(ebx, Immediate(12));
  __ nop();
  __ adc(ecx, 12);
  __ adc(ecx, 1000);
  __ nop();
  __ and_(edx, 3);
  __ and_(edx, Operand(esp, 4));
  __ cmp(edx, 3);
  __ cmp(edx, Operand(esp, 4));
  __ cmp(Operand(ebp, ecx, times_4, 0), Immediate(1000));
  Handle<FixedArray> foo2 = isolate->factory()->NewFixedArray(10, TENURED);
  __ cmp(ebx, foo2);
  __ cmpb(ebx, Operand(ebp, ecx, times_2, 0));
  __ cmpb(Operand(ebp, ecx, times_2, 0), ebx);
  __ or_(edx, 3);
  __ xor_(edx, 3);
  __ nop();
  __ cpuid();
  __ movsx_b(edx, ecx);
  __ movsx_w(edx, ecx);
  __ movzx_b(edx, ecx);
  __ movzx_w(edx, ecx);

  __ nop();
  __ imul(edx, ecx);
  __ shld(edx, ecx);
  __ shrd(edx, ecx);
  __ bts(edx, ecx);
  __ bts(Operand(ebx, ecx, times_4, 0), ecx);
  __ nop();
  __ pushad();
  __ popad();
  __ pushfd();
  __ popfd();
  __ push(Immediate(12));
  __ push(Immediate(23456));
  __ push(ecx);
  __ push(esi);
  __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(Operand(ebx, ecx, times_4, 0));
  __ push(Operand(ebx, ecx, times_4, 0));
  __ push(Operand(ebx, ecx, times_4, 10000));
  __ pop(edx);
  __ pop(eax);
  __ pop(Operand(ebx, ecx, times_4, 0));
  __ nop();

  __ add(edx, Operand(esp, 16));
  __ add(edx, ecx);
  __ mov_b(edx, ecx);
  __ mov_b(ecx, 6);
  __ mov_b(Operand(ebx, ecx, times_4, 10000), 6);
  __ mov_b(Operand(esp, 16), edx);
  __ mov_w(edx, Operand(esp, 16));
  __ mov_w(Operand(esp, 16), edx);
  __ nop();
  __ movsx_w(edx, Operand(esp, 12));
  __ movsx_b(edx, Operand(esp, 12));
  __ movzx_w(edx, Operand(esp, 12));
  __ movzx_b(edx, Operand(esp, 12));
  __ nop();
  __ mov(edx, 1234567);
  __ mov(edx, Operand(esp, 12));
  __ mov(Operand(ebx, ecx, times_4, 10000), Immediate(12345));
  __ mov(Operand(ebx, ecx, times_4, 10000), edx);
  __ nop();
  __ dec_b(edx);
  __ dec_b(Operand(eax, 10));
  __ dec_b(Operand(ebx, ecx, times_4, 10000));
  __ dec(edx);
  __ cdq();

  __ nop();
  __ idiv(edx);
  __ mul(edx);
  __ neg(edx);
  __ not_(edx);
  __ test(Operand(ebx, ecx, times_4, 10000), Immediate(123456));

  __ imul(edx, Operand(ebx, ecx, times_4, 10000));
  __ imul(edx, ecx, 12);
  __ imul(edx, ecx, 1000);

  __ inc(edx);
  __ inc(Operand(ebx, ecx, times_4, 10000));
  __ push(Operand(ebx, ecx, times_4, 10000));
  __ pop(Operand(ebx, ecx, times_4, 10000));
  __ call(Operand(ebx, ecx, times_4, 10000));
  __ jmp(Operand(ebx, ecx, times_4, 10000));

  __ lea(edx, Operand(ebx, ecx, times_4, 10000));
  __ or_(edx, 12345);
  __ or_(edx, Operand(ebx, ecx, times_4, 10000));

  __ nop();

  __ rcl(edx, 1);
  __ rcl(edx, 7);
  __ rcr(edx, 1);
  __ rcr(edx, 7);
  __ sar(edx, 1);
  __ sar(edx, 6);
  __ sar_cl(edx);
  __ sbb(edx, Operand(ebx, ecx, times_4, 10000));
  __ shld(edx, Operand(ebx, ecx, times_4, 10000));
  __ shl(edx, 1);
  __ shl(edx, 6);
  __ shl_cl(edx);
  __ shrd(edx, Operand(ebx, ecx, times_4, 10000));
  __ shr(edx, 1);
  __ shr(edx, 7);
  __ shr_cl(edx);


  // Immediates

  __ adc(edx, 12345);

  __ add(ebx, Immediate(12));
  __ add(Operand(edx, ecx, times_4, 10000), Immediate(12));

  __ and_(ebx, 12345);

  __ cmp(ebx, 12345);
  __ cmp(ebx, Immediate(12));
  __ cmp(Operand(edx, ecx, times_4, 10000), Immediate(12));
  __ cmpb(eax, 100);

  __ or_(ebx, 12345);

  __ sub(ebx, Immediate(12));
  __ sub(Operand(edx, ecx, times_4, 10000), Immediate(12));

  __ xor_(ebx, 12345);

  __ imul(edx, ecx, 12);
  __ imul(edx, ecx, 1000);

  __ cld();
  __ rep_movs();
  __ rep_stos();
  __ stos();

  __ sub(edx, Operand(ebx, ecx, times_4, 10000));
  __ sub(edx, ebx);

  __ test(edx, Immediate(12345));
  __ test(edx, Operand(ebx, ecx, times_8, 10000));
  __ test(Operand(esi, edi, times_1, -20000000), Immediate(300000000));
  __ test_b(edx, Operand(ecx, ebx, times_2, 1000));
  __ test_b(Operand(eax, -20), 0x9A);
  __ nop();

  __ xor_(edx, 12345);
  __ xor_(edx, Operand(ebx, ecx, times_8, 10000));
  __ bts(Operand(ebx, ecx, times_8, 10000), edx);
  __ hlt();
  __ int3();
  __ ret(0);
  __ ret(8);

  // Calls

  Label L1, L2;
  __ bind(&L1);
  __ nop();
  __ call(&L1);
  __ call(&L2);
  __ nop();
  __ bind(&L2);
  __ call(Operand(ebx, ecx, times_4, 10000));
  __ nop();
  Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_CONTEXTUAL));
  __ call(ic, RelocInfo::CODE_TARGET);
  __ nop();
  __ call(FUNCTION_ADDR(DummyStaticFunction), RelocInfo::RUNTIME_ENTRY);
  __ nop();

  __ jmp(&L1);
  __ jmp(Operand(ebx, ecx, times_4, 10000));
  ExternalReference after_break_target =
      ExternalReference::debug_after_break_target_address(isolate);
  __ jmp(Operand::StaticVariable(after_break_target));
  __ jmp(ic, RelocInfo::CODE_TARGET);
  __ nop();


  Label Ljcc;
  __ nop();
  // long jumps
  __ j(overflow, &Ljcc);
  __ j(no_overflow, &Ljcc);
  __ j(below, &Ljcc);
  __ j(above_equal, &Ljcc);
  __ j(equal, &Ljcc);
  __ j(not_equal, &Ljcc);
  __ j(below_equal, &Ljcc);
  __ j(above, &Ljcc);
  __ j(sign, &Ljcc);
  __ j(not_sign, &Ljcc);
  __ j(parity_even, &Ljcc);
  __ j(parity_odd, &Ljcc);
  __ j(less, &Ljcc);
  __ j(greater_equal, &Ljcc);
  __ j(less_equal, &Ljcc);
  __ j(greater, &Ljcc);
  __ nop();
  __ bind(&Ljcc);
  // short jumps
  __ j(overflow, &Ljcc);
  __ j(no_overflow, &Ljcc);
  __ j(below, &Ljcc);
  __ j(above_equal, &Ljcc);
  __ j(equal, &Ljcc);
  __ j(not_equal, &Ljcc);
  __ j(below_equal, &Ljcc);
  __ j(above, &Ljcc);
  __ j(sign, &Ljcc);
  __ j(not_sign, &Ljcc);
  __ j(parity_even, &Ljcc);
  __ j(parity_odd, &Ljcc);
  __ j(less, &Ljcc);
  __ j(greater_equal, &Ljcc);
  __ j(less_equal, &Ljcc);
  __ j(greater, &Ljcc);

  // 0xD9 instructions
  __ nop();

  __ fld(1);
  __ fld1();
  __ fldz();
  __ fldpi();
  __ fabs();
  __ fchs();
  __ fprem();
  __ fprem1();
  __ fincstp();
  __ ftst();
  __ fxch(3);
  __ fld_s(Operand(ebx, ecx, times_4, 10000));
  __ fstp_s(Operand(ebx, ecx, times_4, 10000));
  __ ffree(3);
  __ fld_d(Operand(ebx, ecx, times_4, 10000));
  __ fstp_d(Operand(ebx, ecx, times_4, 10000));
  __ nop();

  __ fild_s(Operand(ebx, ecx, times_4, 10000));
  __ fistp_s(Operand(ebx, ecx, times_4, 10000));
  __ fild_d(Operand(ebx, ecx, times_4, 10000));
  __ fistp_d(Operand(ebx, ecx, times_4, 10000));
  __ fnstsw_ax();
  __ nop();
  __ fadd(3);
  __ fsub(3);
  __ fmul(3);
  __ fdiv(3);

  __ faddp(3);
  __ fsubp(3);
  __ fmulp(3);
  __ fdivp(3);
  __ fcompp();
  __ fwait();
  __ frndint();
  __ fninit();
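  // Together these exercise the x87 FPU escape opcodes (0xD8-0xDF) that the
  // new x87 code generator emits where the ia32 port would use SSE2.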
  __ nop();

  // Nop instructions
  for (int i = 0; i < 16; i++) {
    __ Nop(i);
  }

  __ ret(0);

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  USE(code);
#ifdef OBJECT_PRINT
  code->Print();
  byte* begin = code->instruction_start();
  byte* end = begin + code->instruction_size();
  disasm::Disassembler::Disassemble(stdout, begin, end);
#endif
}

#undef __
@ -51,7 +51,7 @@ void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
  // GenerateHashInit takes the first character as an argument so it can't
  // handle the zero length string.
  ASSERT(string.length() > 0);
#if V8_TARGET_ARCH_IA32
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
  __ push(ebx);
  __ push(ecx);
  __ mov(eax, Immediate(0));
@ -136,7 +136,7 @@ void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {


void generate(MacroAssembler* masm, uint32_t key) {
#if V8_TARGET_ARCH_IA32
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
  __ push(ebx);
  __ mov(eax, Immediate(key));
  __ GetNumberHash(eax, ebx);
151	test/cctest/test-macro-assembler-x87.cc	Normal file
@ -0,0 +1,151 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// (Standard V8 BSD license header; identical to the block above.)

#include <stdlib.h>

#include "v8.h"

#include "macro-assembler.h"
#include "factory.h"
#include "platform.h"
#include "serialize.h"
#include "cctest.h"

using namespace v8::internal;

#if __GNUC__
#define STDCALL __attribute__((stdcall))
#else
#define STDCALL __stdcall
#endif

typedef int STDCALL F0Type();
typedef F0Type* F0;

#define __ masm->


TEST(LoadAndStoreWithRepresentation) {
  v8::internal::V8::Initialize(NULL);

  // Allocate an executable page of memory.
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
                                                 &actual_size,
                                                 true));
  CHECK(buffer);
  Isolate* isolate = CcTest::i_isolate();
  HandleScope handles(isolate);
  MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
  MacroAssembler* masm = &assembler;  // Create a pointer for the __ macro.
  __ push(ebx);
  __ push(edx);
  __ sub(esp, Immediate(1 * kPointerSize));
  Label exit;

  // Test 1.
  __ mov(eax, Immediate(1));  // Test number.
  __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
  __ mov(ebx, Immediate(-1));
  __ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::UInteger8());
  __ mov(ebx, Operand(esp, 0 * kPointerSize));
  __ mov(edx, Immediate(255));
  __ cmp(ebx, edx);
  __ j(not_equal, &exit);
  __ Load(ebx, Operand(esp, 0 * kPointerSize), Representation::UInteger8());
  __ cmp(ebx, edx);
  __ j(not_equal, &exit);
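  // Storing -1 with UInteger8 keeps only the low byte (0xFF), and the
  // UInteger8 load zero-extends it, so both reads must observe 255.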


  // Test 2.
  __ mov(eax, Immediate(2));  // Test number.
  __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
  __ mov(ebx, Immediate(-1));
  __ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::Integer8());
  __ mov(ebx, Operand(esp, 0 * kPointerSize));
  __ mov(edx, Immediate(255));
  __ cmp(ebx, edx);
  __ j(not_equal, &exit);
  __ Load(ebx, Operand(esp, 0 * kPointerSize), Representation::Integer8());
  __ mov(edx, Immediate(-1));
  __ cmp(ebx, edx);
  __ j(not_equal, &exit);
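  // Integer8, by contrast, sign-extends on load: the stored byte 0xFF still
  // reads back as 255 through a plain word load, but as -1 through Load().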

  // Test 3.
  __ mov(eax, Immediate(3));  // Test number.
  __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
  __ mov(ebx, Immediate(-1));
  __ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::Integer16());
  __ mov(ebx, Operand(esp, 0 * kPointerSize));
  __ mov(edx, Immediate(65535));
  __ cmp(ebx, edx);
  __ j(not_equal, &exit);
  __ Load(edx, Operand(esp, 0 * kPointerSize), Representation::Integer16());
  __ mov(ebx, Immediate(-1));
  __ cmp(ebx, edx);
  __ j(not_equal, &exit);

  // Test 4.
  __ mov(eax, Immediate(4));  // Test number.
  __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
  __ mov(ebx, Immediate(-1));
  __ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::UInteger16());
  __ mov(ebx, Operand(esp, 0 * kPointerSize));
  __ mov(edx, Immediate(65535));
  __ cmp(ebx, edx);
  __ j(not_equal, &exit);
  __ Load(edx, Operand(esp, 0 * kPointerSize), Representation::UInteger16());
  __ cmp(ebx, edx);
  __ j(not_equal, &exit);

  // Test 5.
  __ mov(eax, Immediate(5));
  __ Move(edx, Immediate(0));  // Test Move()
  __ cmp(edx, Immediate(0));
  __ j(not_equal, &exit);
  __ Move(ecx, Immediate(-1));
  __ cmp(ecx, Immediate(-1));
  __ j(not_equal, &exit);
  __ Move(ebx, Immediate(0x77));
  __ cmp(ebx, Immediate(0x77));
  __ j(not_equal, &exit);

  __ xor_(eax, eax);  // Success.
  __ bind(&exit);
  __ add(esp, Immediate(1 * kPointerSize));
  __ pop(edx);
  __ pop(ebx);
  __ ret(0);

  CodeDesc desc;
  masm->GetCode(&desc);
  // Call the function from C++.
  int result = FUNCTION_CAST<F0>(buffer)();
  CHECK_EQ(0, result);
}

#undef __
@ -69,6 +69,11 @@
#include "ia32/macro-assembler-ia32.h"
#include "ia32/regexp-macro-assembler-ia32.h"
#endif
#if V8_TARGET_ARCH_X87
#include "x87/assembler-x87.h"
#include "x87/macro-assembler-x87.h"
#include "x87/regexp-macro-assembler-x87.h"
#endif
#endif  // V8_INTERPRETED_REGEXP

using namespace v8::internal;
@ -698,6 +703,8 @@ typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_X87
typedef RegExpMacroAssemblerX87 ArchRegExpMacroAssembler;
#endif

class ContextInitializer {
@ -117,7 +117,7 @@

# BUG(v8:2989). PASS/FAIL on linux32 because crankshaft is turned off for
# nosse2. Also for arm novfp3.
'regress/regress-2989': [FAIL, NO_VARIANTS, ['system == linux and arch == ia32 or arch == arm and simulator == True', PASS]],
'regress/regress-2989': [FAIL, NO_VARIANTS, ['system == linux and arch == x87 or arch == arm and simulator == True', PASS]],

# Skip endian dependent test for mips due to different typed views of the same
# array buffer.
@ -717,6 +717,37 @@
        '../../src/ia32/stub-cache-ia32.cc',
      ],
    }],
    ['v8_target_arch=="x87"', {
      'sources': [  ### gcmole(arch:x87) ###
        '../../src/x87/assembler-x87-inl.h',
        '../../src/x87/assembler-x87.cc',
        '../../src/x87/assembler-x87.h',
        '../../src/x87/builtins-x87.cc',
        '../../src/x87/code-stubs-x87.cc',
        '../../src/x87/code-stubs-x87.h',
        '../../src/x87/codegen-x87.cc',
        '../../src/x87/codegen-x87.h',
        '../../src/x87/cpu-x87.cc',
        '../../src/x87/debug-x87.cc',
        '../../src/x87/deoptimizer-x87.cc',
        '../../src/x87/disasm-x87.cc',
        '../../src/x87/frames-x87.cc',
        '../../src/x87/frames-x87.h',
        '../../src/x87/full-codegen-x87.cc',
        '../../src/x87/ic-x87.cc',
        '../../src/x87/lithium-codegen-x87.cc',
        '../../src/x87/lithium-codegen-x87.h',
        '../../src/x87/lithium-gap-resolver-x87.cc',
        '../../src/x87/lithium-gap-resolver-x87.h',
        '../../src/x87/lithium-x87.cc',
        '../../src/x87/lithium-x87.h',
        '../../src/x87/macro-assembler-x87.cc',
        '../../src/x87/macro-assembler-x87.h',
        '../../src/x87/regexp-macro-assembler-x87.cc',
        '../../src/x87/regexp-macro-assembler-x87.h',
        '../../src/x87/stub-cache-x87.cc',
      ],
    }],
    ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
      'sources': [  ### gcmole(arch:mipsel) ###
        '../../src/mips/assembler-mips.cc',
@ -80,6 +80,7 @@ SUPPORTED_ARCHS = ["android_arm",
                   "android_ia32",
                   "arm",
                   "ia32",
                   "x87",
                   "mips",
                   "mipsel",
                   "nacl_ia32",
@ -95,6 +96,7 @@ SLOW_ARCHS = ["android_arm",
              "mipsel",
              "nacl_ia32",
              "nacl_x64",
              "x87",
              "arm64"]

@ -52,8 +52,8 @@ DEFS = {FAIL_OK: [FAIL, OKAY],

# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32",
            "arm", "arm64", "ia32", "mips", "mipsel", "x64", "nacl_ia32",
for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87",
            "arm", "arm64", "ia32", "mips", "mipsel", "x64", "x87", "nacl_ia32",
            "nacl_x64", "macos", "windows", "linux"]:
  VARIABLES[var] = var