Contribution of PowerPC port (continuation of 422063005)

Contribution of PowerPC port (continuation of 422063005). The initial patch
covers the core changes to the common files. Subsequent patches will cover the
changes to common files needed to support AIX and will update the ppc
directories so they are current with the rest of the project.

This is based off of the GitHub repository
https://github.com/andrewlow/v8ppc

BUG=
R=svenpanne@chromium.org, danno@chromium.org

Review URL: https://codereview.chromium.org/817143002

Cr-Commit-Position: refs/heads/master@{#26091}
Sven Panne committed 2015-01-16 08:42:00 +01:00
parent 1b3490adab
commit e4c5b84652
56 changed files with 1876 additions and 57 deletions


@ -232,7 +232,7 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87
ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87 ppc ppc64
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug


@ -32,6 +32,7 @@
'msvs_use_common_release': 0,
'clang%': 0,
'v8_target_arch%': '<(target_arch)',
'v8_host_byteorder%': '<!(python -c "import sys; print sys.byteorder")',
# Native Client builds currently use the V8 ARM JIT and
# arm/simulator-arm.cc to defer the significant effort required
# for NaCl JIT support. The nacl_target_arch variable provides
@ -91,7 +92,9 @@
'android_webview_build%': '<(android_webview_build)',
},
'conditions': [
['host_arch=="ia32" or host_arch=="x64" or clang==1', {
['host_arch=="ia32" or host_arch=="x64" or \
host_arch=="ppc" or host_arch=="ppc64" or \
clang==1', {
'variables': {
'host_cxx_is_biarch%': 1,
},
@ -101,6 +104,7 @@
},
}],
['target_arch=="ia32" or target_arch=="x64" or target_arch=="x87" or \
target_arch=="ppc" or target_arch=="ppc64" or \
clang==1', {
'variables': {
'target_cxx_is_biarch%': 1,
@ -250,6 +254,28 @@
'V8_TARGET_ARCH_ARM64',
],
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
'defines': [
'V8_TARGET_ARCH_PPC',
],
'conditions': [
['v8_target_arch=="ppc64"', {
'defines': [
'V8_TARGET_ARCH_PPC64',
],
}],
['v8_host_byteorder=="little"', {
'defines': [
'V8_TARGET_ARCH_PPC_LE',
],
}],
['v8_host_byteorder=="big"', {
'defines': [
'V8_TARGET_ARCH_PPC_BE',
],
}],
],
}], # ppc
['v8_target_arch=="ia32"', {
'defines': [
'V8_TARGET_ARCH_IA32',
@ -783,11 +809,11 @@
},
},
}],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="x87" or v8_target_arch=="mips" or \
v8_target_arch=="mipsel")', {
v8_target_arch=="mipsel" or v8_target_arch=="ppc")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [
@ -820,7 +846,8 @@
],
}],
['(OS=="linux" or OS=="android") and \
(v8_target_arch=="x64" or v8_target_arch=="arm64")', {
(v8_target_arch=="x64" or v8_target_arch=="arm64" or \
v8_target_arch=="ppc64")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [


@ -65,6 +65,8 @@
#include "src/arm64/assembler-arm64-inl.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/assembler-ppc-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
@ -85,6 +87,8 @@
#include "src/arm64/regexp-macro-assembler-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
@ -1297,6 +1301,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_PPC
function = FUNCTION_ADDR(RegExpMacroAssemblerPPC::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS64


@ -8,6 +8,7 @@
namespace v8 {
namespace internal {
// TODO(svenpanne) introduce an AbortReason and partition this list
#define ERROR_MESSAGES_LIST(V) \
V(kNoReason, "no reason") \
\
@ -227,12 +228,18 @@ namespace internal {
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kTheCurrentStackPointerIsBelowCsp, \
"The current stack pointer is below csp") \
V(kTheInstructionShouldBeALis, "The instruction should be a lis") \
V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
V(kTheInstructionShouldBeAnOris, "The instruction should be an oris") \
V(kTheInstructionShouldBeALi, "The instruction should be a li") \
V(kTheInstructionShouldBeASldi, "The instruction should be a sldi") \
V(kTheInstructionToPatchShouldBeALoadFromConstantPool, \
"The instruction to patch should be a load from the constant pool") \
V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
"The instruction to patch should be a ldr literal") \
V(kTheInstructionToPatchShouldBeALis, \
"The instruction to patch should be a lis") \
V(kTheInstructionToPatchShouldBeALui, \
"The instruction to patch should be a lui") \
V(kTheInstructionToPatchShouldBeAnOri, \


@ -148,6 +148,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "src/base/atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "src/base/atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_PPC
#include "src/base/atomicops_internals_ppc_gcc.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "src/base/atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS


@ -0,0 +1,168 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
//
#ifndef V8_BASE_ATOMICOPS_INTERNALS_PPC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_PPC_H_
namespace v8 {
namespace base {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return (__sync_val_compare_and_swap(ptr, old_value, new_value));
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
for (;;) {
Atomic32 old_value = *ptr;
Atomic32 new_value = old_value + increment;
if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
return new_value;
// The exchange took place as expected.
}
// Otherwise, *ptr changed mid-loop and we need to retry.
}
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
__asm__ __volatile__("sync" : : : "memory"); }
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#ifdef V8_TARGET_ARCH_PPC64
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return (__sync_val_compare_and_swap(ptr, old_value, new_value));
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
return old_value;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
for (;;) {
Atomic64 old_value = *ptr;
Atomic64 new_value = old_value + increment;
if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
return new_value;
// The exchange took place as expected.
}
// Otherwise, *ptr changed mid-loop and we need to retry.
}
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
#endif
}
} // namespace v8::base
#endif // V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
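
For orientation, here is a minimal usage sketch of the acquire/release pairing these primitives provide. It is an illustration only, assuming the surrounding V8 tree so that src/base/atomicops.h selects this header on a PPC host; it is not part of the patch.

#include "src/base/atomicops.h"

namespace {

v8::base::Atomic32 payload = 0;
v8::base::Atomic32 ready = 0;

void Producer() {
  v8::base::NoBarrier_Store(&payload, 42);
  // Release_Store issues a "sync" before the store, so the payload written
  // above is visible to other processors before ready becomes 1.
  v8::base::Release_Store(&ready, 1);
}

void Consumer() {
  // Acquire_Load issues a "sync" after the load, so the read of payload
  // below cannot be reordered ahead of observing ready.
  if (v8::base::Acquire_Load(&ready) == 1) {
    v8::base::Atomic32 value = v8::base::NoBarrier_Load(&payload);
    (void)value;  // Observes 42 once ready is seen as 1.
  }
}

}  // namespace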


@ -48,6 +48,13 @@
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#elif defined(__PPC__) || defined(_ARCH_PPC)
#define V8_HOST_ARCH_PPC 1
#if defined(__PPC64__) || defined(_ARCH_PPC64)
#define V8_HOST_ARCH_64_BIT 1
#else
#define V8_HOST_ARCH_32_BIT 1
#endif
#else
#error "Host architecture was not detected as supported by v8"
#endif
@ -65,9 +72,9 @@
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@ -104,6 +111,12 @@
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_MIPS64
#define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_PPC
#if V8_TARGET_ARCH_PPC64
#define V8_TARGET_ARCH_64_BIT 1
#else
#define V8_TARGET_ARCH_32_BIT 1
#endif
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_ARCH_32_BIT 1
#else
@ -154,6 +167,10 @@
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_PPC_LE
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_PPC_BE
#define V8_TARGET_BIG_ENDIAN 1
#else
#error Unknown target architecture endianness
#endif
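
As an aside, a small sketch of how these endianness macros are typically consumed by target-independent code. LoadLittleEndian32 is a hypothetical helper used only for illustration; it is not part of this patch.

#include <stdint.h>
#include <cstring>

#include "src/base/build_config.h"

// Hypothetical helper: read a 32-bit little-endian value on any target.
inline uint32_t LoadLittleEndian32(const uint8_t* p) {
#if V8_TARGET_BIG_ENDIAN
  // Assemble byte by byte on big-endian PPC; p[0] is the least significant.
  return static_cast<uint32_t>(p[0]) | static_cast<uint32_t>(p[1]) << 8 |
         static_cast<uint32_t>(p[2]) << 16 | static_cast<uint32_t>(p[3]) << 24;
#else
  // On little-endian targets a plain copy already has the right layout.
  uint32_t value;
  std::memcpy(&value, p, sizeof(value));
  return value;
#endif
}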


@ -16,6 +16,9 @@
#if V8_OS_QNX
#include <sys/syspage.h> // cpuinfo
#endif
#if V8_OS_LINUX && V8_HOST_ARCH_PPC
#include <elf.h>
#endif
#if V8_OS_POSIX
#include <unistd.h> // sysconf()
#endif
@ -580,7 +583,54 @@ CPU::CPU()
delete[] part;
}
#elif V8_HOST_ARCH_PPC
#ifndef USE_SIMULATOR
#if V8_OS_LINUX
// Read processor info from /proc/self/auxv.
char* auxv_cpu_type = NULL;
FILE* fp = fopen("/proc/self/auxv", "r");
if (fp != NULL) {
#if V8_TARGET_ARCH_PPC64
Elf64_auxv_t entry;
#else
Elf32_auxv_t entry;
#endif
for (;;) {
size_t n = fread(&entry, sizeof(entry), 1, fp);
if (n == 0 || entry.a_type == AT_NULL) {
break;
}
if (entry.a_type == AT_PLATFORM) {
auxv_cpu_type = reinterpret_cast<char*>(entry.a_un.a_val);
break;
}
}
fclose(fp);
}
part_ = -1;
if (auxv_cpu_type) {
if (strcmp(auxv_cpu_type, "power8") == 0) {
part_ = PPC_POWER8;
} else if (strcmp(auxv_cpu_type, "power7") == 0) {
part_ = PPC_POWER7;
} else if (strcmp(auxv_cpu_type, "power6") == 0) {
part_ = PPC_POWER6;
} else if (strcmp(auxv_cpu_type, "power5") == 0) {
part_ = PPC_POWER5;
} else if (strcmp(auxv_cpu_type, "ppc970") == 0) {
part_ = PPC_G5;
} else if (strcmp(auxv_cpu_type, "ppc7450") == 0) {
part_ = PPC_G4;
} else if (strcmp(auxv_cpu_type, "pa6t") == 0) {
part_ = PPC_PA6T;
}
}
#endif // V8_OS_LINUX
#endif // !USE_SIMULATOR
#endif // V8_HOST_ARCH_PPC
}
} } // namespace v8::base
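
As an aside, on glibc 2.16 and later the same AT_PLATFORM string can be obtained without hand-parsing /proc/self/auxv. A minimal standalone sketch follows; it is not what the patch does, since V8 also had to run against older C libraries.

#include <sys/auxv.h>  // getauxval, glibc 2.16+

#include <cstdio>

int main() {
  // AT_PLATFORM yields the same string the loop above extracts from
  // /proc/self/auxv, e.g. "power7" or "power8".
  const char* platform =
      reinterpret_cast<const char*>(getauxval(AT_PLATFORM));
  if (platform != NULL) std::printf("platform: %s\n", platform);
  return 0;
}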


@ -50,6 +50,8 @@ class CPU FINAL {
int variant() const { return variant_; }
static const int NVIDIA_DENVER = 0x0;
int part() const { return part_; }
// ARM-specific part codes
static const int ARM_CORTEX_A5 = 0xc05;
static const int ARM_CORTEX_A7 = 0xc07;
static const int ARM_CORTEX_A8 = 0xc08;
@ -57,6 +59,17 @@ class CPU FINAL {
static const int ARM_CORTEX_A12 = 0xc0c;
static const int ARM_CORTEX_A15 = 0xc0f;
// PPC-specific part codes
enum {
PPC_POWER5,
PPC_POWER6,
PPC_POWER7,
PPC_POWER8,
PPC_G4,
PPC_G5,
PPC_PA6T
};
// General features
bool has_fpu() const { return has_fpu_; }


@ -85,8 +85,8 @@ int OS::ActivationFrameAlignment() {
// Otherwise we just assume 16 byte alignment, i.e.:
// - With gcc 4.4 the tree vectorization optimizer can generate code
// that requires 16 byte alignment such as movdqa on x86.
// - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned;
// see "Mac OS X ABI Function Call Guide"
// - Mac OS X, PPC and Solaris (64-bit) activation frames must
// be 16 byte-aligned; see "Mac OS X ABI Function Call Guide"
return 16;
#endif
}
@ -171,6 +171,14 @@ void* OS::GetRandomMmapAddr() {
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#elif V8_TARGET_ARCH_PPC64
#if V8_TARGET_BIG_ENDIAN
// Big-endian Linux: 44 bits of virtual addressing.
raw_addr &= V8_UINT64_C(0x03fffffff000);
#else
// Little-endian Linux: 48 bits of virtual addressing.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#endif
#else
raw_addr &= 0x3ffff000;
@ -225,6 +233,8 @@ void OS::DebugBreak() {
asm("break");
#elif V8_HOST_ARCH_MIPS64
asm("break");
#elif V8_HOST_ARCH_PPC
asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
#if V8_OS_NACL
asm("hlt");


@ -111,6 +111,16 @@ namespace internal {
#define CODE_STUB_LIST_ARM64(V)
#endif
// List of code stubs only used on PPC platforms.
#ifdef V8_TARGET_ARCH_PPC
#define CODE_STUB_LIST_PPC(V) \
V(DirectCEntry) \
V(StoreRegistersState) \
V(RestoreRegistersState)
#else
#define CODE_STUB_LIST_PPC(V)
#endif
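
For readers unfamiliar with the X-macro pattern used by these lists, a small sketch of how the CODE_STUB_LIST_PPC definition just above expands. The stub names are the real ones; the enum itself is only an illustration, not something code-stubs.h generates.

// Passing a one-argument macro as V stamps out one entry per stub.
enum PPCStubKind {
#define DEFINE_STUB_KIND(name) k##name,
  CODE_STUB_LIST_PPC(DEFINE_STUB_KIND)
#undef DEFINE_STUB_KIND
};
// Expands to: enum PPCStubKind { kDirectCEntry, kStoreRegistersState,
//                                kRestoreRegistersState, };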
// List of code stubs only used on MIPS platforms.
#if V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
@ -127,10 +137,11 @@ namespace internal {
#endif
// Combined list of code stubs.
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V) \
CODE_STUB_LIST_ARM64(V) \
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V) \
CODE_STUB_LIST_ARM64(V) \
CODE_STUB_LIST_PPC(V) \
CODE_STUB_LIST_MIPS(V)
// Stub is base classes of all stubs.
@ -504,6 +515,8 @@ class RuntimeCallHelper {
#include "src/arm64/code-stubs-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/code-stubs-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/code-stubs-mips.h"
#elif V8_TARGET_ARCH_MIPS64
@ -1460,7 +1473,7 @@ class CEntryStub : public PlatformCodeStub {
: PlatformCodeStub(isolate) {
minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs);
DCHECK(result_size == 1 || result_size == 2);
#ifdef _WIN64
#if _WIN64 || (V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS)
minor_key_ = ResultSizeBits::update(minor_key_, result_size);
#endif // _WIN64
}
@ -1473,7 +1486,7 @@ class CEntryStub : public PlatformCodeStub {
private:
bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
#ifdef _WIN64
#if _WIN64 || (V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS)
int result_size() const { return ResultSizeBits::decode(minor_key_); }
#endif // _WIN64


@ -53,6 +53,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#include "src/arm64/codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64


@ -19,6 +19,8 @@
#include "src/compiler/mips64/instruction-codes-mips64.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/x64/instruction-codes-x64.h"
#elif V8_TARGET_ARCH_PPC
#include "src/compiler/ppc/instruction-codes-ppc.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)


@ -464,7 +464,7 @@ DEFINE_BOOL(enable_32dregs, ENABLE_32DREGS_DEFAULT,
DEFINE_BOOL(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
DEFINE_BOOL(force_long_branches, false,
"force all emitted branches to be in long mode (MIPS only)")
"force all emitted branches to be in long mode (MIPS/PPC only)")
// bootstrapper.cc
DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
@ -667,7 +667,8 @@ DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
DEFINE_BOOL(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions")
#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64)
#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \
defined(V8_TARGET_ARCH_PPC64)
DEFINE_INT(sim_stack_alignment, 16,
"Stack alignment in bytes in simulator. This must be a power of two "
"and it must be at least 16. 16 is default.")


@ -17,6 +17,8 @@
#include "src/arm64/frames-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/frames-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/frames-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/frames-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64


@ -112,6 +112,12 @@ class FullCodeGenerator: public AstVisitor {
// TODO(all): Copied ARM value. Check this is sensible for ARM64.
static const int kCodeSizeMultiplier = 149;
static const int kBootCodeSizeMultiplier = 110;
#elif V8_TARGET_ARCH_PPC64
static const int kCodeSizeMultiplier = 200;
static const int kBootCodeSizeMultiplier = 120;
#elif V8_TARGET_ARCH_PPC
static const int kCodeSizeMultiplier = 200;
static const int kBootCodeSizeMultiplier = 120;
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 149;
static const int kBootCodeSizeMultiplier = 120;
@ -330,12 +336,15 @@ class FullCodeGenerator: public AstVisitor {
Label* if_true,
Label* if_false,
Label* fall_through);
#else // All non-mips arch.
#elif V8_TARGET_ARCH_PPC
void Split(Condition cc, Label* if_true, Label* if_false, Label* fall_through,
CRegister cr = cr7);
#else // All other arch.
void Split(Condition cc,
Label* if_true,
Label* if_false,
Label* fall_through);
#endif // V8_TARGET_ARCH_MIPS
#endif
// Load the value of a known (PARAMETER, LOCAL, or CONTEXT) variable into
// a register. Emits a context chain walk if necessary (so does


@ -28,7 +28,7 @@
#if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC
#define V8_TURBOFAN_BACKEND 1
#else
#define V8_TURBOFAN_BACKEND 0
@ -59,6 +59,9 @@ namespace internal {
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_PPC && !V8_HOST_ARCH_PPC)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
#define USE_SIMULATOR 1
#endif
@ -83,7 +86,7 @@ namespace internal {
// Determine whether double field unboxing feature is enabled.
#if (V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64)
#if V8_TARGET_ARCH_64_BIT
#define V8_DOUBLE_FIELDS_UNBOXING 1
#else
#define V8_DOUBLE_FIELDS_UNBOXING 0
@ -625,6 +628,10 @@ enum CpuFeature {
// ARM64
ALWAYS_ALIGN_CSP,
COHERENT_CACHE,
// PPC
FPR_GPR_MOV,
LWSYNC,
ISELECT,
NUMBER_OF_CPU_FEATURES
};
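
To connect this with the part codes added to base::CPU, here is a rough sketch of how those codes might be mapped onto the new CpuFeature bits. It is an assumption about what CpuFeatures::ProbeImpl in assembler-ppc.cc does; the mapping in the shipped port may differ.

#include "src/base/cpu.h"
#include "src/globals.h"  // CpuFeature enum shown above

namespace v8 {
namespace internal {

// Illustrative mapping only; the real probe also handles FPU, AIX, and the
// simulator cases.
unsigned ProbePPCFeatures() {
  base::CPU cpu;
  unsigned supported = 0;
  if (cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported |= 1u << FPR_GPR_MOV;  // direct FPR<->GPR move support
    supported |= 1u << LWSYNC;       // lightweight sync instruction
  }
  if (cpu.part() == base::CPU::PPC_POWER8) {
    supported |= 1u << ISELECT;      // integer select instruction
  }
  return supported;
}

}  // namespace internal
}  // namespace v8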


@ -34,6 +34,10 @@
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
#if V8_TARGET_ARCH_PPC && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h" // NOLINT
#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
#endif
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h" // NOLINT
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT


@ -18,6 +18,8 @@
#include "src/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64


@ -51,6 +51,8 @@
#include "src/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64


@ -2027,8 +2027,8 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC
Simulator::Initialize(this);
#endif
#endif


@ -82,9 +82,10 @@ class Debug;
class Debugger;
class PromiseOnStack;
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__PPC__) && V8_TARGET_ARCH_PPC || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
class Redirection;
class Simulator;
@ -322,9 +323,10 @@ class ThreadLocalTop BASE_EMBEDDED {
};
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__) || \
V8_TARGET_ARCH_PPC && !defined(__PPC__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__) || \
V8_TARGET_ARCH_MIPS64 && !defined(__mips__)
#define ISOLATE_INIT_SIMULATOR_LIST(V) \
@ -417,9 +419,10 @@ class Isolate {
thread_id_(thread_id),
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__PPC__) && V8_TARGET_ARCH_PPC || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
simulator_(NULL),
#endif
@ -432,9 +435,10 @@ class Isolate {
FIELD_ACCESSOR(uintptr_t, stack_limit)
FIELD_ACCESSOR(ThreadState*, thread_state)
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__PPC__) && V8_TARGET_ARCH_PPC || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
FIELD_ACCESSOR(Simulator*, simulator)
#endif
@ -449,9 +453,10 @@ class Isolate {
uintptr_t stack_limit_;
ThreadState* thread_state_;
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__PPC__) && V8_TARGET_ARCH_PPC || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
Simulator* simulator_;
#endif

View File

@ -31,6 +31,8 @@
#include "src/arm64/regexp-macro-assembler-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
@ -6094,6 +6096,9 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#elif V8_TARGET_ARCH_ARM64
RegExpMacroAssemblerARM64 macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
#elif V8_TARGET_ARCH_PPC
RegExpMacroAssemblerPPC macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);


@ -15,6 +15,8 @@
#include "src/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64


@ -29,6 +29,9 @@
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#include "src/x87/lithium-codegen-x87.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/lithium-ppc.h" // NOLINT
#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
#else
#error Unsupported target architecture.
#endif


@ -19,6 +19,8 @@
#include "src/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#else


@ -18,6 +18,9 @@
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-arm.h" // NOLINT
#include "src/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/lithium-ppc.h" // NOLINT
#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#include "src/mips/lithium-codegen-mips.h" // NOLINT


@ -402,6 +402,8 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "x32";
#elif V8_TARGET_ARCH_ARM
const char arch[] = "arm";
#elif V8_TARGET_ARCH_PPC
const char arch[] = "ppc";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
#elif V8_TARGET_ARCH_X87


@ -64,6 +64,13 @@ const int kInvalidProtoDepth = -1;
#include "src/arm/assembler-arm-inl.h"
#include "src/code.h" // NOLINT, must be after assembler_*.h
#include "src/arm/macro-assembler-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/constants-ppc.h"
#include "src/assembler.h" // NOLINT
#include "src/ppc/assembler-ppc.h" // NOLINT
#include "src/ppc/assembler-ppc-inl.h"
#include "src/code.h" // NOLINT, must be after assembler_*.h
#include "src/ppc/macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/constants-mips.h"
#include "src/assembler.h" // NOLINT


@ -31,6 +31,8 @@
#include "src/mips/constants-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/constants-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/constants-ppc.h" // NOLINT
#endif


@ -82,9 +82,6 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Assume support
supported_ |= (1u << FPU);
}
if (cpu.cache_line_size() != 0) {
cache_line_size_ = cpu.cache_line_size();
}
#elif V8_OS_AIX
// Assume FP support and default cache line size
supported_ |= (1u << FPU);
@ -1422,11 +1419,13 @@ void Assembler::marker_asm(int mcode) {
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
#if ABI_USES_FUNCTION_DESCRIPTORS
DCHECK(pc_offset() == 0);
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
emit_ptr(0);
emit_ptr(0);
#endif
}
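
For context, the three pointer-sized words emitted above make up a function descriptor. Below is a descriptive sketch of the layout on ABIs that use them (for example 64-bit big-endian ELFv1); the struct is an illustration, not part of the patch.

#include <stdint.h>

// On descriptor-based ABIs a "function pointer" points at a record like
// this rather than at the first instruction.
struct FunctionDescriptor {
  uintptr_t entry;         // code address; emitted above as pc_ + 3 words
  uintptr_t toc;           // TOC base pointer; ignored here, emitted as 0
  uintptr_t static_chain;  // environment pointer; ignored here, emitted as 0
};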


@ -1150,9 +1150,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
// Called from C
#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
#endif
ProfileEntryHookStub::MaybeCallEntryHook(masm);


@ -46,9 +46,7 @@ UnaryMathFunction CreateExpFunction() {
Register temp3 = r9;
// Called from C
#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
#endif
__ Push(temp3, temp2, temp1);
MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
@ -88,9 +86,7 @@ UnaryMathFunction CreateSqrtFunction() {
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// Called from C
#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
#endif
__ MovFromFloatParameter(d1);
__ fsqrt(d1, d1);


@ -115,9 +115,7 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Mode mode,
DCHECK_EQ(0, registers_to_save % 2);
// Called from C
#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
#endif
__ b(&entry_label_); // We'll write the entry code later.
// If the code gets too big or corrupted, an internal exception will be


@ -32,6 +32,7 @@ class RegExpMacroAssembler {
kARMImplementation,
kARM64Implementation,
kMIPSImplementation,
kPPCImplementation,
kX64Implementation,
kX87Implementation,
kBytecodeImplementation


@ -256,6 +256,12 @@ class SimulatorHelper {
Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
#elif V8_TARGET_ARCH_PPC
state->pc = reinterpret_cast<Address>(simulator_->get_pc());
state->sp =
reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
state->fp =
reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
#endif
}
@ -361,7 +367,7 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
#if !V8_OS_OPENBSD
#if !(V8_OS_OPENBSD || (V8_OS_LINUX && V8_HOST_ARCH_PPC))
mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
#if V8_OS_LINUX
@ -398,6 +404,10 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_PPC
state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip);
state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
#endif // V8_HOST_ARCH_*
#elif V8_OS_MACOSX
#if V8_HOST_ARCH_X64


@ -1193,16 +1193,16 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
defined(V8_TARGET_ARCH_MIPS64)
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
defined(V8_TARGET_ARCH_PPC) || V8_OOL_CONSTANT_POOL
// Deserialize a new object from pointer found in code and write
// a pointer to it to the current object. Required only for MIPS or ARM
// with ool constant pool, and omitted on the other architectures because
// it is fully unrolled and would cause bloat.
// a pointer to it to the current object. Required only for MIPS, PPC or
// ARM with ool constant pool, and omitted on the other architectures
// because it is fully unrolled and would cause bloat.
ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to it to the current
// object. Required only for MIPS or ARM with ool constant pool.
// object. Required only for MIPS, PPC or ARM with ool constant pool.
ALL_SPACES(kBackref, kFromCode, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
@ -1219,7 +1219,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
defined(V8_TARGET_ARCH_MIPS64)
defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC)
// Find an object in the roots array and write a pointer to it to in code.
CASE_STATEMENT(kRootArray, kFromCode, kStartOfObject, 0)
CASE_BODY(kRootArray, kFromCode, kStartOfObject, 0)


@ -13,6 +13,8 @@
#include "src/arm64/simulator-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/arm/simulator-arm.h"
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/simulator-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/simulator-mips.h"
#elif V8_TARGET_ARCH_MIPS64


@ -1327,6 +1327,9 @@ INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
#elif defined(V8_HOST_ARCH_MIPS)
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
#elif defined(V8_HOST_ARCH_PPC)
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
#endif
// Copy from 8bit/16bit chars to 8bit/16bit chars.
@ -1486,6 +1489,136 @@ void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
MemCopy(dest, src, chars * sizeof(*dest));
}
}
#elif defined(V8_HOST_ARCH_PPC)
#define CASE(n) \
case n: \
memcpy(dest, src, n); \
break
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
switch (static_cast<unsigned>(chars)) {
case 0:
break;
case 1:
*dest = *src;
break;
CASE(2);
CASE(3);
CASE(4);
CASE(5);
CASE(6);
CASE(7);
CASE(8);
CASE(9);
CASE(10);
CASE(11);
CASE(12);
CASE(13);
CASE(14);
CASE(15);
CASE(16);
CASE(17);
CASE(18);
CASE(19);
CASE(20);
CASE(21);
CASE(22);
CASE(23);
CASE(24);
CASE(25);
CASE(26);
CASE(27);
CASE(28);
CASE(29);
CASE(30);
CASE(31);
CASE(32);
CASE(33);
CASE(34);
CASE(35);
CASE(36);
CASE(37);
CASE(38);
CASE(39);
CASE(40);
CASE(41);
CASE(42);
CASE(43);
CASE(44);
CASE(45);
CASE(46);
CASE(47);
CASE(48);
CASE(49);
CASE(50);
CASE(51);
CASE(52);
CASE(53);
CASE(54);
CASE(55);
CASE(56);
CASE(57);
CASE(58);
CASE(59);
CASE(60);
CASE(61);
CASE(62);
CASE(63);
CASE(64);
default:
memcpy(dest, src, chars);
break;
}
}
#undef CASE
#define CASE(n) \
case n: \
memcpy(dest, src, n * 2); \
break
void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
switch (static_cast<unsigned>(chars)) {
case 0:
break;
case 1:
*dest = *src;
break;
CASE(2);
CASE(3);
CASE(4);
CASE(5);
CASE(6);
CASE(7);
CASE(8);
CASE(9);
CASE(10);
CASE(11);
CASE(12);
CASE(13);
CASE(14);
CASE(15);
CASE(16);
CASE(17);
CASE(18);
CASE(19);
CASE(20);
CASE(21);
CASE(22);
CASE(23);
CASE(24);
CASE(25);
CASE(26);
CASE(27);
CASE(28);
CASE(29);
CASE(30);
CASE(31);
CASE(32);
default:
memcpy(dest, src, chars * 2);
break;
}
}
#undef CASE
#endif


@ -212,6 +212,20 @@
'test-js-arm64-variables.cc'
],
}],
['v8_target_arch=="ppc"', {
'sources': [ ### gcmole(arch:ppc) ###
'test-assembler-ppc.cc',
'test-code-stubs.cc',
'test-disasm-ppc.cc'
],
}],
['v8_target_arch=="ppc64"', {
'sources': [ ### gcmole(arch:ppc64) ###
'test-assembler-ppc.cc',
'test-code-stubs.cc',
'test-disasm-ppc.cc'
],
}],
['v8_target_arch=="mipsel"', {
'sources': [ ### gcmole(arch:mipsel) ###
'test-assembler-mips.cc',


@ -207,7 +207,7 @@ class CallHelper {
Simulator::CallArgument::End()};
return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
}
#elif USE_SIMULATOR && V8_TARGET_ARCH_MIPS64
#elif USE_SIMULATOR && (V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64)
uintptr_t CallSimulator(byte* f, int64_t p1 = 0, int64_t p2 = 0,
int64_t p3 = 0, int64_t p4 = 0) {
Simulator* simulator = Simulator::current(isolate_);
@ -243,7 +243,8 @@ class CallHelper {
ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3),
ParameterTraits<P4>::Cast(p4)));
}
#elif USE_SIMULATOR && (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS)
#elif USE_SIMULATOR && \
(V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_PPC)
uintptr_t CallSimulator(byte* f, int32_t p1 = 0, int32_t p2 = 0,
int32_t p3 = 0, int32_t p4 = 0) {
Simulator* simulator = Simulator::current(isolate_);

(File diff suppressed because it is too large.)


@ -0,0 +1,155 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <stdlib.h>
#include "src/v8.h"
#include "src/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
bool DisassembleAndCompare(byte* pc, const char* compare_string) {
disasm::NameConverter converter;
disasm::Disassembler disasm(converter);
EmbeddedVector<char, 128> disasm_buffer;
disasm.InstructionDecode(disasm_buffer, pc);
if (strcmp(compare_string, disasm_buffer.start()) != 0) {
fprintf(stderr,
"expected: \n"
"%s\n"
"disassembled: \n"
"%s\n\n",
compare_string, disasm_buffer.start());
return false;
}
return true;
}
// Set up V8 to a state where we can at least run the assembler and
// disassembler. Declare the variables and allocate the data structures used
// in the rest of the macros.
#define SET_UP() \
CcTest::InitializeVM(); \
Isolate* isolate = Isolate::Current(); \
HandleScope scope(isolate); \
byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
Assembler assm(isolate, buffer, 4 * 1024); \
bool failure = false;
// This macro assembles one instruction using the preallocated assembler and
// disassembles the generated instruction, comparing the output to the expected
// value. If the comparison fails an error message is printed, but the test
// continues to run until the end.
#define COMPARE(asm_, compare_string) \
{ \
int pc_offset = assm.pc_offset(); \
byte* progcounter = &buffer[pc_offset]; \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
}
// Force emission of any pending literals into a pool.
#define EMIT_PENDING_LITERALS() assm.CheckConstPool(true, false)
// Verify that all invocations of the COMPARE macro passed successfully.
// Exit with a failure if at least one of the tests failed.
#define VERIFY_RUN() \
if (failure) { \
V8_Fatal(__FILE__, __LINE__, "PPC Disassembler tests failed.\n"); \
}
TEST(DisasmPPC) {
SET_UP();
COMPARE(addc(r9, r7, r9), "7d274814 addc r9, r7, r9");
COMPARE(addic(r3, r5, Operand(20)), "30650014 addic r3, r5, 20");
COMPARE(addi(r0, ip, Operand(63)), "380c003f addi r0, r12, 63");
COMPARE(add(r5, r7, r0), "7ca70214 add r5, r7, r0");
COMPARE(addze(r0, r0, LeaveOE, SetRC), "7c000195 addze. r0, r0");
COMPARE(andi(r0, r3, Operand(4)), "70600004 andi. r0, r3, 4");
COMPARE(and_(r3, r6, r5), "7cc32838 and r3, r6, r5");
COMPARE(and_(r6, r0, r6, SetRC), "7c063039 and. r6, r0, r6");
// skipping branches (for now?)
COMPARE(bctr(), "4e800420 bctr");
COMPARE(blr(), "4e800020 blr");
COMPARE(bclr(BA, SetLK), "4e800021 blrl");
// skipping call - only used in simulator
#if V8_TARGET_ARCH_PPC64
COMPARE(cmpi(r0, Operand(5)), "2fa00005 cmpi r0, 5");
#else
COMPARE(cmpi(r0, Operand(5)), "2f800005 cmpi r0, 5");
#endif
#if V8_TARGET_ARCH_PPC64
COMPARE(cmpl(r6, r7), "7fa63840 cmpl r6, r7");
#else
COMPARE(cmpl(r6, r7), "7f863840 cmpl r6, r7");
#endif
#if V8_TARGET_ARCH_PPC64
COMPARE(cmp(r5, r11), "7fa55800 cmp r5, r11");
#else
COMPARE(cmp(r5, r11), "7f855800 cmp r5, r11");
#endif
// skipping crxor - incomplete disassembly
COMPARE(lbz(r4, MemOperand(r4, 7)), "88840007 lbz r4, 7(r4)");
COMPARE(lfd(d0, MemOperand(sp, 128)), "c8010080 lfd d0, 128(sp)");
COMPARE(li(r0, Operand(16)), "38000010 li r0, 16");
COMPARE(lis(r8, Operand(22560)), "3d005820 lis r8, 22560");
COMPARE(lwz(ip, MemOperand(r19, 44)), "8193002c lwz r12, 44(r19)");
COMPARE(lwzx(r0, MemOperand(r5, ip)), "7c05602e lwzx r0, r5, r12");
COMPARE(mflr(r0), "7c0802a6 mflr r0");
COMPARE(mr(r15, r4), "7c8f2378 mr r15, r4");
COMPARE(mtctr(r0), "7c0903a6 mtctr r0");
COMPARE(mtlr(r15), "7de803a6 mtlr r15");
COMPARE(ori(r8, r8, Operand(42849)), "6108a761 ori r8, r8, 42849");
COMPARE(orx(r5, r3, r4), "7c652378 or r5, r3, r4");
COMPARE(rlwinm(r4, r3, 2, 0, 29), "5464103a rlwinm r4, r3, 2, 0, 29");
COMPARE(rlwinm(r0, r3, 0, 31, 31, SetRC),
"546007ff rlwinm. r0, r3, 0, 31, 31");
COMPARE(srawi(r3, r6, 1), "7cc30e70 srawi r3,r6,1");
COMPARE(stb(r5, MemOperand(r11, 11)), "98ab000b stb r5, 11(r11)");
COMPARE(stfd(d2, MemOperand(sp, 8)), "d8410008 stfd d2, 8(sp)");
COMPARE(stw(r16, MemOperand(sp, 64)), "92010040 stw r16, 64(sp)");
COMPARE(stwu(r3, MemOperand(sp, -4)), "9461fffc stwu r3, -4(sp)");
COMPARE(sub(r3, r3, r4), "7c641850 subf r3, r4, r3");
COMPARE(sub(r0, r9, r8, LeaveOE, SetRC), "7c084851 subf. r0, r8, r9");
COMPARE(xor_(r6, r5, r4), "7ca62278 xor r6, r5, r4");
VERIFY_RUN();
}


@ -90,6 +90,14 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ pop(kRootRegister);
__ jr(ra);
__ nop();
#elif V8_TARGET_ARCH_PPC
__ function_descriptor();
__ push(kRootRegister);
__ InitializeRootRegister();
__ li(r3, Operand(key));
__ GetNumberHash(r3, ip);
__ pop(kRootRegister);
__ blr();
#else
#error Unsupported architecture.
#endif


@ -161,8 +161,7 @@ TEST(HeapObjects) {
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Handle<Smi>::cast(value)->value());
#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64) && \
!defined(V8_TARGET_ARCH_MIPS64)
#if !defined(V8_TARGET_ARCH_64_BIT)
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
value = factory->NewNumberFromInt(Smi::kMinValue - 1);
CHECK(value->IsHeapNumber());


@ -24,6 +24,10 @@ void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
__asm__ __volatile__("sw $sp, %0" : "=g"(sp_addr));
#elif V8_HOST_ARCH_MIPS64
__asm__ __volatile__("sd $sp, %0" : "=g"(sp_addr));
#elif defined(__PPC64__) || defined(_ARCH_PPC64)
__asm__ __volatile__("std 1, %0" : "=g"(sp_addr));
#elif defined(__PPC__) || defined(_ARCH_PPC)
__asm__ __volatile__("stw 1, %0" : "=g"(sp_addr));
#else
#error Host architecture was not detected as supported by v8
#endif


@ -53,6 +53,11 @@
#include "src/arm64/macro-assembler-arm64.h"
#include "src/arm64/regexp-macro-assembler-arm64.h"
#endif
#if V8_TARGET_ARCH_PPC
#include "src/ppc/assembler-ppc.h"
#include "src/ppc/macro-assembler-ppc.h"
#include "src/ppc/regexp-macro-assembler-ppc.h"
#endif
#if V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips.h"
#include "src/mips/macro-assembler-mips.h"
@ -687,6 +692,8 @@ typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM64
typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_PPC
typedef RegExpMacroAssemblerPPC ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS64


@ -1445,6 +1445,7 @@ TEST(InvalidExternalString) {
static const int invalid = String::kMaxLength + 1; \
HandleScope scope(isolate); \
Vector<TYPE> dummy = Vector<TYPE>::New(invalid); \
memset(dummy.start(), 0x0, dummy.length() * sizeof(TYPE)); \
CHECK(isolate->factory()->FUN(Vector<const TYPE>::cast(dummy)).is_null()); \
memset(dummy.start(), 0x20, dummy.length() * sizeof(TYPE)); \
CHECK(isolate->has_pending_exception()); \

View File

@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// On MacOS X 10.7.5, this test needs a stack size of at least 788 kBytes.
// On PPC64, this test needs a stack size of at least 698 kBytes.
// Flags: --stack-size=800
// Flags: --turbo-deoptimization


@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --stack-size=1300
// Create a regexp in the form of a?a?...a? so that fully
// traversing the entire graph would be prohibitively expensive.
// This should not cause time out.


@ -106,6 +106,11 @@
'compiler/x64/instruction-selector-x64-unittest.cc',
],
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
'sources': [ ### gcmole(arch:ppc) ###
'compiler/ppc/instruction-selector-ppc-unittest.cc',
],
}],
['component=="shared_library"', {
# compiler-unittests can't be built against a shared library, so we
# need to depend on the underlying static target in that case.


@ -1203,6 +1203,49 @@
'../../src/compiler/x64/linkage-x64.cc',
],
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
'sources': [ ### gcmole(arch:ppc) ###
'../../src/ppc/assembler-ppc-inl.h',
'../../src/ppc/assembler-ppc.cc',
'../../src/ppc/assembler-ppc.h',
'../../src/ppc/builtins-ppc.cc',
'../../src/ppc/code-stubs-ppc.cc',
'../../src/ppc/code-stubs-ppc.h',
'../../src/ppc/codegen-ppc.cc',
'../../src/ppc/codegen-ppc.h',
'../../src/ppc/constants-ppc.h',
'../../src/ppc/constants-ppc.cc',
'../../src/ppc/cpu-ppc.cc',
'../../src/ppc/debug-ppc.cc',
'../../src/ppc/deoptimizer-ppc.cc',
'../../src/ppc/disasm-ppc.cc',
'../../src/ppc/frames-ppc.cc',
'../../src/ppc/frames-ppc.h',
'../../src/ppc/full-codegen-ppc.cc',
'../../src/ppc/interface-descriptors-ppc.cc',
'../../src/ppc/interface-descriptors-ppc.h',
'../../src/ppc/lithium-ppc.cc',
'../../src/ppc/lithium-ppc.h',
'../../src/ppc/lithium-codegen-ppc.cc',
'../../src/ppc/lithium-codegen-ppc.h',
'../../src/ppc/lithium-gap-resolver-ppc.cc',
'../../src/ppc/lithium-gap-resolver-ppc.h',
'../../src/ppc/macro-assembler-ppc.cc',
'../../src/ppc/macro-assembler-ppc.h',
'../../src/ppc/regexp-macro-assembler-ppc.cc',
'../../src/ppc/regexp-macro-assembler-ppc.h',
'../../src/ppc/simulator-ppc.cc',
'../../src/compiler/ppc/code-generator-ppc.cc',
'../../src/compiler/ppc/instruction-codes-ppc.h',
'../../src/compiler/ppc/instruction-selector-ppc.cc',
'../../src/compiler/ppc/linkage-ppc.cc',
'../../src/ic/ppc/access-compiler-ppc.cc',
'../../src/ic/ppc/handler-compiler-ppc.cc',
'../../src/ic/ppc/ic-ppc.cc',
'../../src/ic/ppc/ic-compiler-ppc.cc',
'../../src/ic/ppc/stub-cache-ppc.cc',
],
}],
['OS=="win"', {
'variables': {
'gyp_generators': '<!(echo $GYP_GENERATORS)',
@ -1265,6 +1308,7 @@
'../../src/base/atomicops_internals_atomicword_compat.h',
'../../src/base/atomicops_internals_mac.h',
'../../src/base/atomicops_internals_mips_gcc.h',
'../../src/base/atomicops_internals_ppc_gcc.h',
'../../src/base/atomicops_internals_tsan.h',
'../../src/base/atomicops_internals_x86_gcc.cc',
'../../src/base/atomicops_internals_x86_gcc.h',


@ -66,6 +66,8 @@ SUPPORTED_ARCHS = ["android_arm",
"android_ia32",
"arm",
"ia32",
"ppc",
"ppc64",
"mipsel",
"nacl_ia32",
"nacl_x64",


@ -109,6 +109,8 @@ SUPPORTED_ARCHS = ["android_arm",
"mips64el",
"nacl_ia32",
"nacl_x64",
"ppc",
"ppc64",
"x64",
"x32",
"arm64"]


@ -55,7 +55,7 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87",
"arm", "arm64", "ia32", "mips", "mipsel", "mips64el", "x64", "x87", "nacl_ia32",
"nacl_x64", "macos", "windows", "linux"]:
"nacl_x64", "ppc", "ppc64", "macos", "windows", "linux", "aix"]:
VARIABLES[var] = var


@ -99,6 +99,8 @@ def DefaultArch():
return 'ia32'
elif machine == 'amd64':
return 'ia32'
elif id == 'ppc64':
return 'ppc'
else:
return None