S390: Initial impl of S390 asm, masm, code-stubs,...

Initial commit with the bulk of the src/s390/* changes
along with associated changes to the build toolchain for
the new files.

A minor update to V8PRIuPTR definition for Mac OS X
affecting 32-bit S390 sim compilations.

R=danno@chromium.org,jkummerow@chromium.org,jochen@chromium.org,jyan@ca.ibm.com,michael_dawson@ca.ibm.com,mbrandy@us.ibm.com
BUG=

Review URL: https://codereview.chromium.org/1725243004

Cr-Commit-Position: refs/heads/master@{#34331}
Author: joransiu, 2016-02-26 08:24:36 -08:00 (committed by Commit bot)
parent 9945b3dddc
commit 23cf65926e
23 changed files with 31322 additions and 0 deletions

BUILD.gn

@@ -1619,6 +1619,29 @@ source_set("v8_base") {
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
]
} else if (v8_target_arch == "s390" || v8_target_arch == "s390x") {
sources += [
"src/s390/assembler-s390-inl.h",
"src/s390/assembler-s390.cc",
"src/s390/assembler-s390.h",
"src/s390/builtins-s390.cc",
"src/s390/code-stubs-s390.cc",
"src/s390/code-stubs-s390.h",
"src/s390/codegen-s390.cc",
"src/s390/codegen-s390.h",
"src/s390/constants-s390.cc",
"src/s390/constants-s390.h",
"src/s390/cpu-s390.cc",
"src/s390/deoptimizer-s390.cc",
"src/s390/disasm-s390.cc",
"src/s390/frames-s390.cc",
"src/s390/frames-s390.h",
"src/s390/interface-descriptors-s390.cc",
"src/s390/macro-assembler-s390.cc",
"src/s390/macro-assembler-s390.h",
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
]
}
configs -= [ "//build/config/compiler:chromium_code" ]

src/base/macros.h

@@ -278,6 +278,8 @@ inline void USE(T) { }
#if V8_OS_MACOSX
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
#undef V8PRIuPTR
#define V8PRIuPTR "lu"
#endif
// The following macro works on both 32 and 64-bit platforms.
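For context: V8PRIxPTR and V8PRIuPTR exist so one format string can print a
uintptr_t on both 32- and 64-bit targets, and Mac OS X needs the explicit "l"
length modifier because it types uintptr_t as unsigned long even in 32-bit
builds. A minimal, self-contained sketch of their use follows; the non-Apple
fallbacks are assumptions for illustration (the real definitions are built
from a pointer-size prefix earlier in the header):

#include <cstdint>
#include <cstdio>

#if defined(__APPLE__)
#define V8PRIxPTR "lx"  // uintptr_t is unsigned long on Mac OS X
#define V8PRIuPTR "lu"
#else
#include <cinttypes>
#define V8PRIxPTR PRIxPTR
#define V8PRIuPTR PRIuPTR
#endif

int main() {
  uintptr_t p = 0xDEADu;
  // The same format string works unchanged on 32- and 64-bit builds.
  std::printf("hex: %" V8PRIxPTR " dec: %" V8PRIuPTR "\n", p, p);
  return 0;
}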

src/s390/assembler-s390-inl.h (new file, 575 lines)

@@ -0,0 +1,575 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license has been modified
// significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
#ifndef V8_S390_ASSEMBLER_S390_INL_H_
#define V8_S390_ASSEMBLER_S390_INL_H_
#include "src/s390/assembler-s390.h"
#include "src/assembler.h"
#include "src/debug/debug.h"
namespace v8 {
namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
void RelocInfo::apply(intptr_t delta) {
// Absolute code pointer inside code object moves with the code object.
if (IsInternalReference(rmode_)) {
// Jump table entry
Address target = Memory::Address_at(pc_);
Memory::Address_at(pc_) = target + delta;
} else if (IsCodeTarget(rmode_)) {
SixByteInstr instr =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc_));
int32_t dis = static_cast<int32_t>(instr & 0xFFFFFFFF) * 2 // halfwords
- static_cast<int32_t>(delta);
instr >>= 32; // Clear the 4-byte displacement field.
instr <<= 32;
instr |= static_cast<uint32_t>(dis / 2);
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc_),
instr);
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, host_);
Assembler::set_target_address_at(isolate_, pc_, host_, target + delta,
SKIP_ICACHE_FLUSH);
}
}
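// A note on the displacement arithmetic above (illustrative): BRASL and BRCL
// carry a signed 32-bit *halfword* count relative to the instruction address,
// so a byte delta is halved on encode and doubled on decode:
//   byte_offset == halfword_count * 2
// which is why apply() multiplies by 2 when reading the field and divides by
// 2 when writing it back.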
Address RelocInfo::target_internal_reference() {
if (IsInternalReference(rmode_)) {
// Jump table entry
return Memory::Address_at(pc_);
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
return Assembler::target_address_at(pc_, host_);
}
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
return reinterpret_cast<Address>(pc_);
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
// The serializer uses it to find out how many raw bytes of instruction to
// output before the next target.
// For an instruction like LIS/ORI where the target bits are mixed into the
// instruction bits, the size of the target will be zero, indicating that the
// serializer should not step forward in memory after a target is resolved
// and written.
return reinterpret_cast<Address>(pc_);
}
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
return NULL;
}
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(isolate_, pc_, host_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
// Sequence is:
// BRASL r14, RI
return pc - kCallTargetAddressOffset;
}
Address Assembler::return_address_from_call_start(Address pc) {
// Sequence is:
// BRASL r14, RI
return pc + kCallTargetAddressOffset;
}
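// Both helpers above assume the fixed call sequence is a single BRASL, a
// 6-byte RIL-format instruction, so kCallTargetAddressOffset is simply the
// BRASL length and the two functions are exact inverses of each other.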
Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
SixByteInstr instr =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
int index = instr & 0xFFFFFFFF;
return code_targets_[index];
}
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
if (rmode_ == EMBEDDED_OBJECT) {
return Handle<Object>(
reinterpret_cast<Object**>(Assembler::target_address_at(pc_, host_)));
} else {
return origin->code_target_object_handle_at(pc_);
}
}
void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
}
}
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
Handle<Cell> RelocInfo::target_cell_handle() {
DCHECK(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
Cell* RelocInfo::target_cell() {
DCHECK(rmode_ == RelocInfo::CELL);
return Cell::FromValueAddress(Memory::Address_at(pc_));
}
void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
cell);
}
}
#if V8_TARGET_ARCH_S390X
// NOP(2byte) + PUSH + MOV + BASR =
// NOP + LAY + STG + IIHF + IILF + BASR
static const int kCodeAgingSequenceLength = 28;
static const int kCodeAgingTargetDelta = 14; // Jump past NOP + PUSH to IIHF
// LAY + 4 * STG + LA
static const int kNoCodeAgeSequenceLength = 34;
#else
#if (V8_HOST_ARCH_S390)
// NOP + NILH + LAY + ST + IILF + BASR
static const int kCodeAgingSequenceLength = 24;
static const int kCodeAgingTargetDelta = 16; // Jump past NOP to IILF
// NILH + LAY + 4 * ST + LA
static const int kNoCodeAgeSequenceLength = 30;
#else
// NOP + LAY + ST + IILF + BASR
static const int kCodeAgingSequenceLength = 20;
static const int kCodeAgingTargetDelta = 12; // Jump past NOP to IILF
// LAY + 4 * ST + LA
static const int kNoCodeAgeSequenceLength = 26;
#endif
#endif
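// Byte accounting for the constants above (illustrative, 64-bit case):
//   kCodeAgingSequenceLength = NOP(2) + LAY(6) + STG(6) + IIHF(6) + IILF(6) +
//                              BASR(2) = 28
//   kCodeAgingTargetDelta    = NOP(2) + LAY(6) + STG(6) = 14, the offset of
//                              the IIHF/IILF pair holding the stub address.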
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on S390.
return Handle<Object>();
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
Assembler::target_address_at(pc_ + kCodeAgingTargetDelta, host_));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(isolate_, pc_ + kCodeAgingTargetDelta, host_,
stub->instruction_start(),
icache_flush_mode);
}
Address RelocInfo::debug_call_address() {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
return Assembler::target_address_at(pc_, host_);
}
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Assembler::set_target_address_at(isolate_, pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
if (IsInternalReference(rmode_)) {
// Jump table entry
Memory::Address_at(pc_) = NULL;
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
Assembler::set_target_address_at(isolate_, pc_, host_, NULL,
SKIP_ICACHE_FLUSH);
} else {
Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
}
template <typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
// Operand constructors
Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm_ = immediate;
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm_ = reinterpret_cast<intptr_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm_ = reinterpret_cast<intptr_t>(value);
rmode_ = kRelocInfo_NONEPTR;
}
Operand::Operand(Register rm) {
rm_ = rm;
rmode_ = kRelocInfo_NONEPTR; // S390: why doesn't ARM do this?
}
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
}
int32_t Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
SetRecordedAstId(ast_id);
RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID);
} else {
RecordRelocInfo(rmode);
}
int current = code_targets_.length();
if (current > 0 && code_targets_.last().is_identical_to(target)) {
// Optimization if we keep jumping to the same code target.
current--;
} else {
code_targets_.Add(target);
}
return current;
}
// Helper to emit the binary encoding of a 2-byte instruction
void Assembler::emit2bytes(uint16_t x) {
CheckBuffer();
#if V8_TARGET_LITTLE_ENDIAN
// We need to emit instructions in big endian format as disassembler /
// simulator require the first byte of the instruction in order to decode
// the instruction length. Swap the bytes.
x = ((x & 0x00FF) << 8) | ((x & 0xFF00) >> 8);
#endif
*reinterpret_cast<uint16_t*>(pc_) = x;
pc_ += 2;
}
// Helper to emit the binary encoding of a 4-byte instruction
void Assembler::emit4bytes(uint32_t x) {
CheckBuffer();
#if V8_TARGET_LITTLE_ENDIAN
// We need to emit instructions in big endian format as disassembler /
// simulator require the first byte of the instruction in order to decode
// the instruction length. Swap the bytes.
x = ((x & 0x000000FF) << 24) | ((x & 0x0000FF00) << 8) |
((x & 0x00FF0000) >> 8) | ((x & 0xFF000000) >> 24);
#endif
*reinterpret_cast<uint32_t*>(pc_) = x;
pc_ += 4;
}
// Helper to emit the binary encoding of a 6-byte instruction
void Assembler::emit6bytes(uint64_t x) {
CheckBuffer();
#if V8_TARGET_LITTLE_ENDIAN
// We need to emit instructions in big endian format as disassembler /
// simulator require the first byte of the instruction in order to decode
// the instruction length. Swap the bytes.
x = (static_cast<uint64_t>(x & 0xFF) << 40) |
(static_cast<uint64_t>((x >> 8) & 0xFF) << 32) |
(static_cast<uint64_t>((x >> 16) & 0xFF) << 24) |
(static_cast<uint64_t>((x >> 24) & 0xFF) << 16) |
(static_cast<uint64_t>((x >> 32) & 0xFF) << 8) |
(static_cast<uint64_t>((x >> 40) & 0xFF));
x |= (*reinterpret_cast<uint64_t*>(pc_) >> 48) << 48;
#else
// We need to pad two bytes of zeros so that the 6 bytes are
// stored starting from the low address.
x = x << 16;
x |= *reinterpret_cast<uint64_t*>(pc_) & 0xFFFF;
#endif
// It is safe to store 8-bytes, as CheckBuffer() guarantees we have kGap
// space left over.
*reinterpret_cast<uint64_t*>(pc_) = x;
pc_ += 6;
}
bool Operand::is_reg() const { return rm_.is_valid(); }
// Fetch the 32bit value from the FIXED_SEQUENCE IIHF / IILF
Address Assembler::target_address_at(Address pc, Address constant_pool) {
// S390 Instruction!
// We want to check for instructions generated by Asm::mov()
Opcode op1 = Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(pc));
SixByteInstr instr_1 =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
if (BRASL == op1 || BRCL == op1) {
int32_t dis = static_cast<int32_t>(instr_1 & 0xFFFFFFFF) * 2;
return reinterpret_cast<Address>(reinterpret_cast<uint64_t>(pc) + dis);
}
#if V8_TARGET_ARCH_S390X
int instr1_length =
Instruction::InstructionLength(reinterpret_cast<const byte*>(pc));
Opcode op2 = Instruction::S390OpcodeValue(
reinterpret_cast<const byte*>(pc + instr1_length));
SixByteInstr instr_2 = Instruction::InstructionBits(
reinterpret_cast<const byte*>(pc + instr1_length));
// IIHF for hi_32, IILF for lo_32
if (IIHF == op1 && IILF == op2) {
return reinterpret_cast<Address>(((instr_1 & 0xFFFFFFFF) << 32) |
((instr_2 & 0xFFFFFFFF)));
}
#else
// IILF loads 32-bits
if (IILF == op1 || CFI == op1) {
return reinterpret_cast<Address>((instr_1 & 0xFFFFFFFF));
}
#endif
UNIMPLEMENTED();
return (Address)0;
}
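// Pattern summary for the matcher above (sketch): a relative BRASL/BRCL
// encodes (target - pc) in halfwords; an absolute address is materialized as
// IIHF(high 32 bits) + IILF(low 32 bits) on 64-bit, or a single IILF (or CFI)
// on 32-bit. Any other opcode at pc means the mov sequence was not
// recognized.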
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the mov instructions etc.
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code, Address target) {
set_target_address_at(isolate, instruction_payload, code, target);
}
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
Code* code = NULL;
set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
}
// This code assumes the FIXED_SEQUENCE of IIHF/IILF
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
// Check for instructions generated by Asm::mov()
Opcode op1 = Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(pc));
SixByteInstr instr_1 =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
bool patched = false;
if (BRASL == op1 || BRCL == op1) {
instr_1 >>= 32; // Zero out the lower 32-bits
instr_1 <<= 32;
int32_t halfwords = (target - pc) / 2; // number of halfwords
instr_1 |= static_cast<uint32_t>(halfwords);
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
instr_1);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, pc, 6);
}
patched = true;
} else {
#if V8_TARGET_ARCH_S390X
int instr1_length =
Instruction::InstructionLength(reinterpret_cast<const byte*>(pc));
Opcode op2 = Instruction::S390OpcodeValue(
reinterpret_cast<const byte*>(pc + instr1_length));
SixByteInstr instr_2 = Instruction::InstructionBits(
reinterpret_cast<const byte*>(pc + instr1_length));
// IIHF for hi_32, IILF for lo_32
if (IIHF == op1 && IILF == op2) {
// IIHF
instr_1 >>= 32; // Zero out the lower 32-bits
instr_1 <<= 32;
instr_1 |= reinterpret_cast<uint64_t>(target) >> 32;
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
instr_1);
// IILF
instr_2 >>= 32;
instr_2 <<= 32;
instr_2 |= reinterpret_cast<uint64_t>(target) & 0xFFFFFFFF;
Instruction::SetInstructionBits<SixByteInstr>(
reinterpret_cast<byte*>(pc + instr1_length), instr_2);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, pc, 12);
}
patched = true;
}
#else
// IILF loads 32-bits
if (IILF == op1 || CFI == op1) {
instr_1 >>= 32; // Zero out the lower 32-bits
instr_1 <<= 32;
instr_1 |= reinterpret_cast<uint32_t>(target);
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
instr_1);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, pc, 6);
}
patched = true;
}
#endif
}
if (!patched) UNREACHABLE();
}
} // namespace internal
} // namespace v8
#endif // V8_S390_ASSEMBLER_S390_INL_H_
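The byte swapping in emit2bytes/emit4bytes/emit6bytes above exists because
S390 instructions must be stored big-endian: the decoder reads the instruction
length out of the first byte. A standalone sketch of the 6-byte case (not the
V8 code itself, just the same ordering written as a loop):

#include <cassert>
#include <cstdint>

// Store the low 48 bits of |instr| most-significant byte first, mirroring
// what emit6bytes() achieves with its shift/mask cascade on LE hosts.
static void Emit6BigEndian(uint8_t* pc, uint64_t instr) {
  for (int i = 0; i < 6; ++i)
    pc[i] = static_cast<uint8_t>(instr >> (8 * (5 - i)));
}

int main() {
  uint8_t buf[6];
  Emit6BigEndian(buf, 0xC0E500001234ULL);  // sample 6-byte encoding
  assert(buf[0] == 0xC0);  // opcode byte first, as the decoder expects
  assert(buf[5] == 0x34);  // least significant byte last
  return 0;
}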

src/s390/assembler-s390.cc (new file, 3034 lines): diff suppressed because it is too large

src/s390/assembler-s390.h (new file, 1467 lines): diff suppressed because it is too large

src/s390/builtins-s390.cc (new file, 2604 lines): diff suppressed because it is too large

src/s390/code-stubs-s390.cc (new file, 5770 lines): diff suppressed because it is too large

src/s390/code-stubs-s390.h (new file, 467 lines)

@@ -0,0 +1,467 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_S390_CODE_STUBS_S390_H_
#define V8_S390_CODE_STUBS_S390_H_
#include "src/s390/frames-s390.h"
namespace v8 {
namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
// Generate code for copying a large number of characters. This function
// is allowed to spend extra time setting up conditions to make copying
// faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharacters(MacroAssembler* masm, Register dest,
Register src, Register count,
Register scratch,
String::Encoding encoding);
// Compares two flat one-byte strings and returns result in r0.
static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
Register left, Register right,
Register scratch1,
Register scratch2,
Register scratch3);
// Compares two flat one-byte strings for equality and returns result in r0.
static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left, Register right,
Register scratch1,
Register scratch2);
private:
static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm,
Register left, Register right,
Register length,
Register scratch1,
Label* chars_not_equal);
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
class StoreRegistersStateStub : public PlatformCodeStub {
public:
explicit StoreRegistersStateStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
private:
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
};
class RestoreRegistersStateStub : public PlatformCodeStub {
public:
explicit RestoreRegistersStateStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
private:
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};
class RecordWriteStub : public PlatformCodeStub {
public:
RecordWriteStub(Isolate* isolate, Register object, Register value,
Register address, RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
minor_key_ = ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
RememberedSetActionBits::encode(remembered_set_action) |
SaveFPRegsModeBits::encode(fp_mode);
}
RecordWriteStub(uint32_t key, Isolate* isolate)
: PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
bool SometimesSetsUpAFrame() override { return false; }
// Patch an always-taken branch into a NOP branch
static void PatchBranchCondMask(MacroAssembler* masm, int pos, Condition c) {
int32_t instrLen = masm->instr_length_at(pos);
DCHECK(instrLen == 4 || instrLen == 6);
if (instrLen == 4) {
// BRC - Branch Mask @ Bits 23-20
FourByteInstr updatedMask = static_cast<FourByteInstr>(c) << 20;
masm->instr_at_put<FourByteInstr>(
pos, (masm->instr_at(pos) & ~kFourByteBrCondMask) | updatedMask);
} else {
// BRCL - Branch Mask @ Bits 39-36
SixByteInstr updatedMask = static_cast<SixByteInstr>(c) << 36;
masm->instr_at_put<SixByteInstr>(
pos, (masm->instr_at(pos) & ~kSixByteBrCondMask) | updatedMask);
}
}
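// Worked example (illustrative): a 4-byte BRC encodes as 0xA7m4dddd with the
// condition mask m in bits 23-20. Patching mask 0x0 in makes the branch a
// never-taken NOP; patching CC_ALWAYS back in re-arms it. GetMode() below
// reads the current mode straight from which of the two branches is a NOP.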
static bool isBranchNop(SixByteInstr instr, int instrLength) {
if ((4 == instrLength && 0 == (instr & kFourByteBrCondMask)) ||
// BRC - Check for 0x0 mask condition.
(6 == instrLength && 0 == (instr & kSixByteBrCondMask))) {
// BRCL - Check for 0x0 mask condition
return true;
}
return false;
}
static Mode GetMode(Code* stub) {
int32_t first_instr_length =
Instruction::InstructionLength(stub->instruction_start());
int32_t second_instr_length = Instruction::InstructionLength(
stub->instruction_start() + first_instr_length);
uint64_t first_instr = Assembler::instr_at(stub->instruction_start());
uint64_t second_instr =
Assembler::instr_at(stub->instruction_start() + first_instr_length);
DCHECK(first_instr_length == 4 || first_instr_length == 6);
DCHECK(second_instr_length == 4 || second_instr_length == 6);
bool isFirstInstrNOP = isBranchNop(first_instr, first_instr_length);
bool isSecondInstrNOP = isBranchNop(second_instr, second_instr_length);
// STORE_BUFFER_ONLY has NOP on both branches
if (isSecondInstrNOP && isFirstInstrNOP) return STORE_BUFFER_ONLY;
// INCREMENTAL_COMPACTION has NOP on the first branch only.
else if (isFirstInstrNOP && !isSecondInstrNOP)
return INCREMENTAL_COMPACTION;
// INCREMENTAL has NOP on the second branch only.
else if (!isFirstInstrNOP && isSecondInstrNOP)
return INCREMENTAL;
DCHECK(false);
return STORE_BUFFER_ONLY;
}
static void Patch(Code* stub, Mode mode) {
MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
stub->instruction_size(), CodeObjectRequired::kNo);
// Get instruction lengths of two branches
int32_t first_instr_length = masm.instr_length_at(0);
int32_t second_instr_length = masm.instr_length_at(first_instr_length);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
PatchBranchCondMask(&masm, 0, CC_NOP);
PatchBranchCondMask(&masm, first_instr_length, CC_NOP);
break;
case INCREMENTAL:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchBranchCondMask(&masm, 0, CC_ALWAYS);
break;
case INCREMENTAL_COMPACTION:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchBranchCondMask(&masm, first_instr_length, CC_ALWAYS);
break;
}
DCHECK(GetMode(stub) == mode);
Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
first_instr_length + second_instr_length);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
private:
// This is a helper class for freeing up 3 scratch registers. The input is
// two registers that must be preserved and one scratch register provided by
// the caller.
class RegisterAllocation {
public:
RegisterAllocation(Register object, Register address, Register scratch0)
: object_(object), address_(address), scratch0_(scratch0) {
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->push(scratch1_);
}
void Restore(MacroAssembler* masm) { masm->pop(scratch1_); }
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved. The scratch registers
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->push(r14);
masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
// Save all volatile FP registers except d0.
masm->MultiPushDoubles(kCallerSavedDoubles & ~d0.bit());
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
// Restore all volatile FP registers except d0.
masm->MultiPopDoubles(kCallerSavedDoubles & ~d0.bit());
}
masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
masm->pop(r14);
}
inline Register object() { return object_; }
inline Register address() { return address_; }
inline Register scratch0() { return scratch0_; }
inline Register scratch1() { return scratch1_; }
private:
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
friend class RecordWriteStub;
};
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
inline Major MajorKey() const final { return RecordWrite; }
void Generate(MacroAssembler* masm) override;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) override {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
}
Register value() const {
return Register::from_code(ValueBits::decode(minor_key_));
}
Register address() const {
return Register::from_code(AddressBits::decode(minor_key_));
}
RememberedSetAction remembered_set_action() const {
return RememberedSetActionBits::decode(minor_key_);
}
SaveFPRegsMode save_fp_regs_mode() const {
return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits : public BitField<int, 0, 4> {};
class ValueBits : public BitField<int, 4, 4> {};
class AddressBits : public BitField<int, 8, 4> {};
class RememberedSetActionBits : public BitField<RememberedSetAction, 15, 1> {
};
class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {};
Label slow_;
RegisterAllocation regs_;
DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
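// Key packing sketch (register codes are illustrative): with object in r3,
// value in r4 and address in r5, minor_key_ is
//   ObjectBits(3) | ValueBits(4) << 4 | AddressBits(5) << 8
// plus the remembered-set action at bit 15 and the FP-save mode at bit 16,
// exactly as laid out by the BitField declarations above.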
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native code pinned in memory. Currently the
// simplest approach is to generate such a stub early enough that it can never
// be moved by GC.
class DirectCEntryStub : public PlatformCodeStub {
public:
explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
void GenerateCall(MacroAssembler* masm, Register target);
private:
bool NeedsImmovableCode() override { return true; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
class NameDictionaryLookupStub : public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
: PlatformCodeStub(isolate) {
minor_key_ = LookupModeBits::encode(mode);
}
static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
Label* done, Register receiver,
Register properties, Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm, Label* miss,
Label* done, Register elements,
Register name, Register r0, Register r1);
bool SometimesSetsUpAFrame() override { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
static const int kCapacityOffset =
NameDictionary::kHeaderSize +
NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
class LookupModeBits : public BitField<LookupMode, 0, 1> {};
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
class FloatingPointHelper : public AllStatic {
public:
enum Destination { kFPRegisters, kCoreRegisters };
// Loads smis from r0 and r1 (right and left in binary operations) into
// floating point registers. Depending on the destination the values end up
// either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination
// is floating point registers VFP3 must be supported. If core registers are
// requested when VFP3 is supported, d6 and d7 will be scratched.
static void LoadSmis(MacroAssembler* masm, Register scratch1,
Register scratch2);
// Loads objects from r0 and r1 (right and left in binary operations) into
// floating point registers. Depending on the destination the values end up
// either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination
// is floating point registers VFP3 must be supported. If core registers are
// requested when VFP3 is supported, d6 and d7 will still be scratched. If
// either r0 or r1 is not a number (neither smi nor heap number object) the
// not_number label is jumped to with r0 and r1 intact.
static void LoadOperands(MacroAssembler* masm, Register heap_number_map,
Register scratch1, Register scratch2,
Label* not_number);
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
static void ConvertNumberToInt32(MacroAssembler* masm, Register object,
Register dst, Register heap_number_map,
Register scratch1, Register scratch2,
Register scratch3,
DoubleRegister double_scratch,
Label* not_int32);
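// ToInt32 truncates toward zero and then reduces modulo 2^32 into
// [-2^31, 2^31), e.g. ToInt32(-1.9) == -1 and ToInt32(4294967296.5) == 0
// (illustrative values, per ES5.1 section 9.5).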
// Converts the integer (untagged smi) in |src| to a double, storing
// the result to |double_dst|
static void ConvertIntToDouble(MacroAssembler* masm, Register src,
DoubleRegister double_dst);
// Converts the unsigned integer (untagged smi) in |src| to
// a double, storing the result to |double_dst|
static void ConvertUnsignedIntToDouble(MacroAssembler* masm, Register src,
DoubleRegister double_dst);
// Converts the integer (untagged smi) in |src| to
// a float, storing the result in |dst|
static void ConvertIntToFloat(MacroAssembler* masm, const DoubleRegister dst,
const Register src);
// Load the number from object into double_dst in the double format.
// Control will jump to not_int32 if the value cannot be exactly represented
// by a 32-bit integer.
// Floating point values in the 32-bit integer range that are not exact
// integers won't be loaded.
static void LoadNumberAsInt32Double(MacroAssembler* masm, Register object,
DoubleRegister double_dst,
DoubleRegister double_scratch,
Register heap_number_map,
Register scratch1, Register scratch2,
Label* not_int32);
// Loads the number from object into dst as a 32-bit integer.
// Control will jump to not_int32 if the object cannot be exactly represented
// by a 32-bit integer.
// Floating point values in the 32-bit integer range that are not exact
// integers won't be converted.
// scratch3 is not used when VFP3 is supported.
static void LoadNumberAsInt32(MacroAssembler* masm, Register object,
Register dst, Register heap_number_map,
Register scratch1, Register scratch2,
Register scratch3,
DoubleRegister double_scratch0,
DoubleRegister double_scratch1,
Label* not_int32);
// Generate non-VFP3 code to check if a double can be exactly represented by a
// 32-bit integer. This does not check for 0 or -0, which need
// to be checked for separately.
// Control jumps to not_int32 if the value is not a 32-bit integer, and falls
// through otherwise.
// src1 and src2 will be clobbered.
//
// Expected input:
// - src1: higher (exponent) part of the double value.
// - src2: lower (mantissa) part of the double value.
// Output status:
// - dst: 32 higher bits of the mantissa. (mantissa[51:20])
// - src2: contains 1.
// - other registers are clobbered.
static void DoubleIs32BitInteger(MacroAssembler* masm, Register src1,
Register src2, Register dst,
Register scratch, Label* not_int32);
// Generates code to call a C function to do a double operation using core
// registers. (Used when VFP3 is not supported.)
// This code never falls through, but returns with a heap number containing
// the result in r0.
// Register heapnumber_result must be a heap number in which the
// result of the operation will be stored.
// Requires the following layout on entry:
// r0: Left value (least significant part of mantissa).
// r1: Left value (sign, exponent, top of mantissa).
// r2: Right value (least significant part of mantissa).
// r3: Right value (sign, exponent, top of mantissa).
static void CallCCodeForDoubleOperation(MacroAssembler* masm, Token::Value op,
Register heap_number_result,
Register scratch);
private:
static void LoadNumber(MacroAssembler* masm, Register object,
DoubleRegister dst, Register heap_number_map,
Register scratch1, Register scratch2,
Label* not_number);
};
} // namespace internal
} // namespace v8
#endif // V8_S390_CODE_STUBS_S390_H_

src/s390/codegen-s390.cc (new file, 675 lines)

@@ -0,0 +1,675 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/s390/codegen-s390.h"
#if V8_TARGET_ARCH_S390
#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/s390/simulator-s390.h"
namespace v8 {
namespace internal {
#define __ masm.
#if defined(USE_SIMULATOR)
byte* fast_exp_s390_machine_code = nullptr;
double fast_exp_simulator(double x, Isolate* isolate) {
return Simulator::current(isolate)->CallFPReturnsDouble(
fast_exp_s390_machine_code, x, 0);
}
#endif
UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
{
DoubleRegister input = d0;
DoubleRegister result = d2;
DoubleRegister double_scratch1 = d3;
DoubleRegister double_scratch2 = d4;
Register temp1 = r6;
Register temp2 = r7;
Register temp3 = r8;
__ Push(temp3, temp2, temp1);
MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
double_scratch2, temp1, temp2, temp3);
__ Pop(temp3, temp2, temp1);
__ ldr(d0, result);
__ Ret();
}
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_s390_machine_code = buffer;
return &fast_exp_simulator;
#endif
}
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(d0);
__ sqdbr(d0, d0);
__ MovToFloatResult(d0);
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
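// Typical use of the generated stub (sketch): the returned pointer is called
// like any C function, e.g.
//   UnaryMathFunctionWithIsolate f = CreateSqrtFunction(isolate);
//   double root = (f != nullptr) ? f(2.0, isolate) : std::sqrt(2.0);
// On simulator builds this returns nullptr and callers fall back to the C
// library.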
#undef __
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
DCHECK(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
DCHECK(masm->has_frame());
masm->set_has_frame(false);
}
// -------------------------------------------------------------------------
// Code generators
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode,
Label* allocation_memento_found) {
Register scratch_elements = r6;
DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
if (mode == TRACK_ALLOCATION_SITE) {
DCHECK(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements,
allocation_memento_found);
}
// Set transitioned map.
__ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode, Label* fail) {
// lr contains the return address
Label loop, entry, convert_hole, gc_required, only_change_map, done;
Register elements = r6;
Register length = r7;
Register array = r8;
Register array_end = array;
// target_map parameter can be clobbered.
Register scratch1 = target_map;
Register scratch2 = r1;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
scratch2));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ beq(&only_change_map, Label::kNear);
// Preserve the return address and use r14 as a temporary register.
__ push(r14);
__ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// length: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
__ SmiToDoubleArrayOffset(r14, length);
__ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
__ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
// Update receiver's map.
__ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
__ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ AddP(scratch1, array, Operand(kHeapObjectTag));
__ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
__ AddP(target_map, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
__ SmiToDoubleArrayOffset(array, length);
__ AddP(array_end, r9, array);
// Repurpose registers no longer in use.
#if V8_TARGET_ARCH_S390X
Register hole_int64 = elements;
#else
Register hole_lower = elements;
Register hole_upper = length;
#endif
// scratch1: begin of source FixedArray element fields, not tagged
// hole_lower: kHoleNanLower32 OR hole_int64
// hole_upper: kHoleNanUpper32
// array_end: end of destination FixedDoubleArray, not tagged
// scratch2: begin of FixedDoubleArray element fields, not tagged
__ b(&entry, Label::kNear);
__ bind(&only_change_map);
__ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ b(&done, Label::kNear);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ pop(r14);
__ b(fail);
// Convert and copy elements.
__ bind(&loop);
__ LoadP(r14, MemOperand(scratch1));
__ la(scratch1, MemOperand(scratch1, kPointerSize));
// r14: current element
__ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);
// Normal smi, convert to double and store.
__ ConvertIntToDouble(r14, d0);
__ StoreDouble(d0, MemOperand(r9, 0));
__ la(r9, MemOperand(r9, 8));
__ b(&entry, Label::kNear);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
// Restore a "smi-untagged" heap object.
__ LoadP(r1, MemOperand(r5, -kPointerSize));
__ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
#if V8_TARGET_ARCH_S390X
__ stg(hole_int64, MemOperand(r9, 0));
#else
__ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
__ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
#endif
__ AddP(r9, Operand(8));
__ bind(&entry);
__ CmpP(r9, array_end);
__ blt(&loop);
__ pop(r14);
__ bind(&done);
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode, Label* fail) {
// Register lr contains the return address.
Label loop, convert_hole, gc_required, only_change_map;
Register elements = r6;
Register array = r8;
Register length = r7;
Register scratch = r1;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
scratch));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ beq(&only_change_map);
__ Push(target_map, receiver, key, value);
__ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// elements: source FixedDoubleArray
// length: number of elements (smi-tagged)
// Allocate new FixedArray.
// Re-use value and target_map registers, as they have been saved on the
// stack.
Register array_size = value;
Register allocate_scratch = target_map;
__ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
__ SmiToPtrArrayOffset(r0, length);
__ AddP(array_size, r0);
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
// array: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
__ AddP(array, Operand(kHeapObjectTag));
// Prepare for conversion loop.
Register src_elements = elements;
Register dst_elements = target_map;
Register dst_end = length;
Register heap_number_map = scratch;
__ AddP(src_elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ SmiToPtrArrayOffset(length, length);
__ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
Label initialization_loop, loop_done;
__ ShiftRightP(r0, length, Operand(kPointerSizeLog2));
__ beq(&loop_done, Label::kNear /*, cr0*/);
// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
// so pessimistically fill it with holes now.
__ AddP(dst_elements, array,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ bind(&initialization_loop);
__ StoreP(r9, MemOperand(dst_elements, kPointerSize));
__ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
__ BranchOnCount(r0, &initialization_loop);
__ AddP(dst_elements, array,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ AddP(dst_end, dst_elements, length);
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses in src_elements to fully take advantage of
// post-indexing.
// dst_elements: begin of destination FixedArray element fields, not tagged
// src_elements: begin of source FixedDoubleArray element fields,
// not tagged, +4
// dst_end: end of destination FixedArray, not tagged
// array: destination FixedArray
// r9: the-hole pointer
// heap_number_map: heap number map
__ b(&loop, Label::kNear);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ Pop(target_map, receiver, key, value);
__ b(fail);
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ StoreP(r9, MemOperand(dst_elements));
__ AddP(dst_elements, Operand(kPointerSize));
__ CmpLogicalP(dst_elements, dst_end);
__ bge(&loop_done);
__ bind(&loop);
Register upper_bits = key;
__ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
__ AddP(src_elements, Operand(kDoubleSize));
// upper_bits: current element's upper 32 bit
// src_elements: address of next element's upper 32 bit
__ Cmp32(upper_bits, Operand(kHoleNanUpper32));
__ beq(&convert_hole, Label::kNear);
// Non-hole double, copy value into a heap number.
Register heap_number = receiver;
Register scratch2 = value;
__ AllocateHeapNumber(heap_number, scratch2, r1, heap_number_map,
&gc_required);
// heap_number: new heap number
#if V8_TARGET_ARCH_S390X
__ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
// subtract tag for std
__ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
__ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
__ LoadlW(scratch2,
MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
__ LoadlW(upper_bits,
MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
__ StoreW(scratch2,
FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
__ StoreW(upper_bits,
FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
__ LoadRR(scratch2, dst_elements);
__ StoreP(heap_number, MemOperand(dst_elements));
__ AddP(dst_elements, Operand(kPointerSize));
__ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ CmpLogicalP(dst_elements, dst_end);
__ blt(&loop);
__ bind(&loop_done);
__ Pop(target_map, receiver, key, value);
// Replace receiver's backing store with newly created and filled FixedArray.
__ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ bind(&only_change_map);
// Update receiver's map.
__ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
// assume ip can be used as a scratch register below
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
Register index, Register result,
Label* call_runtime) {
// Fetch the instance type of the receiver into result register.
__ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
__ mov(r0, Operand(kIsIndirectStringMask));
__ AndP(r0, result);
__ beq(&check_sequential, Label::kNear /*, cr0*/);
// Dispatch on the indirect string shape: slice or cons.
Label cons_string;
__ mov(ip, Operand(kSlicedNotConsMask));
__ LoadRR(r0, result);
__ AndP(r0, ip /*, SetRC*/); // Should be okay to remove RC
__ beq(&cons_string, Label::kNear /*, cr0*/);
// Handle slices.
Label indirect_string_loaded;
__ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ SmiUntag(ip, result);
__ AddP(index, ip);
__ b(&indirect_string_loaded, Label::kNear);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ bind(&cons_string);
__ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ CompareRoot(result, Heap::kempty_stringRootIndex);
__ bne(call_runtime);
// Get the first of the two strings and load its instance type.
__ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ bind(&indirect_string_loaded);
__ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
// reduced to the underlying sequential or external string).
Label external_string, check_encoding;
__ bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ mov(r0, Operand(kStringRepresentationMask));
__ AndP(r0, result);
__ bne(&external_string, Label::kNear);
// Prepare sequential strings
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ AddP(string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ b(&check_encoding, Label::kNear);
// Handle external strings.
__ bind(&external_string);
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ mov(r0, Operand(kIsIndirectStringMask));
__ AndP(r0, result);
__ Assert(eq, kExternalStringExpectedButNotFound, cr0);
}
// Rule out short external strings.
STATIC_ASSERT(kShortExternalStringTag != 0);
__ mov(r0, Operand(kShortExternalStringMask));
__ AndP(r0, result);
__ bne(call_runtime /*, cr0*/);
__ LoadP(string,
FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label one_byte, done;
__ bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ mov(r0, Operand(kStringEncodingMask));
__ AndP(r0, result);
__ bne(&one_byte, Label::kNear);
// Two-byte string.
__ ShiftLeftP(result, index, Operand(1));
__ LoadLogicalHalfWordP(result, MemOperand(string, result));
__ b(&done, Label::kNear);
__ bind(&one_byte);
// One-byte string.
__ LoadlB(result, MemOperand(string, index));
__ bind(&done);
}
static MemOperand ExpConstant(int index, Register base) {
return MemOperand(base, index * kDoubleSize);
}
void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
DoubleRegister result,
DoubleRegister double_scratch1,
DoubleRegister double_scratch2,
Register temp1, Register temp2,
Register temp3) {
DCHECK(!input.is(result));
DCHECK(!input.is(double_scratch1));
DCHECK(!input.is(double_scratch2));
DCHECK(!result.is(double_scratch1));
DCHECK(!result.is(double_scratch2));
DCHECK(!double_scratch1.is(double_scratch2));
DCHECK(!temp1.is(temp2));
DCHECK(!temp1.is(temp3));
DCHECK(!temp2.is(temp3));
DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
DCHECK(!masm->serializer_enabled()); // External references not serializable.
Label zero, infinity, done;
__ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
__ LoadDouble(double_scratch1, ExpConstant(0, temp3));
__ cdbr(double_scratch1, input);
__ ldr(result, input);
__ bunordered(&done, Label::kNear);
__ bge(&zero, Label::kNear);
__ LoadDouble(double_scratch2, ExpConstant(1, temp3));
__ cdbr(input, double_scratch2);
__ bge(&infinity, Label::kNear);
__ LoadDouble(double_scratch1, ExpConstant(3, temp3));
__ LoadDouble(result, ExpConstant(4, temp3));
// Do not generate madbr, as intermediate results are not
// rounded properly
__ mdbr(double_scratch1, input);
__ adbr(double_scratch1, result);
// Move low word of double_scratch1 to temp2
__ lgdr(temp2, double_scratch1);
__ nihf(temp2, Operand::Zero());
__ sdbr(double_scratch1, result);
__ LoadDouble(result, ExpConstant(6, temp3));
__ LoadDouble(double_scratch2, ExpConstant(5, temp3));
__ mdbr(double_scratch1, double_scratch2);
__ sdbr(double_scratch1, input);
__ sdbr(result, double_scratch1);
__ ldr(double_scratch2, double_scratch1);
__ mdbr(double_scratch2, double_scratch2);
__ mdbr(result, double_scratch2);
__ LoadDouble(double_scratch2, ExpConstant(7, temp3));
__ mdbr(result, double_scratch2);
__ sdbr(result, double_scratch1);
__ LoadDouble(double_scratch2, ExpConstant(8, temp3));
__ adbr(result, double_scratch2);
__ ShiftRight(temp1, temp2, Operand(11));
__ AndP(temp2, Operand(0x7ff));
__ AddP(temp1, Operand(0x3ff));
// Must not call ExpConstant() after overwriting temp3!
__ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
__ ShiftLeft(temp2, temp2, Operand(3));
__ lg(temp2, MemOperand(temp2, temp3));
__ sllg(temp1, temp1, Operand(52));
__ ogr(temp2, temp1);
__ ldgr(double_scratch1, temp2);
__ mdbr(result, double_scratch1);
__ b(&done, Label::kNear);
__ bind(&zero);
__ lzdr(kDoubleRegZero);
__ ldr(result, kDoubleRegZero);
__ b(&done, Label::kNear);
__ bind(&infinity);
__ LoadDouble(result, ExpConstant(2, temp3));
__ bind(&done);
}
#undef __
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before the simulator ICache is set up.
base::SmartPointer<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length(), CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r3);
patcher->masm()->la(
fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(isolate, sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
Code* code = NULL;
Address target_address =
Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
Code* stub = GetCodeFromTargetAddress(target_address);
GetCodeAgeAndParity(stub, age, parity);
}
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
MarkingParity parity) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
// FIXED_SEQUENCE
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(isolate, sequence, young_length);
intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
// We need to push lr on the stack so that GenerateMakeCodeYoungAgainCommon
// knows where to pick up the return address.
//
// Since we can no longer guarantee that ip will hold the branch address
// because of BRASL, use Call so that GenerateMakeCodeYoungAgainCommon
// can calculate the branch address offset.
patcher.masm()->nop(); // marker to detect sequence (see IsOld)
patcher.masm()->CleanseP(r14);
patcher.masm()->Push(r14);
patcher.masm()->mov(r2, Operand(target));
patcher.masm()->Call(r2);
for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
i += 2) {
// TODO(joransiu): Create nop function to pad
// (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
patcher.masm()->nop(); // 2-byte nop.
}
}
}
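// For reference, the aged sequence laid down above (instruction sizes are
// the standard s390 encodings; this layout is inferred from the emission
// order, not from a separate spec):
//   nop                    // 2-byte marker tested by IsOld()
//   CleanseP(r14)          // clear the high bit of the return address (31-bit)
//   Push(r14)              // save the return address for the stub
//   mov(r2, stub_entry)    // load the code age stub target
//   Call(r2)               // BRASL into GenerateMakeCodeYoungAgainCommon
//   nop ...                // 2-byte nops pad to kNoCodeAgeSequenceLength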
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
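The tail of EmitMathExp above reconstructs 2^k by assembling raw IEEE-754
bits: the biased exponent is shifted into bits 62..52 (sllg by 52) and OR'd
with a mantissa fetched from the log table before ldgr moves the result into
a floating-point register. A minimal standalone C++ sketch of that bit trick
(not part of the diff; PowerOfTwo is a hypothetical helper):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Build the double 2^n from its bit pattern, as the generated code does:
// biased exponent (n + 1023) in bits 62..52, zero mantissa.
double PowerOfTwo(int n) {
  uint64_t bits = static_cast<uint64_t>(n + 1023) << 52;
  double result;
  std::memcpy(&result, &bits, sizeof(result));  // safe type pun
  return result;
}

int main() {
  std::printf("%g %g %g\n", PowerOfTwo(0), PowerOfTwo(10), PowerOfTwo(-3));
  // Prints: 1 1024 0.125
  return 0;
}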

src/s390/codegen-s390.h (new file, 44 lines)

@@ -0,0 +1,44 @@
// Copyright 2011 the V8 project authors. All rights reserved.
//
// Copyright IBM Corp. 2012, 2015. All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_S390_CODEGEN_S390_H_
#define V8_S390_CODEGEN_S390_H_
#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
// indexed character into |result|. We expect |index| as untagged input and
// |result| as untagged output.
static void Generate(MacroAssembler* masm, Register string, Register index,
Register result, Label* call_runtime);
private:
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
class MathExpGenerator : public AllStatic {
public:
// Register input isn't modified. All other registers are clobbered.
static void EmitMathExp(MacroAssembler* masm, DoubleRegister input,
DoubleRegister result, DoubleRegister double_scratch1,
DoubleRegister double_scratch2, Register temp1,
Register temp2, Register temp3);
private:
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
} // namespace internal
} // namespace v8
#endif // V8_S390_CODEGEN_S390_H_

src/s390/constants-s390.cc (new file, 48 lines)

@@ -0,0 +1,48 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_S390
#include "src/s390/constants-s390.h"
namespace v8 {
namespace internal {
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumRegisters] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "fp", "ip", "r13", "r14", "sp"};
const char* DoubleRegisters::names_[kNumDoubleRegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
"f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15"};
int DoubleRegisters::Number(const char* name) {
for (int i = 0; i < kNumDoubleRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// No register with the requested name found.
return kNoRegister;
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// No register with the requested name found.
return kNoRegister;
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
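A small usage sketch for the lookups above (the return values follow
directly from the names_ table; this snippet is illustrative and not part
of the diff):

int fp_code = Registers::Number("fp");   // 11: r11 is printed as "fp"
int sp_code = Registers::Number("sp");   // 15
int bad     = Registers::Number("r11");  // kNoRegister: canonical name is "fp"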

src/s390/constants-s390.h (new file, 1561 lines)

File diff suppressed because it is too large.

src/s390/cpu-s390.cc (new file, 25 lines)

@@ -0,0 +1,25 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// CPU specific code for s390 independent of OS goes here.
#include "src/v8.h"
#if V8_TARGET_ARCH_S390
#include "src/assembler.h"
namespace v8 {
namespace internal {
void CpuFeatures::FlushICache(void* buffer, size_t size) {
// Given the strong memory model on z/Architecture, and the single
// thread nature of V8 and JavaScript, instruction cache flushing
// is not necessary. The architecture guarantees that if a core
// patches its own instruction cache, the updated instructions will be
// reflected automatically.
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390

src/s390/deoptimizer-s390.cc (new file, 336 lines)

@@ -0,0 +1,336 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/deoptimizer.h"
#include "src/codegen.h"
#include "src/full-codegen/full-codegen.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
// LAY + LGHI/LHI + BRCL
const int Deoptimizer::table_entry_size_ = 16;
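// Hedged size breakdown (standard z/Architecture encodings): LAY is 6
// bytes and LGHI/LHI is 4, leaving 6 for BRCL, which totals the 16 bytes
// per entry assumed here.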
int Deoptimizer::patch_size() {
#if V8_TARGET_ARCH_S390X
const int kCallInstructionSize = 16;
#else
const int kCallInstructionSize = 10;
#endif
return kCallInstructionSize;
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Empty because there is no need for relocation information for the code
// patching in Deoptimizer::PatchCodeForDeoptimization below.
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
if (FLAG_zap_code_space) {
// Fail hard and early if we enter this code object again.
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(isolate, pointer, 2);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
2);
osr_patcher.masm()->bkpt(0);
}
}
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so we don't have to have a predictable size.
int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(
deopt_entry, kRelocInfo_NONEPTR);
DCHECK(call_size_in_bytes <= patch_size());
CodePatcher patcher(isolate, call_address, call_size_in_bytes);
patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
}
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(r2.code(), params);
output_frame->SetRegister(r3.code(), handler);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on S390 in the input frame.
return false;
}
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// Save all the registers onto the stack
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
// Save all double registers before messing with them.
__ lay(sp, MemOperand(sp, -kDoubleRegsSize));
const RegisterConfiguration* config =
RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int offset = code * kDoubleSize;
__ StoreDouble(dreg, MemOperand(sp, offset));
}
// Push all GPRs onto the stack
__ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize));
__ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
__ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ StoreP(fp, MemOperand(ip));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
// Get the bailout id from the stack.
__ LoadP(r4, MemOperand(sp, kSavedRegistersAreaSize));
// Cleanse the return address for 31-bit mode.
__ CleanseP(r14);
// Get the address of the location in the code object (r5)(return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r6.
__ LoadRR(r5, r14);
__ la(r6, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
__ SubP(r6, fp, r6);
// Allocate a new deoptimizer object.
// Pass six arguments in r2 to r7.
__ PrepareCallCFunction(6, r7);
__ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LoadImmP(r3, Operand(type())); // bailout type,
// r4: bailout id already loaded.
// r5: code address or 0 already loaded.
// r6: Fp-to-sp delta.
// Parm6: isolate is passed on the stack.
__ mov(r7, Operand(ExternalReference::isolate_address(isolate())));
__ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
// Preserve "deoptimizer" object in register r2 and get the input
// frame descriptor pointer to r3 (deoptimizer->input_);
__ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// TODO(john.yan): optimize the following code by using the mvc instruction,
// e.g. mvc(MemOperand(r3, FrameDescription::registers_offset()),
//          MemOperand(sp), kNumberOfRegisters * kPointerSize).
DCHECK(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ LoadP(r4, MemOperand(sp, i * kPointerSize));
__ StoreP(r4, MemOperand(r3, offset));
}
int double_regs_offset = FrameDescription::double_registers_offset();
// Copy double registers to
// double_registers_[DoubleRegister::kNumRegisters]
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
// TODO(joransiu): MVC opportunity
__ LoadDouble(d0, MemOperand(sp, src_offset));
__ StoreDouble(d0, MemOperand(r3, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
__ la(sp, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
// Compute a pointer to the unwinding limit in register r4; that is
// the first stack slot not part of the input frame.
__ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
__ AddP(r4, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ b(&pop_loop_header, Label::kNear);
__ bind(&pop_loop);
__ pop(r6);
__ StoreP(r6, MemOperand(r5, 0));
__ la(r5, MemOperand(r5, kPointerSize));
__ bind(&pop_loop_header);
__ CmpP(r4, sp);
__ bne(&pop_loop);
// Compute the output frame in the deoptimizer.
__ push(r2); // Preserve deoptimizer object across call.
// r2: deoptimizer object; r3: scratch.
__ PrepareCallCFunction(1, r3);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ pop(r2); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r6 = current "FrameDescription** output_",
// r3 = one past the last FrameDescription**.
__ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
__ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
__ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
__ AddP(r3, r6, r3);
__ b(&outer_loop_header, Label::kNear);
__ bind(&outer_push_loop);
// Inner loop state: r4 = current FrameDescription*, r5 = loop index.
__ LoadP(r4, MemOperand(r6, 0)); // output_[ix]
__ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
__ b(&inner_loop_header, Label::kNear);
__ bind(&inner_push_loop);
__ AddP(r5, Operand(-sizeof(intptr_t)));
__ AddP(r8, r4, r5);
__ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
__ push(r8);
__ bind(&inner_loop_header);
__ CmpP(r5, Operand::Zero());
__ bne(&inner_push_loop); // test for gt?
__ AddP(r6, r6, Operand(kPointerSize));
__ bind(&outer_loop_header);
__ CmpP(r6, r3);
__ blt(&outer_push_loop);
__ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ ld(dreg, MemOperand(r3, src_offset));
}
// Push state, pc, and continuation from the last output frame.
__ LoadP(r8, MemOperand(r4, FrameDescription::state_offset()));
__ push(r8);
__ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
__ push(r8);
__ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
__ push(r8);
// Restore the registers from the last output frame.
__ LoadRR(r1, r4);
for (int i = kNumberOfRegisters - 1; i > 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ LoadP(ToRegister(i), MemOperand(r1, offset));
}
}
__ InitializeRootRegister();
__ pop(ip); // get continuation, leave pc on stack
__ pop(r14);
__ Jump(ip);
__ stop("Unreachable.");
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Create a sequence of deoptimization entries. Note that any
// registers may be still live.
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ lay(sp, MemOperand(sp, -kPointerSize));
__ LoadImmP(ip, Operand(i));
__ b(&done);
int end = masm()->pc_offset();
USE(end);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
__ StoreP(ip, MemOperand(sp));
}
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No out-of-line constant pool support.
UNREACHABLE();
}
#undef __
} // namespace internal
} // namespace v8
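A C-level paraphrase of the frame-replacement loops in Generate() above may
help when reading the register-level version (Push and frame_content are
hypothetical helpers; the generated code walks raw offsets via r4/r5/r6/r8):

for (int ix = 0; ix < output_count_; ++ix) {             // outer: r6 walks output_
  FrameDescription* frame = output_[ix];
  for (intptr_t off = frame->frame_size(); off > 0; ) {  // inner: r5 counts down
    off -= sizeof(intptr_t);
    Push(frame->frame_content(off));                     // __ push(r8)
  }
}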

src/s390/disasm-s390.cc (new file, 1396 lines)

File diff suppressed because it is too large.

src/s390/frames-s390.cc (new file, 35 lines)

@@ -0,0 +1,35 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_S390
#include "src/frames.h"
#include "src/assembler.h"
#include "src/macro-assembler.h"
#include "src/s390/assembler-s390-inl.h"
#include "src/s390/assembler-s390.h"
#include "src/s390/frames-s390.h"
#include "src/s390/macro-assembler-s390.h"
namespace v8 {
namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
return no_reg;
}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
UNREACHABLE();
return no_reg;
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390

src/s390/frames-s390.h (new file, 190 lines)

@@ -0,0 +1,190 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_S390_FRAMES_S390_H_
#define V8_S390_FRAMES_S390_H_
namespace v8 {
namespace internal {
// Register list in load/store instructions
// Note that the bit values must match those used in actual instruction encoding
const int kNumRegs = 16;
// Caller-saved/arguments registers
const RegList kJSCallerSaved = 1 << 1 | 1 << 2 | // r2 a1
1 << 3 | // r3 a2
1 << 4 | // r4 a3
1 << 5; // r5 a4
const int kNumJSCallerSaved = 5;
// Return the code of the n-th caller-saved register available to JavaScript
// e.g. JSCallerSavedCode(0) returns r1.code() == 1
int JSCallerSavedCode(int n);
// Callee-saved registers preserved when switching from C to JavaScript
const RegList kCalleeSaved =
1 << 6 | // r6 (argument passing in CEntryStub)
// (HandleScope logic in MacroAssembler)
1 << 7 | // r7 (argument passing in CEntryStub)
// (HandleScope logic in MacroAssembler)
1 << 8 | // r8 (argument passing in CEntryStub)
// (HandleScope logic in MacroAssembler)
1 << 9 | // r9 (HandleScope logic in MacroAssembler)
1 << 10 | // r10 (Roots register in Javascript)
1 << 11 | // r11 (fp in Javascript)
1 << 12 | // r12 (ip in Javascript)
1 << 13; // r13 (cp in Javascript)
// 1 << 15; // r15 (sp in Javascript)
const int kNumCalleeSaved = 8;
#ifdef V8_TARGET_ARCH_S390X
const RegList kCallerSavedDoubles = 1 << 0 | // d0
1 << 1 | // d1
1 << 2 | // d2
1 << 3 | // d3
1 << 4 | // d4
1 << 5 | // d5
1 << 6 | // d6
1 << 7; // d7
const int kNumCallerSavedDoubles = 8;
const RegList kCalleeSavedDoubles = 1 << 8 | // d8
1 << 9 | // d9
1 << 10 | // d10
1 << 11 | // d11
1 << 12 | // d12
1 << 13 | // d13
1 << 14 | // d14
1 << 15; // d15
const int kNumCalleeSavedDoubles = 8;
#else
const RegList kCallerSavedDoubles = 1 << 14 | // d14
1 << 15 | // d15
1 << 0 | // d0
1 << 1 | // d1
1 << 2 | // d2
1 << 3 | // d3
1 << 5 | // d5
1 << 7 | // d7
1 << 8 | // d8
1 << 9 | // d9
1 << 10 | // d10
1 << 11 | // d11
1 << 12 | // d12
1 << 13; // d13
const int kNumCallerSavedDoubles = 14;
const RegList kCalleeSavedDoubles = 1 << 4 | // d4
1 << 6; // d6
const int kNumCalleeSavedDoubles = 2;
#endif
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
const int kNumSafepointRegisters = 16;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
// const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
// const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
// The following constants describe the stack frame linkage area as
// defined by the ABI.
#if V8_TARGET_ARCH_S390X
// [0] Back Chain
// [1] Reserved for compiler use
// [2] GPR 2
// [3] GPR 3
// ...
// [15] GPR 15
// [16] FPR 0
// [17] FPR 2
// [18] FPR 4
// [19] FPR 6
const int kNumRequiredStackFrameSlots = 20;
const int kStackFrameRASlot = 14;
const int kStackFrameSPSlot = 15;
const int kStackFrameExtraParamSlot = 20;
#else
// [0] Back Chain
// [1] Reserved for compiler use
// [2] GPR 2
// [3] GPR 3
// ...
// [15] GPR 15
// [16..17] FPR 0
// [18..19] FPR 2
// [20..21] FPR 4
// [22..23] FPR 6
const int kNumRequiredStackFrameSlots = 24;
const int kStackFrameRASlot = 14;
const int kStackFrameSPSlot = 15;
const int kStackFrameExtraParamSlot = 24;
#endif
// The zLinux ABI requires caller frames to include sufficient space for the
// callee-preserved register save area.
#if V8_TARGET_ARCH_S390X
const int kCalleeRegisterSaveAreaSize = 160;
#elif V8_TARGET_ARCH_S390
const int kCalleeRegisterSaveAreaSize = 96;
#else
const int kCalleeRegisterSaveAreaSize = 0;
#endif
// ----------------------------------------------------
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
public:
static const int kFrameSize = 2 * kPointerSize;
static const int kConstantPoolOffset = 0; // Not used.
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = 0 * kPointerSize;
// The calling JS function is below FP.
static const int kCallerPCOffset = 1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = 2 * kPointerSize;
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
};
} // namespace internal
} // namespace v8
#endif // V8_S390_FRAMES_S390_H_
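Since each RegList above is a plain bit mask over 16 registers, the
hand-maintained kNum* totals can be cross-checked with a population count.
A hedged sketch (CheckRegListCounts is a hypothetical helper, not part of
the diff):

#include <bitset>
#include <cassert>

void CheckRegListCounts() {
  assert(std::bitset<16>(kJSCallerSaved).count() == kNumJSCallerSaved);
  assert(std::bitset<16>(kCalleeSaved).count() == kNumCalleeSaved);
  assert(std::bitset<16>(kCallerSavedDoubles).count() == kNumCallerSavedDoubles);
  assert(std::bitset<16>(kCalleeSavedDoubles).count() == kNumCalleeSavedDoubles);
}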

src/s390/interface-descriptors-s390.cc (new file, 403 lines)

@@ -0,0 +1,403 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_S390
#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
const Register LoadDescriptor::ReceiverRegister() { return r3; }
const Register LoadDescriptor::NameRegister() { return r4; }
const Register LoadDescriptor::SlotRegister() { return r2; }
const Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
const Register StoreDescriptor::ReceiverRegister() { return r3; }
const Register StoreDescriptor::NameRegister() { return r4; }
const Register StoreDescriptor::ValueRegister() { return r2; }
const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r6; }
const Register VectorStoreICDescriptor::VectorRegister() { return r5; }
const Register VectorStoreTransitionDescriptor::SlotRegister() { return r6; }
const Register VectorStoreTransitionDescriptor::VectorRegister() { return r5; }
const Register VectorStoreTransitionDescriptor::MapRegister() { return r7; }
const Register StoreTransitionDescriptor::MapRegister() { return r5; }
const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r4; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r4; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r2; }
const Register InstanceOfDescriptor::LeftRegister() { return r3; }
const Register InstanceOfDescriptor::RightRegister() { return r2; }
const Register StringCompareDescriptor::LeftRegister() { return r3; }
const Register StringCompareDescriptor::RightRegister() { return r2; }
const Register ApiGetterDescriptor::function_address() { return r4; }
const Register MathPowTaggedDescriptor::exponent() { return r4; }
const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewRestParameterDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
// static
const Register ToLengthDescriptor::ReceiverRegister() { return r2; }
// static
const Register ToStringDescriptor::ReceiverRegister() { return r2; }
// static
const Register ToNameDescriptor::ReceiverRegister() { return r2; }
// static
const Register ToObjectDescriptor::ReceiverRegister() { return r2; }
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneRegExpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r4, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r4, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r4, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r5, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r5, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
// r3 : the function to call
// r4 : feedback vector
// r5 : slot in feedback vector (Smi, for RecordCallTarget)
// r6 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {r2, r3, r6, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
// r3 : the target to call
Register registers[] = {r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
// r3 : the target to call
// r5 : the new target
// r4 : allocation site or undefined
Register registers[] = {r3, r5, r2, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
// r3 : the target to call
// r5 : the new target
Register registers[] = {r3, r5, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// r2 -- number of arguments
// r3 -- function
// r4 -- allocation site with elements kind
Register registers[] = {r3, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {r3, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InternalArrayConstructorConstantArgCountDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
// r2 -- number of arguments
// r3 -- constructor function
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareNilDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToBooleanDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void KeyedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r4, // key
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void NamedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r4, // name
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallHandlerDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // receiver
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // JSFunction
r5, // the new target
r2, // actual number of arguments
r4, // expected number of arguments
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // callee
r6, // call_data
r4, // holder
r3, // api_function_address
r5, // actual number of arguments
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiAccessorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // callee
r6, // call_data
r4, // holder
r3, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // argument count (not including receiver)
r4, // address of first argument
r3 // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // argument count (not including receiver)
r5, // new target
r3, // constructor to call
r4 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // argument count (argc)
r4, // address of first argument (argv)
r3 // the runtime function to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
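Each InitializePlatformSpecific above pins the register convention a stub's
caller must honor. An illustrative caller-side setup for the ArgumentAdaptor
case (register roles copied from the table above; the source operands
function, new_target, actual_argc, and expected_argc are assumptions):

__ LoadRR(r3, function);                  // JSFunction
__ LoadRR(r5, new_target);                // the new target
__ LoadImmP(r2, Operand(actual_argc));    // actual number of arguments
__ LoadImmP(r4, Operand(expected_argc));  // expected number of arguments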

File diff suppressed because it is too large.

File diff suppressed because it is too large.

src/s390/simulator-s390.cc (new file, 5012 lines)

File diff suppressed because it is too large.

src/s390/simulator-s390.h (new file, 473 lines)

@@ -0,0 +1,473 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Declares a Simulator for S390 instructions if we are not generating a native
// S390 binary. This Simulator allows us to run and debug S390 code generation
// on regular desktop machines.
// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
// which will start execution in the Simulator or forwards to the real entry
// on a S390 hardware platform.
#ifndef V8_S390_SIMULATOR_S390_H_
#define V8_S390_SIMULATOR_S390_H_
#include "src/allocation.h"
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native s390 platform.
namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*s390_regexp_matcher)(String*, int, const byte*, const byte*, int*,
int, Address, int, void*, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type s390_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
(FUNCTION_CAST<s390_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
NULL, p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on s390 uses the C stack, we
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
USE(isolate);
return c_limit;
}
static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
uintptr_t try_catch_address) {
USE(isolate);
return try_catch_address;
}
static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
USE(isolate);
}
};
} // namespace internal
} // namespace v8
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/assembler.h"
#include "src/hashmap.h"
#include "src/s390/constants-s390.h"
namespace v8 {
namespace internal {
class CachePage {
public:
static const int LINE_VALID = 0;
static const int LINE_INVALID = 1;
static const int kPageShift = 12;
static const int kPageSize = 1 << kPageShift;
static const int kPageMask = kPageSize - 1;
static const int kLineShift = 2; // The cache line is only 4 bytes right now.
static const int kLineLength = 1 << kLineShift;
static const int kLineMask = kLineLength - 1;
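// Example: with 4-byte lines, an instruction at page offset 0x123 maps to
// validity byte 0x123 >> kLineShift == 0x48.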
CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
char* ValidityByte(int offset) {
return &validity_map_[offset >> kLineShift];
}
char* CachedData(int offset) { return &data_[offset]; }
private:
char data_[kPageSize]; // The cached data.
static const int kValidityMapSize = kPageSize >> kLineShift;
char validity_map_[kValidityMapSize]; // One byte per line.
};
class Simulator {
public:
friend class S390Debugger;
enum Register {
no_reg = -1,
r0 = 0,
r1 = 1,
r2 = 2,
r3 = 3,
r4 = 4,
r5 = 5,
r6 = 6,
r7 = 7,
r8 = 8,
r9 = 9,
r10 = 10,
r11 = 11,
r12 = 12,
r13 = 13,
r14 = 14,
r15 = 15,
fp = r11,
ip = r12,
cp = r13,
ra = r14,
sp = r15, // name aliases
kNumGPRs = 16,
d0 = 0,
d1,
d2,
d3,
d4,
d5,
d6,
d7,
d8,
d9,
d10,
d11,
d12,
d13,
d14,
d15,
kNumFPRs = 16
};
explicit Simulator(Isolate* isolate);
~Simulator();
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state.
void set_register(int reg, uint64_t value);
uint64_t get_register(int reg) const;
template <typename T>
T get_low_register(int reg) const;
template <typename T>
T get_high_register(int reg) const;
void set_low_register(int reg, uint32_t value);
void set_high_register(int reg, uint32_t value);
double get_double_from_register_pair(int reg);
void set_d_register_from_double(int dreg, const double dbl) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
*bit_cast<double*>(&fp_registers_[dreg]) = dbl;
}
double get_double_from_d_register(int dreg) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
return *bit_cast<double*>(&fp_registers_[dreg]);
}
void set_d_register(int dreg, int64_t value) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
fp_registers_[dreg] = value;
}
int64_t get_d_register(int dreg) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
return fp_registers_[dreg];
}
void set_d_register_from_float32(int dreg, const float f) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
int32_t f_int = *bit_cast<int32_t*>(&f);
int64_t finalval = static_cast<int64_t>(f_int) << 32;
set_d_register(dreg, finalval);
}
float get_float32_from_d_register(int dreg) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
int64_t regval = get_d_register(dreg) >> 32;
int32_t regval32 = static_cast<int32_t>(regval);
return *bit_cast<float*>(&regval32);
}
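// Worked example of the layout above: storing 1.0f (bits 0x3F800000) leaves
// the d-register holding 0x3F80000000000000, and the getter shifts the high
// word back down before reinterpreting it as a float.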
// Special case of set_register and get_register to access the raw PC value.
void set_pc(intptr_t value);
intptr_t get_pc() const;
Address get_sp() const {
return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
}
// Accessor to the internal simulator stack area.
uintptr_t StackLimit(uintptr_t c_limit) const;
// Executes S390 instructions until the PC reaches end_sim_pc.
void Execute();
// Call on program start.
static void Initialize(Isolate* isolate);
static void TearDown(HashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
intptr_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
void CallFP(byte* entry, double d0, double d1);
int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
double CallFPReturnsDouble(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
// Pop an address from the JS stack.
uintptr_t PopAddress();
// Debugger input.
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
static void FlushICache(v8::internal::HashMap* i_cache, void* start,
size_t size);
// Returns true if the pc register contains one of the 'special_values'
// defined below (bad_lr, end_sim_pc).
bool has_bad_pc() const;
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
// without being properly setup.
bad_lr = -1,
// A pc value used to signal the simulator to stop execution. Generally
// the lr is set to this value on transition from native C code to
// simulated execution, so that the simulator can "return" to the native
// C code.
end_sim_pc = -2
};
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
// Helper functions to set the conditional flags in the architecture state.
bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
bool BorrowFrom(int32_t left, int32_t right);
bool OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
bool addition);
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instruction* instr, bool* carry_out);
void ProcessPUW(Instruction* instr, int num_regs, int operand_size,
intptr_t* start_address, intptr_t* end_address);
void HandleRList(Instruction* instr, bool load);
void HandleVList(Instruction* inst);
void SoftwareInterrupt(Instruction* instr);
// Stop helper functions.
inline bool isStopInstruction(Instruction* instr);
inline bool isWatchedStop(uint32_t bkpt_code);
inline bool isEnabledStop(uint32_t bkpt_code);
inline void EnableStop(uint32_t bkpt_code);
inline void DisableStop(uint32_t bkpt_code);
inline void IncreaseStopCounter(uint32_t bkpt_code);
void PrintStopInfo(uint32_t code);
// Byte Reverse
inline int16_t ByteReverse(int16_t hword);
inline int32_t ByteReverse(int32_t word);
// Read and write memory.
inline uint8_t ReadBU(intptr_t addr);
inline int8_t ReadB(intptr_t addr);
inline void WriteB(intptr_t addr, uint8_t value);
inline void WriteB(intptr_t addr, int8_t value);
inline uint16_t ReadHU(intptr_t addr, Instruction* instr);
inline int16_t ReadH(intptr_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value.
inline void WriteH(intptr_t addr, uint16_t value, Instruction* instr);
inline void WriteH(intptr_t addr, int16_t value, Instruction* instr);
inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
inline int32_t ReadW(intptr_t addr, Instruction* instr);
inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
inline int64_t ReadDW(intptr_t addr);
inline double ReadDouble(intptr_t addr);
inline void WriteDW(intptr_t addr, int64_t value);
// S390
void Trace(Instruction* instr);
bool DecodeTwoByte(Instruction* instr);
bool DecodeFourByte(Instruction* instr);
bool DecodeFourByteArithmetic(Instruction* instr);
bool DecodeFourByteFloatingPoint(Instruction* instr);
void DecodeFourByteFloatingPointIntConversion(Instruction* instr);
void DecodeFourByteFloatingPointRound(Instruction* instr);
bool DecodeSixByte(Instruction* instr);
bool DecodeSixByteArithmetic(Instruction* instr);
bool S390InstructionDecode(Instruction* instr);
template <typename T>
void SetS390ConditionCode(T lhs, T rhs) {
condition_reg_ = 0;
if (lhs == rhs) {
condition_reg_ |= CC_EQ;
} else if (lhs < rhs) {
condition_reg_ |= CC_LT;
} else if (lhs > rhs) {
condition_reg_ |= CC_GT;
}
// We only get here for floating point
// comparisons where the values are unordered,
// i.e. NaN.
if (condition_reg_ == 0) condition_reg_ = unordered;
}
bool isNaN(double value) { return (value != value); }
// Set the condition code for bitwise operations
// CC0 is set if value == 0.
// CC1 is set if value != 0.
// CC2/CC3 are not set.
template <typename T>
void SetS390BitWiseConditionCode(T value) {
condition_reg_ = 0;
if (value == 0)
condition_reg_ |= CC_EQ;
else
condition_reg_ |= CC_LT;
}
void SetS390OverflowCode(bool isOF) {
if (isOF) condition_reg_ = CC_OF;
}
bool TestConditionCode(Condition mask) {
// Check for unconditional branch
if (mask == 0xf) return true;
return (condition_reg_ & mask) != 0;
}
// Executes one instruction.
void ExecuteInstruction(Instruction* instr, bool auto_incr_pc = true);
// ICache.
static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
int size);
static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
// Runtime call support.
static void* RedirectExternalReference(
Isolate* isolate, void* external_function,
v8::internal::ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, intptr_t* z);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
void CallInternal(byte* entry, int reg_arg_count = 3);
// Architecture state.
// On z9 and higher, and on supported Linux on z Systems platforms, all
// registers are 64-bit, even in 31-bit mode.
uint64_t registers_[kNumGPRs];
int64_t fp_registers_[kNumFPRs];
// Condition Code register. In S390, the last 4 bits are used.
int32_t condition_reg_;
// Special register to track PC.
intptr_t special_reg_pc_;
// Simulator support.
char* stack_;
static const size_t stack_protection_size_ = 256 * kPointerSize;
bool pc_modified_;
int64_t icount_;
// Debugger input.
char* last_debugger_input_;
// Icache simulation
v8::internal::HashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
v8::internal::Isolate* isolate_;
// A stop is watched if its code is less than kNumOfWatchedStops.
// Only watched stops support enabling/disabling and the counter feature.
static const uint32_t kNumOfWatchedStops = 256;
// Breakpoint is disabled if bit 31 is set.
static const uint32_t kStopDisabledBit = 1 << 31;
// A stop is enabled, meaning the simulator will stop when meeting the
// instruction, if bit 31 of watched_stops_[code].count is unset.
// The value watched_stops_[code].count & ~(1 << 31) indicates how many times
// the breakpoint was hit or gone through.
struct StopCountAndDesc {
uint32_t count;
char* desc;
};
StopCountAndDesc watched_stops_[kNumOfWatchedStops];
void DebugStart();
};
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
(intptr_t)p3, (intptr_t)p4))
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
Simulator::current(isolate)->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, \
(intptr_t)p2, (intptr_t)p3, (intptr_t)p4, \
(intptr_t)p5, (intptr_t)p6, (intptr_t)p7, \
(intptr_t)NULL, (intptr_t)p8)
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. The JS-based limit normally points near the end of
// the simulator stack. When the C-based limit is exhausted we reflect that by
// lowering the JS-based limit as well, to make stack checks trigger.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
return Simulator::current(isolate)->StackLimit(c_limit);
}
static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
uintptr_t try_catch_address) {
Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
Simulator::current(isolate)->PopAddress();
}
};
} // namespace internal
} // namespace v8
#endif // !defined(USE_SIMULATOR)
#endif // V8_S390_SIMULATOR_S390_H_
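A standalone mirror of SetS390ConditionCode above, with one-hot CC bit
values assumed from constants-s390.h (CC_EQ=0x08, CC_LT=0x04, CC_GT=0x02,
CC_OF=0x01, with 'unordered' aliasing CC_OF); a sketch for illustration,
not part of the diff:

#include <cassert>
#include <limits>

enum : int { kCcEq = 0x08, kCcLt = 0x04, kCcGt = 0x02, kCcOf = 0x01 };

template <typename T>
int ConditionCodeFor(T lhs, T rhs) {
  if (lhs == rhs) return kCcEq;
  if (lhs < rhs) return kCcLt;
  if (lhs > rhs) return kCcGt;
  return kCcOf;  // only reachable for unordered (NaN) FP compares
}

int main() {
  assert(ConditionCodeFor(1, 1) == kCcEq);
  assert(ConditionCodeFor(1.0, 2.0) == kCcLt);
  double nan = std::numeric_limits<double>::quiet_NaN();
  assert(ConditionCodeFor(nan, nan) == kCcOf);
  return 0;
}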

@@ -1545,6 +1545,30 @@
'../../src/regexp/ppc/regexp-macro-assembler-ppc.h',
],
}],
['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
'sources': [ ### gcmole(arch:s390) ###
'../../src/s390/assembler-s390-inl.h',
'../../src/s390/assembler-s390.cc',
'../../src/s390/assembler-s390.h',
'../../src/s390/builtins-s390.cc',
'../../src/s390/code-stubs-s390.cc',
'../../src/s390/code-stubs-s390.h',
'../../src/s390/codegen-s390.cc',
'../../src/s390/codegen-s390.h',
'../../src/s390/constants-s390.h',
'../../src/s390/constants-s390.cc',
'../../src/s390/cpu-s390.cc',
'../../src/s390/deoptimizer-s390.cc',
'../../src/s390/disasm-s390.cc',
'../../src/s390/frames-s390.cc',
'../../src/s390/frames-s390.h',
'../../src/s390/interface-descriptors-s390.cc',
'../../src/s390/macro-assembler-s390.cc',
'../../src/s390/macro-assembler-s390.h',
'../../src/s390/simulator-s390.cc',
'../../src/s390/simulator-s390.h',
],
}],
['OS=="win"', {
'variables': {
'gyp_generators': '<!(echo $GYP_GENERATORS)',