Contribution of PowerPC port (continuation of 422063005) - currency

Contribution of PowerPC port (continuation of 422063005, 817143002,
866843003, and 901083004). This patch updates the ppc directories to
bring them current with changes in common code, removes the
optimization that used the out-of-line (ool) constant pool, and
excludes tests that do not pass under the ppc simulator given a 240s
timeout.

Subsequent patches will cover:
   - remaining optimizations for PPC
   - remaining AIX changes not resolved by the 4.8 compiler (4.8 only
     recently became available for AIX)
   - incremental updates required to the ppc directories due to
     platform-specific changes made in Google repos while we complete
     the above steps.

	modified:   src/compiler/ppc/code-generator-ppc.cc
	modified:   src/ic/ppc/handler-compiler-ppc.cc
	modified:   src/ppc/assembler-ppc-inl.h
	modified:   src/ppc/assembler-ppc.cc
	modified:   src/ppc/assembler-ppc.h
	modified:   src/ppc/builtins-ppc.cc
	modified:   src/ppc/code-stubs-ppc.cc
	modified:   src/ppc/debug-ppc.cc
	modified:   src/ppc/deoptimizer-ppc.cc
	modified:   src/ppc/frames-ppc.cc
	modified:   src/ppc/frames-ppc.h
	modified:   src/ppc/full-codegen-ppc.cc
	modified:   src/ppc/lithium-codegen-ppc.cc
	modified:   src/ppc/lithium-ppc.cc
	modified:   src/ppc/lithium-ppc.h
	modified:   src/ppc/macro-assembler-ppc.cc
	modified:   src/ppc/macro-assembler-ppc.h
	modified:   test/cctest/cctest.status
	modified:   test/mjsunit/mjsunit.status

R=danno@chromium.org, svenpanne@chromium.org

BUG=

Review URL: https://codereview.chromium.org/965823002

Cr-Commit-Position: refs/heads/master@{#26951}
Author:    michael_dawson, 2015-03-02 23:04:41 -08:00 (committed by Commit bot)
Commit:    a3465838bc (parent 11d97bf531)
19 changed files with 206 additions and 816 deletions

src/compiler/ppc/code-generator-ppc.cc

@ -52,7 +52,7 @@ class PPCOperandConverter FINAL : public InstructionOperandConverter {
return false;
}
Operand InputImmediate(int index) {
Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
case Constant::kInt32:
@ -76,8 +76,8 @@ class PPCOperandConverter FINAL : public InstructionOperandConverter {
return Operand::Zero();
}
MemOperand MemoryOperand(AddressingMode* mode, int* first_index) {
const int index = *first_index;
MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
const size_t index = *first_index;
*mode = AddressingModeField::decode(instr_->opcode());
switch (*mode) {
case kMode_None:
@ -93,7 +93,7 @@ class PPCOperandConverter FINAL : public InstructionOperandConverter {
return MemOperand(r0);
}
MemOperand MemoryOperand(AddressingMode* mode, int first_index = 0) {
MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
return MemoryOperand(mode, &first_index);
}
@ -109,7 +109,7 @@ class PPCOperandConverter FINAL : public InstructionOperandConverter {
};
static inline bool HasRegisterInput(Instruction* instr, int index) {
static inline bool HasRegisterInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsRegister();
}
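
The int-to-size_t changes above track the common-code InstructionOperandConverter interface. A standalone sketch (hypothetical types, not patch code) of the pitfall the signed indices posed:

// InputCount() returns size_t in the common code; an int index draws
// -Wsign-compare and could truncate on LP64 targets such as PPC64.
#include <cstddef>
#include <vector>

struct FakeInstruction {
  std::vector<int> inputs;
  size_t InputCount() const { return inputs.size(); }
  int InputAt(size_t index) const { return inputs[index]; }
};

int SumInputs(const FakeInstruction& instr) {
  int sum = 0;
  for (size_t index = 0; index < instr.InputCount(); index++) {
    sum += instr.InputAt(index);  // index type matches the interface
  }
  return sum;
}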
@ -369,7 +369,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrx) \
do { \
int index = 0; \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
DoubleRegister value = i.InputDoubleRegister(index); \
@ -384,7 +384,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx) \
do { \
int index = 0; \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
Register value = i.InputRegister(index); \
@ -401,8 +401,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \
do { \
DoubleRegister result = i.OutputDoubleRegister(); \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, 0); \
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
__ extsw(offset, offset); \
@ -427,8 +428,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
do { \
Register result = i.OutputRegister(); \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, 0); \
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
__ extsw(offset, offset); \
@ -453,8 +455,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr, asm_instrx) \
do { \
Label done; \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, 0); \
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
__ extsw(offset, offset); \
@ -479,8 +482,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
do { \
Label done; \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, 0); \
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
__ extsw(offset, offset); \
@ -1087,8 +1091,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
PPCOperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ Cmpi(input, Operand(i.InputInt32(static_cast<int>(index + 0))), r0);
__ beq(GetLabel(i.InputRpo(static_cast<int>(index + 1))));
__ Cmpi(input, Operand(i.InputInt32(index + 0)), r0);
__ beq(GetLabel(i.InputRpo(index + 1)));
}
AssembleArchJump(i.InputRpo(1));
}
@ -1127,16 +1131,8 @@ void CodeGenerator::AssemblePrologue() {
int register_save_area_size = 0;
RegList frame_saves = fp.bit();
__ mflr(r0);
#if V8_OOL_CONSTANT_POOL
__ Push(r0, fp, kConstantPoolRegister);
// Adjust FP to point to saved FP.
__ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
register_save_area_size += kPointerSize;
frame_saves |= kConstantPoolRegister.bit();
#else
__ Push(r0, fp);
__ mr(fp, sp);
#endif
// Save callee-saved registers.
const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
@ -1187,9 +1183,6 @@ void CodeGenerator::AssembleReturn() {
}
// Restore registers.
RegList frame_saves = fp.bit();
#if V8_OOL_CONSTANT_POOL
frame_saves |= kConstantPoolRegister.bit();
#endif
const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
if (saves != 0) {
__ MultiPop(saves);
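
For orientation, a sketch (mine, not patch text) of the fixed frame the simplified prologue now builds:

// After `__ Push(r0, fp); __ mr(fp, sp);` (PPC64 assumed, kPointerSize == 8):
//   [fp + kPointerSize]  saved lr (pushed via r0)
//   [fp + 0]             caller's fp          <- fp aliases sp here
// The kConstantPoolRegister slot is gone, so register_save_area_size no
// longer grows by kPointerSize and no subi() is needed to skew fp past it.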

src/ic/ppc/handler-compiler-ppc.cc

@ -25,7 +25,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
// -- lr : return address
// -----------------------------------
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
@ -62,7 +62,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// -- lr : return address
// -----------------------------------
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ push(value());
@ -617,7 +617,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
{
FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
__ Push(receiver(), holder_reg, this->name());
} else {
@ -669,11 +669,20 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name, int accessor_index) {
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // receiver
__ LoadSmiLiteral(ip, Smi::FromInt(accessor_index));
// If the callback cannot leak, then push the callback directly,
// otherwise wrap it in a weak cell.
if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
__ mov(ip, Operand(callback));
} else {
Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
__ mov(ip, Operand(cell));
}
__ push(ip);
__ mov(ip, Operand(name));
__ Push(ip, value());
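
The new branch encodes a GC-safety rule; a condensed reading using a hypothetical helper (the type and calls are the ones in the hunk):

// Callback data that is undefined or a Smi cannot keep other heap
// objects alive, so the callback may be embedded directly; anything
// else goes through a WeakCell so the GC can clear the reference.
static bool CanEmbedCallbackDirectly(
    Handle<ExecutableAccessorInfo> callback) {
  return callback->data()->IsUndefined() || callback->data()->IsSmi();
}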

src/ppc/assembler-ppc-inl.h

@ -69,14 +69,6 @@ Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
#if V8_OOL_CONSTANT_POOL
if (Assembler::IsConstantPoolLoadStart(pc_)) {
// We return the PC for ool constant pool since this function is used by the
// serializerer and expects the address to reside within the code object.
return reinterpret_cast<Address>(pc_);
}
#endif
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
@ -91,13 +83,8 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
#if V8_OOL_CONSTANT_POOL
return Assembler::target_constant_pool_address_at(pc_,
host_->constant_pool());
#else
UNREACHABLE();
return NULL;
#endif
}
@ -131,22 +118,12 @@ Address Assembler::target_address_from_return_address(Address pc) {
// mtlr ip
// blrl
// @ return address
#if V8_OOL_CONSTANT_POOL
if (IsConstantPoolLoadEnd(pc - 3 * kInstrSize)) {
return pc - (kMovInstructionsConstantPool + 2) * kInstrSize;
}
#endif
return pc - (kMovInstructionsNoConstantPool + 2) * kInstrSize;
return pc - (kMovInstructions + 2) * kInstrSize;
}
Address Assembler::return_address_from_call_start(Address pc) {
#if V8_OOL_CONSTANT_POOL
Address load_address = pc + (kMovInstructionsConstantPool - 1) * kInstrSize;
if (IsConstantPoolLoadEnd(load_address))
return pc + (kMovInstructionsConstantPool + 2) * kInstrSize;
#endif
return pc + (kMovInstructionsNoConstantPool + 2) * kInstrSize;
return pc + (kMovInstructions + 2) * kInstrSize;
}
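
Worked numbers for the two helpers above (my arithmetic from this patch's constants, kInstrSize == 4): the call sequence is the mov, then mtlr and blrl.

// PPC64: kMovInstructions == 5, so the return address is 28 bytes in.
static_assert((5 + 2) * 4 == 28, "PPC64 call sequence length");
// 32-bit PPC: kMovInstructions == 2, so it is 16 bytes in.
static_assert((2 + 2) * 4 == 16, "PPC32 call sequence length");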
@ -224,13 +201,8 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode write_barrier_mode,
}
#if V8_OOL_CONSTANT_POOL
static const int kNoCodeAgeInstructions = 7;
#else
static const int kNoCodeAgeInstructions = 6;
#endif
static const int kCodeAgingInstructions =
Assembler::kMovInstructionsNoConstantPool + 3;
static const int kCodeAgingInstructions = Assembler::kMovInstructions + 3;
static const int kNoCodeAgeSequenceInstructions =
((kNoCodeAgeInstructions >= kCodeAgingInstructions)
? kNoCodeAgeInstructions
@ -456,61 +428,12 @@ Address Assembler::target_address_at(Address pc,
(instr2 & kImm16Mask));
#endif
}
#if V8_OOL_CONSTANT_POOL
return Memory::Address_at(target_constant_pool_address_at(pc, constant_pool));
#else
DCHECK(false);
return (Address)0;
#endif
UNREACHABLE();
return NULL;
}
#if V8_OOL_CONSTANT_POOL
bool Assembler::IsConstantPoolLoadStart(Address pc) {
#if V8_TARGET_ARCH_PPC64
if (!IsLi(instr_at(pc))) return false;
pc += kInstrSize;
#endif
return GetRA(instr_at(pc)).is(kConstantPoolRegister);
}
bool Assembler::IsConstantPoolLoadEnd(Address pc) {
#if V8_TARGET_ARCH_PPC64
pc -= kInstrSize;
#endif
return IsConstantPoolLoadStart(pc);
}
int Assembler::GetConstantPoolOffset(Address pc) {
DCHECK(IsConstantPoolLoadStart(pc));
Instr instr = instr_at(pc);
int offset = SIGN_EXT_IMM16((instr & kImm16Mask));
return offset;
}
void Assembler::SetConstantPoolOffset(Address pc, int offset) {
DCHECK(IsConstantPoolLoadStart(pc));
DCHECK(is_int16(offset));
Instr instr = instr_at(pc);
instr &= ~kImm16Mask;
instr |= (offset & kImm16Mask);
instr_at_put(pc, instr);
}
Address Assembler::target_constant_pool_address_at(
Address pc, ConstantPoolArray* constant_pool) {
Address addr = reinterpret_cast<Address>(constant_pool);
DCHECK(addr);
addr += GetConstantPoolOffset(pc);
return addr;
}
#endif
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the mov instructions etc.
@ -575,14 +498,9 @@ void Assembler::set_target_address_at(Address pc,
CpuFeatures::FlushICache(p, 2 * kInstrSize);
}
#endif
} else {
#if V8_OOL_CONSTANT_POOL
Memory::Address_at(target_constant_pool_address_at(pc, constant_pool)) =
target;
#else
UNREACHABLE();
#endif
return;
}
UNREACHABLE();
}
}
} // namespace v8::internal

src/ppc/assembler-ppc.cc

@ -149,18 +149,14 @@ const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially
// coded. Being specially coded on PPC means that it is a lis/ori
// instruction sequence or is an out of line constant pool entry,
// and these are always the case inside code objects.
// instruction sequence, and these are always the case inside code
// objects.
return true;
}
bool RelocInfo::IsInConstantPool() {
#if V8_OOL_CONSTANT_POOL
return Assembler::IsConstantPoolLoadStart(pc_);
#else
return false;
#endif
}
@ -227,9 +223,6 @@ MemOperand::MemOperand(Register ra, Register rb) {
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
#if V8_OOL_CONSTANT_POOL
constant_pool_builder_(),
#endif
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
@ -250,6 +243,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
void Assembler::GetCode(CodeDesc* desc) {
reloc_info_writer.Finish();
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@ -486,8 +480,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
Register dst = Register::from_code(instr_at(pos + kInstrSize));
intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + target_pos);
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
kMovInstructionsNoConstantPool,
CodePatcher::DONT_FLUSH);
kMovInstructions, CodePatcher::DONT_FLUSH);
AddBoundInternalReferenceLoad(pos);
patcher.masm()->bitwise_mov(dst, addr);
break;
@ -1546,48 +1539,11 @@ void Assembler::RelocateInternalReference(Address pc, intptr_t delta,
}
int Assembler::instructions_required_for_mov(const Operand& x) const {
#if V8_OOL_CONSTANT_POOL || DEBUG
bool canOptimize =
!(x.must_output_reloc_info(this) || is_trampoline_pool_blocked());
#endif
#if V8_OOL_CONSTANT_POOL
if (use_constant_pool_for_mov(x, canOptimize)) {
// Current usage guarantees that all constant pool references can
// use the same sequence.
return kMovInstructionsConstantPool;
}
#endif
DCHECK(!canOptimize);
return kMovInstructionsNoConstantPool;
}
#if V8_OOL_CONSTANT_POOL
bool Assembler::use_constant_pool_for_mov(const Operand& x,
bool canOptimize) const {
if (!is_ool_constant_pool_available() || is_constant_pool_full()) {
// If there is no constant pool available, we must use a mov
// immediate sequence.
return false;
}
intptr_t value = x.immediate();
if (canOptimize && is_int16(value)) {
// Prefer a single-instruction load-immediate.
return false;
}
return true;
}
void Assembler::EnsureSpaceFor(int space_needed) {
if (buffer_space() <= (kGap + space_needed)) {
GrowBuffer();
GrowBuffer(space_needed);
}
}
#endif
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
@ -1612,30 +1568,9 @@ void Assembler::mov(Register dst, const Operand& src) {
bool canOptimize;
RelocInfo rinfo(pc_, src.rmode_, value, NULL);
if (src.must_output_reloc_info(this)) {
RecordRelocInfo(rinfo);
}
canOptimize = !(src.must_output_reloc_info(this) ||
(is_trampoline_pool_blocked() && !is_int16(value)));
#if V8_OOL_CONSTANT_POOL
if (use_constant_pool_for_mov(src, canOptimize)) {
DCHECK(is_ool_constant_pool_available());
ConstantPoolAddEntry(rinfo);
#if V8_TARGET_ARCH_PPC64
BlockTrampolinePoolScope block_trampoline_pool(this);
// We are forced to use 2 instruction sequence since the constant
// pool pointer is tagged.
li(dst, Operand::Zero());
ldx(dst, MemOperand(kConstantPoolRegister, dst));
#else
lwz(dst, MemOperand(kConstantPoolRegister, 0));
#endif
return;
}
#endif
if (canOptimize) {
if (is_int16(value)) {
li(dst, Operand(value));
@ -1672,6 +1607,9 @@ void Assembler::mov(Register dst, const Operand& src) {
}
DCHECK(!canOptimize);
if (src.must_output_reloc_info(this)) {
RecordRelocInfo(rinfo);
}
bitwise_mov(dst, value);
}
@ -1735,17 +1673,14 @@ void Assembler::mov_label_offset(Register dst, Label* label) {
}
// TODO(mbrandy): allow loading internal reference from constant pool
void Assembler::mov_label_addr(Register dst, Label* label) {
CheckBuffer();
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
int position = link(label);
if (label->is_bound()) {
// CheckBuffer() is called too frequently. This will pre-grow
// the buffer if needed to avoid spliting the relocation and instructions
#if V8_OOL_CONSTANT_POOL
EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);
#endif
// CheckBuffer() is called too frequently. This will pre-grow
// the buffer if needed to avoid splitting the relocation and instructions
EnsureSpaceFor(kMovInstructions * kInstrSize);
intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + position);
AddBoundInternalReferenceLoad(pc_offset());
@ -1767,8 +1702,8 @@ void Assembler::mov_label_addr(Register dst, Label* label) {
BlockTrampolinePoolScope block_trampoline_pool(this);
emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
emit(dst.code());
DCHECK(kMovInstructionsNoConstantPool >= 2);
for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
DCHECK(kMovInstructions >= 2);
for (int i = 0; i < kMovInstructions - 2; i++) nop();
}
}
@ -1778,11 +1713,9 @@ void Assembler::emit_label_addr(Label* label) {
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
int position = link(label);
if (label->is_bound()) {
// CheckBuffer() is called too frequently. This will pre-grow
// the buffer if needed to avoid spliting the relocation and entry.
#if V8_OOL_CONSTANT_POOL
// CheckBuffer() is called too frequently. This will pre-grow
// the buffer if needed to avoid splitting the relocation and entry.
EnsureSpaceFor(kPointerSize);
#endif
intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + position);
AddBoundInternalReference(pc_offset());
@ -2266,8 +2199,7 @@ bool Assembler::IsNop(Instr instr, int type) {
}
// Debugging.
void Assembler::GrowBuffer() {
void Assembler::GrowBuffer(int needed) {
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
@ -2279,6 +2211,10 @@ void Assembler::GrowBuffer() {
} else {
desc.buffer_size = buffer_size_ + 1 * MB;
}
int space = buffer_space() + (desc.buffer_size - buffer_size_);
if (space < needed) {
desc.buffer_size += needed - space;
}
CHECK_GT(desc.buffer_size, 0); // no overflow
// Set up new buffer.
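
The new `needed` parameter changes the sizing rule; a standalone sketch of the logic, simplified to the 1 MB branch shown above:

// Grow by the normal step first; if the caller's reservation still
// would not fit, extend the new size by exactly the shortfall.
static int NewBufferSize(int buffer_size, int buffer_space, int needed) {
  int new_size = buffer_size + 1 * 1024 * 1024;  // normal growth step
  int space = buffer_space + (new_size - buffer_size);
  if (space < needed) new_size += needed - space;
  return new_size;
}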
@ -2312,9 +2248,6 @@ void Assembler::GrowBuffer() {
RelocateInternalReference(buffer_ + pos, pc_delta, 0,
RelocInfo::INTERNAL_REFERENCE_ENCODED);
}
#if V8_OOL_CONSTANT_POOL
constant_pool_builder_.Relocate(pc_delta);
#endif
}
@ -2334,8 +2267,8 @@ void Assembler::dd(uint32_t data) {
void Assembler::emit_ptr(intptr_t data) {
CheckBuffer();
*reinterpret_cast<uintptr_t*>(pc_) = data;
pc_ += sizeof(uintptr_t);
*reinterpret_cast<intptr_t*>(pc_) = data;
pc_ += sizeof(intptr_t);
}
@ -2429,193 +2362,14 @@ void Assembler::CheckTrampolinePool() {
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
#if V8_OOL_CONSTANT_POOL
return constant_pool_builder_.New(isolate);
#else
// No out-of-line constant pool support.
DCHECK(!FLAG_enable_ool_constant_pool);
return isolate->factory()->empty_constant_pool_array();
#endif
}
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
#if V8_OOL_CONSTANT_POOL
constant_pool_builder_.Populate(this, constant_pool);
#else
// No out-of-line constant pool support.
DCHECK(!FLAG_enable_ool_constant_pool);
#endif
}
#if V8_OOL_CONSTANT_POOL
ConstantPoolBuilder::ConstantPoolBuilder()
: size_(0),
entries_(),
current_section_(ConstantPoolArray::SMALL_SECTION) {}
bool ConstantPoolBuilder::IsEmpty() { return entries_.size() == 0; }
ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
RelocInfo::Mode rmode) {
#if V8_TARGET_ARCH_PPC64
// We don't support 32-bit entries at this time.
if (!RelocInfo::IsGCRelocMode(rmode)) {
return ConstantPoolArray::INT64;
#else
if (rmode == RelocInfo::NONE64) {
return ConstantPoolArray::INT64;
} else if (!RelocInfo::IsGCRelocMode(rmode)) {
return ConstantPoolArray::INT32;
#endif
} else if (RelocInfo::IsCodeTarget(rmode)) {
return ConstantPoolArray::CODE_PTR;
} else {
DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
return ConstantPoolArray::HEAP_PTR;
}
}
ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
Assembler* assm, const RelocInfo& rinfo) {
RelocInfo::Mode rmode = rinfo.rmode();
DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
rmode != RelocInfo::STATEMENT_POSITION &&
rmode != RelocInfo::CONST_POOL);
// Try to merge entries which won't be patched.
int merged_index = -1;
ConstantPoolArray::LayoutSection entry_section = current_section_;
if (RelocInfo::IsNone(rmode) ||
(!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
size_t i;
std::vector<ConstantPoolEntry>::const_iterator it;
for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
// Merge with found entry.
merged_index = i;
entry_section = entries_[i].section_;
break;
}
}
}
DCHECK(entry_section <= current_section_);
entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
if (merged_index == -1) {
// Not merged, so update the appropriate count.
number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
}
// Check if we still have room for another entry in the small section
// given the limitations of the header's layout fields.
if (current_section_ == ConstantPoolArray::SMALL_SECTION) {
size_ = ConstantPoolArray::SizeFor(*small_entries());
if (!is_uint12(size_)) {
current_section_ = ConstantPoolArray::EXTENDED_SECTION;
}
} else {
size_ = ConstantPoolArray::SizeForExtended(*small_entries(),
*extended_entries());
}
return entry_section;
}
void ConstantPoolBuilder::Relocate(intptr_t pc_delta) {
for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
entry != entries_.end(); entry++) {
DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
}
}
Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
if (IsEmpty()) {
return isolate->factory()->empty_constant_pool_array();
} else if (extended_entries()->is_empty()) {
return isolate->factory()->NewConstantPoolArray(*small_entries());
} else {
DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
return isolate->factory()->NewExtendedConstantPoolArray(
*small_entries(), *extended_entries());
}
}
void ConstantPoolBuilder::Populate(Assembler* assm,
ConstantPoolArray* constant_pool) {
DCHECK_EQ(extended_entries()->is_empty(),
!constant_pool->is_extended_layout());
DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
constant_pool, ConstantPoolArray::SMALL_SECTION)));
if (constant_pool->is_extended_layout()) {
DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
}
// Set up initial offsets.
int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
[ConstantPoolArray::NUMBER_OF_TYPES];
for (int section = 0; section <= constant_pool->final_section(); section++) {
int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
? small_entries()->total_count()
: 0;
for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
if (number_of_entries_[section].count_of(type) != 0) {
offsets[section][type] = constant_pool->OffsetOfElementAt(
number_of_entries_[section].base_of(type) + section_start);
}
}
}
for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
entry != entries_.end(); entry++) {
RelocInfo rinfo = entry->rinfo_;
RelocInfo::Mode rmode = entry->rinfo_.rmode();
ConstantPoolArray::Type type = GetConstantPoolType(rmode);
// Update constant pool if necessary and get the entry's offset.
int offset;
if (entry->merged_index_ == -1) {
offset = offsets[entry->section_][type];
offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
if (type == ConstantPoolArray::INT64) {
#if V8_TARGET_ARCH_PPC64
constant_pool->set_at_offset(offset, rinfo.data());
#else
constant_pool->set_at_offset(offset, rinfo.data64());
} else if (type == ConstantPoolArray::INT32) {
constant_pool->set_at_offset(offset,
static_cast<int32_t>(rinfo.data()));
#endif
} else if (type == ConstantPoolArray::CODE_PTR) {
constant_pool->set_at_offset(offset,
reinterpret_cast<Address>(rinfo.data()));
} else {
DCHECK(type == ConstantPoolArray::HEAP_PTR);
constant_pool->set_at_offset(offset,
reinterpret_cast<Object*>(rinfo.data()));
}
offset -= kHeapObjectTag;
entry->merged_index_ = offset; // Stash offset for merged entries.
} else {
DCHECK(entry->merged_index_ < (entry - entries_.begin()));
offset = entries_[entry->merged_index_].merged_index_;
}
// Patch load instruction with correct offset.
Assembler::SetConstantPoolOffset(rinfo.pc(), offset);
}
}
#endif
}
} // namespace v8::internal

src/ppc/assembler-ppc.h

@ -109,11 +109,7 @@ struct Register {
static const int kAllocatableLowRangeBegin = 3;
static const int kAllocatableLowRangeEnd = 10;
static const int kAllocatableHighRangeBegin = 14;
#if V8_OOL_CONSTANT_POOL
static const int kAllocatableHighRangeEnd = 27;
#else
static const int kAllocatableHighRangeEnd = 28;
#endif
static const int kAllocatableContext = 30;
static const int kNumAllocatableLow =
@ -179,14 +175,18 @@ struct Register {
"r25",
"r26",
"r27",
#if !V8_OOL_CONSTANT_POOL
"r28",
#endif
"cp",
};
return names[index];
}
static const RegList kAllocatable =
1 << 3 | 1 << 4 | 1 << 5 | 1 << 6 | 1 << 7 | 1 << 8 | 1 << 9 | 1 << 10 |
1 << 14 | 1 << 15 | 1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 | 1 << 20 |
1 << 21 | 1 << 22 | 1 << 23 | 1 << 24 | 1 << 25 | 1 << 26 | 1 << 27 |
1 << 28 | 1 << 30;
static Register from_code(int code) {
Register r = {code};
return r;
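
A standalone consistency check of the new kAllocatable mask (my arithmetic, not patch code); r28 becomes allocatable again because it no longer holds the constant pool pointer:

// Bits 3-10 (8 regs) + bits 14-28 (15 regs) + bit 30 (cp) = 24 bits set.
constexpr unsigned kLow = ((1u << 11) - 1) & ~((1u << 3) - 1);    // r3..r10
constexpr unsigned kHigh = ((1u << 29) - 1) & ~((1u << 14) - 1);  // r14..r28
constexpr unsigned kMask = kLow | kHigh | (1u << 30);             // + cp
static_assert(__builtin_popcount(kMask) == 24, "24 allocatable registers");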
@ -243,7 +243,7 @@ const int kRegister_r24_Code = 24;
const int kRegister_r25_Code = 25;
const int kRegister_r26_Code = 26;
const int kRegister_r27_Code = 27;
const int kRegister_r28_Code = 28; // constant pool pointer
const int kRegister_r28_Code = 28;
const int kRegister_r29_Code = 29; // roots array pointer
const int kRegister_r30_Code = 30; // context pointer
const int kRegister_fp_Code = 31; // frame pointer
@ -287,9 +287,6 @@ const Register fp = {kRegister_fp_Code};
// Give alias names to registers
const Register cp = {kRegister_r30_Code}; // JavaScript context pointer
const Register kRootRegister = {kRegister_r29_Code}; // Roots array pointer.
#if V8_OOL_CONSTANT_POOL
const Register kConstantPoolRegister = {kRegister_r28_Code}; // Constant pool
#endif
// Double word FP register.
struct DoubleRegister {
@ -468,13 +465,6 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
// For mov. Return the number of actual instructions required to
// load the operand into a register. This can be anywhere from
// one (constant pool small section) to five instructions (full
// 64-bit sequence).
//
// The value returned is only valid as long as no entries are added to the
// constant pool between this call and the actual instruction being emitted.
bool must_output_reloc_info(const Assembler* assembler) const;
inline intptr_t immediate() const {
@ -528,77 +518,6 @@ class MemOperand BASE_EMBEDDED {
};
#if V8_OOL_CONSTANT_POOL
// Class used to build a constant pool.
class ConstantPoolBuilder BASE_EMBEDDED {
public:
ConstantPoolBuilder();
ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
const RelocInfo& rinfo);
void Relocate(intptr_t pc_delta);
bool IsEmpty();
Handle<ConstantPoolArray> New(Isolate* isolate);
void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
inline ConstantPoolArray::LayoutSection current_section() const {
return current_section_;
}
// Rather than increasing the capacity of the ConstantPoolArray's
// small section to match the longer (16-bit) reach of PPC's load
// instruction (at the expense of a larger header to describe the
// layout), the PPC implementation utilizes the extended section to
// satisfy that reach. I.e. all entries (regardless of their
// section) are reachable with a single load instruction.
//
// This implementation does not support an unlimited constant pool
// size (which would require a multi-instruction sequence). [See
// ARM commit e27ab337 for a reference on the changes required to
// support the longer instruction sequence.] Note, however, that
// going down that path will necessarily generate that longer
// sequence for all extended section accesses since the placement of
// a given entry within the section is not known at the time of
// code generation.
//
// TODO(mbrandy): Determine whether there is a benefit to supporting
// the longer sequence given that nops could be used for those
// entries which are reachable with a single instruction.
inline bool is_full() const { return !is_int16(size_); }
inline ConstantPoolArray::NumberOfEntries* number_of_entries(
ConstantPoolArray::LayoutSection section) {
return &number_of_entries_[section];
}
inline ConstantPoolArray::NumberOfEntries* small_entries() {
return number_of_entries(ConstantPoolArray::SMALL_SECTION);
}
inline ConstantPoolArray::NumberOfEntries* extended_entries() {
return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
}
private:
struct ConstantPoolEntry {
ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
int merged_index)
: rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
RelocInfo rinfo_;
ConstantPoolArray::LayoutSection section_;
int merged_index_;
};
ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
uint32_t size_;
std::vector<ConstantPoolEntry> entries_;
ConstantPoolArray::LayoutSection current_section_;
ConstantPoolArray::NumberOfEntries number_of_entries_[2];
};
#endif
class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
@ -660,18 +579,6 @@ class Assembler : public AssemblerBase {
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
#if V8_OOL_CONSTANT_POOL
INLINE(static bool IsConstantPoolLoadStart(Address pc));
INLINE(static bool IsConstantPoolLoadEnd(Address pc));
INLINE(static int GetConstantPoolOffset(Address pc));
INLINE(static void SetConstantPoolOffset(Address pc, int offset));
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_constant_pool_address_at(
Address pc, ConstantPoolArray* constant_pool));
#endif
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc,
ConstantPoolArray* constant_pool));
@ -679,13 +586,13 @@ class Assembler : public AssemblerBase {
Address pc, ConstantPoolArray* constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
ConstantPoolArray* constant_pool = NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(
Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
ConstantPoolArray* constant_pool = NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@ -718,16 +625,11 @@ class Assembler : public AssemblerBase {
// Number of instructions to load an address via a mov sequence.
#if V8_TARGET_ARCH_PPC64
static const int kMovInstructionsConstantPool = 2;
static const int kMovInstructionsNoConstantPool = 5;
static const int kMovInstructions = 5;
static const int kTaggedLoadInstructions = 2;
#else
static const int kMovInstructionsConstantPool = 1;
static const int kMovInstructionsNoConstantPool = 2;
#endif
#if V8_OOL_CONSTANT_POOL
static const int kMovInstructions = kMovInstructionsConstantPool;
#else
static const int kMovInstructions = kMovInstructionsNoConstantPool;
static const int kMovInstructions = 2;
static const int kTaggedLoadInstructions = 1;
#endif
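
For reference, the fixed sequences behind the now-single kMovInstructions constant (a sketch of the usual PPC full-immediate expansion; the assembler's exact encoding may differ):

// PPC64: a full 64-bit immediate takes 5 instructions
//   lis   rD, imm[63:48]
//   ori   rD, rD, imm[47:32]
//   sldi  rD, rD, 32
//   oris  rD, rD, imm[31:16]
//   ori   rD, rD, imm[15:0]
// 32-bit PPC: lis + ori, hence kMovInstructions == 2 there.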
// Distance between the instruction referring to the address of the call
@ -759,13 +661,11 @@ class Assembler : public AssemblerBase {
// This is the length of the BreakLocationIterator::SetDebugBreakAtReturn()
// code patch FIXED_SEQUENCE
static const int kJSReturnSequenceInstructions =
kMovInstructionsNoConstantPool + 3;
static const int kJSReturnSequenceInstructions = kMovInstructions + 3;
// This is the length of the code sequence from SetDebugBreakAtSlot()
// FIXED_SEQUENCE
static const int kDebugBreakSlotInstructions =
kMovInstructionsNoConstantPool + 2;
static const int kDebugBreakSlotInstructions = kMovInstructions + 2;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@ -1355,12 +1255,6 @@ class Assembler : public AssemblerBase {
void BlockTrampolinePoolFor(int instructions);
void CheckTrampolinePool();
int instructions_required_for_mov(const Operand& x) const;
#if V8_OOL_CONSTANT_POOL
// Decide between using the constant pool vs. a mov immediate sequence.
bool use_constant_pool_for_mov(const Operand& x, bool canOptimize) const;
// The code currently calls CheckBuffer() too often. This has the side
// effect of randomly growing the buffer in the middle of multi-instruction
// sequences.
@ -1370,7 +1264,6 @@ class Assembler : public AssemblerBase {
//
// This function allows outside callers to check and grow the buffer
void EnsureSpaceFor(int space_needed);
#endif
// Allocate a constant pool of the correct size for the generated code.
Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
@ -1378,17 +1271,6 @@ class Assembler : public AssemblerBase {
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
#if V8_OOL_CONSTANT_POOL
bool is_constant_pool_full() const {
return constant_pool_builder_.is_full();
}
bool use_extended_constant_pool() const {
return constant_pool_builder_.current_section() ==
ConstantPoolArray::EXTENDED_SECTION;
}
#endif
static void RelocateInternalReference(
Address pc, intptr_t delta, Address code_start, RelocInfo::Mode rmode,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
@ -1418,12 +1300,6 @@ class Assembler : public AssemblerBase {
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
void RecordRelocInfo(const RelocInfo& rinfo);
#if V8_OOL_CONSTANT_POOL
ConstantPoolArray::LayoutSection ConstantPoolAddEntry(
const RelocInfo& rinfo) {
return constant_pool_builder_.AddEntry(this, rinfo);
}
#endif
// Block the emission of the trampoline pool before pc_offset.
void BlockTrampolinePoolBefore(int pc_offset) {
@ -1432,9 +1308,7 @@ class Assembler : public AssemblerBase {
}
void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
void EndBlockTrampolinePool() { trampoline_pool_blocked_nesting_--; }
bool is_trampoline_pool_blocked() const {
return trampoline_pool_blocked_nesting_ > 0;
}
@ -1474,13 +1348,9 @@ class Assembler : public AssemblerBase {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
#if V8_OOL_CONSTANT_POOL
ConstantPoolBuilder constant_pool_builder_;
#endif
// Code emission
inline void CheckBuffer();
void GrowBuffer();
void GrowBuffer(int needed = 0);
inline void emit(Instr x);
inline void CheckTrampolinePoolQuick();

src/ppc/builtins-ppc.cc

@ -232,7 +232,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(r3);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
}
@ -252,7 +252,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r6, r7);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
@ -262,7 +262,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
static void CallRuntimePassFunction(MacroAssembler* masm,
Runtime::FunctionId function_id) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
// Push function as parameter to the runtime call.
__ Push(r4, r4);
@ -353,7 +353,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
FrameScope scope(masm, StackFrame::CONSTRUCT);
if (create_memento) {
__ AssertUndefinedOrAllocationSite(r5, r7);
@ -752,7 +752,7 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
CHECK(!FLAG_pretenuring_call_new);
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
FrameScope scope(masm, StackFrame::CONSTRUCT);
// Smi-tagged arguments count.
__ mr(r7, r3);
@ -916,7 +916,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
// Push function as parameter to the runtime call.
__ Push(r4, r4);
@ -1027,7 +1027,7 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification, this is important for compiled
// stubs that tail call the runtime on deopts passing their parameters in
@ -1056,7 +1056,7 @@ void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass the function and deoptimization type to the runtime system.
__ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
__ push(r3);
@ -1104,7 +1104,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r3);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
@ -1122,12 +1122,8 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadP(r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset));
#if V8_OOL_CONSTANT_POOL
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ LoadP(kConstantPoolRegister,
FieldMemOperand(r3, Code::kConstantPoolOffset));
#endif
__ addi(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@ -1136,17 +1132,13 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
DeoptimizationInputData::kOsrPcOffsetIndex)));
__ SmiUntag(r4);
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ add(r3, r3, r4);
__ addi(r0, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ mtlr(r0);
// Compute the target address = code start + osr_offset
__ add(r0, r3, r4);
// And "return" to the OSR entry point of the function.
__ Ret();
#if V8_OOL_CONSTANT_POOL
__ mtlr(r0);
__ blr();
}
#endif
}
@ -1157,7 +1149,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ cmpl(sp, ip);
__ bge(&ok);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
@ -1248,7 +1240,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
// Enter an internal frame in order to preserve argument count.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r3);
__ Push(r3, r5);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
@ -1381,7 +1373,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kFunctionOffset = 4 * kPointerSize;
{
FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r3);
@ -1563,11 +1555,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ mflr(r0);
__ push(r0);
#if V8_OOL_CONSTANT_POOL
__ Push(fp, kConstantPoolRegister, r7, r4, r3);
#else
__ Push(fp, r7, r4, r3);
#endif
__ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize));
}

src/ppc/code-stubs-ppc.cc

@ -110,7 +110,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
r3.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments
@ -1184,11 +1184,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r7: argv
__ li(r0, Operand(-1)); // Push a bad frame pointer to fail if it is used.
__ push(r0);
#if V8_OOL_CONSTANT_POOL
__ mov(kConstantPoolRegister,
Operand(isolate()->factory()->empty_constant_pool_array()));
__ push(kConstantPoolRegister);
#endif
int marker = type();
__ LoadSmiLiteral(r0, Smi::FromInt(marker));
__ push(r0);
@ -1336,14 +1331,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
const Register scratch = r5;
Register scratch3 = no_reg;
// delta = mov + unaligned LoadP + cmp + bne
#if V8_TARGET_ARCH_PPC64
// delta = mov + tagged LoadP + cmp + bne
const int32_t kDeltaToLoadBoolResult =
(Assembler::kMovInstructions + 4) * Assembler::kInstrSize;
#else
const int32_t kDeltaToLoadBoolResult =
(Assembler::kMovInstructions + 3) * Assembler::kInstrSize;
#endif
(Assembler::kMovInstructions + Assembler::kTaggedLoadInstructions + 2) *
Assembler::kInstrSize;
Label slow, loop, is_instance, is_not_instance, not_js_object;
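
Worked values for the folded kDeltaToLoadBoolResult expression (my arithmetic): kTaggedLoadInstructions is 2 on PPC64 and 1 on 32-bit, so the single formula reproduces both old #if arms.

static_assert((5 + 2 + 2) * 4 == 36, "PPC64: same as old kMov + 4 form");
static_assert((2 + 1 + 2) * 4 == 20, "PPC32: same as old kMov + 3 form");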
@ -1503,7 +1494,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(r3, r4);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
@ -2585,7 +2576,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
// Arguments register must be smi-tagged to call out.
__ SmiTag(r3);
@ -2671,7 +2662,7 @@ static void EmitSlowCase(MacroAssembler* masm, int argc, Label* non_function) {
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{
FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ Push(r4, r6);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ pop(r4);
@ -2988,7 +2979,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// r6 - slot
// r4 - function
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
__ Push(r4);
__ CallStub(&create_stub);
@ -3016,7 +3007,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the function and feedback info.
__ Push(r4, r5, r6);
@ -3974,7 +3965,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(r4, r3);
__ Push(r4, r3);
__ LoadSmiLiteral(r0, Smi::FromInt(op()));

src/ppc/debug-ppc.cc

@ -108,7 +108,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
__ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
@ -317,8 +317,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset -
kPointerSize));
// Pop return address, frame and constant pool pointer (if
// FLAG_enable_ool_constant_pool).
// Pop return address and frame
__ LeaveFrame(StackFrame::INTERNAL);
// Load context from the function.

src/ppc/deoptimizer-ppc.cc

@ -356,13 +356,8 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
#if V8_OOL_CONSTANT_POOL
DCHECK(FLAG_enable_ool_constant_pool);
SetFrameSlot(offset, value);
#else
// No out-of-line constant pool support.
UNREACHABLE();
#endif
}

src/ppc/frames-ppc.cc

@ -21,38 +21,22 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
#if V8_OOL_CONSTANT_POOL
DCHECK(FLAG_enable_ool_constant_pool);
return kConstantPoolRegister;
#else
UNREACHABLE();
return no_reg;
#endif
}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
#if V8_OOL_CONSTANT_POOL
DCHECK(FLAG_enable_ool_constant_pool);
return kConstantPoolRegister;
#else
UNREACHABLE();
return no_reg;
#endif
}
Object*& ExitFrame::constant_pool_slot() const {
#if V8_OOL_CONSTANT_POOL
DCHECK(FLAG_enable_ool_constant_pool);
const int offset = ExitFrameConstants::kConstantPoolOffset;
return Memory::Object_at(fp() + offset);
#else
UNREACHABLE();
return Memory::Object_at(NULL);
#endif
}
}
} // namespace v8::internal

src/ppc/frames-ppc.h

@ -57,15 +57,8 @@ const int kNumCalleeSaved = 18;
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
const int kNumSafepointRegisters = 32;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
// The following constants describe the stack frame linkage area as
// defined by the ABI. Note that kNumRequiredStackFrameSlots must
// satisfy alignment requirements (rounding up if required).
@ -123,13 +116,8 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
#if V8_OOL_CONSTANT_POOL
static const int kFrameSize = 3 * kPointerSize;
static const int kConstantPoolOffset = -3 * kPointerSize;
#else
static const int kFrameSize = 2 * kPointerSize;
static const int kConstantPoolOffset = 0; // Not used.
#endif
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
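
Resulting exit-frame shape (a sketch from the constants above):

// Offsets from fp; kFrameSize == 2 * kPointerSize covers exactly these:
//   [fp - 1 * kPointerSize]  saved sp     (kSPOffset)
//   [fp - 2 * kPointerSize]  code object  (kCodeOffset)
// The third slot that held the constant pool is gone; kConstantPoolOffset
// stays defined (as 0) only for shared code that references it.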

src/ppc/full-codegen-ppc.cc

@ -467,9 +467,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// With 64bit we may need nop() instructions to ensure we have
// enough space to SetDebugBreakAtReturn()
if (is_int16(sp_delta)) {
#if !V8_OOL_CONSTANT_POOL
masm_->nop();
#endif
masm_->nop();
}
#endif
@ -2294,13 +2292,7 @@ void FullCodeGenerator::EmitGeneratorResume(
Label slow_resume;
__ bne(&slow_resume, cr0);
__ LoadP(ip, FieldMemOperand(r7, JSFunction::kCodeEntryOffset));
#if V8_OOL_CONSTANT_POOL
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
// Load the new code object's constant pool pointer.
__ LoadP(kConstantPoolRegister,
MemOperand(ip, Code::kConstantPoolOffset - Code::kHeaderSize));
#endif
__ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
__ SmiUntag(r5);
__ add(ip, ip, r5);
@ -2310,9 +2302,7 @@ void FullCodeGenerator::EmitGeneratorResume(
r0);
__ Jump(ip);
__ bind(&slow_resume);
#if V8_OOL_CONSTANT_POOL
}
#endif
} else {
__ beq(&call_resume, cr0);
}
@ -3783,8 +3773,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
__ LoadP(r3, FieldMemOperand(r3, Map::kConstructorOffset));
__ CompareObjectType(r3, r4, r4, JS_FUNCTION_TYPE);
Register instance_type = r5;
__ GetMapConstructor(r3, r3, r4, instance_type);
__ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
__ bne(&non_function_constructor);
// r3 now contains the constructor function. Grab the

src/ppc/lithium-codegen-ppc.cc

@ -110,7 +110,7 @@ bool LCodeGen::GeneratePrologue() {
// r4: Callee's JS function.
// cp: Callee's context.
// pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
// pp: Callee's constant pool pointer (if enabled)
// fp: Caller's frame pointer.
// lr: Caller's pc.
// ip: Our own function entry (required by the prologue)
@ -942,12 +942,6 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
#if V8_OOL_CONSTANT_POOL
if (kind & Safepoint::kWithRegisters) {
// Register always contains a pointer to the constant pool.
safepoint.DefinePointerRegister(kConstantPoolRegister, zone());
}
#endif
}
@ -2788,10 +2782,11 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ LoadP(temp, FieldMemOperand(temp, Map::kConstructorOffset));
Register instance_type = ip;
__ GetMapConstructor(temp, temp, temp2, instance_type);
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
__ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
__ bne(is_true);
} else {
@ -3094,7 +3089,8 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -3236,7 +3232,9 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL,
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -3591,7 +3589,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
Handle<Code> ic =
CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -4530,7 +4530,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
Handle<Code> ic =
StoreIC::initialize_stub(isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -4794,8 +4796,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -5520,6 +5523,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
Register temp = ToRegister(instr->temp());
{
PushSafepointRegistersScope scope(this);
__ push(object);
@ -5527,9 +5531,9 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(instr->pointer_map(), 1,
Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(r3, scratch0());
__ StoreToSafepointRegisterSlot(r3, temp);
}
__ TestIfSmi(scratch0(), r0);
__ TestIfSmi(temp, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
}
@ -5561,17 +5565,14 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
return;
}
Register map_reg = scratch0();
Register object = ToRegister(instr->value());
Register map_reg = ToRegister(instr->temp());
LOperand* input = instr->value();
DCHECK(input->IsRegister());
Register reg = ToRegister(input);
__ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
__ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->HasMigrationTarget()) {
deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
deferred = new (zone()) DeferredCheckMaps(this, instr, object);
__ bind(deferred->check_maps());
}

src/ppc/lithium-ppc.cc

@ -2029,7 +2029,9 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps;
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = AssignEnvironment(new (zone()) LCheckMaps(value));
LOperand* temp = TempRegister();
LInstruction* result =
AssignEnvironment(new (zone()) LCheckMaps(value, temp));
if (instr->HasMigrationTarget()) {
info()->MarkAsDeferredCalling();
result = AssignPointerMap(result);

src/ppc/lithium-ppc.h

@ -2319,11 +2319,15 @@ class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
};
class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 1> {
public:
explicit LCheckMaps(LOperand* value = NULL) { inputs_[0] = value; }
explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
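
Tying the CheckMaps hunks together (my reading of the diff, not patch text): the deferred migration path previously parked the runtime result in scratch0(), which nothing guarantees to survive deferred code; an explicit allocator-managed temp does. Condensed from the hunks above:

// lithium-ppc.cc: reserve the temp at instruction-selection time.
LOperand* temp = TempRegister();
new (zone()) LCheckMaps(value, temp);
// lithium-codegen-ppc.cc: the deferred path keeps the result in it.
Register temp = ToRegister(instr->temp());
__ StoreToSafepointRegisterSlot(r3, temp);
__ TestIfSmi(temp, r0);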

src/ppc/macro-assembler-ppc.cc

@ -104,15 +104,14 @@ void MacroAssembler::CallJSEntry(Register target) {
int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond) {
Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
return (2 + instructions_required_for_mov(mov_operand)) * kInstrSize;
return (2 + kMovInstructions) * kInstrSize;
}
int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond) {
return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
return (2 + kMovInstructions) * kInstrSize;
}
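For reference, a worked sketch of the simplified size computation, assuming the 64-bit values kMovInstructions == 5 and kInstrSize == 4 (both assumptions about the PPC port, not shown in this diff):

    // A 5-instruction bitwise mov of the target address (lis/ori/sldi/oris/ori),
    // plus mtctr and bctrl for the call itself:
    int call_size = (2 + 5) * 4;  // == 28 bytes for a predictable-size call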
@ -514,40 +513,28 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
void MacroAssembler::PushFixedFrame(Register marker_reg) {
mflr(r0);
#if V8_OOL_CONSTANT_POOL
if (marker_reg.is_valid()) {
Push(r0, fp, kConstantPoolRegister, cp, marker_reg);
} else {
Push(r0, fp, kConstantPoolRegister, cp);
}
#else
if (marker_reg.is_valid()) {
Push(r0, fp, cp, marker_reg);
} else {
Push(r0, fp, cp);
}
#endif
}
void MacroAssembler::PopFixedFrame(Register marker_reg) {
#if V8_OOL_CONSTANT_POOL
if (marker_reg.is_valid()) {
Pop(r0, fp, kConstantPoolRegister, cp, marker_reg);
} else {
Pop(r0, fp, kConstantPoolRegister, cp);
}
#else
if (marker_reg.is_valid()) {
Pop(r0, fp, cp, marker_reg);
} else {
Pop(r0, fp, cp);
}
#endif
mtlr(r0);
}
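With the out-of-line constant pool gone, the fixed frame has the same shape on every path. A rough sketch of the layout PushFixedFrame now builds (slot order is an inference from the Push above, highest address first):

    // sp + 3 * kPointerSize : saved lr (pushed via r0)
    // sp + 2 * kPointerSize : caller fp
    // sp + 1 * kPointerSize : cp (context)
    // sp + 0 * kPointerSize : marker_reg (only when valid)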
const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
const int MacroAssembler::kNumSafepointSavedRegisters =
Register::kMaxNumAllocatableRegisters;
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
@ -664,42 +651,11 @@ void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
}
#if V8_OOL_CONSTANT_POOL
void MacroAssembler::LoadConstantPoolPointerRegister(
CodeObjectAccessMethod access_method, int ip_code_entry_delta) {
Register base;
int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize;
if (access_method == CAN_USE_IP) {
base = ip;
constant_pool_offset += ip_code_entry_delta;
} else {
DCHECK(access_method == CONSTRUCT_INTERNAL_REFERENCE);
base = kConstantPoolRegister;
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
// CheckBuffer() is called too frequently. This will pre-grow
// the buffer if needed to avoid splitting the relocation info and instructions
EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);
intptr_t code_start = reinterpret_cast<intptr_t>(pc_) - pc_offset();
AddBoundInternalReferenceLoad(pc_offset());
bitwise_mov(base, code_start);
}
LoadP(kConstantPoolRegister, MemOperand(base, constant_pool_offset));
}
#endif
void MacroAssembler::StubPrologue(int prologue_offset) {
LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
PushFixedFrame(r11);
// Adjust FP to point to saved FP.
addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
#if V8_OOL_CONSTANT_POOL
// ip contains prologue address
LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset);
set_ool_constant_pool_available(true);
#endif
}
@ -732,28 +688,13 @@ void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
}
}
}
#if V8_OOL_CONSTANT_POOL
// ip contains prologue address
LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset);
set_ool_constant_pool_available(true);
#endif
}
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
PushFixedFrame();
#if V8_OOL_CONSTANT_POOL
// This path should not rely on ip containing code entry.
LoadConstantPoolPointerRegister(CONSTRUCT_INTERNAL_REFERENCE);
#endif
LoadSmiLiteral(ip, Smi::FromInt(type));
push(ip);
} else {
LoadSmiLiteral(ip, Smi::FromInt(type));
PushFixedFrame(ip);
}
LoadSmiLiteral(ip, Smi::FromInt(type));
PushFixedFrame(ip);
// Adjust FP to point to saved FP.
addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
@ -763,24 +704,15 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
#if V8_OOL_CONSTANT_POOL
ConstantPoolUnavailableScope constant_pool_unavailable(this);
#endif
// r3: preserved
// r4: preserved
// r5: preserved
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer, return address and constant pool pointer.
// the caller's state.
int frame_ends;
LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
#if V8_OOL_CONSTANT_POOL
const int exitOffset = ExitFrameConstants::kConstantPoolOffset;
const int standardOffset = StandardFrameConstants::kConstantPoolOffset;
const int offset = ((type == StackFrame::EXIT) ? exitOffset : standardOffset);
LoadP(kConstantPoolRegister, MemOperand(fp, offset));
#endif
mtlr(r0);
frame_ends = pc_offset();
Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
@ -827,10 +759,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
li(r8, Operand::Zero());
StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
#if V8_OOL_CONSTANT_POOL
StoreP(kConstantPoolRegister,
MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
#endif
mov(r8, Operand(CodeObject()));
StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
@ -900,9 +828,6 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool restore_context,
bool argument_count_is_length) {
#if V8_OOL_CONSTANT_POOL
ConstantPoolUnavailableScope constant_pool_unavailable(this);
#endif
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
@ -1218,21 +1143,16 @@ void MacroAssembler::JumpToHandlerEntry() {
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// r3 = exception, r4 = code object, r5 = state.
#if V8_OOL_CONSTANT_POOL
ConstantPoolUnavailableScope constant_pool_unavailable(this);
LoadP(kConstantPoolRegister, FieldMemOperand(r4, Code::kConstantPoolOffset));
#endif
LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset)); // Handler table.
addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
srwi(r5, r5, Operand(StackHandler::kKindWidth)); // Handler index.
slwi(ip, r5, Operand(kPointerSizeLog2));
add(ip, r6, ip);
LoadP(r5, MemOperand(ip)); // Smi-tagged offset.
addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
SmiUntag(ip, r5);
add(r0, r4, ip);
mtctr(r0);
bctr();
add(ip, r4, ip);
Jump(ip);
}
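In C-like pseudocode, the rewritten tail above computes (a sketch with hypothetical names; only the mtctr/bctr pair was replaced by a direct Jump):

    // offset = SmiUntag(handler_table[state >> StackHandler::kKindWidth]);
    // entry  = code_start + offset;  // code_start = r4 + Code::kHeaderSize - tag
    // Jump(entry);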
@ -2108,6 +2028,20 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
}
void MacroAssembler::GetMapConstructor(Register result, Register map,
Register temp, Register temp2) {
Label done, loop;
LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
bind(&loop);
JumpIfSmi(result, &done);
CompareObjectType(result, temp, temp2, MAP_TYPE);
bne(&done);
LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
b(&loop);
bind(&done);
}
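The new GetMapConstructor walks the constructor-or-back-pointer chain until it reaches something that is not a map. A minimal C++ sketch of the same loop, assuming a simplified object model (names hypothetical, not V8's runtime API):

    Object* GetMapConstructorSketch(Map* map) {
      Object* result = map->constructor_or_back_pointer();
      // Back pointers are maps; the real constructor is the first non-map.
      while (!result->IsSmi() && result->IsMap()) {
        result = Map::cast(result)->constructor_or_back_pointer();
      }
      return result;
    }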
void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss,
bool miss_on_bound_function) {
@ -2164,7 +2098,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
LoadP(result, FieldMemOperand(result, Map::kConstructorOffset));
GetMapConstructor(result, result, scratch, ip);
}
// All done.
@ -3371,25 +3305,6 @@ void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
Register new_value) {
lwz(scratch, MemOperand(location));
#if V8_OOL_CONSTANT_POOL
if (emit_debug_code()) {
// Check that the instruction sequence is a load from the constant pool
#if V8_TARGET_ARCH_PPC64
And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
Cmpi(scratch, Operand(ADDI), r0);
Check(eq, kTheInstructionShouldBeALi);
lwz(scratch, MemOperand(location, kInstrSize));
#endif
ExtractBitMask(scratch, scratch, 0x1f * B16);
cmpi(scratch, Operand(kConstantPoolRegister.code()));
Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
// Scratch was clobbered. Restore it.
lwz(scratch, MemOperand(location));
}
// Get the address of the constant and patch it.
andi(scratch, scratch, Operand(kImm16Mask));
StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
#else
// This code assumes a FIXED_SEQUENCE for lis/ori
// At this point scratch is a lis instruction.
@ -3466,7 +3381,6 @@ void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
#else
FlushICache(location, 2 * kInstrSize, scratch);
#endif
#endif
}
@ -3474,24 +3388,6 @@ void MacroAssembler::GetRelocatedValue(Register location, Register result,
Register scratch) {
lwz(result, MemOperand(location));
#if V8_OOL_CONSTANT_POOL
if (emit_debug_code()) {
// Check that the instruction sequence is a load from the constant pool
#if V8_TARGET_ARCH_PPC64
And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
Cmpi(result, Operand(ADDI), r0);
Check(eq, kTheInstructionShouldBeALi);
lwz(result, MemOperand(location, kInstrSize));
#endif
ExtractBitMask(result, result, 0x1f * B16);
cmpi(result, Operand(kConstantPoolRegister.code()));
Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
lwz(result, MemOperand(location));
}
// Get the address of the constant and retrieve it.
andi(result, result, Operand(kImm16Mask));
LoadPX(result, MemOperand(kConstantPoolRegister, result));
#else
// This code assumes a FIXED_SEQUENCE for lis/ori
if (emit_debug_code()) {
And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
@ -3544,7 +3440,6 @@ void MacroAssembler::GetRelocatedValue(Register location, Register result,
sldi(result, result, Operand(16));
rldimi(result, scratch, 0, 48);
#endif
#endif
}
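Both patching helpers now assume only the FIXED_SEQUENCE lis/ori form. A sketch of the encoding being read and rewritten (32-bit case; the mnemonics are real PPC instructions, the layout is an inference):

    // lis rX, value >> 16         ; load upper 16 bits into rX
    // ori rX, rX, value & 0xFFFF  ; or in the lower 16 bits
    // Patching a value therefore means rewriting the two 16-bit immediate
    // fields in place and flushing the icache for those instructions.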
@ -3930,23 +3825,6 @@ void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
Register scratch) {
#if V8_OOL_CONSTANT_POOL
// TODO(mbrandy): enable extended constant pool usage for doubles.
// See ARM commit e27ab337 for a reference.
if (is_ool_constant_pool_available() && !is_constant_pool_full()) {
RelocInfo rinfo(pc_, value);
ConstantPoolAddEntry(rinfo);
#if V8_TARGET_ARCH_PPC64
// We use 2 instruction sequence here for consistency with mov.
li(scratch, Operand::Zero());
lfdx(result, MemOperand(kConstantPoolRegister, scratch));
#else
lfd(result, MemOperand(kConstantPoolRegister, 0));
#endif
return;
}
#endif
// avoid gcc strict aliasing error using union cast
union {
double dval;
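The union cast begun above (truncated in this view) is the GCC-sanctioned way to reinterpret a double's bits without violating strict aliasing. A self-contained sketch of the technique (names hypothetical):

    #include <stdint.h>
    union DoubleBits {
      double dval;
      int64_t ival;  // assumes 64-bit doubles, as on PPC
    };
    // Reading ival after writing dval yields the raw bit pattern without
    // the undefined behavior of casting through incompatible pointers.
    DoubleBits bits;
    bits.dval = 1.5;
    int64_t raw = bits.ival;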

@ -102,9 +102,7 @@ class MacroAssembler : public Assembler {
MacroAssembler(Isolate* isolate, void* buffer, int size);
// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
// checking the call size and emitting the actual call.
// Returns the size of a call in instructions.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
static int CallSizeNotPredictableCodeSize(Address target,
@ -684,6 +682,11 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Support functions.
// Machine code version of Map::GetConstructor().
// |temp| holds |result|'s map when done, and |temp2| its instance type.
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
// Try to get the prototype of a function and put the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
@ -1361,7 +1364,7 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Patching helpers.
// Retrieve/patch the relocated value (lis/ori pair or constant pool load).
// Retrieve/patch the relocated value (lis/ori pair).
void GetRelocatedValue(Register location, Register result, Register scratch);
void SetRelocatedValue(Register location, Register scratch,
Register new_value);
@ -1485,18 +1488,14 @@ class MacroAssembler : public Assembler {
// it. See the implementation for register usage.
void JumpToHandlerEntry();
static const RegList kSafepointSavedRegisters;
static const int kNumSafepointSavedRegisters;
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
#if V8_OOL_CONSTANT_POOL
// Loads the constant pool pointer (kConstantPoolRegister).
enum CodeObjectAccessMethod { CAN_USE_IP, CONSTRUCT_INTERNAL_REFERENCE };
void LoadConstantPoolPointerRegister(CodeObjectAccessMethod access_method,
int ip_code_entry_delta = 0);
#endif
bool generating_stub_;
bool has_frame_;
// This handle will be patched with the code object on installation.

@ -401,5 +401,20 @@
['arch == ppc64', {
# issue 2857
'test-log/EquivalenceOfLoggingAndTraversal' : [SKIP],
# test makes an assumption that does not hold for larger PPC page sizes
'test-heap/FirstPageFitsStartup' : [SKIP],
}], # 'arch == ppc64'
##############################################################################
['arch == ppc and simulator_run == True or arch == ppc64 and simulator_run == True', {
# Pass but take too long with the simulator.
'test-api/Threading1': [SKIP],
'test-api/Threading2': [SKIP],
'test-api/ExternalArrays': [SKIP],
# issues to be investigated
'test-run-machops/RunWord64EqualInBranchP': [SKIP],
}], # 'arch == ppc and simulator_run == True or arch == ppc64 and simulator_run == True'
]

@ -588,4 +588,15 @@
# Deopt every n garbage collections collides with deopt every n times.
'regress/regress-2653': [SKIP],
}], # 'deopt_fuzzer == True'
##############################################################################
['arch == ppc and simulator_run == True or arch == ppc64 and simulator_run == True', {
# Pass but take too long with the simulator.
'regress/regress-1132': [SKIP],
'asm/embenchen/box2d': [SKIP],
# issues to be investigated
'es6/collections': [SKIP],
}], # 'arch == ppc and simulator_run == True or arch == ppc64 and simulator_run == True'
]