Merge from experimental code generator branch to bleeding edge.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@1389 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: kmillikin@chromium.org
Date: 2009-02-27 13:00:32 +00:00
Parent: d77a0468cb
Commit: f4735247cf
48 changed files with 11045 additions and 3202 deletions

View File

@ -39,25 +39,32 @@ SOURCES = {
'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
'disassembler.cc', 'execution.cc', 'factory.cc', 'flags.cc', 'frames.cc',
'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc', 'ic.cc',
'interpreter-irregexp.cc', 'jsregexp.cc', 'log.cc', 'mark-compact.cc',
'messages.cc', 'objects.cc', 'parser.cc', 'property.cc',
'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
'regexp-stack.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
'disassembler.cc', 'execution.cc', 'factory.cc', 'flags.cc',
'frames.cc', 'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc',
'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc', 'jump-target.cc',
'log.cc', 'mark-compact.cc', 'messages.cc', 'objects.cc', 'parser.cc',
'property.cc', 'regexp-macro-assembler.cc',
'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc', 'v8.cc',
'v8threads.cc', 'variables.cc', 'zone.cc'
'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
'v8.cc', 'v8threads.cc', 'variables.cc', 'virtual-frame.cc', 'zone.cc'
],
'arch:arm': [
'assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc', 'cpu-arm.cc',
'disasm-arm.cc', 'debug-arm.cc', 'frames-arm.cc', 'ic-arm.cc',
'jump-target-arm.cc', 'macro-assembler-arm.cc',
'regexp-macro-assembler-arm.cc', 'register-allocator-arm.cc',
'stub-cache-arm.cc', 'virtual-frame-arm.cc'
],
'arch:ia32': [
'assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc',
'cpu-ia32.cc', 'disasm-ia32.cc', 'debug-ia32.cc', 'frames-ia32.cc',
'ic-ia32.cc', 'jump-target-ia32.cc', 'macro-assembler-ia32.cc',
'regexp-macro-assembler-ia32.cc', 'register-allocator-ia32.cc',
'stub-cache-ia32.cc', 'virtual-frame-ia32.cc'
],
'arch:arm': ['assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc',
'cpu-arm.cc', 'debug-arm.cc', 'disasm-arm.cc', 'frames-arm.cc',
'ic-arm.cc', 'macro-assembler-arm.cc', 'regexp-macro-assembler-arm.cc',
'stub-cache-arm.cc'],
'arch:ia32': ['assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc',
'cpu-ia32.cc', 'debug-ia32.cc', 'disasm-ia32.cc', 'frames-ia32.cc',
'ic-ia32.cc', 'macro-assembler-ia32.cc', 'regexp-macro-assembler-ia32.cc',
'stub-cache-ia32.cc'],
'simulator:arm': ['simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc'],
'os:linux': ['platform-linux.cc'],

View File

@ -83,6 +83,8 @@ struct Register {
};
const int kNumRegisters = 16;
extern Register no_reg;
extern Register r0;
extern Register r1;
@ -211,6 +213,12 @@ inline Condition ReverseCondition(Condition cc) {
}
// Branch hints are not used on the ARM. They are defined so that they can
// appear in shared function signatures, but will be ignored in ARM
// implementations.
enum Hint { no_hint };
// The pc store offset may be 8 or 12 depending on the processor implementation.
int PcStoreOffset();
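
Background on the comment above: an ARM instruction that stores pc (str or stm) writes the instruction's own address plus 8 on most cores, but plus 12 on some older implementations; the offset is implementation-defined, which is why it is exposed through a function rather than hard-coded at each use.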

View File

@ -751,6 +751,18 @@ void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
}
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
if (src.is(eax) || dst.is(eax)) { // Single-byte encoding
EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
} else {
EMIT(0x87);
EMIT(0xC0 | src.code() << 3 | dst.code());
}
}
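
The single-byte form exists because x86 reserves opcodes 0x91 through 0x97 for "xchg eax, reg" (0x90, which would be "xchg eax, eax", doubles as nop). A standalone sketch of the same encoding decision, using plain integer register codes instead of V8's Register type; illustrative, not part of the commit:

    #include <cstdint>
    #include <vector>

    // Encode "xchg dst, src" for 32-bit x86; kEax is the accumulator's
    // register code. Mirrors the assembler logic above.
    std::vector<uint8_t> EncodeXchg(int dst, int src) {
      const int kEax = 0;
      std::vector<uint8_t> bytes;
      if (src == kEax || dst == kEax) {
        // One-byte form: 0x90 + code of the non-eax operand.
        bytes.push_back(0x90 | (src == kEax ? dst : src));
      } else {
        // Two-byte form: opcode 0x87 followed by a register-to-register
        // ModRM byte (mod = 11, reg = src, r/m = dst).
        bytes.push_back(0x87);
        bytes.push_back(0xC0 | (src << 3) | dst);
      }
      return bytes;
    }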
void Assembler::adc(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1847,6 +1859,16 @@ void Assembler::sahf() {
}
void Assembler::setcc(Condition cc, Register reg) {
ASSERT(reg.is_byte_register());
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
EMIT(0x90 | cc);
EMIT(0xC0 | reg.code());
}
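
Worked example: setcc(zero, eax) emits 0F 94 C0, since zero has condition code 4 (0x90 | 4 = 0x94, i.e. setz) and the ModRM byte 0xC0 selects register code 0. In byte context code 0 names al, not eax, which is what the new is_byte_register() assertion guards: only eax, ecx, edx and ebx (codes 0 to 3) have addressable low bytes here.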
void Assembler::cvttss2si(Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
EnsureSpace ensure_space(this);

View File

@ -63,6 +63,8 @@ namespace v8 { namespace internal {
struct Register {
bool is_valid() const { return 0 <= code_ && code_ < 8; }
bool is(Register reg) const { return code_ == reg.code_; }
// eax, ebx, ecx and edx are byte registers, the rest are not.
bool is_byte_register() const { return code_ <= 3; }
int code() const {
ASSERT(is_valid());
return code_;
@ -76,6 +78,8 @@ struct Register {
int code_;
};
const int kNumRegisters = 8;
extern Register eax;
extern Register ecx;
extern Register edx;
@ -174,6 +178,15 @@ enum Hint {
};
// The result of negating a hint is as if the corresponding condition
// were negated by NegateCondition. That is, no_hint is mapped to
// itself and not_taken and taken are mapped to each other.
inline Hint NegateHint(Hint hint) {
return (hint == no_hint)
? no_hint
: ((hint == not_taken) ? taken : not_taken);
}
// -----------------------------------------------------------------------------
// Machine instruction Immediates
@ -494,6 +507,9 @@ class Assembler : public Malloced {
void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, const Operand& src);
// Exchange two registers
void xchg(Register dst, Register src);
// Arithmetics
void adc(Register dst, int32_t imm32);
void adc(Register dst, const Operand& src);
@ -674,6 +690,7 @@ class Assembler : public Malloced {
void frndint();
void sahf();
void setcc(Condition cc, Register reg);
void cpuid();

View File

@ -48,7 +48,7 @@ namespace v8 { namespace internal {
// unknown pc location. Assembler::bind() is used to bind a label to the
// current pc. A label can be bound only once.
class Label : public ZoneObject { // LabelShadows are dynamically allocated.
class Label BASE_EMBEDDED {
public:
INLINE(Label()) { Unuse(); }
INLINE(~Label()) { ASSERT(!is_linked()); }
@ -84,58 +84,11 @@ class Label : public ZoneObject { // LabelShadows are dynamically allocated.
friend class Assembler;
friend class RegexpAssembler;
friend class Displacement;
friend class LabelShadow;
friend class ShadowTarget;
friend class RegExpMacroAssemblerIrregexp;
};
// A LabelShadow represents a label that is temporarily shadowed by another
// label (represented by the original label during shadowing). They are used
// to catch jumps to labels in certain contexts, e.g. try blocks. After
// shadowing ends, the formerly shadowed label is again represented by the
// original label and the LabelShadow can be used as a label in its own
// right, representing the formerly shadowing label.
class LabelShadow : public Label {
public:
explicit LabelShadow(Label* original) {
ASSERT(original != NULL);
original_label_ = original;
original_pos_ = original->pos_;
original->Unuse();
#ifdef DEBUG
is_shadowing_ = true;
#endif
}
~LabelShadow() {
ASSERT(!is_shadowing_);
}
void StopShadowing() {
ASSERT(is_shadowing_ && is_unused());
pos_ = original_label_->pos_;
original_label_->pos_ = original_pos_;
#ifdef DEBUG
is_shadowing_ = false;
#endif
}
Label* original_label() const { return original_label_; }
private:
// During shadowing, the currently shadowing label. After shadowing, the
// label that was shadowed.
Label* original_label_;
// During shadowing, the saved state of the original label.
int original_pos_;
#ifdef DEBUG
bool is_shadowing_;
#endif
};
// -----------------------------------------------------------------------------
// Relocation information

View File

@ -148,13 +148,13 @@ ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
}
void LabelCollector::AddLabel(Label* label) {
void TargetCollector::AddTarget(JumpTarget* target) {
// Add the label to the collector, but discard duplicates.
int length = labels_->length();
int length = targets_->length();
for (int i = 0; i < length; i++) {
if (labels_->at(i) == label) return;
if (targets_->at(i) == target) return;
}
labels_->Add(label);
targets_->Add(target);
}

View File

@ -35,6 +35,7 @@
#include "variables.h"
#include "macro-assembler.h"
#include "jsregexp.h"
#include "jump-target.h"
namespace v8 { namespace internal {
@ -92,6 +93,9 @@ namespace v8 { namespace internal {
V(ThisFunction)
// Forward declarations
class TargetCollector;
#define DEF_FORWARD_DECLARATION(type) class type;
NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
@ -118,7 +122,7 @@ class Node: public ZoneObject {
virtual VariableProxy* AsVariableProxy() { return NULL; }
virtual Property* AsProperty() { return NULL; }
virtual Call* AsCall() { return NULL; }
virtual LabelCollector* AsLabelCollector() { return NULL; }
virtual TargetCollector* AsTargetCollector() { return NULL; }
virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; }
virtual UnaryOperation* AsUnaryOperation() { return NULL; }
@ -192,7 +196,7 @@ class BreakableStatement: public Statement {
virtual BreakableStatement* AsBreakableStatement() { return this; }
// Code generation
Label* break_target() { return &break_target_; }
JumpTarget* break_target() { return &break_target_; }
// Used during code generation for restoring the stack when a
// break/continue crosses a statement that keeps stuff on the stack.
@ -211,7 +215,7 @@ class BreakableStatement: public Statement {
private:
ZoneStringList* labels_;
Type type_;
Label break_target_;
JumpTarget break_target_;
int break_stack_height_;
};
@ -268,7 +272,7 @@ class IterationStatement: public BreakableStatement {
Statement* body() const { return body_; }
// Code generation
Label* continue_target() { return &continue_target_; }
JumpTarget* continue_target() { return &continue_target_; }
protected:
explicit IterationStatement(ZoneStringList* labels)
@ -280,7 +284,7 @@ class IterationStatement: public BreakableStatement {
private:
Statement* body_;
Label continue_target_;
JumpTarget continue_target_;
};
@ -503,43 +507,45 @@ class IfStatement: public Statement {
};
// NOTE: LabelCollectors are represented as nodes to fit in the target
// NOTE: TargetCollectors are represented as nodes to fit in the target
// stack in the compiler; this should probably be reworked.
class LabelCollector: public Node {
class TargetCollector: public Node {
public:
explicit LabelCollector(ZoneList<Label*>* labels) : labels_(labels) { }
explicit TargetCollector(ZoneList<JumpTarget*>* targets)
: targets_(targets) {
}
// Adds a label to the collector. The collector stores a pointer not
// a copy of the label to make binding work, so make sure not to
// pass in references to something on the stack.
void AddLabel(Label* label);
// Adds a jump target to the collector. The collector stores a pointer not
// a copy of the target to make binding work, so make sure not to pass in
// references to something on the stack.
void AddTarget(JumpTarget* target);
// Virtual behaviour. LabelCollectors are never part of the AST.
// Virtual behaviour. TargetCollectors are never part of the AST.
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
virtual LabelCollector* AsLabelCollector() { return this; }
virtual TargetCollector* AsTargetCollector() { return this; }
ZoneList<Label*>* labels() { return labels_; }
ZoneList<JumpTarget*>* targets() { return targets_; }
private:
ZoneList<Label*>* labels_;
ZoneList<JumpTarget*>* targets_;
};
class TryStatement: public Statement {
public:
explicit TryStatement(Block* try_block)
: try_block_(try_block), escaping_labels_(NULL) { }
: try_block_(try_block), escaping_targets_(NULL) { }
void set_escaping_labels(ZoneList<Label*>* labels) {
escaping_labels_ = labels;
void set_escaping_targets(ZoneList<JumpTarget*>* targets) {
escaping_targets_ = targets;
}
Block* try_block() const { return try_block_; }
ZoneList<Label*>* escaping_labels() const { return escaping_labels_; }
ZoneList<JumpTarget*>* escaping_targets() const { return escaping_targets_; }
private:
Block* try_block_;
ZoneList<Label*>* escaping_labels_;
ZoneList<JumpTarget*>* escaping_targets_;
};

File diff suppressed because it is too large.

View File

@ -42,57 +42,6 @@ enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Virtual frame
class VirtualFrame BASE_EMBEDDED {
public:
explicit VirtualFrame(CodeGenerator* cgen);
void Enter();
void Exit();
void AllocateLocals();
MemOperand Top() const { return MemOperand(sp, 0); }
MemOperand Element(int index) const {
return MemOperand(sp, index * kPointerSize);
}
MemOperand Local(int index) const {
ASSERT(0 <= index && index < frame_local_count_);
return MemOperand(fp, kLocal0Offset - index * kPointerSize);
}
MemOperand Function() const { return MemOperand(fp, kFunctionOffset); }
MemOperand Context() const { return MemOperand(fp, kContextOffset); }
MemOperand Parameter(int index) const {
// Index -1 corresponds to the receiver.
ASSERT(-1 <= index && index <= parameter_count_);
return MemOperand(fp, (1 + parameter_count_ - index) * kPointerSize);
}
inline void Drop(int count);
inline void Pop();
inline void Pop(Register reg);
inline void Push(Register reg);
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
static const int kContextOffset = StandardFrameConstants::kContextOffset;
MacroAssembler* masm_;
int frame_local_count_;
int parameter_count_;
};
// -------------------------------------------------------------------------
// Reference support
@ -132,6 +81,11 @@ class Reference BASE_EMBEDDED {
// the expression stack, and it is left in place with its value above it.
void GetValue(TypeofState typeof_state);
// Generate code to push the value of a reference on top of the expression
// stack and then spill the stack frame. This function is used temporarily
// while the code generator is being transformed.
inline void GetValueAndSpill(TypeofState typeof_state);
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The stored value is left in place (with the
@ -164,22 +118,22 @@ class CodeGenState BASE_EMBEDDED {
// labels.
CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
Label* true_target,
Label* false_target);
JumpTarget* true_target,
JumpTarget* false_target);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
TypeofState typeof_state() const { return typeof_state_; }
Label* true_target() const { return true_target_; }
Label* false_target() const { return false_target_; }
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
private:
CodeGenerator* owner_;
TypeofState typeof_state_;
Label* true_target_;
Label* false_target_;
JumpTarget* true_target_;
JumpTarget* false_target_;
CodeGenState* previous_;
};
@ -213,11 +167,26 @@ class CodeGenerator: public AstVisitor {
VirtualFrame* frame() const { return frame_; }
bool has_valid_frame() const { return frame_ != NULL; }
// Set the virtual frame to be new_frame, with non-frame register
// reference counts given by non_frame_registers. The non-frame
// register reference counts of the old frame are returned in
// non_frame_registers.
void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
void DeleteFrame();
RegisterAllocator* allocator() const { return allocator_; }
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
@ -233,16 +202,44 @@ class CodeGenerator: public AstVisitor {
// State
bool has_cc() const { return cc_reg_ != al; }
TypeofState typeof_state() const { return state_->typeof_state(); }
Label* true_target() const { return state_->true_target(); }
Label* false_target() const { return state_->false_target(); }
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
#define DEF_VISIT(type) \
void Visit##type(type* node);
NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
// reach the end of the statement (ie, it does not exit via break,
// continue, return, or throw). This function is used temporarily while
// the code generator is being transformed.
void VisitAndSpill(Statement* statement) {
ASSERT(in_spilled_code());
set_in_spilled_code(false);
Visit(statement);
if (frame_ != NULL) {
frame_->SpillAll();
}
set_in_spilled_code(true);
}
// Visit a list of statements and then spill the virtual frame if control
// flow can reach the end of the list.
void VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
ASSERT(in_spilled_code());
set_in_spilled_code(false);
VisitStatements(statements);
if (frame_ != NULL) {
frame_->SpillAll();
}
set_in_spilled_code(true);
}
// Main code generation function
void GenCode(FunctionLiteral* fun);
@ -259,7 +256,7 @@ class CodeGenerator: public AstVisitor {
MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
Register tmp,
Register tmp2,
Label* slow);
JumpTarget* slow);
// Expressions
MemOperand GlobalObject() const {
@ -268,20 +265,50 @@ class CodeGenerator: public AstVisitor {
void LoadCondition(Expression* x,
TypeofState typeof_state,
Label* true_target,
Label* false_target,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc);
void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
void LoadGlobal();
void LoadGlobalReceiver(Register scratch);
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
void LoadAndSpill(Expression* expression,
TypeofState typeof_state = NOT_INSIDE_TYPEOF) {
ASSERT(in_spilled_code());
set_in_spilled_code(false);
Load(expression, typeof_state);
frame_->SpillAll();
set_in_spilled_code(true);
}
// Call LoadCondition and then spill the virtual frame unless control flow
// cannot reach the end of the expression (ie, by emitting only
// unconditional jumps to the control targets).
void LoadConditionAndSpill(Expression* expression,
TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_control) {
ASSERT(in_spilled_code());
set_in_spilled_code(false);
LoadCondition(expression, typeof_state, true_target, false_target,
force_control);
if (frame_ != NULL) {
frame_->SpillAll();
}
set_in_spilled_code(true);
}
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
Register tmp,
Register tmp2,
Label* slow);
JumpTarget* slow);
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
@ -291,7 +318,7 @@ class CodeGenerator: public AstVisitor {
// through the context chain.
void LoadTypeofExpression(Expression* x);
void ToBoolean(Label* true_target, Label* false_target);
void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
void GenericBinaryOperation(Token::Value op);
void Comparison(Condition cc, bool strict = false);
@ -301,7 +328,7 @@ class CodeGenerator: public AstVisitor {
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
// Control flow
void Branch(bool if_true, Label* L);
void Branch(bool if_true, JumpTarget* target);
void CheckStack();
void CleanStack(int num_bytes);
@ -371,14 +398,15 @@ class CodeGenerator: public AstVisitor {
void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
int min_index,
int range,
Label* fail_label,
Label* default_label,
Vector<Label*> case_targets,
Vector<Label> case_labels);
// Generate the code for cases for the fast case switch.
// Called by GenerateFastCaseSwitchJumpTable.
void GenerateFastCaseSwitchCases(SwitchStatement* node,
Vector<Label> case_labels);
Vector<Label> case_labels,
VirtualFrame* start_frame);
// Fast support for constant-Smi switches.
void GenerateFastCaseSwitchStatement(SwitchStatement* node,
@ -395,10 +423,21 @@ class CodeGenerator: public AstVisitor {
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
void CodeForStatement(Node* node);
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Node* node);
void CodeForSourcePosition(int pos);
// Is the given jump target the actual (ie, non-shadowed) function return
// target?
bool IsActualFunctionReturn(JumpTarget* target);
#ifdef DEBUG
// True if the registers are valid for entry to a block.
bool HasValidEntryRegisters();
#endif
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
List<DeferredCode*> deferred_;
@ -408,20 +447,42 @@ class CodeGenerator: public AstVisitor {
// Code generation state
Scope* scope_;
VirtualFrame* frame_;
RegisterAllocator* allocator_;
Condition cc_reg_;
CodeGenState* state_;
bool is_inside_try_;
int break_stack_height_;
// Labels
Label function_return_;
// Jump targets
JumpTarget function_return_;
// True if the function return is shadowed (ie, jumping to the target
// function_return_ does not jump to the true function return, but rather
// to some unlinking code).
bool function_return_is_shadowed_;
// True when we are in code that expects the virtual frame to be fully
// spilled. Some virtual frame functions are disabled in DEBUG builds when
// called from spilled code, because they do not leave the virtual frame
// in a spilled state.
bool in_spilled_code_;
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
void Reference::GetValueAndSpill(TypeofState typeof_state) {
ASSERT(cgen_->in_spilled_code());
cgen_->set_in_spilled_code(false);
GetValue(typeof_state);
cgen_->frame()->SpillAll();
cgen_->set_in_spilled_code(true);
}
} } // namespace v8::internal
#endif // V8_CODEGEN_ARM_H_

File diff suppressed because it is too large.

View File

@ -29,6 +29,7 @@
#define V8_CODEGEN_IA32_H_
#include "scopes.h"
#include "register-allocator.h"
namespace v8 { namespace internal {
@ -42,61 +43,6 @@ enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Virtual frame
class VirtualFrame BASE_EMBEDDED {
public:
explicit VirtualFrame(CodeGenerator* cgen);
void Enter();
void Exit();
void AllocateLocals();
Operand Top() const { return Operand(esp, 0); }
Operand Element(int index) const {
return Operand(esp, index * kPointerSize);
}
Operand Local(int index) const {
ASSERT(0 <= index && index < frame_local_count_);
return Operand(ebp, kLocal0Offset - index * kPointerSize);
}
Operand Function() const { return Operand(ebp, kFunctionOffset); }
Operand Context() const { return Operand(ebp, kContextOffset); }
Operand Parameter(int index) const {
ASSERT(-1 <= index && index < parameter_count_);
return Operand(ebp, (1 + parameter_count_ - index) * kPointerSize);
}
Operand Receiver() const { return Parameter(-1); }
inline void Drop(int count);
inline void Pop();
inline void Pop(Register reg);
inline void Pop(Operand operand);
inline void Push(Register reg);
inline void Push(Operand operand);
inline void Push(Immediate immediate);
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
static const int kContextOffset = StandardFrameConstants::kContextOffset;
MacroAssembler* masm_;
int frame_local_count_;
int parameter_count_;
};
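
For orientation, the addressing arithmetic this removed class encoded (the class moves to the new virtual-frame files), as a standalone sketch with concrete numbers, assuming kPointerSize == 4 and a function with two parameters; illustrative only, not part of the commit:

    // ebp-relative offset of a parameter; index -1 is the receiver.
    // The receiver and parameters sit above the return address and the
    // caller's saved ebp, so parameter i of n is at
    // ebp + (1 + n - i) * kPointerSize.
    int ParameterOffset(int index, int parameter_count) {
      return (1 + parameter_count - index) * 4;  // kPointerSize == 4
    }
    // ParameterOffset(-1, 2) == 16  (receiver)
    // ParameterOffset(0, 2)  == 12  (first parameter)
    // ParameterOffset(1, 2)  ==  8  (second parameter)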
// -------------------------------------------------------------------------
// Reference support
@ -136,6 +82,16 @@ class Reference BASE_EMBEDDED {
// the expression stack, and it is left in place with its value above it.
void GetValue(TypeofState typeof_state);
// Generate code to push the value of a reference on top of the expression
// stack and then spill the stack frame. This function is used temporarily
// while the code generator is being transformed.
inline void GetValueAndSpill(TypeofState typeof_state);
// Like GetValue except that the slot is expected to be written to before
// being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue(TypeofState typeof_state);
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The stored value is left in place (with the
@ -149,13 +105,142 @@ class Reference BASE_EMBEDDED {
};
// -------------------------------------------------------------------------
// Control destinations.
// A control destination encapsulates a pair of jump targets and a
// flag indicating which one is the preferred fall-through. The
// preferred fall-through must be unbound, the other may be already
// bound (ie, a backward target).
//
// The true and false targets may be jumped to unconditionally or
// control may split conditionally. Unconditional jumping and
// splitting should be emitted in tail position (as the last thing
// when compiling an expression) because they can cause either label
// to be bound or the non-fall-through to be jumped to, leaving an
// invalid virtual frame.
//
// The labels in the control destination can be extracted and
// manipulated normally without affecting the state of the
// destination.
class ControlDestination BASE_EMBEDDED {
public:
ControlDestination(JumpTarget* true_target,
JumpTarget* false_target,
bool true_is_fall_through)
: true_target_(true_target),
false_target_(false_target),
true_is_fall_through_(true_is_fall_through),
is_used_(false) {
ASSERT(true_is_fall_through ? !true_target->is_bound()
: !false_target->is_bound());
}
// Accessors for the jump targets. Directly jumping or branching to
// or binding the targets will not update the destination's state.
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
// True if the destination has been jumped to unconditionally or
// control has been split to both targets. This predicate does not
// test whether the targets have been extracted and manipulated as
// raw jump targets.
bool is_used() const { return is_used_; }
// True if the destination is used and the true target (respectively
// false target) was the fall through. If the target is backward,
// "fall through" included jumping unconditionally to it.
bool true_was_fall_through() const {
return is_used_ && true_is_fall_through_;
}
bool false_was_fall_through() const {
return is_used_ && !true_is_fall_through_;
}
// Emit a branch to one of the true or false targets, and bind the
// other target. Because this binds the fall-through target, it
// should be emitted in tail position (as the last thing when
// compiling an expression).
void Split(Condition cc) {
ASSERT(!is_used_);
if (true_is_fall_through_) {
false_target_->Branch(NegateCondition(cc));
true_target_->Bind();
} else {
true_target_->Branch(cc);
false_target_->Bind();
}
is_used_ = true;
}
// Emit an unconditional jump in tail position, to the true target
// (if the argument is true) or the false target. The "jump" will
// actually bind the jump target if it is forward, jump to it if it
// is backward.
void Goto(bool where) {
ASSERT(!is_used_);
JumpTarget* target = where ? true_target_ : false_target_;
if (target->is_bound()) {
target->Jump();
} else {
target->Bind();
}
is_used_ = true;
true_is_fall_through_ = where;
}
// Mark this jump target as used as if Goto had been called, but
// without generating a jump or binding a label (the control effect
// should have already happened). This is used when the left
// subexpression of a short-circuit boolean operator is
// compiled.
void Use(bool where) {
ASSERT(!is_used_);
ASSERT((where ? true_target_ : false_target_)->is_bound());
is_used_ = true;
true_is_fall_through_ = where;
}
// Swap the true and false targets but keep the same actual label as
// the fall through. This is used when compiling negated
// expressions, where we want to swap the targets but preserve the
// state.
void Invert() {
JumpTarget* temp_target = true_target_;
true_target_ = false_target_;
false_target_ = temp_target;
true_is_fall_through_ = !true_is_fall_through_;
}
private:
// True and false jump targets.
JumpTarget* true_target_;
JumpTarget* false_target_;
// Before using the destination: true if the true target is the
// preferred fall through, false if the false target is. After
// using the destination: true if the true target was actually used
// as the fall through, false if the false target was.
bool true_is_fall_through_;
// True if the Split or Goto functions have been called.
bool is_used_;
};
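
A hedged usage sketch of the protocol: when compiling a comparison, the generator emits the compare and then calls destination->Split(cc) in tail position; when compiling a logical negation, it calls Invert() and compiles the operand into the same destination. The toy model below (JumpTarget reduced to a named flag, code emission replaced by printf; entirely illustrative, not V8 code) exercises that bookkeeping:

    #include <cstdio>

    // Stand-in for JumpTarget: just a name and a bound flag.
    struct ToyTarget {
      const char* name;
      bool bound;
      void Bind() { bound = true; std::printf("bind %s\n", name); }
      void Branch(const char* cc) { std::printf("j%s %s\n", cc, name); }
    };

    class ToyDestination {
     public:
      ToyDestination(ToyTarget* t, ToyTarget* f, bool true_is_fall_through)
          : true_(t), false_(f),
            true_is_fall_through_(true_is_fall_through) {}

      // Branch to the non-fall-through target, bind the fall-through.
      void Split(const char* cc, const char* negated_cc) {
        if (true_is_fall_through_) {
          false_->Branch(negated_cc);
          true_->Bind();
        } else {
          true_->Branch(cc);
          false_->Bind();
        }
      }

      // Swap targets but keep the same label as the fall-through, as
      // when compiling "!expr".
      void Invert() {
        ToyTarget* tmp = true_;
        true_ = false_;
        false_ = tmp;
        true_is_fall_through_ = !true_is_fall_through_;
      }

     private:
      ToyTarget* true_;
      ToyTarget* false_;
      bool true_is_fall_through_;
    };

    int main() {
      ToyTarget t = {"L_true", false};
      ToyTarget f = {"L_false", false};
      ToyDestination dest(&t, &f, /*true_is_fall_through=*/true);
      dest.Invert();          // now the false target is the fall-through
      dest.Split("l", "ge");  // emits "jl L_false", then "bind L_true"
    }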
// -------------------------------------------------------------------------
// Code generation state
// The state is passed down the AST by the code generator (and back up, in
// the form of the state of the label pair). It is threaded through the
// call stack. Constructing a state implicitly pushes it on the owning code
// generator's stack of states, and destroying one implicitly pops it.
// the form of the state of the jump target pair). It is threaded through
// the call stack. Constructing a state implicitly pushes it on the owning
// code generator's stack of states, and destroying one implicitly pops it.
//
// The code generator state is only used for expressions, so statements have
// the initial state.
class CodeGenState BASE_EMBEDDED {
public:
@ -164,26 +249,34 @@ class CodeGenState BASE_EMBEDDED {
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
// state. The new state has its own access type and pair of branch
// labels, and no reference.
// state. The new state may or may not be inside a typeof, and has its
// own control destination.
CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
Label* true_target,
Label* false_target);
ControlDestination* destination);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
// Accessors for the state.
TypeofState typeof_state() const { return typeof_state_; }
Label* true_target() const { return true_target_; }
Label* false_target() const { return false_target_; }
ControlDestination* destination() const { return destination_; }
private:
// The owning code generator.
CodeGenerator* owner_;
// A flag indicating whether we are compiling the immediate subexpression
// of a typeof expression.
TypeofState typeof_state_;
Label* true_target_;
Label* false_target_;
// A control destination in case the expression has a control-flow
// effect.
ControlDestination* destination_;
// The previous state of the owning code generator, restored when
// this state is destroyed.
CodeGenState* previous_;
};
@ -219,11 +312,26 @@ class CodeGenerator: public AstVisitor {
VirtualFrame* frame() const { return frame_; }
bool has_valid_frame() const { return frame_ != NULL; }
// Set the virtual frame to be new_frame, with non-frame register
// reference counts given by non_frame_registers. The non-frame
// register reference counts of the old frame are returned in
// non_frame_registers.
void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
void DeleteFrame();
RegisterAllocator* allocator() const { return allocator_; }
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
@ -237,10 +345,8 @@ class CodeGenerator: public AstVisitor {
bool is_eval() { return is_eval_; }
// State
bool has_cc() const { return cc_reg_ >= 0; }
TypeofState typeof_state() const { return state_->typeof_state(); }
Label* true_target() const { return state_->true_target(); }
Label* false_target() const { return state_->false_target(); }
ControlDestination* destination() const { return state_->destination(); }
// Track loop nesting level.
int loop_nesting() const { return loop_nesting_; }
@ -249,14 +355,48 @@ class CodeGenerator: public AstVisitor {
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
#define DEF_VISIT(type) \
void Visit##type(type* node);
NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
// reach the end of the statement (ie, it does not exit via break,
// continue, return, or throw). This function is used temporarily while
// the code generator is being transformed.
void VisitAndSpill(Statement* statement) {
ASSERT(in_spilled_code());
set_in_spilled_code(false);
Visit(statement);
if (frame_ != NULL) {
frame_->SpillAll();
}
set_in_spilled_code(true);
}
// Visit a list of statements and then spill the virtual frame if control
// flow can reach the end of the list.
void VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
ASSERT(in_spilled_code());
set_in_spilled_code(false);
VisitStatements(statements);
if (frame_ != NULL) {
frame_->SpillAll();
}
set_in_spilled_code(true);
}
// Main code generation function
void GenCode(FunctionLiteral* fun);
// Generate the return sequence code. Should be called no more than once
// per compiled function (it binds the return target, which can not be
// done more than once). The return value is assumed to be in eax by the
// code generated.
void GenerateReturnSequence();
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
@ -268,8 +408,8 @@ class CodeGenerator: public AstVisitor {
Operand SlotOperand(Slot* slot, Register tmp);
Operand ContextSlotOperandCheckExtensions(Slot* slot,
Register tmp,
Label* slow);
Result tmp,
JumpTarget* slow);
// Expressions
Operand GlobalObject() const {
@ -278,19 +418,49 @@ class CodeGenerator: public AstVisitor {
void LoadCondition(Expression* x,
TypeofState typeof_state,
Label* true_target,
Label* false_target,
bool force_cc);
ControlDestination* destination,
bool force_control);
void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
void LoadGlobal();
void LoadGlobalReceiver(Register scratch);
void LoadGlobalReceiver();
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
void LoadAndSpill(Expression* expression,
TypeofState typeof_state = NOT_INSIDE_TYPEOF) {
ASSERT(in_spilled_code());
set_in_spilled_code(false);
Load(expression, typeof_state);
frame_->SpillAll();
set_in_spilled_code(true);
}
// Call LoadCondition and then spill the virtual frame unless control flow
// cannot reach the end of the expression (ie, by emitting only
// unconditional jumps to the control targets).
void LoadConditionAndSpill(Expression* expression,
TypeofState typeof_state,
ControlDestination* destination,
bool force_control) {
ASSERT(in_spilled_code());
set_in_spilled_code(false);
LoadCondition(expression, typeof_state, destination, force_control);
if (frame_ != NULL) {
frame_->SpillAll();
}
set_in_spilled_code(true);
}
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
Register tmp,
Label* slow);
Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);
// Store the value on top of the expression stack into a slot, leaving the
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
@ -300,19 +470,28 @@ class CodeGenerator: public AstVisitor {
// through the context chain.
void LoadTypeofExpression(Expression* x);
void ToBoolean(Label* true_target, Label* false_target);
// Translate the value on top of the frame into control flow to the
// control destination.
void ToBoolean(ControlDestination* destination);
void GenericBinaryOperation(Token::Value op,
StaticType* type,
const OverwriteMode overwrite_mode = NO_OVERWRITE);
void Comparison(Condition cc, bool strict = false);
void Comparison(Condition cc,
bool strict,
ControlDestination* destination);
// Inline small integer literals. To prevent long attacker-controlled byte
// sequences, we only inline small Smis.
// To prevent long attacker-controlled byte sequences, integer constants
// from the JavaScript source are loaded in two parts if they are larger
// than 16 bits.
static const int kMaxSmiInlinedBits = 16;
bool IsUnsafeSmi(Handle<Object> value);
// Load an integer constant x into a register target using
// at most 16 bits of user-controlled data per assembly operation.
void LoadUnsafeSmi(Register target, Handle<Object> value);
bool IsInlineSmi(Literal* literal);
void SmiComparison(Condition cc, Handle<Object> value, bool strict = false);
void SmiOperation(Token::Value op,
StaticType* type,
Handle<Object> value,
@ -321,8 +500,6 @@ class CodeGenerator: public AstVisitor {
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
// Control flow
void Branch(bool if_true, Label* L);
void CheckStack();
void CleanStack(int num_bytes);
@ -400,7 +577,8 @@ class CodeGenerator: public AstVisitor {
// Generate the code for cases for the fast case switch.
// Called by GenerateFastCaseSwitchJumpTable.
void GenerateFastCaseSwitchCases(SwitchStatement* node,
Vector<Label> case_labels);
Vector<Label> case_labels,
VirtualFrame* start_frame);
// Fast support for constant-Smi switches.
void GenerateFastCaseSwitchStatement(SwitchStatement* node,
@ -416,10 +594,18 @@ class CodeGenerator: public AstVisitor {
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
void CodeForStatement(Node* node);
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Node* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
// True if the registers are valid for entry to a block. There should be
// no frame-external references to eax, ebx, ecx, edx, or edi.
bool HasValidEntryRegisters();
#endif
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
List<DeferredCode*> deferred_;
@ -429,22 +615,44 @@ class CodeGenerator: public AstVisitor {
// Code generation state
Scope* scope_;
VirtualFrame* frame_;
Condition cc_reg_;
RegisterAllocator* allocator_;
CodeGenState* state_;
bool is_inside_try_;
int break_stack_height_;
int loop_nesting_;
// Labels
Label function_return_;
// Jump targets.
// The target of the return from the function.
JumpTarget function_return_;
// True if the function return is shadowed (ie, jumping to the target
// function_return_ does not jump to the true function return, but rather
// to some unlinking code).
bool function_return_is_shadowed_;
// True when we are in code that expects the virtual frame to be fully
// spilled. Some virtual frame functions are disabled in DEBUG builds when
// called from spilled code, because they do not leave the virtual frame
// in a spilled state.
bool in_spilled_code_;
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
friend class Result;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
void Reference::GetValueAndSpill(TypeofState typeof_state) {
ASSERT(cgen_->in_spilled_code());
cgen_->set_in_spilled_code(false);
GetValue(typeof_state);
cgen_->frame()->SpillAll();
cgen_->set_in_spilled_code(true);
}
} } // namespace v8::internal
#endif // V8_CODEGEN_IA32_H_

View File

@ -38,8 +38,10 @@
namespace v8 { namespace internal {
DeferredCode::DeferredCode(CodeGenerator* generator)
: masm_(generator->masm()),
generator_(generator),
: generator_(generator),
masm_(generator->masm()),
enter_(generator),
exit_(generator, JumpTarget::BIDIRECTIONAL),
statement_position_(masm_->current_statement_position()),
position_(masm_->current_position()) {
generator->AddDeferred(this);
@ -60,13 +62,39 @@ void CodeGenerator::ProcessDeferred() {
if (code->position() != RelocInfo::kNoPosition) {
masm->RecordPosition(code->position());
}
// Bind labels and generate the code.
masm->bind(code->enter());
// Generate the code.
Comment cmnt(masm, code->comment());
code->Generate();
if (code->exit()->is_bound()) {
masm->jmp(code->exit()); // platform independent?
}
ASSERT(code->enter()->is_bound());
}
}
void CodeGenerator::SetFrame(VirtualFrame* new_frame,
RegisterFile* non_frame_registers) {
RegisterFile saved_counts;
if (has_valid_frame()) {
frame_->DetachFromCodeGenerator();
// The remaining register reference counts are the non-frame ones.
allocator_->SaveTo(&saved_counts);
}
if (new_frame != NULL) {
// Restore the non-frame register references that go with the new frame.
allocator_->RestoreFrom(non_frame_registers);
new_frame->AttachToCodeGenerator();
}
frame_ = new_frame;
saved_counts.CopyTo(non_frame_registers);
}
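
A hedged walk-through of the reference-count bookkeeping above, with an assumed starting state (not from the diff): suppose eax is referenced twice, once by the outgoing frame and once by a live non-frame result. DetachFromCodeGenerator() drops the frame's own reference, leaving count 1; SaveTo(&saved_counts) records that remaining non-frame count and clears the allocator; RestoreFrom(non_frame_registers) installs the non-frame counts that accompany new_frame; AttachToCodeGenerator() re-adds the new frame's own references; and the final CopyTo(non_frame_registers) hands the old frame's non-frame counts back to the caller.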
void CodeGenerator::DeleteFrame() {
if (has_valid_frame()) {
frame_->DetachFromCodeGenerator();
delete frame_;
frame_ = NULL;
}
}
@ -122,9 +150,6 @@ Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* flit,
return Handle<Code>::null();
}
// Process any deferred code.
cgen.ProcessDeferred();
// Allocate and install the code.
CodeDesc desc;
cgen.masm()->GetCode(&desc);
@ -386,14 +411,14 @@ void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node,
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
// Label pointer per number in range
// Label pointer per number in range.
SmartPointer<Label*> case_targets(NewArray<Label*>(range));
// Label per switch case
// Label per switch case.
SmartPointer<Label> case_labels(NewArray<Label>(length));
Label* fail_label = default_index >= 0 ? &(case_labels[default_index])
: node->break_target();
Label* fail_label =
default_index >= 0 ? &(case_labels[default_index]) : NULL;
// Populate array of label pointers for each number in the range.
// Initially put the failure label everywhere.
@ -404,7 +429,7 @@ void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node,
// Overwrite with label of a case for the number value of that case.
// (In reverse order, so that if the same label occurs twice, the
// first one wins).
for (int i = length-1; i >= 0 ; i--) {
for (int i = length - 1; i >= 0 ; i--) {
CaseClause* clause = cases->at(i);
if (!clause->is_default()) {
Object* label_value = *(clause->label()->AsLiteral()->handle());
@ -424,21 +449,36 @@ void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node,
void CodeGenerator::GenerateFastCaseSwitchCases(
SwitchStatement* node,
Vector<Label> case_labels) {
Vector<Label> case_labels,
VirtualFrame* start_frame) {
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
for (int i = 0; i < length; i++) {
Comment cmnt(masm(), "[ Case clause");
masm()->bind(&(case_labels[i]));
// We may not have a virtual frame if control flow did not fall
// off the end of the previous case. In that case, use the start
// frame. Otherwise, we have to merge the existing one to the
// start frame as part of the previous case.
if (!has_valid_frame()) {
RegisterFile non_frame_registers = RegisterAllocator::Reserved();
SetFrame(new VirtualFrame(start_frame), &non_frame_registers);
} else {
frame_->MergeTo(start_frame);
}
masm()->bind(&case_labels[i]);
VisitStatements(cases->at(i)->statements());
}
masm()->bind(node->break_target());
}
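
The jump table these functions build dispatches in constant time: the emitted code subtracts min_index from the (already Smi-checked) switch value, does one unsigned bounds check against the range, and jumps through a table of case labels. A plain-C++ sketch of that dispatch shape, hypothetical and for orientation only:

    // table has `range` entries; out-of-range values go to fail, which
    // stands for the default case or the break target.
    void Dispatch(int value, int min_index, unsigned range,
                  void (*const table[])(), void (*fail)()) {
      unsigned index = static_cast<unsigned>(value - min_index);
      if (index >= range) {
        fail();
      } else {
        table[index]();  // computed jump through the label table
      }
    }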
bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
// TODO(238): Due to issue 238, fast case switches can crash on ARM
// and possibly IA32. They are disabled for now.
// See http://code.google.com/p/v8/issues/detail?id=238
return false;
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
@ -454,9 +494,10 @@ bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
CaseClause* clause = cases->at(i);
if (clause->is_default()) {
if (default_index >= 0) {
return false; // More than one default label:
// Defer to normal case for error.
}
// There is more than one default label. Defer to the normal case
// for error.
return false;
}
default_index = i;
} else {
Expression* label = clause->label();
@ -468,9 +509,9 @@ bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
if (!value->IsSmi()) {
return false;
}
int smi = Smi::cast(value)->value();
if (smi < min_index) { min_index = smi; }
if (smi > max_index) { max_index = smi; }
int int_value = Smi::cast(value)->value();
min_index = Min(int_value, min_index);
max_index = Max(int_value, max_index);
}
}
@ -486,7 +527,18 @@ bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
}
void CodeGenerator::CodeForStatement(Node* node) {
void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
if (FLAG_debug_info) {
int pos = fun->start_position();
if (pos != RelocInfo::kNoPosition) {
masm()->RecordStatementPosition(pos);
masm()->RecordPosition(pos);
}
}
}
void CodeGenerator::CodeForStatementPosition(Node* node) {
if (FLAG_debug_info) {
int pos = node->statement_pos();
if (pos != RelocInfo::kNoPosition) {

View File

@ -37,8 +37,15 @@
// of Visitor and that the following methods are available publicly:
// CodeGenerator::MakeCode
// CodeGenerator::SetFunctionInfo
// CodeGenerator::AddDeferred
// CodeGenerator::masm
// CodeGenerator::frame
// CodeGenerator::has_valid_frame
// CodeGenerator::SetFrame
// CodeGenerator::DeleteFrame
// CodeGenerator::allocator
// CodeGenerator::AddDeferred
// CodeGenerator::in_spilled_code
// CodeGenerator::set_in_spilled_code
//
// These methods are either used privately by the shared code or implemented as
// shared code:
@ -88,8 +95,12 @@ class DeferredCode: public ZoneObject {
MacroAssembler* masm() const { return masm_; }
CodeGenerator* generator() const { return generator_; }
Label* enter() { return &enter_; }
Label* exit() { return &exit_; }
JumpTarget* enter() { return &enter_; }
void BindExit() { exit_.Bind(0); }
void BindExit(Result* result) { exit_.Bind(result, 1); }
void BindExit(Result* result0, Result* result1, Result* result2) {
exit_.Bind(result0, result1, result2, 3);
}
int statement_position() const { return statement_position_; }
int position() const { return position_; }
@ -103,15 +114,12 @@ class DeferredCode: public ZoneObject {
#endif
protected:
// The masm_ field is manipulated when compiling stubs with the
// BEGIN_STUB and END_STUB macros. For that reason, it cannot be
// constant.
MacroAssembler* masm_;
CodeGenerator* const generator_;
MacroAssembler* const masm_;
JumpTarget enter_;
JumpTarget exit_;
private:
CodeGenerator* const generator_;
Label enter_;
Label exit_;
int statement_position_;
int position_;
#ifdef DEBUG

View File

@ -829,6 +829,12 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // ARM does not have the concept of a byte register
return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // ARM does not have any XMM registers
return "noxmmreg";

View File

@ -65,6 +65,7 @@ static ByteMnemonic two_operands_instr[] = {
{0x85, "test", REG_OPER_OP_ORDER},
{0x31, "xor", OPER_REG_OP_ORDER},
{0x33, "xor", REG_OPER_OP_ORDER},
{0x87, "xchg", REG_OPER_OP_ORDER},
{0x8A, "mov_b", REG_OPER_OP_ORDER},
{0x8B, "mov", REG_OPER_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
@ -115,6 +116,14 @@ static const char* jump_conditional_mnem[] = {
};
static const char* set_conditional_mnem[] = {
/*0*/ "seto", "setno", "setc", "setnc",
/*4*/ "setz", "setnz", "setna", "seta",
/*8*/ "sets", "setns", "setpe", "setpo",
/*12*/ "setl", "setnl", "setng", "setg"
};
enum InstructionType {
NO_INSTR,
ZERO_OPERANDS_INSTR,
@ -177,6 +186,7 @@ void InstructionTable::Init() {
SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,"); // 0x90 is nop.
SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
}
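
With this table entry, the single byte 0x93 now disassembles as "xchg eax,ebx" (0x90 plus ebx's register code 3), matching the one-byte encoding the assembler change earlier in this commit starts emitting.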
@ -259,6 +269,11 @@ class DisassemblerIA32 {
}
const char* NameOfByteCPURegister(int reg) const {
return converter_.NameOfByteCPURegister(reg);
}
const char* NameOfXMMRegister(int reg) const {
return converter_.NameOfXMMRegister(reg);
}
@ -283,8 +298,11 @@ class DisassemblerIA32 {
*base = data & 7;
}
typedef const char* (DisassemblerIA32::*RegisterNameMapping)(int reg) const;
int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
int PrintRightOperand(byte* modrmp);
int PrintRightByteOperand(byte* modrmp);
int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
int PrintImmediateOp(byte* data);
int F7Instruction(byte* data);
@ -292,6 +310,7 @@ class DisassemblerIA32 {
int JumpShort(byte* data);
int JumpConditional(byte* data, const char* comment);
int JumpConditionalShort(byte* data, const char* comment);
int SetCC(byte* data);
int FPUInstruction(byte* data);
void AppendToBuffer(const char* format, ...);
@ -315,10 +334,9 @@ void DisassemblerIA32::AppendToBuffer(const char* format, ...) {
tmp_buffer_pos_ += result;
}
// Returns number of bytes used including the current *modrmp.
// Writes instruction's right operand to 'tmp_buffer_'.
int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
int DisassemblerIA32::PrintRightOperandHelper(
byte* modrmp,
RegisterNameMapping register_name) {
int mod, regop, rm;
get_modrm(*modrmp, &mod, &regop, &rm);
switch (mod) {
@ -332,20 +350,20 @@ int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
int scale, index, base;
get_sib(sib, &scale, &index, &base);
if (index == esp && base == esp && scale == 0 /*times_1*/) {
AppendToBuffer("[%s]", NameOfCPURegister(rm));
AppendToBuffer("[%s]", (this->*register_name)(rm));
return 2;
} else if (base == ebp) {
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
AppendToBuffer("[%s*%d+0x%x]",
NameOfCPURegister(index),
(this->*register_name)(index),
1 << scale,
disp);
return 6;
} else if (index != esp && base != ebp) {
// [base+index*scale]
AppendToBuffer("[%s+%s*%d]",
NameOfCPURegister(base),
NameOfCPURegister(index),
(this->*register_name)(base),
(this->*register_name)(index),
1 << scale);
return 2;
} else {
@ -353,7 +371,7 @@ int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
return 1;
}
} else {
AppendToBuffer("[%s]", NameOfCPURegister(rm));
AppendToBuffer("[%s]", (this->*register_name)(rm));
return 1;
}
break;
@ -366,11 +384,11 @@ int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
int disp =
mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
} else {
AppendToBuffer("[%s+%s*%d+0x%x]",
NameOfCPURegister(base),
NameOfCPURegister(index),
(this->*register_name)(base),
(this->*register_name)(index),
1 << scale,
disp);
}
@ -379,12 +397,12 @@ int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
// No sib.
int disp =
mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
return mod == 2 ? 5 : 2;
}
break;
case 3:
AppendToBuffer("%s", NameOfCPURegister(rm));
AppendToBuffer("%s", (this->*register_name)(rm));
return 1;
default:
UnimplementedInstruction();
@ -394,6 +412,17 @@ int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
}
int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
return PrintRightOperandHelper(modrmp, &DisassemblerIA32::NameOfCPURegister);
}
int DisassemblerIA32::PrintRightByteOperand(byte* modrmp) {
return PrintRightOperandHelper(modrmp,
&DisassemblerIA32::NameOfByteCPURegister);
}
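
PrintRightOperandHelper is parameterized over a pointer-to-member function so that one body can print either 32-bit or byte register names via the (this->*register_name)(reg) call sites above. A minimal self-contained illustration of that C++ idiom (not V8 code):

    #include <cstdio>

    class Names {
     public:
      typedef const char* (Names::*Mapping)(int) const;
      const char* Wide(int reg) const { return wide_[reg & 7]; }
      const char* Byte(int reg) const { return byte_[reg & 7]; }
      // Dispatch through whichever member function the caller chose.
      void Print(int reg, Mapping name) const {
        std::printf("%s\n", (this->*name)(reg));
      }

     private:
      static const char* const wide_[8];
      static const char* const byte_[8];
    };

    const char* const Names::wide_[8] = {
      "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
    };
    const char* const Names::byte_[8] = {
      "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
    };

    int main() {
      Names names;
      names.Print(0, &Names::Wide);  // prints "eax"
      names.Print(0, &Names::Byte);  // prints "al"
    }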
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
int DisassemblerIA32::PrintOperands(const char* mnem,
@ -574,6 +603,17 @@ int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
}
// Returns number of bytes used, including *data.
int DisassemblerIA32::SetCC(byte* data) {
assert(*data == 0x0F);
byte cond = *(data+1) & 0x0F;
const char* mnem = set_conditional_mnem[cond];
AppendToBuffer("%s ", mnem);
PrintRightByteOperand(data+2);
return 3; // includes 0x0F
}
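
Worked example: the byte sequence 0F 94 C0 enters SetCC with cond = 0x94 & 0x0F = 4, which indexes "setz" in set_conditional_mnem above; PrintRightByteOperand then renders the ModRM byte 0xC0 as al, so the instruction prints as "setz al" and consumes three bytes.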
// Returns number of bytes used, including *data.
int DisassemblerIA32::FPUInstruction(byte* data) {
byte b1 = *data;
@ -819,6 +859,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
f0byte == 0xB7 || f0byte == 0xAF) {
data += 2;
data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
} else if ((f0byte & 0xF0) == 0x90) {
data += SetCC(data);
} else {
data += 2;
if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
@ -1054,12 +1096,17 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
static const char* cpu_regs[8] = {
"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
};
static const char* byte_cpu_regs[8] = {
"al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
};
static const char* xmm_regs[8] = {
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
};
@ -1081,6 +1128,12 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
return "noreg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
if (0 <= reg && reg < 8) return xmm_regs[reg];
return "noxmmreg";

View File

@ -39,6 +39,7 @@ class NameConverter {
public:
virtual ~NameConverter() {}
virtual const char* NameOfCPURegister(int reg) const;
virtual const char* NameOfByteCPURegister(int reg) const;
virtual const char* NameOfXMMRegister(int reg) const;
virtual const char* NameOfAddress(byte* addr) const;
virtual const char* NameOfConstant(byte* addr) const;

View File

@ -99,7 +99,7 @@ static void DumpBuffer(FILE* f, char* buff) {
}
}
static const int kOutBufferSize = 256 + String::kMaxShortPrintLength;
static const int kOutBufferSize = 1024 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
static int DecodeIt(FILE* f,

View File

@ -170,7 +170,6 @@ class IterationStatement;
class JSArray;
class JSFunction;
class JSObject;
class LabelCollector;
class LargeObjectSpace;
template <typename T, class P = FreeStoreAllocationPolicy> class List;
class LookupResult;

View File

@ -739,15 +739,15 @@ void KeyedLoadIC::PatchInlinedMapCheck(Address address, Object* value) {
// The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction.
if (*test_instruction_address == kTestEaxByte) {
// Fetch the offset from the call instruction to the map cmp
// Fetch the offset from the test instruction to the map cmp
// instruction. This offset is stored in the last 4 bytes of the
// 5 byte test instruction.
Address offset_address = test_instruction_address + 1;
int offset_value = *(reinterpret_cast<int*>(offset_address));
// Compute the map address. The operand-immediate compare
// instruction is two bytes larger than a call instruction so we
// add 2 to get to the map address.
Address map_address = address + offset_value + 2;
// Compute the map address. The map address is in the last 4
// bytes of the 7-byte operand-immediate compare instruction, so
// we add 3 to the offset to get the map address.
Address map_address = test_instruction_address + offset_value + 3;
// Patch the map check.
(*(reinterpret_cast<Object**>(map_address))) = value;
}
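// Byte-layout sketch (illustrative; the exact instruction encoding is
// an assumption):
//   test eax, <offset>    ; 1 opcode byte + 4-byte offset = 5 bytes
//   ...
//   cmp <operand>, <map>  ; 7 bytes: opcode, ModR/M, disp8, 4 map bytes
// so test_instruction_address + offset_value + 3 lands exactly on the
// 4 bytes holding the map word (7 - 4 = 3).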

src/jump-target-arm.cc (new file)

@ -0,0 +1,258 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "jump-target.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
#define __ masm_->
void JumpTarget::Jump() {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
// Live non-frame registers are not allowed at unconditional jumps
// because we have no way of invalidating the corresponding results
// which are still live in the C++ code.
ASSERT(cgen_->HasValidEntryRegisters());
if (is_bound()) {
// Backward jump. There is an expected frame to merge to.
ASSERT(direction_ == BIDIRECTIONAL);
cgen_->frame()->MergeTo(entry_frame_);
cgen_->DeleteFrame();
__ jmp(&entry_label_);
} else {
// Forward jump. The current frame is added to the end of the list
// of frames reaching the target block and a jump to the merge code
// is emitted.
AddReachingFrame(cgen_->frame());
RegisterFile empty;
cgen_->SetFrame(NULL, &empty);
__ jmp(&merge_labels_.last());
}
is_linked_ = !is_bound_;
}
void JumpTarget::Branch(Condition cc, Hint ignored) {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
if (is_bound()) {
// Backward branch. We have an expected frame to merge to on the
// backward edge. We negate the condition and emit the merge code
// here.
//
// TODO(210): we should try to avoid negating the condition in the
// case where there is no merge code to emit. Otherwise, we emit
// a branch around an unconditional jump.
ASSERT(direction_ == BIDIRECTIONAL);
Label original_fall_through;
__ b(NegateCondition(cc), &original_fall_through);
// Swap the current frame for a copy of it, saving non-frame
// register reference counts and invalidating all non-frame register
// references except the reserved ones on the backward edge.
VirtualFrame* original_frame = cgen_->frame();
VirtualFrame* working_frame = new VirtualFrame(original_frame);
RegisterFile non_frame_registers = RegisterAllocator::Reserved();
cgen_->SetFrame(working_frame, &non_frame_registers);
working_frame->MergeTo(entry_frame_);
cgen_->DeleteFrame();
__ jmp(&entry_label_);
// Restore the frame and its associated non-frame registers.
cgen_->SetFrame(original_frame, &non_frame_registers);
__ bind(&original_fall_through);
} else {
// Forward branch. A copy of the current frame is added to the end
// of the list of frames reaching the target block and a branch to
// the merge code is emitted.
AddReachingFrame(new VirtualFrame(cgen_->frame()));
__ b(cc, &merge_labels_.last());
}
is_linked_ = !is_bound_;
}
void JumpTarget::Call() {
// Call is used to push the address of the catch block on the stack as
// a return address when compiling try/catch and try/finally. We
// fully spill the frame before making the call. The expected frame
// at the label (which should be the only one) is the spilled current
// frame plus an in-memory return address. The "fall-through" frame
// at the return site is the spilled current frame.
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
// There are no non-frame references across the call.
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_linked());
cgen_->frame()->SpillAll();
VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
target_frame->Adjust(1);
AddReachingFrame(target_frame);
__ bl(&merge_labels_.last());
is_linked_ = !is_bound_;
}
void JumpTarget::Bind(int mergable_elements) {
ASSERT(cgen_ != NULL);
ASSERT(!is_bound());
// Live non-frame registers are not allowed at the start of a basic
// block.
ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
// Compute the frame to use for entry to the block.
ComputeEntryFrame(mergable_elements);
if (is_linked()) {
// There were forward jumps. Handle merging the reaching frames
// and possible fall through to the entry frame.
// Some moves required to merge to an expected frame require
// purely frame state changes, and do not require any code
// generation. Perform those first to increase the possibility of
// finding equal frames below.
if (cgen_->has_valid_frame()) {
cgen_->frame()->PrepareMergeTo(entry_frame_);
}
for (int i = 0; i < reaching_frames_.length(); i++) {
reaching_frames_[i]->PrepareMergeTo(entry_frame_);
}
// If there is a fall through to the jump target and it needs
// merge code, process it first.
if (cgen_->has_valid_frame() && !cgen_->frame()->Equals(entry_frame_)) {
// Loop over all the reaching frames, looking for any that can
// share merge code with this one.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (cgen_->frame()->Equals(reaching_frames_[i])) {
// Set the reaching frames element to null to avoid
// processing it later, and then bind its entry label.
delete reaching_frames_[i];
reaching_frames_[i] = NULL;
__ bind(&merge_labels_[i]);
}
}
// Emit the merge code.
cgen_->frame()->MergeTo(entry_frame_);
}
// Loop over the (non-null) reaching frames and process any that
// need merge code.
for (int i = 0; i < reaching_frames_.length(); i++) {
VirtualFrame* frame = reaching_frames_[i];
if (frame != NULL && !frame->Equals(entry_frame_)) {
// Set the reaching frames element to null to avoid processing
// it later. Do not delete it as it is needed for merging.
reaching_frames_[i] = NULL;
// If the code generator has a current frame (a fall-through
// or a previously merged frame), insert a jump around the
// merge code we are about to generate.
if (cgen_->has_valid_frame()) {
cgen_->DeleteFrame();
__ jmp(&entry_label_);
}
// Set the frame to merge as the code generator's current
// frame and bind its merge label.
RegisterFile reserved_registers = RegisterAllocator::Reserved();
cgen_->SetFrame(frame, &reserved_registers);
__ bind(&merge_labels_[i]);
// Loop over the remaining (non-null) reaching frames, looking
// for any that can share merge code with this one.
for (int j = i + 1; j < reaching_frames_.length(); j++) {
VirtualFrame* other = reaching_frames_[j];
if (other != NULL && frame->Equals(other)) {
delete other;
reaching_frames_[j] = NULL;
__ bind(&merge_labels_[j]);
}
}
// Emit the merge code.
cgen_->frame()->MergeTo(entry_frame_);
}
}
// The code generator may not have a current frame if there was no
// fall through and none of the reaching frames needed merging.
// In that case, clone the entry frame as the current frame.
if (!cgen_->has_valid_frame()) {
RegisterFile reserved_registers = RegisterAllocator::Reserved();
cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
}
// There is certainly a current frame equal to the entry frame.
// Bind the entry frame label.
__ bind(&entry_label_);
// There may be unprocessed reaching frames that did not need
// merge code. Bind their merge labels to be the same as the
// entry label.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (reaching_frames_[i] != NULL) {
delete reaching_frames_[i];
__ bind(&merge_labels_[i]);
}
}
// All the reaching frames except the one that is the current
// frame (if it is one of the reaching frames) have been deleted.
reaching_frames_.Clear();
merge_labels_.Clear();
} else {
// There were no forward jumps. The current frame is merged to
// the entry frame.
cgen_->frame()->MergeTo(entry_frame_);
__ bind(&entry_label_);
}
is_linked_ = false;
is_bound_ = true;
}
#undef __
} } // namespace v8::internal

src/jump-target-ia32.cc (new file)

@ -0,0 +1,258 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "jump-target.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
#define __ masm_->
void JumpTarget::Jump() {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
// Live non-frame registers are not allowed at unconditional jumps
// because we have no way of invalidating the corresponding results
// which are still live in the C++ code.
ASSERT(cgen_->HasValidEntryRegisters());
if (is_bound()) {
// Backward jump. There is an expected frame to merge to.
ASSERT(direction_ == BIDIRECTIONAL);
cgen_->frame()->MergeTo(entry_frame_);
cgen_->DeleteFrame();
__ jmp(&entry_label_);
} else {
// Forward jump. The current frame is added to the end of the list
// of frames reaching the target block and a jump to the merge code
// is emitted.
AddReachingFrame(cgen_->frame());
RegisterFile empty;
cgen_->SetFrame(NULL, &empty);
__ jmp(&merge_labels_.last());
}
is_linked_ = !is_bound_;
}
void JumpTarget::Branch(Condition cc, Hint hint) {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
if (is_bound()) {
// Backward branch. We have an expected frame to merge to on the
// backward edge. We negate the condition and emit the merge code
// here.
//
// TODO(210): we should try to avoid negating the condition in the
// case where there is no merge code to emit. Otherwise, we emit
// a branch around an unconditional jump.
ASSERT(direction_ == BIDIRECTIONAL);
Label original_fall_through;
__ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
// Swap the current frame for a copy of it, saving non-frame
// register reference counts and invalidating all non-frame register
// references except the reserved ones on the backward edge.
VirtualFrame* original_frame = cgen_->frame();
VirtualFrame* working_frame = new VirtualFrame(original_frame);
RegisterFile non_frame_registers = RegisterAllocator::Reserved();
cgen_->SetFrame(working_frame, &non_frame_registers);
working_frame->MergeTo(entry_frame_);
cgen_->DeleteFrame();
__ jmp(&entry_label_);
// Restore the frame and its associated non-frame registers.
cgen_->SetFrame(original_frame, &non_frame_registers);
__ bind(&original_fall_through);
} else {
// Forward branch. A copy of the current frame is added to the end
// of the list of frames reaching the target block and a branch to
// the merge code is emitted.
AddReachingFrame(new VirtualFrame(cgen_->frame()));
__ j(cc, &merge_labels_.last(), hint);
}
is_linked_ = !is_bound_;
}
void JumpTarget::Call() {
// Call is used to push the address of the catch block on the stack as
// a return address when compiling try/catch and try/finally. We
// fully spill the frame before making the call. The expected frame
// at the label (which should be the only one) is the spilled current
// frame plus an in-memory return address. The "fall-through" frame
// at the return site is the spilled current frame.
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
// There are no non-frame references across the call.
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_linked());
cgen_->frame()->SpillAll();
VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
target_frame->Adjust(1);
AddReachingFrame(target_frame);
__ call(&merge_labels_.last());
is_linked_ = !is_bound_;
}
void JumpTarget::Bind(int mergable_elements) {
ASSERT(cgen_ != NULL);
ASSERT(!is_bound());
// Live non-frame registers are not allowed at the start of a basic
// block.
ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
// Compute the frame to use for entry to the block.
ComputeEntryFrame(mergable_elements);
if (is_linked()) {
// There were forward jumps. Handle merging the reaching frames
// and possible fall through to the entry frame.
// Some moves required to merge to an expected frame require
// purely frame state changes, and do not require any code
// generation. Perform those first to increase the possibility of
// finding equal frames below.
if (cgen_->has_valid_frame()) {
cgen_->frame()->PrepareMergeTo(entry_frame_);
}
for (int i = 0; i < reaching_frames_.length(); i++) {
reaching_frames_[i]->PrepareMergeTo(entry_frame_);
}
// If there is a fall through to the jump target and it needs
// merge code, process it first.
if (cgen_->has_valid_frame() && !cgen_->frame()->Equals(entry_frame_)) {
// Loop over all the reaching frames, looking for any that can
// share merge code with this one.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (cgen_->frame()->Equals(reaching_frames_[i])) {
// Set the reaching frames element to null to avoid
// processing it later, and then bind its entry label.
delete reaching_frames_[i];
reaching_frames_[i] = NULL;
__ bind(&merge_labels_[i]);
}
}
// Emit the merge code.
cgen_->frame()->MergeTo(entry_frame_);
}
// Loop over the (non-null) reaching frames and process any that
// need merge code.
for (int i = 0; i < reaching_frames_.length(); i++) {
VirtualFrame* frame = reaching_frames_[i];
if (frame != NULL && !frame->Equals(entry_frame_)) {
// Set the reaching frames element to null to avoid processing
// it later. Do not delete it as it is needed for merging.
reaching_frames_[i] = NULL;
// If the code generator has a current frame (a fall-through
// or a previously merged frame), insert a jump around the
// merge code we are about to generate.
if (cgen_->has_valid_frame()) {
cgen_->DeleteFrame();
__ jmp(&entry_label_);
}
// Set the frame to merge as the code generator's current
// frame and bind its merge label.
RegisterFile reserved_registers = RegisterAllocator::Reserved();
cgen_->SetFrame(frame, &reserved_registers);
__ bind(&merge_labels_[i]);
// Loop over the remaining (non-null) reaching frames, looking
// for any that can share merge code with this one.
for (int j = i + 1; j < reaching_frames_.length(); j++) {
VirtualFrame* other = reaching_frames_[j];
if (other != NULL && frame->Equals(other)) {
delete other;
reaching_frames_[j] = NULL;
__ bind(&merge_labels_[j]);
}
}
// Emit the merge code.
cgen_->frame()->MergeTo(entry_frame_);
}
}
// The code generator may not have a current frame if there was no
// fall through and none of the reaching frames needed merging.
// In that case, clone the entry frame as the current frame.
if (!cgen_->has_valid_frame()) {
RegisterFile reserved_registers = RegisterAllocator::Reserved();
cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
}
// There is certainly a current frame equal to the entry frame.
// Bind the entry frame label.
__ bind(&entry_label_);
// There may be unprocessed reaching frames that did not need
// merge code. Bind their merge labels to be the same as the
// entry label.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (reaching_frames_[i] != NULL) {
delete reaching_frames_[i];
__ bind(&merge_labels_[i]);
}
}
// All the reaching frames except the one that is the current
// frame (if it is one of the reaching frames) have been deleted.
reaching_frames_.Clear();
merge_labels_.Clear();
} else {
// There were no forward jumps. The current frame is merged to
// the entry frame.
cgen_->frame()->MergeTo(entry_frame_);
__ bind(&entry_label_);
}
is_linked_ = false;
is_bound_ = true;
}
#undef __
} } // namespace v8::internal

src/jump-target.cc (new file)

@ -0,0 +1,589 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "jump-target.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
JumpTarget::JumpTarget(CodeGenerator* cgen, Directionality direction)
: cgen_(cgen),
direction_(direction),
reaching_frames_(0),
merge_labels_(0),
entry_frame_(NULL),
is_bound_(false),
is_linked_(false) {
ASSERT(cgen_ != NULL);
masm_ = cgen_->masm();
}
JumpTarget::JumpTarget()
: cgen_(NULL),
masm_(NULL),
direction_(FORWARD_ONLY),
reaching_frames_(0),
merge_labels_(0),
entry_frame_(NULL),
is_bound_(false),
is_linked_(false) {
}
void JumpTarget::Initialize(CodeGenerator* cgen, Directionality direction) {
ASSERT(cgen != NULL);
ASSERT(cgen_ == NULL);
cgen_ = cgen;
masm_ = cgen->masm();
direction_ = direction;
}
void JumpTarget::Unuse() {
ASSERT(!is_linked());
entry_label_.Unuse();
delete entry_frame_;
entry_frame_ = NULL;
is_bound_ = false;
is_linked_ = false;
}
void JumpTarget::Reset() {
reaching_frames_.Clear();
merge_labels_.Clear();
entry_frame_ = NULL;
entry_label_.Unuse();
is_bound_ = false;
is_linked_ = false;
}
FrameElement* JumpTarget::Combine(FrameElement* left, FrameElement* right) {
// Given a pair of non-null frame element pointers, return one of
// them as an entry frame candidate or null if they are
// incompatible.
// If either is invalid, so is the result.
if (!left->is_valid()) return left;
if (!right->is_valid()) return right;
// If they have the same value, the result is the same. (Exception:
// bidirectional frames cannot have constants or copies.) If either
// is unsynced, so is the result.
if (left->is_memory() && right->is_memory()) return left;
if (left->is_register() && right->is_register() &&
left->reg().is(right->reg())) {
if (!left->is_synced()) {
return left;
} else {
return right;
}
}
if (direction_ == FORWARD_ONLY &&
left->is_constant() &&
right->is_constant() &&
left->handle().is_identical_to(right->handle())) {
if (!left->is_synced()) {
return left;
} else {
return right;
}
}
if (direction_ == FORWARD_ONLY &&
left->is_copy() &&
right->is_copy() &&
left->index() == right->index()) {
if (!left->is_synced()) {
return left;
} else {
return right;
}
}
// Otherwise they are incompatible and we will reallocate them.
return NULL;
}
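// For example (illustrative): two memory elements combine to the left
// one; two elements in the same register combine to whichever is
// unsynced; a register element paired with a memory element matches
// none of the cases above, so Combine returns NULL and the slot is
// reallocated when the entry frame is built.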
void JumpTarget::ComputeEntryFrame(int mergable_elements) {
// Given: a collection of frames reaching the block by forward CFG
// edges (including the code generator's current frame) and the
// directionality of the block. Compute: an entry frame for the
// block.
// Choose an initial frame, either the code generator's current
// frame if there is one, or the first reaching frame if not.
VirtualFrame* initial_frame = cgen_->frame();
int start_index = 0; // Begin iteration with the 1st reaching frame.
if (initial_frame == NULL) {
initial_frame = reaching_frames_[0];
start_index = 1; // Begin iteration with the 2nd reaching frame.
}
// A list of pointers to frame elements in the entry frame. NULL
// indicates that the element has not yet been determined.
int length = initial_frame->elements_.length();
List<FrameElement*> elements(length);
// Convert the number of mergable elements (counted from the top
// down) to a frame high-water mark (counted from the bottom up).
// Elements strictly above the high-water index will be mergable in
// entry frames for bidirectional jump targets.
int high_water_mark = (mergable_elements == kAllElements)
? VirtualFrame::kIllegalIndex // All frame indices are above this.
: length - mergable_elements - 1; // Top index if m_e == 0.
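// Worked example (illustrative): with length == 10 and
// mergable_elements == 3, high_water_mark == 10 - 3 - 1 == 6, so
// exactly the top three indices 7..9 lie strictly above the mark and
// must be mergable in a bidirectional entry frame.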
// Initially populate the list of elements based on the initial
// frame.
for (int i = 0; i < length; i++) {
FrameElement element = initial_frame->elements_[i];
// We do not allow copies or constants in bidirectional frames.
if (direction_ == BIDIRECTIONAL &&
i > high_water_mark &&
(element.is_constant() || element.is_copy())) {
elements.Add(NULL);
} else {
elements.Add(&initial_frame->elements_[i]);
}
}
// Compute elements based on the other reaching frames.
if (start_index < reaching_frames_.length()) {
for (int i = 0; i < length; i++) {
for (int j = start_index; j < reaching_frames_.length(); j++) {
FrameElement* element = elements[i];
// Element computation is monotonic: new information will not
// change our decision about undetermined or invalid elements.
if (element == NULL || !element->is_valid()) break;
elements[i] = Combine(element, &reaching_frames_[j]->elements_[i]);
}
}
}
// Compute the registers already reserved by values in the frame.
// Count the reserved registers to avoid using them.
RegisterFile frame_registers = RegisterAllocator::Reserved();
for (int i = 0; i < length; i++) {
FrameElement* element = elements[i];
if (element != NULL && element->is_register()) {
frame_registers.Use(element->reg());
}
}
// Build the new frame. The frame already has memory elements for
// the parameters (including the receiver) and the return address.
// We will fill it up with memory elements.
entry_frame_ = new VirtualFrame(cgen_);
while (entry_frame_->elements_.length() < length) {
entry_frame_->elements_.Add(FrameElement::MemoryElement());
}
// Copy the already-determined frame elements to the entry frame,
// and allocate any still-undetermined frame elements to registers
// or memory, from the top down.
for (int i = length - 1; i >= 0; i--) {
if (elements[i] == NULL) {
// If the value is synced on all frames, put it in memory. This
// costs nothing at the merge code but will incur a
// memory-to-register move when the value is needed later.
bool is_synced = initial_frame->elements_[i].is_synced();
int j = start_index;
while (is_synced && j < reaching_frames_.length()) {
is_synced = reaching_frames_[j]->elements_[i].is_synced();
j++;
}
// There is nothing to be done if the element is synced in all
// frames; it is already recorded as a memory element.
if (is_synced) continue;
// Choose an available register. Prefer ones that the element
// is already occupying on some reaching frame.
RegisterFile candidate_registers;
int max_count = kMinInt;
int best_reg_code = no_reg.code_;
// Consider the initial frame.
FrameElement element = initial_frame->elements_[i];
if (element.is_register() &&
!frame_registers.is_used(element.reg())) {
candidate_registers.Use(element.reg());
max_count = 1;
best_reg_code = element.reg().code();
}
// Consider the other frames.
for (int j = start_index; j < reaching_frames_.length(); j++) {
element = reaching_frames_[j]->elements_[i];
if (element.is_register() &&
!frame_registers.is_used(element.reg())) {
candidate_registers.Use(element.reg());
if (candidate_registers.count(element.reg()) > max_count) {
max_count = candidate_registers.count(element.reg());
best_reg_code = element.reg().code();
}
}
}
// If there was no preferred choice consider any free register.
if (best_reg_code == no_reg.code_) {
for (int j = 0; j < kNumRegisters; j++) {
if (!frame_registers.is_used(j)) {
best_reg_code = j;
break;
}
}
}
// If there was a register choice, use it. If not, do nothing;
// the element is already recorded as in memory.
if (best_reg_code != no_reg.code_) {
Register reg = { best_reg_code };
frame_registers.Use(reg);
entry_frame_->elements_[i] =
FrameElement::RegisterElement(reg,
FrameElement::NOT_SYNCED);
}
} else {
// The element is already determined.
entry_frame_->elements_[i] = *elements[i];
}
}
// Fill in the other fields of the entry frame.
entry_frame_->local_count_ = initial_frame->local_count_;
entry_frame_->frame_pointer_ = initial_frame->frame_pointer_;
// The stack pointer ends up at the highest synced element, or just
// below the expression stack base if no expression element is synced.
int stack_pointer = length - 1;
while (stack_pointer >= entry_frame_->expression_base_index() &&
!entry_frame_->elements_[stack_pointer].is_synced()) {
stack_pointer--;
}
entry_frame_->stack_pointer_ = stack_pointer;
// Unuse the reserved registers---they do not actually appear in
// the entry frame.
RegisterAllocator::UnuseReserved(&frame_registers);
entry_frame_->frame_registers_ = frame_registers;
}
void JumpTarget::Jump(Result* arg) {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
cgen_->frame()->Push(arg);
Jump();
}
void JumpTarget::Jump(Result* arg0, Result* arg1) {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
cgen_->frame()->Push(arg0);
cgen_->frame()->Push(arg1);
Jump();
}
void JumpTarget::Jump(Result* arg0, Result* arg1, Result* arg2) {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
cgen_->frame()->Push(arg0);
cgen_->frame()->Push(arg1);
cgen_->frame()->Push(arg2);
Jump();
}
#ifdef DEBUG
#define DECLARE_ARGCHECK_VARS(name) \
Result::Type name##_type = name->type(); \
Register name##_reg = name->is_register() ? name->reg() : no_reg
#define ASSERT_ARGCHECK(name) \
ASSERT(name->type() == name##_type); \
ASSERT(!name->is_register() || name->reg().is(name##_reg))
#else
#define DECLARE_ARGCHECK_VARS(name) do {} while (false)
#define ASSERT_ARGCHECK(name) do {} while (false)
#endif
void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
DECLARE_ARGCHECK_VARS(arg);
cgen_->frame()->Push(arg);
Branch(cc, hint);
*arg = cgen_->frame()->Pop();
ASSERT_ARGCHECK(arg);
}
void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->frame() != NULL);
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
DECLARE_ARGCHECK_VARS(arg0);
DECLARE_ARGCHECK_VARS(arg1);
cgen_->frame()->Push(arg0);
cgen_->frame()->Push(arg1);
Branch(cc, hint);
*arg1 = cgen_->frame()->Pop();
*arg0 = cgen_->frame()->Pop();
ASSERT_ARGCHECK(arg0);
ASSERT_ARGCHECK(arg1);
}
void JumpTarget::Branch(Condition cc,
Result* arg0,
Result* arg1,
Result* arg2,
Hint hint) {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->frame() != NULL);
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
DECLARE_ARGCHECK_VARS(arg0);
DECLARE_ARGCHECK_VARS(arg1);
DECLARE_ARGCHECK_VARS(arg2);
cgen_->frame()->Push(arg0);
cgen_->frame()->Push(arg1);
cgen_->frame()->Push(arg2);
Branch(cc, hint);
*arg2 = cgen_->frame()->Pop();
*arg1 = cgen_->frame()->Pop();
*arg0 = cgen_->frame()->Pop();
ASSERT_ARGCHECK(arg0);
ASSERT_ARGCHECK(arg1);
ASSERT_ARGCHECK(arg2);
}
void JumpTarget::Branch(Condition cc,
Result* arg0,
Result* arg1,
Result* arg2,
Result* arg3,
Hint hint) {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->frame() != NULL);
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
DECLARE_ARGCHECK_VARS(arg0);
DECLARE_ARGCHECK_VARS(arg1);
DECLARE_ARGCHECK_VARS(arg2);
DECLARE_ARGCHECK_VARS(arg3);
cgen_->frame()->Push(arg0);
cgen_->frame()->Push(arg1);
cgen_->frame()->Push(arg2);
cgen_->frame()->Push(arg3);
Branch(cc, hint);
*arg3 = cgen_->frame()->Pop();
*arg2 = cgen_->frame()->Pop();
*arg1 = cgen_->frame()->Pop();
*arg0 = cgen_->frame()->Pop();
ASSERT_ARGCHECK(arg0);
ASSERT_ARGCHECK(arg1);
ASSERT_ARGCHECK(arg2);
ASSERT_ARGCHECK(arg3);
}
#undef DECLARE_ARGCHECK_VARS
#undef ASSERT_ARGCHECK
void JumpTarget::Bind(Result* arg, int mergable_elements) {
ASSERT(cgen_ != NULL);
if (cgen_->has_valid_frame()) {
cgen_->frame()->Push(arg);
}
Bind(mergable_elements);
*arg = cgen_->frame()->Pop();
}
void JumpTarget::Bind(Result* arg0, Result* arg1, int mergable_elements) {
ASSERT(cgen_ != NULL);
if (cgen_->has_valid_frame()) {
cgen_->frame()->Push(arg0);
cgen_->frame()->Push(arg1);
}
Bind(mergable_elements);
*arg1 = cgen_->frame()->Pop();
*arg0 = cgen_->frame()->Pop();
}
void JumpTarget::Bind(Result* arg0,
Result* arg1,
Result* arg2,
int mergable_elements) {
ASSERT(cgen_ != NULL);
if (cgen_->has_valid_frame()) {
cgen_->frame()->Push(arg0);
cgen_->frame()->Push(arg1);
cgen_->frame()->Push(arg2);
}
Bind(mergable_elements);
*arg2 = cgen_->frame()->Pop();
*arg1 = cgen_->frame()->Pop();
*arg0 = cgen_->frame()->Pop();
}
void JumpTarget::Bind(Result* arg0,
Result* arg1,
Result* arg2,
Result* arg3,
int mergable_elements) {
ASSERT(cgen_ != NULL);
if (cgen_->has_valid_frame()) {
cgen_->frame()->Push(arg0);
cgen_->frame()->Push(arg1);
cgen_->frame()->Push(arg2);
cgen_->frame()->Push(arg3);
}
Bind(mergable_elements);
*arg3 = cgen_->frame()->Pop();
*arg2 = cgen_->frame()->Pop();
*arg1 = cgen_->frame()->Pop();
*arg0 = cgen_->frame()->Pop();
}
void JumpTarget::CopyTo(JumpTarget* destination) {
ASSERT(destination != NULL);
destination->cgen_ = cgen_;
destination->masm_ = masm_;
destination->direction_ = direction_;
destination->reaching_frames_.Clear();
destination->merge_labels_.Clear();
ASSERT(reaching_frames_.length() == merge_labels_.length());
for (int i = 0; i < reaching_frames_.length(); i++) {
destination->reaching_frames_.Add(reaching_frames_[i]);
destination->merge_labels_.Add(merge_labels_[i]);
}
destination->entry_frame_ = entry_frame_;
destination->entry_label_ = entry_label_;
destination->is_bound_ = is_bound_;
destination->is_linked_ = is_linked_;
}
void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
ASSERT(reaching_frames_.length() == merge_labels_.length());
Label fresh;
merge_labels_.Add(fresh);
reaching_frames_.Add(frame);
}
// -------------------------------------------------------------------------
// ShadowTarget implementation.
ShadowTarget::ShadowTarget(JumpTarget* shadowed) {
ASSERT(shadowed != NULL);
other_target_ = shadowed;
#ifdef DEBUG
is_shadowing_ = true;
#endif
// While shadowing, this shadow target saves the state of the original.
shadowed->CopyTo(this);
// Setting the code generator to null prevents the shadow target from
// being used until shadowing stops.
cgen_ = NULL;
masm_ = NULL;
// The original's state is reset. We do not Unuse it because that
// would delete the expected frame and assert that the target is not
// linked.
shadowed->Reset();
}
void ShadowTarget::StopShadowing() {
ASSERT(is_shadowing_);
// This target does not have a valid code generator yet.
cgen_ = other_target_->code_generator();
ASSERT(cgen_ != NULL);
masm_ = cgen_->masm();
// The states of this target, which was shadowed, and the original
// target, which was shadowing, are swapped.
JumpTarget temp;
other_target_->CopyTo(&temp);
CopyTo(other_target_);
temp.CopyTo(this);
temp.Reset(); // So the destructor does not deallocate virtual frames.
#ifdef DEBUG
is_shadowing_ = false;
#endif
}
} } // namespace v8::internal

src/jump-target.h (new file)

@ -0,0 +1,259 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_JUMP_TARGET_H_
#define V8_JUMP_TARGET_H_
#include "virtual-frame.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Jump targets
//
// A jump target is an abstraction of a basic-block entry in generated
// code. It collects all the virtual frames reaching the block by
// forward jumps and pairs them with labels for the merge code along
// all forward-reaching paths. When bound, an expected frame for the
// block is determined and code is generated to merge to the expected
// frame. For backward jumps, the merge code is generated at the edge
// leaving the predecessor block.
//
// A jump target must have been reached via control flow (either by
// jumping, branching, or falling through) at the time it is bound.
// In particular, this means that at least one of the control-flow
// graph edges reaching the target must be a forward edge.
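//
// Usage sketch (illustrative; assumes a live CodeGenerator* cgen with
// a valid frame at each operation):
//
//   JumpTarget exit(cgen);  // forward-only by default
//   ...                     // code computing a condition
//   exit.Branch(equal);     // forward branch; the frame falls through
//   ...                     // code on the fall-through path
//   exit.Bind();            // merge all reaching frames and continue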
class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
public:
// Forward-only jump targets can only be reached by forward CFG edges.
enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
// Construct a jump target with a given code generator used to generate
// code and to provide access to a current frame.
explicit JumpTarget(CodeGenerator* cgen,
Directionality direction = FORWARD_ONLY);
// Construct a jump target without a code generator. A code generator
// must be supplied before using the jump target as a label. This is
// useful, e.g., when jump targets are embedded in AST nodes.
JumpTarget();
virtual ~JumpTarget() { Unuse(); }
// Supply a code generator and directionality to an already
// constructed jump target. This function expects to be given a
// non-null code generator, and to be called only when the code
// generator is not yet set.
void Initialize(CodeGenerator* cgen,
Directionality direction = FORWARD_ONLY);
// Accessors.
CodeGenerator* code_generator() const { return cgen_; }
Label* entry_label() { return &entry_label_; }
VirtualFrame* entry_frame() const { return entry_frame_; }
void set_entry_frame(VirtualFrame* frame) {
entry_frame_ = frame;
}
// Predicates testing the state of the encapsulated label.
bool is_bound() const { return is_bound_; }
bool is_linked() const { return is_linked_; }
bool is_unused() const { return !is_bound() && !is_linked(); }
// Treat the jump target as a fresh one. The expected frame, if any,
// will be deallocated, and there should be no dangling jumps to the
// target (thus no reaching frames).
void Unuse();
// Reset the internal state of this jump target. Pointed-to virtual
// frames are not deallocated and dangling jumps to the target are
// left dangling.
void Reset();
// Copy the state of this jump target to the destination. The lists
// of forward-reaching frames and merge-point labels are copied.
// All virtual frame pointers are copied, not the pointed-to frames.
// The previous state of the destination is overwritten, without
// deallocating pointed-to virtual frames.
void CopyTo(JumpTarget* destination);
// Emit a jump to the target. There must be a current frame at the
// jump and there will be no current frame after the jump.
void Jump();
void Jump(Result* arg);
void Jump(Result* arg0, Result* arg1);
void Jump(Result* arg0, Result* arg1, Result* arg2);
// Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the
// code after the branch.
void Branch(Condition cc, Hint hint = no_hint);
void Branch(Condition cc, Result* arg, Hint hint = no_hint);
void Branch(Condition cc, Result* arg0, Result* arg1, Hint hint = no_hint);
void Branch(Condition cc,
Result* arg0,
Result* arg1,
Result* arg2,
Hint hint = no_hint);
void Branch(Condition cc,
Result* arg0,
Result* arg1,
Result* arg2,
Result* arg3,
Hint hint = no_hint);
// Bind a jump target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward
// jump.
//
// The number of mergable elements is a number of frame elements
// counting from the top down which must be "mergable" (not
// constants or copies) in the entry frame at the jump target.
// Backward jumps to the target must contain the same constants and
// sharing as the entry frame, except for the mergable elements.
//
// A mergable elements argument of kAllElements indicates that all
// frame elements must be mergable. Mergable elements are ignored
// completely for forward-only jump targets.
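// For example (illustrative), Bind(1) requires only the element on
// top of the entry frame to be mergable, so backward jumps must match
// the entry frame's constants and sharing everywhere except in that
// top element.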
void Bind(int mergable_elements = kAllElements);
void Bind(Result* arg, int mergable_elements = kAllElements);
void Bind(Result* arg0, Result* arg1, int mergable_elements = kAllElements);
void Bind(Result* arg0,
Result* arg1,
Result* arg2,
int mergable_elements = kAllElements);
void Bind(Result* arg0,
Result* arg1,
Result* arg2,
Result* arg3,
int mergable_elements = kAllElements);
// Emit a call to a jump target. There must be a current frame at
// the call. The frame at the target is the same as the current
// frame except for an extra return address on top of it. The frame
// after the call is the same as the frame before the call.
void Call();
static const int kAllElements = -1; // Not a valid number of elements.
protected:
// The code generator gives access to its current frame.
CodeGenerator* cgen_;
// Used to emit code.
MacroAssembler* masm_;
private:
// Directionality flag set at initialization time.
Directionality direction_;
// A list of frames reaching this block via forward jumps.
List<VirtualFrame*> reaching_frames_;
// A parallel list of labels for merge code.
List<Label> merge_labels_;
// The frame used on entry to the block and expected at backward
// jumps to the block. Set when the jump target is bound, but may
// or may not be set for forward-only blocks.
VirtualFrame* entry_frame_;
// The actual entry label of the block.
Label entry_label_;
// A target is bound if its Bind member function has been called.
// It is linked if it is not bound but its Jump, Branch, or Call
// member functions have been called.
bool is_bound_;
bool is_linked_;
// Add a virtual frame reaching this labeled block via a forward
// jump, and a fresh label for its merge code.
void AddReachingFrame(VirtualFrame* frame);
// Choose an element from a pair of frame elements to be in the
// expected frame. Return null if they are incompatible.
FrameElement* Combine(FrameElement* left, FrameElement* right);
// Compute a frame to use for entry to this block. Mergable
// elements is as described for the Bind function.
void ComputeEntryFrame(int mergable_elements);
DISALLOW_COPY_AND_ASSIGN(JumpTarget);
};
// -------------------------------------------------------------------------
// Shadow jump targets
//
// Shadow jump targets represent a jump target that is temporarily shadowed
// by another one (represented by the original during shadowing). They are
// used to catch jumps to labels in certain contexts, e.g. try blocks.
// After shadowing ends, the formerly shadowed target is again represented
// by the original and the ShadowTarget can be used as a jump target in its
// own right, representing the formerly shadowing target.
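//
// Usage sketch (illustrative; 'node' is a hypothetical breakable AST
// node whose break target is to be intercepted):
//
//   ShadowTarget shadow(node->break_target());
//   ...                        // compile the body; breaks are caught
//   shadow.StopShadowing();
//   if (shadow.is_linked()) {
//     ...                      // compile the deferred break path
//   }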
class ShadowTarget : public JumpTarget {
public:
// Construct a shadow jump target. After construction the shadow
// target object holds the state of the original jump target, and
// the original target is actually a fresh one that intercepts jumps
// intended for the shadowed one.
explicit ShadowTarget(JumpTarget* shadowed);
virtual ~ShadowTarget() {
ASSERT(!is_shadowing_);
}
// End shadowing. After shadowing ends, the original jump target
// again gives access to the formerly shadowed target and the shadow
// target object gives access to the formerly shadowing target.
void StopShadowing();
// During shadowing, the currently shadowing target. After
// shadowing, the target that was shadowed.
JumpTarget* other_target() const { return other_target_; }
private:
// During shadowing, the currently shadowing target. After
// shadowing, the target that was shadowed.
JumpTarget* other_target_;
#ifdef DEBUG
bool is_shadowing_;
#endif
DISALLOW_COPY_AND_ASSIGN(ShadowTarget);
};
} } // namespace v8::internal
#endif // V8_JUMP_TARGET_H_


@ -57,10 +57,10 @@ class List {
ASSERT(0 <= i && i < length_);
return data_[i];
}
inline T& at(int i) const { return this->operator[](i); }
INLINE(const T& last() const) {
inline T& at(int i) const { return operator[](i); }
inline T& last() const {
ASSERT(!is_empty());
return this->at(length_ - 1);
return at(length_ - 1);
}
INLINE(bool is_empty() const) { return length_ == 0; }


@ -35,6 +35,9 @@
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
unresolved_(0),
@ -111,8 +114,7 @@ class RecordWriteStub : public CodeStub {
// scratch) OOOOAAAASSSS.
class ScratchBits: public BitField<uint32_t, 0, 4> {};
class AddressBits: public BitField<uint32_t, 4, 4> {};
class ObjectBits: public BitField<uint32_t, 8, 4> {
};
class ObjectBits: public BitField<uint32_t, 8, 4> {};
Major MajorKey() { return RecordWrite; }
@ -606,6 +608,19 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
JumpTarget* then_target) {
JumpTarget ok(cgen);
test(result, Operand(result));
ok.Branch(not_zero, taken);
test(op, Operand(op));
then_target->Branch(sign, not_taken);
ok.Bind();
}
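// Illustrative intent: when a multiply leaves 0 in the result
// register, the true result was -0 exactly when an operand was
// negative (e.g. -5 * 0 in JavaScript), so the sign test above routes
// that case to then_target, typically a slow path.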
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {


@ -32,8 +32,11 @@
namespace v8 { namespace internal {
// Forward declaration.
class JumpTarget;
// Helper type to make boolean flag easier to read at call-site.
// Helper types to make flags easier to read at call sites.
enum InvokeFlag {
CALL_FUNCTION,
JUMP_FUNCTION
@ -179,6 +182,12 @@ class MacroAssembler: public Assembler {
// Check if result is zero and op is negative.
void NegativeZeroTest(Register result, Register op, Label* then_label);
// Check if result is zero and op is negative in code using jump targets.
void NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
JumpTarget* then_target);
// Check if result is zero and any of op1 and op2 are negative.
// Register scratch is destroyed, and it must be different from op2.
void NegativeZeroTest(Register result, Register op1, Register op2,
@ -327,7 +336,6 @@ static inline Operand FieldOperand(Register object,
return Operand(object, index, scale, offset - kHeapObjectTag);
}
} } // namespace v8::internal
#endif // V8_MACRO_ASSEMBLER_IA32_H_


@ -205,7 +205,7 @@ class Parser {
BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
void RegisterLabelUse(Label* label, int index);
void RegisterTargetUse(JumpTarget* target, int index);
// Create a number literal.
Literal* NewNumberLiteral(double value);
@ -2050,8 +2050,8 @@ Block* Parser::WithHelper(Expression* obj,
bool is_catch_block,
bool* ok) {
// Parse the statement and collect escaping labels.
ZoneList<Label*>* label_list = NEW(ZoneList<Label*>(0));
LabelCollector collector(label_list);
ZoneList<JumpTarget*>* target_list = NEW(ZoneList<JumpTarget*>(0));
TargetCollector collector(target_list);
Statement* stat;
{ Target target(this, &collector);
with_nesting_level_++;
@ -2064,7 +2064,7 @@ Block* Parser::WithHelper(Expression* obj,
// 2: The try-finally block evaluating the body.
Block* result = NEW(Block(NULL, 2, false));
if (result) {
if (result != NULL) {
result->AddStatement(NEW(WithEnterStatement(obj, is_catch_block)));
// Create body block.
@ -2077,12 +2077,10 @@ Block* Parser::WithHelper(Expression* obj,
// Return a try-finally statement.
TryFinally* wrapper = NEW(TryFinally(body, exit));
wrapper->set_escaping_labels(collector.labels());
wrapper->set_escaping_targets(collector.targets());
result->AddStatement(wrapper);
return result;
} else {
return NULL;
}
return result;
}
@ -2197,8 +2195,8 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Expect(Token::TRY, CHECK_OK);
ZoneList<Label*>* label_list = NEW(ZoneList<Label*>(0));
LabelCollector collector(label_list);
ZoneList<JumpTarget*>* target_list = NEW(ZoneList<JumpTarget*>(0));
TargetCollector collector(target_list);
Block* try_block;
{ Target target(this, &collector);
@ -2217,10 +2215,11 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
}
// If we can break out from the catch block and there is a finally block,
// then we will need to collect labels from the catch block. Since we don't
// know yet if there will be a finally block, we always collect the labels.
ZoneList<Label*>* catch_label_list = NEW(ZoneList<Label*>(0));
LabelCollector catch_collector(catch_label_list);
// then we will need to collect jump targets from the catch block. Since
// we don't know yet if there will be a finally block, we always collect
// the jump targets.
ZoneList<JumpTarget*>* catch_target_list = NEW(ZoneList<JumpTarget*>(0));
TargetCollector catch_collector(catch_target_list);
bool has_catch = false;
if (tok == Token::CATCH) {
has_catch = true;
@ -2260,7 +2259,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (!is_pre_parsing_ && catch_block != NULL && finally_block != NULL) {
TryCatch* statement = NEW(TryCatch(try_block, catch_var, catch_block));
statement->set_escaping_labels(collector.labels());
statement->set_escaping_targets(collector.targets());
try_block = NEW(Block(NULL, 1, false));
try_block->AddStatement(statement);
catch_block = NULL;
@ -2271,15 +2270,15 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
result = NEW(TryCatch(try_block, catch_var, catch_block));
result->set_escaping_labels(collector.labels());
result->set_escaping_targets(collector.targets());
} else {
ASSERT(finally_block != NULL);
result = NEW(TryFinally(try_block, finally_block));
// Add the labels of the try block and the catch block.
for (int i = 0; i < collector.labels()->length(); i++) {
catch_collector.labels()->Add(collector.labels()->at(i));
// Add the jump targets of the try block and the catch block.
for (int i = 0; i < collector.targets()->length(); i++) {
catch_collector.targets()->Add(collector.targets()->at(i));
}
result->set_escaping_labels(catch_collector.labels());
result->set_escaping_targets(catch_collector.targets());
}
}
@ -3506,7 +3505,7 @@ BreakableStatement* Parser::LookupBreakTarget(Handle<String> label, bool* ok) {
if ((anonymous && stat->is_target_for_anonymous()) ||
(!anonymous && ContainsLabel(stat->labels(), label))) {
RegisterLabelUse(stat->break_target(), i);
RegisterTargetUse(stat->break_target(), i);
return stat;
}
}
@ -3523,7 +3522,7 @@ IterationStatement* Parser::LookupContinueTarget(Handle<String> label,
ASSERT(stat->is_target_for_anonymous());
if (anonymous || ContainsLabel(stat->labels(), label)) {
RegisterLabelUse(stat->continue_target(), i);
RegisterTargetUse(stat->continue_target(), i);
return stat;
}
}
@ -3531,13 +3530,13 @@ IterationStatement* Parser::LookupContinueTarget(Handle<String> label,
}
void Parser::RegisterLabelUse(Label* label, int index) {
// Register that a label found at the given index in the target
// stack has been used from the top of the target stack. Add the
// label to any LabelCollectors passed on the stack.
void Parser::RegisterTargetUse(JumpTarget* target, int index) {
// Register that a jump target found at the given index in the target
// stack has been used from the top of the target stack. Add the jump
// target to any TargetCollectors passed on the stack.
for (int i = target_stack_->length(); i-- > index;) {
LabelCollector* collector = target_stack_->at(i)->AsLabelCollector();
if (collector != NULL) collector->AddLabel(label);
TargetCollector* collector = target_stack_->at(i)->AsTargetCollector();
if (collector != NULL) collector->AddTarget(target);
}
}


@ -0,0 +1,96 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "register-allocator.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
void Result::ToRegister() {
UNIMPLEMENTED();
}
void Result::ToRegister(Register target) {
UNIMPLEMENTED();
}
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
RegisterFile RegisterAllocator::Reserved() {
RegisterFile reserved;
reserved.Use(sp);
reserved.Use(fp);
reserved.Use(cp);
reserved.Use(pc);
return reserved;
}
void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
register_file->ref_counts_[sp.code()] = 0;
register_file->ref_counts_[fp.code()] = 0;
register_file->ref_counts_[cp.code()] = 0;
register_file->ref_counts_[pc.code()] = 0;
}
void RegisterAllocator::Initialize() {
Reset();
// The following registers are live on function entry, saved in the
// frame, and available for allocation during execution.
Use(r1); // JS function.
Use(lr); // Return address.
}
void RegisterAllocator::Reset() {
registers_.Reset();
// The following registers are live on function entry and reserved
// during execution.
Use(sp); // Stack pointer.
Use(fp); // Frame pointer (caller's frame pointer on entry).
Use(cp); // Context (callee's context on entry).
Use(pc); // Program counter.
}
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
UNIMPLEMENTED();
Result invalid(cgen_);
return invalid;
}
} } // namespace v8::internal


@ -0,0 +1,130 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "register-allocator.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
void Result::ToRegister() {
ASSERT(is_valid());
if (is_constant()) {
Result fresh = cgen_->allocator()->Allocate();
ASSERT(fresh.is_valid());
if (cgen_->IsUnsafeSmi(handle())) {
cgen_->LoadUnsafeSmi(fresh.reg(), handle());
} else {
cgen_->masm()->Set(fresh.reg(), Immediate(handle()));
}
// This result becomes a copy of the fresh one.
*this = fresh;
}
ASSERT(is_register());
}
void Result::ToRegister(Register target) {
ASSERT(is_valid());
if (!is_register() || !reg().is(target)) {
Result fresh = cgen_->allocator()->Allocate(target);
ASSERT(fresh.is_valid());
if (is_register()) {
cgen_->masm()->mov(fresh.reg(), reg());
} else {
ASSERT(is_constant());
if (cgen_->IsUnsafeSmi(handle())) {
cgen_->LoadUnsafeSmi(fresh.reg(), handle());
} else {
cgen_->masm()->Set(fresh.reg(), Immediate(handle()));
}
}
*this = fresh;
} else if (is_register() && reg().is(target)) {
ASSERT(cgen_->has_valid_frame());
cgen_->frame()->Spill(target);
ASSERT(cgen_->allocator()->count(target) == 1);
}
ASSERT(is_register());
ASSERT(reg().is(target));
}
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
RegisterFile RegisterAllocator::Reserved() {
RegisterFile reserved;
reserved.Use(esp);
reserved.Use(ebp);
reserved.Use(esi);
return reserved;
}
void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
register_file->ref_counts_[esp.code()] = 0;
register_file->ref_counts_[ebp.code()] = 0;
register_file->ref_counts_[esi.code()] = 0;
}
void RegisterAllocator::Initialize() {
Reset();
// The following register is live on function entry, saved in the
// frame, and available for allocation during execution.
Use(edi); // JS function.
}
void RegisterAllocator::Reset() {
registers_.Reset();
// The following registers are live on function entry and reserved
// during execution.
Use(esp); // Stack pointer.
Use(ebp); // Frame pointer (caller's frame pointer on entry).
Use(esi); // Context (callee's context on entry).
}
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
Result result = AllocateWithoutSpilling();
// Check that the register is a byte register. If not, unuse the
// register if valid and return an invalid result.
if (result.is_valid() && !result.reg().is_byte_register()) {
result.Unuse();
return Result(cgen_);
}
return result;
}
} } // namespace v8::internal
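
AllocateByteRegisterWithoutSpilling above leans on Register::is_byte_register(), which this diff does not show. A small sketch of the test it presumably performs, assuming the conventional ia32 register numbering (eax=0, ecx=1, edx=2, ebx=3, esp=4, ebp=5, esi=6, edi=7); only the first four have 8-bit forms.

#include <cassert>

// Assumed encoding: codes 0-3 (eax, ecx, edx, ebx) have byte forms
// (al, cl, dl, bl); codes 4-7 (esp, ebp, esi, edi) do not.
static bool is_byte_register(int code) { return code >= 0 && code < 4; }

int main() {
  assert(is_byte_register(0));   // eax can be addressed as al.
  assert(!is_byte_register(6));  // esi cannot; the allocator unuses it
                                 // and returns an invalid Result.
  return 0;
}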

129
src/register-allocator.cc Normal file
@@ -0,0 +1,129 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "register-allocator.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
Result::Result(Register reg, CodeGenerator* cgen)
: type_(REGISTER),
cgen_(cgen) {
data_.reg_ = reg;
ASSERT(reg.is_valid());
cgen_->allocator()->Use(reg);
}
void Result::CopyTo(Result* destination) const {
destination->type_ = type();
destination->cgen_ = cgen_;
if (is_register()) {
destination->data_.reg_ = reg();
cgen_->allocator()->Use(reg());
} else if (is_constant()) {
destination->data_.handle_ = data_.handle_;
} else {
ASSERT(!is_valid());
}
}
void Result::Unuse() {
if (is_register()) {
cgen_->allocator()->Unuse(reg());
}
type_ = INVALID;
}
// -------------------------------------------------------------------------
// RegisterFile implementation.
void RegisterFile::CopyTo(RegisterFile* other) {
for (int i = 0; i < kNumRegisters; i++) {
other->ref_counts_[i] = ref_counts_[i];
}
}
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
Result RegisterAllocator::AllocateWithoutSpilling() {
// Return the first free register, if any.
for (int i = 0; i < kNumRegisters; i++) {
if (!is_used(i)) {
Register free_reg = { i };
return Result(free_reg, cgen_);
}
}
return Result(cgen_);
}
Result RegisterAllocator::Allocate() {
Result result = AllocateWithoutSpilling();
if (!result.is_valid()) {
// Ask the current frame to spill a register.
ASSERT(cgen_->has_valid_frame());
Register free_reg = cgen_->frame()->SpillAnyRegister();
if (free_reg.is_valid()) {
ASSERT(!is_used(free_reg));
return Result(free_reg, cgen_);
}
}
return result;
}
Result RegisterAllocator::Allocate(Register target) {
// If the target is not referenced, it can simply be allocated.
if (!is_used(target)) {
return Result(target, cgen_);
}
// If the target is only referenced in the frame, it can be spilled and
// then allocated.
ASSERT(cgen_->has_valid_frame());
if (count(target) == cgen_->frame()->register_count(target)) {
cgen_->frame()->Spill(target);
ASSERT(!is_used(target));
return Result(target, cgen_);
}
// Otherwise (if it's referenced outside the frame) we cannot allocate it.
return Result(cgen_);
}
} } // namespace v8::internal
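
Allocate(Register target) above decides among three outcomes by comparing the allocator's total reference count for the register against the frame's own count. A reduced model of that decision, not V8 code:

#include <cassert>

enum Outcome { ALLOCATE_FREE, SPILL_THEN_ALLOCATE, FAIL };

// total_count: the allocator's reference count for the register.
// frame_count: how many of those references the virtual frame holds.
static Outcome Decide(int total_count, int frame_count) {
  if (total_count == 0) return ALLOCATE_FREE;
  if (total_count == frame_count) return SPILL_THEN_ALLOCATE;
  return FAIL;  // Referenced outside the frame, e.g. by a live Result.
}

int main() {
  assert(Decide(0, 0) == ALLOCATE_FREE);        // unreferenced target
  assert(Decide(2, 2) == SPILL_THEN_ALLOCATE);  // frame-only references
  assert(Decide(2, 1) == FAIL);                 // one external reference
  return 0;
}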

237
src/register-allocator.h Normal file
@@ -0,0 +1,237 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_REGISTER_ALLOCATOR_H_
#define V8_REGISTER_ALLOCATOR_H_
#include "macro-assembler.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Results
//
// Results encapsulate the compile-time values manipulated by the code
// generator. They can represent registers or constants.
class Result BASE_EMBEDDED {
public:
enum Type {
INVALID,
REGISTER,
CONSTANT
};
// Construct an invalid result.
explicit Result(CodeGenerator* cgen) : type_(INVALID), cgen_(cgen) {}
// Construct a register Result.
Result(Register reg, CodeGenerator* cgen);
// Construct a Result whose value is a compile-time constant.
Result(Handle<Object> value, CodeGenerator* cgen)
: type_(CONSTANT),
cgen_(cgen) {
data_.handle_ = value.location();
}
// The copy constructor and assignment operators could each create a new
// register reference.
Result(const Result& other) {
other.CopyTo(this);
}
Result& operator=(const Result& other) {
if (this != &other) {
Unuse();
other.CopyTo(this);
}
return *this;
}
~Result() { Unuse(); }
void Unuse();
Type type() const { return type_; }
bool is_valid() const { return type() != INVALID; }
bool is_register() const { return type() == REGISTER; }
bool is_constant() const { return type() == CONSTANT; }
Register reg() const {
ASSERT(type() == REGISTER);
return data_.reg_;
}
Handle<Object> handle() const {
ASSERT(type() == CONSTANT);
return Handle<Object>(data_.handle_);
}
// Move this result to an arbitrary register. The register is not
// necessarily spilled from the frame or even singly-referenced outside
// it.
void ToRegister();
// Move this result to a specified register. The register is spilled from
// the frame, and the register is singly-referenced (by this result)
// outside the frame.
void ToRegister(Register reg);
private:
Type type_;
union {
Register reg_;
Object** handle_;
} data_;
CodeGenerator* cgen_;
void CopyTo(Result* destination) const;
};
// -------------------------------------------------------------------------
// Register file
//
// The register file tracks reference counts for the processor registers.
// It is used by both the register allocator and the virtual frame.
class RegisterFile BASE_EMBEDDED {
public:
RegisterFile() { Reset(); }
void Reset() {
for (int i = 0; i < kNumRegisters; i++) {
ref_counts_[i] = 0;
}
}
// Predicates and accessors for the reference counts. The versions
// that take a register code rather than a register are for
// convenience in loops over the register codes.
bool is_used(int reg_code) const { return ref_counts_[reg_code] > 0; }
bool is_used(Register reg) const { return is_used(reg.code()); }
int count(int reg_code) const { return ref_counts_[reg_code]; }
int count(Register reg) const { return count(reg.code()); }
// Record a use of a register by incrementing its reference count.
void Use(Register reg) {
ref_counts_[reg.code()]++;
}
// Record that a register will no longer be used by decrementing its
// reference count.
void Unuse(Register reg) {
ASSERT(is_used(reg.code()));
if (is_used(reg.code())) {
ref_counts_[reg.code()]--;
}
}
// Copy the reference counts from this register file to the other.
void CopyTo(RegisterFile* other);
private:
int ref_counts_[kNumRegisters];
friend class RegisterAllocator;
};
// -------------------------------------------------------------------------
// Register allocator
//
class RegisterAllocator BASE_EMBEDDED {
public:
explicit RegisterAllocator(CodeGenerator* cgen) : cgen_(cgen) {}
// A register file with each of the reserved registers counted once.
static RegisterFile Reserved();
// Unuse all the reserved registers in a register file.
static void UnuseReserved(RegisterFile* register_file);
// Predicates and accessors for the registers' reference counts.
bool is_used(int reg_code) const { return registers_.is_used(reg_code); }
bool is_used(Register reg) const { return registers_.is_used(reg.code()); }
int count(int reg_code) const { return registers_.count(reg_code); }
int count(Register reg) const { return registers_.count(reg.code()); }
// Explicitly record a reference to a register.
void Use(Register reg) { registers_.Use(reg); }
// Explicitly record that a register will no longer be used.
void Unuse(Register reg) { registers_.Unuse(reg); }
// Initialize the register allocator for entry to a JS function. On
// entry, the registers used by the JS calling convention are
// externally referenced (i.e., outside the virtual frame), and the
// other registers are free.
void Initialize();
// Reset the register reference counts to free all non-reserved registers.
// A frame-external reference is kept to each of the reserved registers.
void Reset();
// Allocate a free register and return a register result if possible or
// fail and return an invalid result.
Result Allocate();
// Allocate a specific register if possible, spilling it from the frame if
// necessary, or else fail and return an invalid result.
Result Allocate(Register target);
// Allocate a free register without spilling any from the current frame or
// fail and return an invalid result.
Result AllocateWithoutSpilling();
// Allocate a free byte register without spilling any from the
// current frame or fail and return an invalid result.
Result AllocateByteRegisterWithoutSpilling();
// Copy the internal state to a register file, to be restored later by
// RestoreFrom.
void SaveTo(RegisterFile* register_file) {
registers_.CopyTo(register_file);
}
void RestoreFrom(RegisterFile* register_file) {
register_file->CopyTo(&registers_);
}
private:
CodeGenerator* cgen_;
RegisterFile registers_;
};
} } // namespace v8::internal
#endif // V8_REGISTER_ALLOCATOR_H_
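
The copy constructor, assignment operator, and destructor declared for Result above keep the register file's counts equal to the number of live Results naming a register. A self-contained toy of the same discipline, not V8 code:

#include <cassert>

struct ToyAllocator {
  int count = 0;
  void Use() { count++; }
  void Unuse() { assert(count > 0); count--; }
};

struct ToyResult {
  ToyAllocator* alloc;
  explicit ToyResult(ToyAllocator* a) : alloc(a) { alloc->Use(); }
  ToyResult(const ToyResult& other) : alloc(other.alloc) { alloc->Use(); }
  ToyResult& operator=(const ToyResult& other) {
    if (this != &other) {
      alloc->Unuse();   // Drop the old reference,
      alloc = other.alloc;
      alloc->Use();     // then take a new one.
    }
    return *this;
  }
  ~ToyResult() { alloc->Unuse(); }
};

int main() {
  ToyAllocator alloc;
  {
    ToyResult a(&alloc);  // count == 1
    ToyResult b(a);       // copy takes its own reference: count == 2
    assert(alloc.count == 2);
  }                       // both destroyed
  assert(alloc.count == 0);  // counts are balanced again
  return 0;
}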

src/simulator-arm.cc
@@ -1525,7 +1525,7 @@ void Simulator::Execute() {
Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
int32_t p3, int32_t p4) {
int32_t p3, int32_t p4) {
// Setup parameters
set_register(r0, p0);
set_register(r1, p1);

481
src/virtual-frame-arm.cc Normal file
@@ -0,0 +1,481 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "codegen-inl.h"
#include "virtual-frame.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// VirtualFrame implementation.
#define __ masm_->
// On entry to a function, the virtual frame already contains the
// receiver and the parameters. All initial frame elements are in
// memory.
VirtualFrame::VirtualFrame(CodeGenerator* cgen)
: cgen_(cgen),
masm_(cgen->masm()),
elements_(0),
parameter_count_(cgen->scope()->num_parameters()),
local_count_(0),
stack_pointer_(parameter_count_), // 0-based index of TOS.
frame_pointer_(kIllegalIndex) {
for (int i = 0; i < parameter_count_ + 1; i++) {
elements_.Add(FrameElement::MemoryElement());
}
}
// Clear the dirty bit for the element at a given index if it is a
// valid element. The stack address corresponding to the element must
// be allocated on the physical stack, or the first element above the
// stack pointer so it can be allocated by a single push instruction.
void VirtualFrame::RawSyncElementAt(int index) {
FrameElement element = elements_[index];
if (!element.is_valid() || element.is_synced()) return;
if (index <= stack_pointer_) {
// Emit code to write elements below the stack pointer to their
// (already allocated) stack address.
switch (element.type()) {
case FrameElement::INVALID: // Fall through.
case FrameElement::MEMORY:
// There was an early bailout for invalid and synced elements
// (memory elements are always synced).
UNREACHABLE();
break;
case FrameElement::REGISTER:
__ str(element.reg(), MemOperand(fp, fp_relative(index)));
break;
case FrameElement::CONSTANT: {
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), Operand(element.handle()));
__ str(temp.reg(), MemOperand(fp, fp_relative(index)));
break;
}
case FrameElement::COPY: {
int backing_index = element.index();
FrameElement backing_element = elements_[backing_index];
if (backing_element.is_memory()) {
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
__ ldr(temp.reg(), MemOperand(fp, fp_relative(backing_index)));
__ str(temp.reg(), MemOperand(fp, fp_relative(index)));
} else {
ASSERT(backing_element.is_register());
__ str(backing_element.reg(), MemOperand(fp, fp_relative(index)));
}
break;
}
}
} else {
// Push elements above the stack pointer to allocate space and
// sync them. Space should have already been allocated in the
// actual frame for all the elements below this one.
ASSERT(index == stack_pointer_ + 1);
stack_pointer_++;
switch (element.type()) {
case FrameElement::INVALID: // Fall through.
case FrameElement::MEMORY:
// There was an early bailout for invalid and synced elements
// (memory elements are always synced).
UNREACHABLE();
break;
case FrameElement::REGISTER:
__ push(element.reg());
break;
case FrameElement::CONSTANT: {
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), Operand(element.handle()));
__ push(temp.reg());
break;
}
case FrameElement::COPY: {
int backing_index = element.index();
FrameElement backing = elements_[backing_index];
ASSERT(backing.is_memory() || backing.is_register());
if (backing.is_memory()) {
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
__ ldr(temp.reg(), MemOperand(fp, fp_relative(backing_index)));
__ push(temp.reg());
} else {
__ push(backing.reg());
}
break;
}
}
}
elements_[index].set_sync();
}
void VirtualFrame::MergeTo(VirtualFrame* expected) {
Comment cmnt(masm_, "[ Merge frame");
// We should always be merging the code generator's current frame to an
// expected frame.
ASSERT(cgen_->frame() == this);
// Adjust the stack pointer upward (toward the top of the virtual
// frame) if necessary.
if (stack_pointer_ < expected->stack_pointer_) {
int difference = expected->stack_pointer_ - stack_pointer_;
stack_pointer_ = expected->stack_pointer_;
__ sub(sp, sp, Operand(difference * kPointerSize));
}
MergeMoveRegistersToMemory(expected);
MergeMoveRegistersToRegisters(expected);
MergeMoveMemoryToRegisters(expected);
// Fix any sync bit problems.
for (int i = 0; i <= stack_pointer_; i++) {
FrameElement source = elements_[i];
FrameElement target = expected->elements_[i];
if (source.is_synced() && !target.is_synced()) {
elements_[i].clear_sync();
} else if (!source.is_synced() && target.is_synced()) {
SyncElementAt(i);
}
}
// Adjust the stack pointer downward if necessary.
if (stack_pointer_ > expected->stack_pointer_) {
int difference = stack_pointer_ - expected->stack_pointer_;
stack_pointer_ = expected->stack_pointer_;
__ add(sp, sp, Operand(difference * kPointerSize));
}
// At this point, the frames should be identical.
ASSERT(Equals(expected));
}
void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
ASSERT(stack_pointer_ >= expected->stack_pointer_);
// Move registers, constants, and copies to memory. Perform moves
// from the top downward in the frame in order to leave the backing
// stores of copies in registers.
//
// The ARM code generator does not yet leave frame elements in
// registers or as constants, so for now this routine only asserts
// that every element is already in memory.
for (int i = 0; i < elements_.length(); i++) {
ASSERT(elements_[i].is_memory());
ASSERT(expected->elements_[i].is_memory());
}
}
void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
}
void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
}
void VirtualFrame::Enter() {
Comment cmnt(masm_, "[ Enter JS frame");
#ifdef DEBUG
// Verify that r1 contains a JS function. The following code relies
// on r2 being available for use.
{ Label map_check, done;
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &map_check);
__ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
__ bind(&map_check);
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(JS_FUNCTION_TYPE));
__ b(eq, &done);
__ stop("VirtualFrame::Enter - r1 is not a function (map check).");
__ bind(&done);
}
#endif // DEBUG
// We are about to push four values to the frame.
Adjust(4);
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
// Adjust FP to point to saved FP.
frame_pointer_ = elements_.length() - 2;
__ add(fp, sp, Operand(2 * kPointerSize));
cgen_->allocator()->Unuse(r1);
cgen_->allocator()->Unuse(lr);
}
void VirtualFrame::Exit() {
Comment cmnt(masm_, "[ Exit JS frame");
// Drop the execution stack down to the frame pointer and restore the caller
// frame pointer and return address.
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
}
void VirtualFrame::AllocateStackSlots(int count) {
ASSERT(height() == 0);
local_count_ = count;
Adjust(count);
if (count > 0) {
Comment cmnt(masm_, "[ Allocate space for locals");
// Initialize stack slots with 'undefined' value.
__ mov(ip, Operand(Factory::undefined_value()));
for (int i = 0; i < count; i++) {
__ push(ip);
}
}
}
void VirtualFrame::SaveContextRegister() {
UNIMPLEMENTED();
}
void VirtualFrame::RestoreContextRegister() {
UNIMPLEMENTED();
}
void VirtualFrame::PushReceiverSlotAddress() {
UNIMPLEMENTED();
}
// Before changing an element which is copied, adjust so that the
// first copy becomes the new backing store and all the other copies
// are updated. If the original was in memory, the new backing store
// is allocated to a register. Return a copy of the new backing store
// or an invalid element if the original was not a copy.
FrameElement VirtualFrame::AdjustCopies(int index) {
UNIMPLEMENTED();
return FrameElement::InvalidElement();
}
void VirtualFrame::TakeFrameSlotAt(int index) {
UNIMPLEMENTED();
}
void VirtualFrame::StoreToFrameSlotAt(int index) {
UNIMPLEMENTED();
}
void VirtualFrame::PushTryHandler(HandlerType type) {
// Grow the expression stack by handler size less one (the return address
// is already pushed by a call instruction).
Adjust(kHandlerSize - 1);
__ PushTryHandler(IN_JAVASCRIPT, type);
}
Result VirtualFrame::RawCallStub(CodeStub* stub, int frame_arg_count) {
ASSERT(cgen_->HasValidEntryRegisters());
__ CallStub(stub);
Result result = cgen_->allocator()->Allocate(r0);
ASSERT(result.is_valid());
return result;
}
Result VirtualFrame::CallRuntime(Runtime::Function* f,
int frame_arg_count) {
PrepareForCall(frame_arg_count, frame_arg_count);
ASSERT(cgen_->HasValidEntryRegisters());
__ CallRuntime(f, frame_arg_count);
Result result = cgen_->allocator()->Allocate(r0);
ASSERT(result.is_valid());
return result;
}
Result VirtualFrame::CallRuntime(Runtime::FunctionId id,
int frame_arg_count) {
PrepareForCall(frame_arg_count, frame_arg_count);
ASSERT(cgen_->HasValidEntryRegisters());
__ CallRuntime(id, frame_arg_count);
Result result = cgen_->allocator()->Allocate(r0);
ASSERT(result.is_valid());
return result;
}
Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
Result* arg_count_register,
int frame_arg_count) {
ASSERT(arg_count_register->reg().is(r0));
PrepareForCall(frame_arg_count, frame_arg_count);
arg_count_register->Unuse();
__ InvokeBuiltin(id, flags);
Result result = cgen_->allocator()->Allocate(r0);
return result;
}
Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode) {
ASSERT(cgen_->HasValidEntryRegisters());
__ Call(code, rmode);
Result result = cgen_->allocator()->Allocate(r0);
ASSERT(result.is_valid());
return result;
}
Result VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
Result* arg,
int dropped_args) {
int spilled_args = 0;
switch (code->kind()) {
case Code::LOAD_IC:
ASSERT(arg->reg().is(r2));
ASSERT(dropped_args == 0);
spilled_args = 1;
break;
case Code::KEYED_STORE_IC:
ASSERT(arg->reg().is(r0));
ASSERT(dropped_args == 0);
spilled_args = 2;
break;
default:
// No other types of code objects are called with values
// in exactly one register.
UNREACHABLE();
break;
}
PrepareForCall(spilled_args, dropped_args);
arg->Unuse();
return RawCallCodeObject(code, rmode);
}
Result VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
Result* arg0,
Result* arg1,
int dropped_args) {
int spilled_args = 1;
switch (code->kind()) {
case Code::STORE_IC:
ASSERT(arg0->reg().is(r0));
ASSERT(arg1->reg().is(r2));
ASSERT(dropped_args == 0);
spilled_args = 1;
break;
case Code::BUILTIN:
ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
ASSERT(arg0->reg().is(r0));
ASSERT(arg1->reg().is(r1));
spilled_args = dropped_args + 1;
break;
default:
// No other types of code objects are called with values
// in exactly two registers.
UNREACHABLE();
break;
}
PrepareForCall(spilled_args, dropped_args);
arg0->Unuse();
arg1->Unuse();
return RawCallCodeObject(code, rmode);
}
void VirtualFrame::Drop(int count) {
ASSERT(height() >= count);
int num_virtual_elements = (elements_.length() - 1) - stack_pointer_;
// Emit code to lower the stack pointer if necessary.
if (num_virtual_elements < count) {
int num_dropped = count - num_virtual_elements;
stack_pointer_ -= num_dropped;
__ add(sp, sp, Operand(num_dropped * kPointerSize));
}
// Discard elements from the virtual frame and free any registers.
for (int i = 0; i < count; i++) {
FrameElement dropped = elements_.RemoveLast();
if (dropped.is_register()) {
Unuse(dropped.reg());
}
}
}
Result VirtualFrame::Pop() {
UNIMPLEMENTED();
Result invalid(cgen_);
return invalid;
}
void VirtualFrame::EmitPop(Register reg) {
ASSERT(stack_pointer_ == elements_.length() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ pop(reg);
}
void VirtualFrame::EmitPush(Register reg) {
ASSERT(stack_pointer_ == elements_.length() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(reg);
}
#undef __
} } // namespace v8::internal
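
Drop() above emits an sp adjustment only for elements actually backed by stack memory; elements above the stack pointer are purely virtual and cost nothing to drop. A small arithmetic model of that computation, not V8 code, assuming 32-bit pointers:

#include <cassert>

static const int kPointerSize = 4;

// Bytes to add to sp when dropping `count` elements from a frame with
// `length` elements, where the element at index `stack_pointer` is the
// highest one backed by real stack memory.
static int BytesToPop(int length, int stack_pointer, int count) {
  int num_virtual_elements = (length - 1) - stack_pointer;
  int num_dropped = count - num_virtual_elements;
  return num_dropped > 0 ? num_dropped * kPointerSize : 0;
}

int main() {
  // Frame of 10 elements whose sp backs index 7: two are virtual.
  assert(BytesToPop(10, 7, 2) == 0);                 // no code emitted
  assert(BytesToPop(10, 7, 5) == 3 * kPointerSize);  // three real slots
  return 0;
}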

460
src/virtual-frame-arm.h Normal file
@@ -0,0 +1,460 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_VIRTUAL_FRAME_ARM_H_
#define V8_VIRTUAL_FRAME_ARM_H_
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Virtual frames
//
// The virtual frame is an abstraction of the physical stack frame. It
// encapsulates the parameters, frame-allocated locals, and the expression
// stack. It supports push/pop operations on the expression stack, as well
// as random access to the expression stack elements, locals, and
// parameters.
class VirtualFrame : public Malloced {
public:
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
// generator's current frame, but no attempt is made to require it
// to stay spilled. It is intended as documentation while the code
// generator is being transformed.
class SpilledScope BASE_EMBEDDED {
public:
explicit SpilledScope(CodeGenerator* cgen);
~SpilledScope();
private:
CodeGenerator* cgen_;
bool previous_state_;
};
// Construct an initial virtual frame on entry to a JS function.
explicit VirtualFrame(CodeGenerator* cgen);
// Construct a virtual frame as a clone of an existing one.
explicit VirtualFrame(VirtualFrame* original);
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index);
// The height of the virtual expression stack.
int height() const {
return elements_.length() - expression_base_index();
}
int register_count(Register reg) {
return frame_registers_.count(reg);
}
// Add extra in-memory elements to the top of the frame to match an actual
// frame (eg, the frame after an exception handler is pushed). No code is
// emitted.
void Adjust(int count);
// Forget elements from the top of the frame to match an actual frame (eg,
// the frame after a runtime call). No code is emitted.
void Forget(int count);
// Spill all values from the frame to memory.
void SpillAll();
// Spill all occurrences of a specific register from the frame.
void Spill(Register reg);
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
// (ie, they all have frame-external references).
Register SpillAnyRegister();
// Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating
// code. It is guaranteed that no code will be generated.
void PrepareMergeTo(VirtualFrame* expected);
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
void MergeTo(VirtualFrame* expected);
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator();
// (Re)attach a frame to its code generator. This informs the register
// allocator that the frame-internal register references are active again.
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
void AttachToCodeGenerator();
// Emit code for the physical JS entry and exit frame sequences. After
// calling Enter, the virtual frame is ready for use; and after calling
// Exit it should not be used. Note that Enter does not allocate space in
// the physical frame for storing frame-allocated locals.
void Enter();
void Exit();
// Prepare for returning from the frame by spilling locals and
// dropping all non-locals elements in the virtual frame. This
// avoids generating unnecessary merge code when jumping to the
// shared return site. Emits code for spills.
void PrepareForReturn();
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots(int count);
// The current top of the expression stack as an assembly operand.
MemOperand Top() const { return MemOperand(sp, 0); }
// An element of the expression stack as an assembly operand.
MemOperand ElementAt(int index) const {
return MemOperand(sp, index * kPointerSize);
}
// Random-access store to a frame-top relative frame element. The result
// becomes owned by the frame and is invalidated.
void SetElementAt(int index, Result* value);
// Set a frame element to a constant. The index is frame-top relative.
void SetElementAt(int index, Handle<Object> value) {
Result temp(value, cgen_);
SetElementAt(index, &temp);
}
void PushElementAt(int index) {
PushFrameSlotAt(elements_.length() - index - 1);
}
// A frame-allocated local as an assembly operand.
MemOperand LocalAt(int index) const {
ASSERT(0 <= index);
ASSERT(index < local_count_);
return MemOperand(fp, kLocal0Offset - index * kPointerSize);
}
// Push a copy of the value of a local frame slot on top of the frame.
void PushLocalAt(int index) {
PushFrameSlotAt(local0_index() + index);
}
// Push the value of a local frame slot on top of the frame and invalidate
// the local slot. The slot should be written to before trying to read
// from it again.
void TakeLocalAt(int index) {
TakeFrameSlotAt(local0_index() + index);
}
// Store the top value on the virtual frame into a local frame slot. The
// value is left in place on top of the frame.
void StoreToLocalAt(int index) {
StoreToFrameSlotAt(local0_index() + index);
}
// Push the address of the receiver slot on the frame.
void PushReceiverSlotAddress();
// The function frame slot.
MemOperand Function() const { return MemOperand(fp, kFunctionOffset); }
// Push the function on top of the frame.
void PushFunction() { PushFrameSlotAt(function_index()); }
// The context frame slot.
MemOperand Context() const { return MemOperand(fp, kContextOffset); }
// Save the value of the context register (cp on ARM) to the context
// frame slot.
void SaveContextRegister();
// Restore the context register from the value of the context frame
// slot.
void RestoreContextRegister();
// A parameter as an assembly operand.
MemOperand ParameterAt(int index) const {
// Index -1 corresponds to the receiver.
ASSERT(-1 <= index && index <= parameter_count_);
return MemOperand(fp, (1 + parameter_count_ - index) * kPointerSize);
}
// Push a copy of the value of a parameter frame slot on top of the frame.
void PushParameterAt(int index) {
PushFrameSlotAt(param0_index() + index);
}
// Push the value of a parameter frame slot on top of the frame and
// invalidate the parameter slot. The slot should be written to before
// trying to read from it again.
void TakeParameterAt(int index) {
TakeFrameSlotAt(param0_index() + index);
}
// Store the top value on the virtual frame into a parameter frame slot.
// The value is left in place on top of the frame.
void StoreToParameterAt(int index) {
StoreToFrameSlotAt(param0_index() + index);
}
// The receiver frame slot.
MemOperand Receiver() const { return ParameterAt(-1); }
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call a code stub, given the number of arguments it expects on (and
// removes from) the top of the physical frame.
Result CallStub(CodeStub* stub, int frame_arg_count);
Result CallStub(CodeStub* stub, Result* arg, int frame_arg_count);
Result CallStub(CodeStub* stub,
Result* arg0,
Result* arg1,
int frame_arg_count);
// Call the runtime, given the number of arguments expected on (and
// removed from) the top of the physical frame.
Result CallRuntime(Runtime::Function* f, int frame_arg_count);
Result CallRuntime(Runtime::FunctionId id, int frame_arg_count);
// Invoke a builtin, given the number of arguments it expects on (and
// removes from) the top of the physical frame.
Result InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flag,
Result* arg_count_register,
int frame_arg_count);
// Call into a JS code object, given the number of arguments it
// removes from the top of the physical frame.
// Register arguments are passed as results and consumed by the call.
Result CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
int dropped_args);
Result CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
Result* arg,
int dropped_args);
Result CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
Result* arg0,
Result* arg1,
int dropped_args);
// Drop a number of elements from the top of the expression stack. May
// emit code to affect the physical frame. Does not clobber any registers
// excepting possibly the stack pointer.
void Drop(int count);
// Drop one element.
void Drop() { Drop(1); }
// Duplicate the top element of the frame.
void Dup() { PushFrameSlotAt(elements_.length() - 1); }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
Result Pop();
// Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction.
void EmitPop(Register reg);
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg);
// Push an element on the virtual frame.
void Push(Register reg);
void Push(Handle<Object> value);
void Push(Smi* value) { Push(Handle<Object>(value)); }
// Pushing a result invalidates it (its contents become owned by the
// frame).
void Push(Result* result);
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
void Nip(int num_dropped);
private:
// An illegal index into the virtual frame.
static const int kIllegalIndex = -1;
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
static const int kContextOffset = StandardFrameConstants::kContextOffset;
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
CodeGenerator* cgen_;
MacroAssembler* masm_;
List<FrameElement> elements_;
// The number of frame-allocated locals and parameters respectively.
int parameter_count_;
int local_count_;
// The index of the element that is at the processor's stack pointer
// (the sp register).
int stack_pointer_;
// The index of the element that is at the processor's frame pointer
// (the fp register).
int frame_pointer_;
// The frame has an embedded register file that it uses to track registers
// used in the frame.
RegisterFile frame_registers_;
// The index of the first parameter. The receiver lies below the first
// parameter.
int param0_index() const { return 1; }
// The index of the context slot in the frame.
int context_index() const {
ASSERT(frame_pointer_ != kIllegalIndex);
return frame_pointer_ - 1;
}
// The index of the function slot in the frame. It lies above the context
// slot.
int function_index() const {
ASSERT(frame_pointer_ != kIllegalIndex);
return frame_pointer_ - 2;
}
// The index of the first local. Between the parameters and the locals
// lie the return address, the saved frame pointer, the context, and the
// function.
int local0_index() const {
ASSERT(frame_pointer_ != kIllegalIndex);
return frame_pointer_ + 2;
}
// The index of the base of the expression stack.
int expression_base_index() const { return local0_index() + local_count_; }
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
int fp_relative(int index) const {
return (frame_pointer_ - index) * kPointerSize;
}
// Record an occurrence of a register in the virtual frame. This has the
// effect of incrementing both the register's frame-internal reference
// count and its external reference count.
void Use(Register reg);
// Record that a register reference has been dropped from the frame. This
// decrements both the register's internal and external reference counts.
void Unuse(Register reg);
// Spill the element at a particular index---write it to memory if
// necessary, free any associated register, and forget its value if
// constant.
void SpillElementAt(int index);
// Sync the element at a particular index. If it is a register or
// constant that disagrees with the value on the stack, write it to memory.
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
// Sync the range of elements in [begin, end).
void SyncRange(int begin, int end);
// Sync a single element, assuming that its index is less than
// or equal to stack pointer + 1.
void RawSyncElementAt(int index);
// Push a copy of a frame slot (typically a local or parameter) on top of
// the frame.
void PushFrameSlotAt(int index);
// Push the value of a frame slot (typically a local or parameter) on
// top of the frame and invalidate the slot.
void TakeFrameSlotAt(int index);
// Store the value on top of the frame to a frame slot (typically a local
// or parameter).
void StoreToFrameSlotAt(int index);
// Spill all elements in registers. Spill the top spilled_args elements
// on the frame. Sync all other frame elements.
// Then drop dropped_args elements from the virtual frame, to match
// the effect of an upcoming call that will drop them from the stack.
void PrepareForCall(int spilled_args, int dropped_args);
// Move frame elements currently in registers or constants, that
// should be in memory in the expected frame, to memory.
void MergeMoveRegistersToMemory(VirtualFrame* expected);
// Make the register-to-register moves necessary to
// merge this frame with the expected frame.
// Register to memory moves must already have been made,
// and memory to register moves must follow this call.
// This is because some new memory-to-register moves are
// created in order to break cycles of register moves.
// Used in the implementation of MergeTo().
void MergeMoveRegistersToRegisters(VirtualFrame* expected);
// Make the memory-to-register and constant-to-register moves
// needed to make this frame equal the expected frame.
// Called after all register-to-memory and register-to-register
// moves have been made. After this function returns, the frames
// should be equal.
void MergeMoveMemoryToRegisters(VirtualFrame* expected);
// Helper function to implement the copy-on-write semantics of an
// element's copies just before writing to the element. The copies
// are updated, but the element is not changed. A copy of the new
// backing store of all the copies is returned if there were any
// copies, and an invalid frame element is returned if there were no
// copies.
FrameElement AdjustCopies(int index);
// Call a code stub that has already been prepared for calling (via
// PrepareForCall).
Result RawCallStub(CodeStub* stub, int frame_arg_count);
// Calls a code object which has already been prepared for calling
// (via PrepareForCall).
Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
bool Equals(VirtualFrame* other);
friend class JumpTarget;
};
} } // namespace v8::internal
#endif // V8_VIRTUAL_FRAME_ARM_H_
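
All the private index helpers above funnel into fp_relative(), which turns an element index into a byte offset from the hardware frame pointer. A worked sketch, not V8 code, using a hypothetical small frame with the layout this header describes:

#include <cassert>

static const int kPointerSize = 4;

static int fp_relative(int frame_pointer, int index) {
  return (frame_pointer - index) * kPointerSize;
}

int main() {
  // Receiver plus two parameters occupy indices 0..2; Enter() then
  // pushes four more values, so the saved fp lands at index
  // length - 2 == 5.
  int frame_pointer = 5;
  assert(fp_relative(frame_pointer, 5) == 0);                 // saved fp itself
  assert(fp_relative(frame_pointer, 0) == 5 * kPointerSize);  // receiver
  int local0 = frame_pointer + 2;                             // local0_index()
  assert(fp_relative(frame_pointer, local0) == -2 * kPointerSize);
  return 0;
}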

991
src/virtual-frame-ia32.cc Normal file
View File

@ -0,0 +1,991 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "codegen-inl.h"
#include "virtual-frame.h"
namespace v8 { namespace internal {
#define __ masm_->
// -------------------------------------------------------------------------
// VirtualFrame implementation.
// On entry to a function, the virtual frame already contains the receiver,
// the parameters, and a return address. All frame elements are in memory.
VirtualFrame::VirtualFrame(CodeGenerator* cgen)
: cgen_(cgen),
masm_(cgen->masm()),
elements_(0),
parameter_count_(cgen->scope()->num_parameters()),
local_count_(0),
stack_pointer_(parameter_count_ + 1), // 0-based index of TOS.
frame_pointer_(kIllegalIndex) {
for (int i = 0; i < parameter_count_ + 2; i++) {
elements_.Add(FrameElement::MemoryElement());
}
}
// Clear the dirty bit for the element at a given index if it is a
// valid element. The stack address corresponding to the element must
// be allocated on the physical stack, or the first element above the
// stack pointer so it can be allocated by a single push instruction.
void VirtualFrame::RawSyncElementAt(int index) {
FrameElement element = elements_[index];
if (!element.is_valid() || element.is_synced()) return;
if (index <= stack_pointer_) {
// Emit code to write elements below the stack pointer to their
// (already allocated) stack address.
switch (element.type()) {
case FrameElement::INVALID: // Fall through.
case FrameElement::MEMORY:
// There was an early bailout for invalid and synced elements
// (memory elements are always synced).
UNREACHABLE();
break;
case FrameElement::REGISTER:
__ mov(Operand(ebp, fp_relative(index)), element.reg());
break;
case FrameElement::CONSTANT:
if (cgen_->IsUnsafeSmi(element.handle())) {
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
cgen_->LoadUnsafeSmi(temp.reg(), element.handle());
__ mov(Operand(ebp, fp_relative(index)), temp.reg());
} else {
__ Set(Operand(ebp, fp_relative(index)),
Immediate(element.handle()));
}
break;
case FrameElement::COPY: {
int backing_index = element.index();
FrameElement backing_element = elements_[backing_index];
if (backing_element.is_memory()) {
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
__ mov(Operand(ebp, fp_relative(index)), temp.reg());
} else {
ASSERT(backing_element.is_register());
__ mov(Operand(ebp, fp_relative(index)), backing_element.reg());
}
break;
}
}
} else {
// Push elements above the stack pointer to allocate space and
// sync them. Space should have already been allocated in the
// actual frame for all the elements below this one.
ASSERT(index == stack_pointer_ + 1);
stack_pointer_++;
switch (element.type()) {
case FrameElement::INVALID: // Fall through.
case FrameElement::MEMORY:
// There was an early bailout for invalid and synced elements
// (memory elements are always synced).
UNREACHABLE();
break;
case FrameElement::REGISTER:
__ push(element.reg());
break;
case FrameElement::CONSTANT:
if (cgen_->IsUnsafeSmi(element.handle())) {
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
cgen_->LoadUnsafeSmi(temp.reg(), element.handle());
__ push(temp.reg());
} else {
__ push(Immediate(element.handle()));
}
break;
case FrameElement::COPY: {
int backing_index = element.index();
FrameElement backing = elements_[backing_index];
ASSERT(backing.is_memory() || backing.is_register());
if (backing.is_memory()) {
__ push(Operand(ebp, fp_relative(backing_index)));
} else {
__ push(backing.reg());
}
break;
}
}
}
elements_[index].set_sync();
}
void VirtualFrame::MergeTo(VirtualFrame* expected) {
Comment cmnt(masm_, "[ Merge frame");
// We should always be merging the code generator's current frame to an
// expected frame.
ASSERT(cgen_->frame() == this);
// Adjust the stack pointer upward (toward the top of the virtual
// frame) if necessary.
if (stack_pointer_ < expected->stack_pointer_) {
int difference = expected->stack_pointer_ - stack_pointer_;
stack_pointer_ = expected->stack_pointer_;
__ sub(Operand(esp), Immediate(difference * kPointerSize));
}
MergeMoveRegistersToMemory(expected);
MergeMoveRegistersToRegisters(expected);
MergeMoveMemoryToRegisters(expected);
// Fix any sync bit problems.
for (int i = 0; i <= stack_pointer_; i++) {
FrameElement source = elements_[i];
FrameElement target = expected->elements_[i];
if (source.is_synced() && !target.is_synced()) {
elements_[i].clear_sync();
} else if (!source.is_synced() && target.is_synced()) {
SyncElementAt(i);
}
}
// Adjust the stack pointer downward if necessary.
if (stack_pointer_ > expected->stack_pointer_) {
int difference = stack_pointer_ - expected->stack_pointer_;
stack_pointer_ = expected->stack_pointer_;
__ add(Operand(esp), Immediate(difference * kPointerSize));
}
// At this point, the frames should be identical.
ASSERT(Equals(expected));
}
void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
ASSERT(stack_pointer_ >= expected->stack_pointer_);
// Move registers, constants, and copies to memory. Perform moves
// from the top downward in the frame in order to leave the backing
// stores of copies in registers.
//
// Moving memory-backed copies to memory requires a spare register
// for the memory-to-memory moves. Since we are performing a merge,
// we use esi (which is already saved in the frame). We keep track
// of the index of the frame element esi is caching or kIllegalIndex
// if esi has not been disturbed.
int esi_caches = kIllegalIndex;
// A "singleton" memory element.
FrameElement memory_element = FrameElement::MemoryElement();
for (int i = stack_pointer_; i >= 0; i--) {
FrameElement target = expected->elements_[i];
if (target.is_memory()) {
FrameElement source = elements_[i];
switch (source.type()) {
case FrameElement::INVALID:
// Not a legal merge move.
UNREACHABLE();
break;
case FrameElement::MEMORY:
// Already in place.
break;
case FrameElement::REGISTER:
Unuse(source.reg());
if (!source.is_synced()) {
__ mov(Operand(ebp, fp_relative(i)), source.reg());
}
break;
case FrameElement::CONSTANT:
if (!source.is_synced()) {
if (cgen_->IsUnsafeSmi(source.handle())) {
esi_caches = i;
cgen_->LoadUnsafeSmi(esi, source.handle());
__ mov(Operand(ebp, fp_relative(i)), esi);
} else {
__ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
}
}
break;
case FrameElement::COPY:
if (!source.is_synced()) {
int backing_index = source.index();
FrameElement backing_element = elements_[backing_index];
if (backing_element.is_memory()) {
// If we have to spill a register, we spill esi.
if (esi_caches != backing_index) {
esi_caches = backing_index;
__ mov(esi, Operand(ebp, fp_relative(backing_index)));
}
__ mov(Operand(ebp, fp_relative(i)), esi);
} else {
ASSERT(backing_element.is_register());
__ mov(Operand(ebp, fp_relative(i)), backing_element.reg());
}
}
break;
}
elements_[i] = memory_element;
}
}
if (esi_caches != kIllegalIndex) {
__ mov(esi, Operand(ebp, fp_relative(context_index())));
}
}
void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
// We have already done X-to-memory moves.
ASSERT(stack_pointer_ >= expected->stack_pointer_);
// Perform register-to-register moves.
int start = 0;
int end = elements_.length() - 1;
bool any_moves_blocked; // Did we fail to make some moves this iteration?
bool should_break_cycles = false;
bool any_moves_made; // Did we make any progress this iteration?
do {
any_moves_blocked = false;
any_moves_made = false;
int first_move_blocked = kIllegalIndex;
int last_move_blocked = kIllegalIndex;
for (int i = start; i <= end; i++) {
FrameElement source = elements_[i];
FrameElement target = expected->elements_[i];
if (source.is_register() && target.is_register()) {
if (target.reg().is(source.reg())) {
if (target.is_synced() && !source.is_synced()) {
__ mov(Operand(ebp, fp_relative(i)), source.reg());
}
elements_[i] = target;
} else {
// We need to move source to target.
if (frame_registers_.is_used(target.reg())) {
// The move is blocked because the target contains valid data.
// If we are stuck with only cycles remaining, then we spill source.
// Otherwise, we just need more iterations.
if (should_break_cycles) {
SpillElementAt(i);
should_break_cycles = false;
} else { // Record a blocked move.
if (!any_moves_blocked) {
first_move_blocked = i;
}
last_move_blocked = i;
any_moves_blocked = true;
}
} else {
// The move is not blocked. This frame element can be moved from
// its source register to its target register.
if (target.is_synced() && !source.is_synced()) {
SyncElementAt(i);
}
Use(target.reg());
Unuse(source.reg());
elements_[i] = target;
__ mov(target.reg(), source.reg());
any_moves_made = true;
}
}
}
}
// Update control flags for next iteration.
should_break_cycles = (any_moves_blocked && !any_moves_made);
if (any_moves_blocked) {
start = first_move_blocked;
end = last_move_blocked;
}
} while (any_moves_blocked);
}
void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
// Move memory, constants, and copies to registers. This is the
// final step and is done from the bottom up so that the backing
// elements of copies are in their correct locations when we
// encounter the copies.
for (int i = 0; i < elements_.length(); i++) {
FrameElement source = elements_[i];
FrameElement target = expected->elements_[i];
if (target.is_register() && !source.is_register()) {
switch (source.type()) {
case FrameElement::INVALID: // Fall through.
case FrameElement::REGISTER:
UNREACHABLE();
break;
case FrameElement::MEMORY:
ASSERT(i <= stack_pointer_);
__ mov(target.reg(), Operand(ebp, fp_relative(i)));
break;
case FrameElement::CONSTANT:
if (cgen_->IsUnsafeSmi(source.handle())) {
cgen_->LoadUnsafeSmi(target.reg(), source.handle());
} else {
__ Set(target.reg(), Immediate(source.handle()));
}
break;
case FrameElement::COPY: {
FrameElement backing = elements_[source.index()];
ASSERT(backing.is_memory() || backing.is_register());
if (backing.is_memory()) {
ASSERT(source.index() <= stack_pointer_);
__ mov(target.reg(), Operand(ebp, fp_relative(source.index())));
} else {
__ mov(target.reg(), backing.reg());
}
}
}
// Ensure the proper sync state. If the source was in memory, no
// code needs to be emitted.
if (target.is_synced() && !source.is_memory()) {
SyncElementAt(i);
}
Use(target.reg());
elements_[i] = target;
}
}
}
void VirtualFrame::Enter() {
// Registers live on entry: esp, ebp, esi, edi.
Comment cmnt(masm_, "[ Enter JS frame");
#ifdef DEBUG
// Verify that edi contains a JS function. The following code
// relies on eax being available for use.
__ test(edi, Immediate(kSmiTagMask));
__ Check(not_zero,
"VirtualFrame::Enter - edi is not a function (smi check).");
__ mov(eax, FieldOperand(edi, HeapObject::kMapOffset));
__ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
__ cmp(eax, JS_FUNCTION_TYPE);
__ Check(equal,
"VirtualFrame::Enter - edi is not a function (map check).");
#endif
EmitPush(ebp);
frame_pointer_ = stack_pointer_;
__ mov(ebp, Operand(esp));
// Store the context in the frame. The context is kept in esi and a
// copy is stored in the frame. The external reference to esi
// remains.
EmitPush(esi);
// Store the function in the frame. The frame owns the register
// reference now (ie, it can keep it in edi or spill it later).
Push(edi);
SyncElementAt(elements_.length() - 1);
cgen_->allocator()->Unuse(edi);
}
void VirtualFrame::Exit() {
Comment cmnt(masm_, "[ Exit JS frame");
// Record the location of the JS exit code for patching when setting
// a break point.
__ RecordJSReturn();
// Avoid using the leave instruction here, because it is too
// short. We need the return sequence to be at least the size of a
// call instruction to support patching the exit code in the
// debugger. See VisitReturnStatement for the full return sequence.
__ mov(esp, Operand(ebp));
stack_pointer_ = frame_pointer_;
for (int i = elements_.length() - 1; i > stack_pointer_; i--) {
FrameElement last = elements_.RemoveLast();
if (last.is_register()) {
Unuse(last.reg());
}
}
frame_pointer_ = kIllegalIndex;
EmitPop(ebp);
}
void VirtualFrame::AllocateStackSlots(int count) {
ASSERT(height() == 0);
local_count_ = count;
if (count > 0) {
Comment cmnt(masm_, "[ Allocate space for locals");
// The locals are initialized to a constant (the undefined value), but
// we sync them with the actual frame to allocate space for spilling
// them later. First sync everything above the stack pointer so we can
// use pushes to allocate and initialize the locals.
SyncRange(stack_pointer_ + 1, elements_.length());
Handle<Object> undefined = Factory::undefined_value();
FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
__ Set(temp.reg(), Immediate(undefined));
for (int i = 0; i < count; i++) {
elements_.Add(initial_value);
stack_pointer_++;
__ push(temp.reg());
}
}
}
void VirtualFrame::SaveContextRegister() {
ASSERT(elements_[context_index()].is_memory());
__ mov(Operand(ebp, fp_relative(context_index())), esi);
}
void VirtualFrame::RestoreContextRegister() {
ASSERT(elements_[context_index()].is_memory());
__ mov(esi, Operand(ebp, fp_relative(context_index())));
}
void VirtualFrame::PushReceiverSlotAddress() {
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
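// The receiver slot is at parameter index -1 (see ParameterAt).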
__ lea(temp.reg(), ParameterAt(-1));
Push(&temp);
}
// Before changing an element which is copied, adjust so that the
// first copy becomes the new backing store and all the other copies
// are updated. If the original was in memory, the new backing store
// is allocated to a register. Return a copy of the new backing store
// or an invalid element if the original was not a copy.
FrameElement VirtualFrame::AdjustCopies(int index) {
FrameElement original = elements_[index];
ASSERT(original.is_memory() || original.is_register());
// Go looking for a first copy above index.
int i = index + 1;
while (i < elements_.length()) {
FrameElement elt = elements_[i];
if (elt.is_copy() && elt.index() == index) break;
i++;
}
if (i < elements_.length()) {
// There was a first copy. Make it the new backing element.
Register backing_reg;
if (original.is_memory()) {
Result fresh = cgen_->allocator()->Allocate();
ASSERT(fresh.is_valid());
backing_reg = fresh.reg();
__ mov(backing_reg, Operand(ebp, fp_relative(index)));
} else {
// The original was in a register.
backing_reg = original.reg();
}
FrameElement new_backing_element =
FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
if (elements_[i].is_synced()) {
new_backing_element.set_sync();
}
Use(backing_reg);
elements_[i] = new_backing_element;
// Update the other copies.
FrameElement copy = CopyElementAt(i);
for (int j = i; j < elements_.length(); j++) {
FrameElement elt = elements_[j];
if (elt.is_copy() && elt.index() == index) {
if (elt.is_synced()) {
copy.set_sync();
} else {
copy.clear_sync();
}
elements_[j] = copy;
}
}
copy.clear_sync();
return copy;
}
return FrameElement::InvalidElement();
}
void VirtualFrame::TakeFrameSlotAt(int index) {
ASSERT(index >= 0);
ASSERT(index < elements_.length());
FrameElement original = elements_[index];
switch (original.type()) {
case FrameElement::INVALID:
UNREACHABLE();
break;
case FrameElement::MEMORY: {
// Allocate the element to a register. If it is not copied,
// push that register on top of the frame. If it is copied,
// make the first copy the backing store and push a fresh copy
// on top of the frame.
FrameElement copy = AdjustCopies(index);
if (copy.is_valid()) {
// The original element was a copy. Push the copy of the new
// backing store.
elements_.Add(copy);
} else {
// The element was not a copy. Move it to a register and push
// that.
Result fresh = cgen_->allocator()->Allocate();
ASSERT(fresh.is_valid());
FrameElement new_element =
FrameElement::RegisterElement(fresh.reg(),
FrameElement::NOT_SYNCED);
Use(fresh.reg());
elements_.Add(new_element);
__ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
}
break;
}
case FrameElement::REGISTER: {
// If the element is not copied, push it on top of the frame.
// If it is copied, make the first copy be the new backing store
// and push a fresh copy on top of the frame.
FrameElement copy = AdjustCopies(index);
if (copy.is_valid()) {
// The original element was a copy. Push the copy of the new
// backing store.
elements_.Add(copy);
// This is the only case where we have to unuse the original
// register. The original is still counted and so is the new
// backing store of the copies.
Unuse(original.reg());
} else {
// The element was not a copy. Push it.
original.clear_sync();
elements_.Add(original);
}
break;
}
case FrameElement::CONSTANT:
original.clear_sync();
elements_.Add(original);
break;
case FrameElement::COPY:
original.clear_sync();
elements_.Add(original);
break;
}
elements_[index] = FrameElement::InvalidElement();
}
void VirtualFrame::StoreToFrameSlotAt(int index) {
// Store the value on top of the frame to the virtual frame slot at
// a given index. The value on top of the frame is left in place.
// This is a duplicating operation, so it can create copies.
ASSERT(index >= 0);
ASSERT(index < elements_.length());
FrameElement original = elements_[index];
// If the stored-to slot may be copied, adjust to preserve the
// copy-on-write semantics of copied elements.
if (original.is_register() || original.is_memory()) {
FrameElement ignored = AdjustCopies(index);
}
// If the stored-to slot is a register reference, deallocate it.
if (original.is_register()) {
Unuse(original.reg());
}
int top_index = elements_.length() - 1;
FrameElement top = elements_[top_index];
ASSERT(top.is_valid());
if (top.is_copy()) {
// There are two cases based on the relative positions of the
// stored-to slot and the backing slot of the top element.
int backing_index = top.index();
ASSERT(backing_index != index);
if (backing_index < index) {
// 1. The top element is a copy of a slot below the stored-to
// slot. The stored-to slot becomes an unsynced copy of that
// same backing slot.
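// For example, storing the top of the frame [x, y, copy(0)] into
// slot 1 yields [x, copy(0), copy(0)].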
elements_[index] = CopyElementAt(backing_index);
} else {
// 2. The top element is a copy of a slot above the stored-to
// slot. The stored-to slot becomes the new (unsynced) backing
// slot and both the top element and the element at the former
// backing slot become copies of it. The sync state of the top
// and former backing elements is preserved.
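// For example, storing the top of the frame [x, y, z, copy(2)] into
// slot 1 yields [x, z, copy(1), copy(1)].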
FrameElement backing_element = elements_[backing_index];
ASSERT(backing_element.is_memory() || backing_element.is_register());
if (backing_element.is_memory()) {
// Because sets of copies are canonicalized to be backed by
// their lowest frame element, and because memory frame
// elements are backed by the corresponding stack address, we
// have to move the actual value down in the stack.
//
// TODO(209): consider allocating the stored-to slot to the
// temp register. Alternatively, allow copies to appear in
// any order in the frame and lazily move the value down to
// the slot.
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
__ mov(Operand(ebp, fp_relative(index)), temp.reg());
} else if (backing_element.is_synced()) {
// If the element is a register, we will not actually move
// anything on the stack but only update the virtual frame
// element.
backing_element.clear_sync();
}
elements_[index] = backing_element;
// The old backing element becomes a copy of the new backing
// element.
FrameElement new_element = CopyElementAt(index);
elements_[backing_index] = new_element;
if (backing_element.is_synced()) {
elements_[backing_index].set_sync();
}
// All the copies of the old backing element (including the top
// element) become copies of the new backing element.
for (int i = backing_index + 1; i < elements_.length(); i++) {
FrameElement current = elements_[i];
if (current.is_copy() && current.index() == backing_index) {
elements_[i] = new_element;
if (current.is_synced()) {
elements_[i].set_sync();
}
}
}
}
return;
}
// Move the top element to the stored-to slot and replace it (the
// top element) with a copy.
elements_[index] = top;
if (top.is_memory()) {
// TODO(209): consider allocating the stored-to slot to the temp
// register. Alternatively, allow copies to appear in any order
// in the frame and lazily move the value down to the slot.
FrameElement new_top = CopyElementAt(index);
new_top.set_sync();
elements_[top_index] = new_top;
// The sync state of the former top element is correct (synced).
// Emit code to move the value down in the frame.
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), Operand(esp, 0));
__ mov(Operand(ebp, fp_relative(index)), temp.reg());
} else if (top.is_register()) {
// The stored-to slot has the (unsynced) register reference and
// the top element becomes a copy. The sync state of the top is
// preserved.
FrameElement new_top = CopyElementAt(index);
if (top.is_synced()) {
new_top.set_sync();
elements_[index].clear_sync();
}
elements_[top_index] = new_top;
} else {
// The stored-to slot holds the same value as the top but
// unsynced. (We do not have copies of constants yet.)
ASSERT(top.is_constant());
elements_[index].clear_sync();
}
}
void VirtualFrame::PushTryHandler(HandlerType type) {
ASSERT(cgen_->HasValidEntryRegisters());
// Grow the expression stack by handler size less two (the return address
// is already pushed by a call instruction, and PushTryHandler from the
// macro assembler will leave the top of stack in the eax register to be
// pushed separately).
Adjust(kHandlerSize - 2);
__ PushTryHandler(IN_JAVASCRIPT, type);
// TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
EmitPush(eax);
}
Result VirtualFrame::RawCallStub(CodeStub* stub, int frame_arg_count) {
ASSERT(cgen_->HasValidEntryRegisters());
__ CallStub(stub);
Result result = cgen_->allocator()->Allocate(eax);
ASSERT(result.is_valid());
return result;
}
Result VirtualFrame::CallRuntime(Runtime::Function* f,
int frame_arg_count) {
PrepareForCall(frame_arg_count, frame_arg_count);
ASSERT(cgen_->HasValidEntryRegisters());
__ CallRuntime(f, frame_arg_count);
Result result = cgen_->allocator()->Allocate(eax);
ASSERT(result.is_valid());
return result;
}
Result VirtualFrame::CallRuntime(Runtime::FunctionId id,
int frame_arg_count) {
PrepareForCall(frame_arg_count, frame_arg_count);
ASSERT(cgen_->HasValidEntryRegisters());
__ CallRuntime(id, frame_arg_count);
Result result = cgen_->allocator()->Allocate(eax);
ASSERT(result.is_valid());
return result;
}
Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
int frame_arg_count) {
PrepareForCall(frame_arg_count, frame_arg_count);
ASSERT(cgen_->HasValidEntryRegisters());
__ InvokeBuiltin(id, flag);
Result result = cgen_->allocator()->Allocate(eax);
ASSERT(result.is_valid());
return result;
}
Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode) {
ASSERT(cgen_->HasValidEntryRegisters());
__ call(code, rmode);
Result result = cgen_->allocator()->Allocate(eax);
ASSERT(result.is_valid());
return result;
}
Result VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
Result* arg,
int dropped_args) {
int spilled_args = 0;
switch (code->kind()) {
case Code::CALL_IC:
ASSERT(arg->reg().is(eax));
spilled_args = dropped_args + 1;
break;
case Code::LOAD_IC:
ASSERT(arg->reg().is(ecx));
ASSERT(dropped_args == 0);
spilled_args = 1;
break;
case Code::KEYED_STORE_IC:
ASSERT(arg->reg().is(eax));
ASSERT(dropped_args == 0);
spilled_args = 2;
break;
default:
// No other types of code objects are called with values
// in exactly one register.
UNREACHABLE();
break;
}
PrepareForCall(spilled_args, dropped_args);
arg->Unuse();
return RawCallCodeObject(code, rmode);
}
Result VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
Result* arg0,
Result* arg1,
int dropped_args) {
int spilled_args = 1;
switch (code->kind()) {
case Code::STORE_IC:
ASSERT(arg0->reg().is(eax));
ASSERT(arg1->reg().is(ecx));
ASSERT(dropped_args == 0);
spilled_args = 1;
break;
case Code::BUILTIN:
ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
ASSERT(arg0->reg().is(eax));
ASSERT(arg1->reg().is(edi));
spilled_args = dropped_args + 1;
break;
default:
// No other types of code objects are called with values
// in exactly two registers.
UNREACHABLE();
break;
}
PrepareForCall(spilled_args, dropped_args);
arg0->Unuse();
arg1->Unuse();
return RawCallCodeObject(code, rmode);
}
void VirtualFrame::Drop(int count) {
ASSERT(height() >= count);
int num_virtual_elements = (elements_.length() - 1) - stack_pointer_;
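// Elements above the stack pointer exist only in the virtual frame
// and can be dropped without emitting code.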
// Emit code to lower the stack pointer if necessary.
if (num_virtual_elements < count) {
int num_dropped = count - num_virtual_elements;
stack_pointer_ -= num_dropped;
__ add(Operand(esp), Immediate(num_dropped * kPointerSize));
}
// Discard elements from the virtual frame and free any registers.
for (int i = 0; i < count; i++) {
FrameElement dropped = elements_.RemoveLast();
if (dropped.is_register()) {
Unuse(dropped.reg());
}
}
}
Result VirtualFrame::Pop() {
FrameElement element = elements_.RemoveLast();
int index = elements_.length();
ASSERT(element.is_valid());
bool pop_needed = (stack_pointer_ == index);
if (pop_needed) {
stack_pointer_--;
if (element.is_memory()) {
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
__ pop(temp.reg());
return temp;
}
__ add(Operand(esp), Immediate(kPointerSize));
}
ASSERT(!element.is_memory());
// The top element is a register, constant, or a copy. Unuse
// registers and follow copies to their backing store.
if (element.is_register()) {
Unuse(element.reg());
} else if (element.is_copy()) {
ASSERT(element.index() < index);
index = element.index();
element = elements_[index];
}
ASSERT(!element.is_copy());
// The element is memory, a register, or a constant.
if (element.is_memory()) {
// Memory elements can only be the backing store of a copy.
// Allocate the original to a register.
ASSERT(index <= stack_pointer_);
Result temp = cgen_->allocator()->Allocate();
ASSERT(temp.is_valid());
Use(temp.reg());
FrameElement new_element =
FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
elements_[index] = new_element;
__ mov(temp.reg(), Operand(ebp, fp_relative(index)));
return Result(temp.reg(), cgen_);
} else if (element.is_register()) {
return Result(element.reg(), cgen_);
} else {
ASSERT(element.is_constant());
return Result(element.handle(), cgen_);
}
}
void VirtualFrame::EmitPop(Register reg) {
ASSERT(stack_pointer_ == elements_.length() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ pop(reg);
}
void VirtualFrame::EmitPop(Operand operand) {
ASSERT(stack_pointer_ == elements_.length() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ pop(operand);
}
void VirtualFrame::EmitPush(Register reg) {
ASSERT(stack_pointer_ == elements_.length() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(reg);
}
void VirtualFrame::EmitPush(Operand operand) {
ASSERT(stack_pointer_ == elements_.length() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(operand);
}
void VirtualFrame::EmitPush(Immediate immediate) {
ASSERT(stack_pointer_ == elements_.length() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(immediate);
}
#undef __
} } // namespace v8::internal

452
src/virtual-frame-ia32.h Normal file
View File

@ -0,0 +1,452 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_VIRTUAL_FRAME_IA32_H_
#define V8_VIRTUAL_FRAME_IA32_H_
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Virtual frames
//
// The virtual frame is an abstraction of the physical stack frame. It
// encapsulates the parameters, frame-allocated locals, and the expression
// stack. It supports push/pop operations on the expression stack, as well
// as random access to the expression stack elements, locals, and
// parameters.
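//
// As a rough sketch (derived from the index helpers below), the
// element indices of a frame are laid out as follows:
//
//   0                       receiver
//   1 .. parameter count    parameters
//   frame_pointer_ - 1      return address
//   frame_pointer_          saved frame pointer (ebp)
//   frame_pointer_ + 1      context
//   frame_pointer_ + 2      function
//   frame_pointer_ + 3 ..   locals, then the expression stack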
class VirtualFrame : public Malloced {
public:
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
// generator's current frame, but no attempt is made to require it
// to stay spilled. It is intended as documentation while the code
// generator is being transformed.
class SpilledScope BASE_EMBEDDED {
public:
explicit SpilledScope(CodeGenerator* cgen);
~SpilledScope();
private:
CodeGenerator* cgen_;
bool previous_state_;
};
// Construct an initial virtual frame on entry to a JS function.
explicit VirtualFrame(CodeGenerator* cgen);
// Construct a virtual frame as a clone of an existing one.
explicit VirtualFrame(VirtualFrame* original);
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index);
// The height of the virtual expression stack.
int height() const {
return elements_.length() - expression_base_index();
}
int register_count(Register reg) {
return frame_registers_.count(reg);
}
// Add extra in-memory elements to the top of the frame to match an actual
// frame (eg, the frame after an exception handler is pushed). No code is
// emitted.
void Adjust(int count);
// Forget elements from the top of the frame to match an actual frame (eg,
// the frame after a runtime call). No code is emitted.
void Forget(int count);
// Spill all values from the frame to memory.
void SpillAll();
// Spill all occurrences of a specific register from the frame.
void Spill(Register reg);
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
// (ie, they all have frame-external references).
Register SpillAnyRegister();
// Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating
// code. It is guaranteed that no code will be generated.
void PrepareMergeTo(VirtualFrame* expected);
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
void MergeTo(VirtualFrame* expected);
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator();
// (Re)attach a frame to its code generator. This informs the register
// allocator that the frame-internal register references are active again.
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
void AttachToCodeGenerator();
// Emit code for the physical JS entry and exit frame sequences. After
// calling Enter, the virtual frame is ready for use; and after calling
// Exit it should not be used. Note that Enter does not allocate space in
// the physical frame for storing frame-allocated locals.
void Enter();
void Exit();
// Prepare for returning from the frame by spilling locals and
// dropping all non-locals elements in the virtual frame. This
// avoids generating unnecessary merge code when jumping to the
// shared return site. Emits code for spills.
void PrepareForReturn();
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots(int count);
// An element of the expression stack as an assembly operand.
Operand ElementAt(int index) const {
return Operand(esp, index * kPointerSize);
}
// Random-access store to a frame-top relative frame element. The result
// becomes owned by the frame and is invalidated.
void SetElementAt(int index, Result* value);
// Set a frame element to a constant. The index is frame-top relative.
void SetElementAt(int index, Handle<Object> value) {
Result temp(value, cgen_);
SetElementAt(index, &temp);
}
void PushElementAt(int index) {
PushFrameSlotAt(elements_.length() - index - 1);
}
// A frame-allocated local as an assembly operand.
Operand LocalAt(int index) const {
ASSERT(0 <= index);
ASSERT(index < local_count_);
return Operand(ebp, kLocal0Offset - index * kPointerSize);
}
// Push a copy of the value of a local frame slot on top of the frame.
void PushLocalAt(int index) {
PushFrameSlotAt(local0_index() + index);
}
// Push the value of a local frame slot on top of the frame and invalidate
// the local slot. The slot should be written to before trying to read
// from it again.
void TakeLocalAt(int index) {
TakeFrameSlotAt(local0_index() + index);
}
// Store the top value on the virtual frame into a local frame slot. The
// value is left in place on top of the frame.
void StoreToLocalAt(int index) {
StoreToFrameSlotAt(local0_index() + index);
}
// Push the address of the receiver slot on the frame.
void PushReceiverSlotAddress();
// Push the function on top of the frame.
void PushFunction() { PushFrameSlotAt(function_index()); }
// Save the value of the esi register to the context frame slot.
void SaveContextRegister();
// Restore the esi register from the value of the context frame
// slot.
void RestoreContextRegister();
// A parameter as an assembly operand.
Operand ParameterAt(int index) const {
ASSERT(-1 <= index); // -1 is the receiver.
ASSERT(index < parameter_count_);
return Operand(ebp, (1 + parameter_count_ - index) * kPointerSize);
}
// Push a copy of the value of a parameter frame slot on top of the frame.
void PushParameterAt(int index) {
PushFrameSlotAt(param0_index() + index);
}
// Push the value of a parameter frame slot on top of the frame and
// invalidate the parameter slot. The slot should be written to before
// trying to read from it again.
void TakeParameterAt(int index) {
TakeFrameSlotAt(param0_index() + index);
}
// Store the top value on the virtual frame into a parameter frame slot.
// The value is left in place on top of the frame.
void StoreToParameterAt(int index) {
StoreToFrameSlotAt(param0_index() + index);
}
// The receiver frame slot.
Operand Receiver() const { return ParameterAt(-1); }
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call a code stub, given the number of arguments it expects on (and
// removes from) the top of the physical frame.
Result CallStub(CodeStub* stub, int frame_arg_count);
Result CallStub(CodeStub* stub, Result* arg, int frame_arg_count);
Result CallStub(CodeStub* stub,
Result* arg0,
Result* arg1,
int frame_arg_count);
// Call the runtime, given the number of arguments expected on (and
// removed from) the top of the physical frame.
Result CallRuntime(Runtime::Function* f, int frame_arg_count);
Result CallRuntime(Runtime::FunctionId id, int frame_arg_count);
// Invoke a builtin, given the number of arguments it expects on (and
// removes from) the top of the physical frame.
Result InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
int frame_arg_count);
// Call into a JS code object, given the number of arguments it
// removes from the top of the physical frame.
// Register arguments are passed as results and consumed by the call.
Result CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
int dropped_args);
Result CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
Result* arg,
int dropped_args);
Result CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
Result* arg0,
Result* arg1,
int dropped_args);
// Drop a number of elements from the top of the expression stack. May
// emit code to affect the physical frame. Does not clobber any registers
// excepting possibly the stack pointer.
void Drop(int count);
// Drop one element.
void Drop() { Drop(1); }
// Duplicate the top element of the frame.
void Dup() { PushFrameSlotAt(elements_.length() - 1); }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
Result Pop();
// Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction.
void EmitPop(Register reg);
void EmitPop(Operand operand);
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg);
void EmitPush(Operand operand);
void EmitPush(Immediate immediate);
// Push an element on the virtual frame.
void Push(Register reg);
void Push(Handle<Object> value);
void Push(Smi* value) { Push(Handle<Object>(value)); }
// Pushing a result invalidates it (its contents become owned by the
// frame).
void Push(Result* result);
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
void Nip(int num_dropped);
private:
// An illegal index into the virtual frame.
static const int kIllegalIndex = -1;
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
static const int kContextOffset = StandardFrameConstants::kContextOffset;
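// The handler size is measured in frame slots (words), not bytes.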
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
CodeGenerator* cgen_;
MacroAssembler* masm_;
List<FrameElement> elements_;
// The number of frame-allocated locals and parameters respectively.
int parameter_count_;
int local_count_;
// The index of the element that is at the processor's stack pointer
// (the esp register).
int stack_pointer_;
// The index of the element that is at the processor's frame pointer
// (the ebp register).
int frame_pointer_;
// The frame has an embedded register file that it uses to track registers
// used in the frame.
RegisterFile frame_registers_;
// The index of the first parameter. The receiver lies below the first
// parameter.
int param0_index() const { return 1; }
// The index of the context slot in the frame.
int context_index() const {
ASSERT(frame_pointer_ != kIllegalIndex);
return frame_pointer_ + 1;
}
// The index of the function slot in the frame. It lies above the context
// slot.
int function_index() const {
ASSERT(frame_pointer_ != kIllegalIndex);
return frame_pointer_ + 2;
}
// The index of the first local. Between the parameters and the locals
// lie the return address, the saved frame pointer, the context, and the
// function.
int local0_index() const {
ASSERT(frame_pointer_ != kIllegalIndex);
return frame_pointer_ + 3;
}
// The index of the base of the expression stack.
int expression_base_index() const { return local0_index() + local_count_; }
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
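// For example, fp_relative(context_index()) is -kPointerSize: the
// context slot lies one word below the saved frame pointer.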
int fp_relative(int index) const {
return (frame_pointer_ - index) * kPointerSize;
}
// Record an occurrence of a register in the virtual frame. This has the
// effect of incrementing both the register's frame-internal reference
// count and its external reference count.
void Use(Register reg);
// Record that a register reference has been dropped from the frame. This
// decrements both the register's internal and external reference counts.
void Unuse(Register reg);
// Spill the element at a particular index---write it to memory if
// necessary, free any associated register, and forget its value if
// constant.
void SpillElementAt(int index);
// Sync the element at a particular index. If it is a register or
// constant that disagrees with the value on the stack, write it to memory.
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
// Sync the range of elements in [begin, end).
void SyncRange(int begin, int end);
// Sync a single element, assuming that its index is less than
// or equal to stack pointer + 1.
void RawSyncElementAt(int index);
// Push a copy of a frame slot (typically a local or parameter) on top of
// the frame.
void PushFrameSlotAt(int index);
// Push the value of a frame slot (typically a local or parameter) on
// top of the frame and invalidate the slot.
void TakeFrameSlotAt(int index);
// Store the value on top of the frame to a frame slot (typically a local
// or parameter).
void StoreToFrameSlotAt(int index);
// Spill all elements in registers. Spill the top spilled_args elements
// on the frame. Sync all other frame elements.
// Then drop dropped_args elements from the virtual frame, to match
// the effect of an upcoming call that will drop them from the stack.
void PrepareForCall(int spilled_args, int dropped_args);
// Move frame elements currently in registers or constants, that
// should be in memory in the expected frame, to memory.
void MergeMoveRegistersToMemory(VirtualFrame* expected);
// Make the register-to-register moves necessary to
// merge this frame with the expected frame.
// Register to memory moves must already have been made,
// and memory to register moves must follow this call.
// This is because some new memory-to-register moves are
// created in order to break cycles of register moves.
// Used in the implementation of MergeTo().
void MergeMoveRegistersToRegisters(VirtualFrame* expected);
// Make the memory-to-register and constant-to-register moves
// needed to make this frame equal the expected frame.
// Called after all register-to-memory and register-to-register
// moves have been made. After this function returns, the frames
// should be equal.
void MergeMoveMemoryToRegisters(VirtualFrame* expected);
// Helper function to implement the copy-on-write semantics of an
// element's copies just before writing to the element. The copies
// are updated, but the element is not changed. A copy of the new
// backing store of all the copies is returned if there were any
// copies, and an invalid frame element is returned if there were no
// copies.
FrameElement AdjustCopies(int index);
// Call a code stub that has already been prepared for calling (via
// PrepareForCall).
Result RawCallStub(CodeStub* stub, int frame_arg_count);
// Calls a code object which has already been prepared for calling
// (via PrepareForCall).
Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
bool Equals(VirtualFrame* other);
friend class JumpTarget;
};
} } // namespace v8::internal
#endif // V8_VIRTUAL_FRAME_IA32_H_

542
src/virtual-frame.cc Normal file
View File

@ -0,0 +1,542 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "codegen-inl.h"
#include "virtual-frame.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// VirtualFrame implementation.
VirtualFrame::SpilledScope::SpilledScope(CodeGenerator* cgen)
: cgen_(cgen),
previous_state_(cgen->in_spilled_code()) {
ASSERT(cgen->has_valid_frame());
cgen->frame()->SpillAll();
cgen->set_in_spilled_code(true);
}
VirtualFrame::SpilledScope::~SpilledScope() {
cgen_->set_in_spilled_code(previous_state_);
}
// When cloned, a frame is a deep copy of the original.
VirtualFrame::VirtualFrame(VirtualFrame* original)
: cgen_(original->cgen_),
masm_(original->masm_),
elements_(original->elements_.length()),
parameter_count_(original->parameter_count_),
local_count_(original->local_count_),
stack_pointer_(original->stack_pointer_),
frame_pointer_(original->frame_pointer_),
frame_registers_(original->frame_registers_) {
// Copy all the elements from the original.
for (int i = 0; i < original->elements_.length(); i++) {
elements_.Add(original->elements_[i]);
}
}
FrameElement VirtualFrame::CopyElementAt(int index) {
ASSERT(index >= 0);
ASSERT(index < elements_.length());
FrameElement target = elements_[index];
FrameElement result;
switch (target.type()) {
case FrameElement::CONSTANT:
// We do not copy constants and instead return a fresh unsynced
// constant.
result = FrameElement::ConstantElement(target.handle(),
FrameElement::NOT_SYNCED);
break;
case FrameElement::COPY:
// We do not allow copies of copies, so we follow one link to
// the actual backing store of a copy before making a copy.
index = target.index();
ASSERT(elements_[index].is_memory() || elements_[index].is_register());
// Fall through.
case FrameElement::MEMORY: // Fall through.
case FrameElement::REGISTER:
// All copies are backed by memory or register locations.
result.type_ =
FrameElement::TypeField::encode(FrameElement::COPY) |
FrameElement::SyncField::encode(FrameElement::NOT_SYNCED);
result.data_.index_ = index;
break;
case FrameElement::INVALID:
// We should not try to copy invalid elements.
UNREACHABLE();
break;
}
return result;
}
// Modify the state of the virtual frame to match the actual frame by adding
// extra in-memory elements to the top of the virtual frame. The extra
// elements will be externally materialized on the actual frame (eg, by
// pushing an exception handler). No code is emitted.
void VirtualFrame::Adjust(int count) {
ASSERT(count >= 0);
ASSERT(stack_pointer_ == elements_.length() - 1);
for (int i = 0; i < count; i++) {
elements_.Add(FrameElement::MemoryElement());
}
stack_pointer_ += count;
}
// Modify the state of the virtual frame to match the actual frame by
// removing elements from the top of the virtual frame. The elements will
// be externally popped from the actual frame (eg, by a runtime call). No
// code is emitted.
void VirtualFrame::Forget(int count) {
ASSERT(count >= 0);
ASSERT(stack_pointer_ == elements_.length() - 1);
ASSERT(elements_.length() >= count);
stack_pointer_ -= count;
for (int i = 0; i < count; i++) {
FrameElement last = elements_.RemoveLast();
if (last.is_register()) {
Unuse(last.reg());
}
}
}
void VirtualFrame::Use(Register reg) {
frame_registers_.Use(reg);
cgen_->allocator()->Use(reg);
}
void VirtualFrame::Unuse(Register reg) {
frame_registers_.Unuse(reg);
cgen_->allocator()->Unuse(reg);
}
void VirtualFrame::Spill(Register target) {
if (!frame_registers_.is_used(target)) return;
for (int i = 0; i < elements_.length(); i++) {
if (elements_[i].is_register() && elements_[i].reg().is(target)) {
SpillElementAt(i);
}
}
}
// Spill any register if possible, making its external reference count zero.
Register VirtualFrame::SpillAnyRegister() {
// Find the leftmost (ordered by register code), least
// internally-referenced register whose internal reference count matches
// its external reference count (so that spilling it from the frame frees
// it for use).
int min_count = kMaxInt;
int best_register_code = no_reg.code_;
for (int i = 0; i < kNumRegisters; i++) {
int count = frame_registers_.count(i);
if (count < min_count && count == cgen_->allocator()->count(i)) {
min_count = count;
best_register_code = i;
}
}
Register result = { best_register_code };
if (result.is_valid()) {
Spill(result);
ASSERT(!cgen_->allocator()->is_used(result));
}
return result;
}
// Make the type of the element at a given index be MEMORY.
void VirtualFrame::SpillElementAt(int index) {
if (!elements_[index].is_valid()) return;
SyncElementAt(index);
if (elements_[index].is_register()) {
Unuse(elements_[index].reg());
}
// The element is now in memory.
elements_[index] = FrameElement::MemoryElement();
}
// Clear the dirty bits for the range of elements in [begin, end).
void VirtualFrame::SyncRange(int begin, int end) {
ASSERT(begin >= 0);
ASSERT(end <= elements_.length());
for (int i = begin; i < end; i++) {
RawSyncElementAt(i);
}
}
// Clear the dirty bit for the element at a given index.
void VirtualFrame::SyncElementAt(int index) {
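// First sync the elements between the stack pointer and this one,
// if any, so that RawSyncElementAt's precondition (index at most
// stack pointer + 1) is satisfied.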
if (index > stack_pointer_ + 1) {
SyncRange(stack_pointer_ + 1, index);
}
RawSyncElementAt(index);
}
// Make the type of all elements be MEMORY.
void VirtualFrame::SpillAll() {
for (int i = 0; i < elements_.length(); i++) {
SpillElementAt(i);
}
}
void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
// No code needs to be generated to invalidate valid elements. No
// code needs to be generated to move values to memory if they are
// already synced.
for (int i = 0; i < elements_.length(); i++) {
FrameElement source = elements_[i];
FrameElement target = expected->elements_[i];
if (!target.is_valid() ||
(target.is_memory() && !source.is_memory() && source.is_synced())) {
if (source.is_register()) {
// If the frame is the code generator's current frame, we have
// to decrement both the frame-internal and global register
// counts.
if (cgen_->frame() == this) {
Unuse(source.reg());
} else {
frame_registers_.Unuse(source.reg());
}
}
elements_[i] = target;
}
}
}
void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
ASSERT(height() >= dropped_args);
ASSERT(height() >= spilled_args);
ASSERT(dropped_args <= spilled_args);
int arg_base_index = elements_.length() - spilled_args;
// Spill the arguments. We spill from the top down so that the
// backing stores of register copies will be spilled only after all
// the copies are spilled---it is better to spill via a
// register-to-memory move than a memory-to-memory move.
for (int i = elements_.length() - 1; i >= arg_base_index; i--) {
SpillElementAt(i);
}
// Below the arguments, spill registers and sync everything else.
// Syncing is necessary for the locals and parameters to give the
// debugger a consistent view of the frame.
for (int i = arg_base_index - 1; i >= 0; i--) {
FrameElement element = elements_[i];
if (element.is_register()) {
SpillElementAt(i);
} else if (element.is_valid()) {
SyncElementAt(i);
}
}
// Forget the frame elements that will be popped by the call.
Forget(dropped_args);
}
void VirtualFrame::DetachFromCodeGenerator() {
// Tell the global register allocator that it is free to reallocate all
// register references contained in this frame. The frame elements remain
// register references, so the frame-internal reference count is not
// decremented.
for (int i = 0; i < elements_.length(); i++) {
if (elements_[i].is_register()) {
cgen_->allocator()->Unuse(elements_[i].reg());
}
}
}
void VirtualFrame::AttachToCodeGenerator() {
// Tell the global register allocator that the frame-internal register
// references are live again.
for (int i = 0; i < elements_.length(); i++) {
if (elements_[i].is_register()) {
cgen_->allocator()->Use(elements_[i].reg());
}
}
}
void VirtualFrame::PrepareForReturn() {
// Spill all locals. This is necessary to make sure all locals have
// the right value when breaking at the return site in the debugger.
for (int i = 0; i < expression_base_index(); i++) SpillElementAt(i);
// Drop all non-local stack elements.
Drop(height());
// Validate state: The expression stack should be empty and the
// stack pointer should have been updated to reflect this.
ASSERT(height() == 0);
ASSERT(stack_pointer_ == expression_base_index() - 1);
}
void VirtualFrame::SetElementAt(int index, Result* value) {
int frame_index = elements_.length() - index - 1;
ASSERT(frame_index >= 0);
ASSERT(frame_index < elements_.length());
ASSERT(value->is_valid());
FrameElement original = elements_[frame_index];
// Early exit if the element is the same as the one being set.
bool same_register = original.is_register()
&& value->is_register()
&& original.reg().is(value->reg());
bool same_constant = original.is_constant()
&& value->is_constant()
&& original.handle().is_identical_to(value->handle());
if (same_register || same_constant) {
value->Unuse();
return;
}
// If the original may be a copy, adjust to preserve the copy-on-write
// semantics of copied elements.
if (original.is_register() || original.is_memory()) {
FrameElement ignored = AdjustCopies(frame_index);
}
// If the original is a register reference, deallocate it.
if (original.is_register()) {
Unuse(original.reg());
}
FrameElement new_element;
if (value->is_register()) {
// There are two cases depending on whether the register already
// occurs in the frame or not.
if (register_count(value->reg()) == 0) {
Use(value->reg());
elements_[frame_index] =
FrameElement::RegisterElement(value->reg(),
FrameElement::NOT_SYNCED);
} else {
for (int i = 0; i < elements_.length(); i++) {
FrameElement element = elements_[i];
if (element.is_register() && element.reg().is(value->reg())) {
// The register backing store is lower in the frame than its
// copy.
if (i < frame_index) {
elements_[frame_index] = CopyElementAt(i);
} else {
// There was an early bailout for the case of setting a
// register element to itself.
ASSERT(i != frame_index);
element.clear_sync();
elements_[frame_index] = element;
elements_[i] = CopyElementAt(frame_index);
}
// Exit the loop once the appropriate copy is inserted.
break;
}
}
}
} else {
ASSERT(value->is_constant());
elements_[frame_index] =
FrameElement::ConstantElement(value->handle(),
FrameElement::NOT_SYNCED);
}
value->Unuse();
}
void VirtualFrame::PushFrameSlotAt(int index) {
FrameElement new_element = CopyElementAt(index);
elements_.Add(new_element);
}
Result VirtualFrame::CallStub(CodeStub* stub, int frame_arg_count) {
PrepareForCall(frame_arg_count, frame_arg_count);
return RawCallStub(stub, frame_arg_count);
}
Result VirtualFrame::CallStub(CodeStub* stub,
Result* arg,
int frame_arg_count) {
PrepareForCall(frame_arg_count, frame_arg_count);
arg->Unuse();
return RawCallStub(stub, frame_arg_count);
}
Result VirtualFrame::CallStub(CodeStub* stub,
Result* arg0,
Result* arg1,
int frame_arg_count) {
PrepareForCall(frame_arg_count, frame_arg_count);
arg0->Unuse();
arg1->Unuse();
return RawCallStub(stub, frame_arg_count);
}
Result VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
int spilled_args = 0;
switch (code->kind()) {
case Code::CALL_IC:
spilled_args = dropped_args + 1;
break;
case Code::FUNCTION:
spilled_args = dropped_args + 1;
break;
case Code::KEYED_LOAD_IC:
ASSERT(dropped_args == 0);
spilled_args = 2;
break;
default:
// The other types of code objects are called with values
// in specific registers, and are handled in functions with
// a different signature.
UNREACHABLE();
break;
}
PrepareForCall(spilled_args, dropped_args);
return RawCallCodeObject(code, rmode);
}
void VirtualFrame::Push(Register reg) {
FrameElement new_element;
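// If the register already appears in the frame, push a copy of the
// existing element rather than a second register reference.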
if (register_count(reg) == 0) {
Use(reg);
new_element =
FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED);
} else {
for (int i = 0; i < elements_.length(); i++) {
FrameElement element = elements_[i];
if (element.is_register() && element.reg().is(reg)) {
new_element = CopyElementAt(i);
break;
}
}
}
elements_.Add(new_element);
}
void VirtualFrame::Push(Handle<Object> value) {
elements_.Add(FrameElement::ConstantElement(value,
FrameElement::NOT_SYNCED));
}
void VirtualFrame::Push(Result* result) {
if (result->is_register()) {
Push(result->reg());
} else {
ASSERT(result->is_constant());
Push(result->handle());
}
result->Unuse();
}
void VirtualFrame::Nip(int num_dropped) {
ASSERT(num_dropped >= 0);
if (num_dropped == 0) return;
Result tos = Pop();
if (num_dropped > 1) {
Drop(num_dropped - 1);
}
SetElementAt(0, &tos);
}
bool FrameElement::Equals(FrameElement other) {
if (type() != other.type()) return false;
if (is_synced() != other.is_synced()) return false;
if (is_register()) {
if (!reg().is(other.reg())) return false;
} else if (is_constant()) {
if (!handle().is_identical_to(other.handle())) return false;
} else if (is_copy()) {
if (index() != other.index()) return false;
}
return true;
}
bool VirtualFrame::Equals(VirtualFrame* other) {
if (cgen_ != other->cgen_) return false;
if (masm_ != other->masm_) return false;
if (elements_.length() != other->elements_.length()) return false;
for (int i = 0; i < elements_.length(); i++) {
if (!elements_[i].Equals(other->elements_[i])) return false;
}
if (parameter_count_ != other->parameter_count_) return false;
if (local_count_ != other->local_count_) return false;
if (stack_pointer_ != other->stack_pointer_) return false;
if (frame_pointer_ != other->frame_pointer_) return false;
for (int i = 0; i < kNumRegisters; i++) {
if (frame_registers_.count(i) != other->frame_registers_.count(i)) {
return false;
}
}
return true;
}
} } // namespace v8::internal

167
src/virtual-frame.h Normal file
View File

@ -0,0 +1,167 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_VIRTUAL_FRAME_H_
#define V8_VIRTUAL_FRAME_H_
#include "macro-assembler.h"
#include "register-allocator.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Virtual frame elements
//
// The internal elements of the virtual frames. There are several kinds of
// elements:
// * Invalid: elements that are uninitialized or not actually part
// of the virtual frame. They should not be read.
// * Memory: an element that resides in the actual frame. Its address is
// given by its position in the virtual frame.
// * Register: an element that resides in a register.
// * Constant: an element whose value is known at compile time.
// * Copy: an element whose value is the same as that of another
//     (memory or register) element lower in the frame.
class FrameElement BASE_EMBEDDED {
public:
enum SyncFlag {
SYNCED,
NOT_SYNCED
};
// The default constructor creates an invalid frame element.
FrameElement() {
type_ = TypeField::encode(INVALID) | SyncField::encode(NOT_SYNCED);
data_.reg_ = no_reg;
}
// Factory function to construct an invalid frame element.
static FrameElement InvalidElement() {
FrameElement result;
return result;
}
// Factory function to construct an in-memory frame element.
static FrameElement MemoryElement() {
FrameElement result;
result.type_ = TypeField::encode(MEMORY) | SyncField::encode(SYNCED);
// In-memory elements have no useful data.
result.data_.reg_ = no_reg;
return result;
}
// Factory function to construct an in-register frame element.
static FrameElement RegisterElement(Register reg, SyncFlag is_synced) {
FrameElement result;
result.type_ = TypeField::encode(REGISTER) | SyncField::encode(is_synced);
result.data_.reg_ = reg;
return result;
}
// Factory function to construct a frame element whose value is known at
// compile time.
static FrameElement ConstantElement(Handle<Object> value,
SyncFlag is_synced) {
FrameElement result;
result.type_ = TypeField::encode(CONSTANT) | SyncField::encode(is_synced);
result.data_.handle_ = value.location();
return result;
}
bool is_synced() const { return SyncField::decode(type_) == SYNCED; }
void set_sync() {
ASSERT(type() != MEMORY);
type_ = (type_ & ~SyncField::mask()) | SyncField::encode(SYNCED);
}
void clear_sync() {
ASSERT(type() != MEMORY);
type_ = (type_ & ~SyncField::mask()) | SyncField::encode(NOT_SYNCED);
}
bool is_valid() const { return type() != INVALID; }
bool is_memory() const { return type() == MEMORY; }
bool is_register() const { return type() == REGISTER; }
bool is_constant() const { return type() == CONSTANT; }
bool is_copy() const { return type() == COPY; }
Register reg() const {
ASSERT(is_register());
return data_.reg_;
}
Handle<Object> handle() const {
ASSERT(is_constant());
return Handle<Object>(data_.handle_);
}
int index() const {
ASSERT(is_copy());
return data_.index_;
}
bool Equals(FrameElement other);
private:
enum Type {
INVALID,
MEMORY,
REGISTER,
CONSTANT,
COPY
};
// BitField is <type, shift, size>.
class SyncField : public BitField<SyncFlag, 0, 1> {};
class TypeField : public BitField<Type, 1, 32 - 1> {};
Type type() const { return TypeField::decode(type_); }
// The element's type and a dirty bit. The dirty bit can be cleared
// for non-memory elements to indicate that the element agrees with
// the value in memory in the actual frame.
int type_;
union {
Register reg_;
Object** handle_;
int index_;
} data_;
friend class VirtualFrame;
};
} } // namespace v8::internal
#ifdef ARM
#include "virtual-frame-arm.h"
#else // ia32
#include "virtual-frame-ia32.h"
#endif
#endif // V8_VIRTUAL_FRAME_H_

View File

@ -0,0 +1,121 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test comparison operations that involve one or two constant smis.
function test() {
var i = 5;
var j = 3;
assertTrue( j < i );
i = 5; j = 3;
assertTrue( j <= i );
i = 5; j = 3;
assertTrue( i > j );
i = 5; j = 3;
assertTrue( i >= j );
i = 5; j = 3;
assertTrue( i != j );
i = 5; j = 3;
assertTrue( i == i );
i = 5; j = 3;
assertFalse( i < j );
i = 5; j = 3;
assertFalse( i <= j );
i = 5; j = 3;
assertFalse( j > i );
i = 5; j = 3;
assertFalse(j >= i );
i = 5; j = 3;
assertFalse( j == i);
i = 5; j = 3;
assertFalse( i != i);
i = 10 * 10;
while ( i < 107 ) {
++i;
}
j = 21;
assertTrue( j < i );
j = 21;
assertTrue( j <= i );
j = 21;
assertTrue( i > j );
j = 21;
assertTrue( i >= j );
j = 21;
assertTrue( i != j );
j = 21;
assertTrue( i == i );
j = 21;
assertFalse( i < j );
j = 21;
assertFalse( i <= j );
j = 21;
assertFalse( j > i );
j = 21;
assertFalse(j >= i );
j = 21;
assertFalse( j == i);
j = 21;
assertFalse( i != i);
j = 21;
assertTrue( j == j );
j = 21;
assertFalse( j != j );
assertTrue( 100 > 99 );
assertTrue( 101 >= 90 );
assertTrue( 11111 > -234 );
assertTrue( -888 <= -20 );
while ( 234 > 456 ) {
i = i + 1;
}
switch(3) {
case 5:
assertUnreachable();
break;
case 3:
j = 13;
default:
i = 2;
case 7:
j = 17;
break;
case 9:
j = 19;
assertUnreachable();
break;
}
assertEquals(17, j, "switch with constant value");
}
test();

View File

@ -64,33 +64,33 @@ function testRequest(dcp, arguments, success, result) {
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
// Get the debug command processor.
var dcp = exec_state.debugCommandProcessor();
if (event == Debug.DebugEvent.Break) {
// Get the debug command processor.
var dcp = exec_state.debugCommandProcessor();
// Test some illegal evaluate requests.
testRequest(dcp, void 0, false);
testRequest(dcp, '{"expression":"1","global"=true}', false);
testRequest(dcp, '{"expression":"a","frame":4}', false);
// Test some illegal evaluate requests.
testRequest(dcp, void 0, false);
testRequest(dcp, '{"expression":"1","global"=true}', false);
testRequest(dcp, '{"expression":"a","frame":4}', false);
// Test some legal evaluate requests.
testRequest(dcp, '{"expression":"1+2"}', true, 3);
testRequest(dcp, '{"expression":"a+2"}', true, 5);
testRequest(dcp, '{"expression":"({\\"a\\":1,\\"b\\":2}).b+2"}', true, 4);
// Test some legal evaluate requests.
testRequest(dcp, '{"expression":"1+2"}', true, 3);
testRequest(dcp, '{"expression":"a+2"}', true, 5);
testRequest(dcp, '{"expression":"({\\"a\\":1,\\"b\\":2}).b+2"}', true, 4);
// Test evaluation of a in the stack frames and the global context.
testRequest(dcp, '{"expression":"a"}', true, 3);
testRequest(dcp, '{"expression":"a","frame":0}', true, 3);
testRequest(dcp, '{"expression":"a","frame":1}', true, 2);
testRequest(dcp, '{"expression":"a","frame":2}', true, 1);
testRequest(dcp, '{"expression":"a","global":true}', true, 1);
testRequest(dcp, '{"expression":"this.a","global":true}', true, 1);
// Test evaluation of a in the stack frames and the global context.
testRequest(dcp, '{"expression":"a"}', true, 3);
testRequest(dcp, '{"expression":"a","frame":0}', true, 3);
testRequest(dcp, '{"expression":"a","frame":1}', true, 2);
testRequest(dcp, '{"expression":"a","frame":2}', true, 1);
testRequest(dcp, '{"expression":"a","global":true}', true, 1);
testRequest(dcp, '{"expression":"this.a","global":true}', true, 1);
// Indicate that all was processed.
listenerComplete = true;
}
// Indicate that all was processed.
listenerComplete = true;
}
} catch (e) {
exception = e
exception = e
};
};

@ -36,8 +36,7 @@ Debug = debug.Debug
var bp1, bp2;
function listener(event, exec_state, event_data, data) {
if (event == Debug.DebugEvent.Break)
{
if (event == Debug.DebugEvent.Break) {
if (state == 0) {
exec_state.prepareStep(Debug.StepAction.StepIn, 1000);
state = 1;
@ -68,7 +67,6 @@ bp1 = Debug.setBreakPoint(f, 1);
state = 0;
result = -1;
f();
print(state);
assertEquals(499, result);
// Check that performing 1000 steps with a break point on the statement in the

@ -0,0 +1,62 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
function F() {
  for (var x in [1, 2, 3]) {
    return 42;
  }
  return 87;
}

function G() {
  for (var x in [1, 2, 3]) {
    try {
      return 42;
    } finally {
      // Do nothing.
    }
  }
  return 87;
}

function H() {
  for (var x in [1, 2, 3]) {
    try {
      return 42;
    } catch (e) {
      // Do nothing.
    }
  }
  return 87;
}

assertEquals(42, F());
assertEquals(42, G());
assertEquals(42, H());
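
// Editor's note: a minimal companion sketch, not part of the commit. It
// assumes the mjsunit assert helpers and makes explicit the ordering the
// tests above rely on: the return value is captured before a finally clause
// runs, even when returning out of a for-in loop.
function K() {
  var cleanedUp = false;
  for (var x in [1, 2, 3]) {
    try {
      return cleanedUp;  // evaluated before the finally block executes
    } finally {
      cleanedUp = true;
    }
  }
}

assertEquals(false, K());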

@ -0,0 +1,46 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test some code paths through the compiler for short-circuited
// boolean expressions.

function andTest0() {
  var a = 0;
  // Left subexpression is known false at compile time.
  return a != 0 && "failure";
}

assertFalse(andTest0());

function orTest0() {
  var a = 0;
  // Left subexpression is known true at compile time.
  return a == 0 || "failure";
}

assertTrue(orTest0());
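
// Editor's note: a minimal companion sketch, not part of the commit. It
// assumes the mjsunit assert helpers and spells out what the asserts above
// depend on: && and || evaluate to the value of the deciding operand, not
// to a coerced boolean.
assertEquals(0, 0 && "unreached");  // a falsy left operand is returned as-is
assertEquals("x", 0 || "x");        // the right operand is returned when the left is falsy
assertEquals("failure", 1 == 1 && "failure");  // a truthy left operand yields the right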

@ -25,6 +25,18 @@
/* End PBXAggregateTarget section */
/* Begin PBXBuildFile section */
58950D5E0F55519800F3E8BA /* jump-target.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D500F55514900F3E8BA /* jump-target.cc */; };
58950D5F0F55519D00F3E8BA /* jump-target-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */; };
58950D600F5551A300F3E8BA /* jump-target.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D500F55514900F3E8BA /* jump-target.cc */; };
58950D610F5551A400F3E8BA /* jump-target-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D4E0F55514900F3E8BA /* jump-target-arm.cc */; };
58950D620F5551AF00F3E8BA /* register-allocator-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D530F55514900F3E8BA /* register-allocator-ia32.cc */; };
58950D630F5551AF00F3E8BA /* register-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D540F55514900F3E8BA /* register-allocator.cc */; };
58950D640F5551B500F3E8BA /* register-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D540F55514900F3E8BA /* register-allocator.cc */; };
58950D650F5551B600F3E8BA /* register-allocator-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D520F55514900F3E8BA /* register-allocator-arm.cc */; };
58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D5A0F55514900F3E8BA /* virtual-frame.cc */; };
58950D670F5551C400F3E8BA /* virtual-frame-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D580F55514900F3E8BA /* virtual-frame-ia32.cc */; };
58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D5A0F55514900F3E8BA /* virtual-frame.cc */; };
58950D690F5551CE00F3E8BA /* virtual-frame-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D560F55514900F3E8BA /* virtual-frame-arm.cc */; };
8900116C0E71CA2300F91F35 /* libraries.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8900116B0E71CA2300F91F35 /* libraries.cc */; };
890A13FE0EE9C47F00E49346 /* interpreter-irregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */; };
890A14010EE9C4B000E49346 /* regexp-macro-assembler-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C700EE466D000B48DEB /* regexp-macro-assembler-arm.cc */; };
@ -248,6 +260,20 @@
/* End PBXContainerItemProxy section */
/* Begin PBXFileReference section */
58950D4E0F55514900F3E8BA /* jump-target-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "jump-target-arm.cc"; sourceTree = "<group>"; };
58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "jump-target-ia32.cc"; sourceTree = "<group>"; };
58950D500F55514900F3E8BA /* jump-target.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "jump-target.cc"; sourceTree = "<group>"; };
58950D510F55514900F3E8BA /* jump-target.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target.h"; sourceTree = "<group>"; };
58950D520F55514900F3E8BA /* register-allocator-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "register-allocator-arm.cc"; sourceTree = "<group>"; };
58950D530F55514900F3E8BA /* register-allocator-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "register-allocator-ia32.cc"; sourceTree = "<group>"; };
58950D540F55514900F3E8BA /* register-allocator.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "register-allocator.cc"; sourceTree = "<group>"; };
58950D550F55514900F3E8BA /* register-allocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "register-allocator.h"; sourceTree = "<group>"; };
58950D560F55514900F3E8BA /* virtual-frame-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "virtual-frame-arm.cc"; sourceTree = "<group>"; };
58950D570F55514900F3E8BA /* virtual-frame-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame-arm.h"; sourceTree = "<group>"; };
58950D580F55514900F3E8BA /* virtual-frame-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "virtual-frame-ia32.cc"; sourceTree = "<group>"; };
58950D590F55514900F3E8BA /* virtual-frame-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame-ia32.h"; sourceTree = "<group>"; };
58950D5A0F55514900F3E8BA /* virtual-frame.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "virtual-frame.cc"; sourceTree = "<group>"; };
58950D5B0F55514900F3E8BA /* virtual-frame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame.h"; sourceTree = "<group>"; };
8900116B0E71CA2300F91F35 /* libraries.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = libraries.cc; sourceTree = "<group>"; };
893986D40F29020C007D5254 /* apiutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = apiutils.h; sourceTree = "<group>"; };
8939880B0F2A35FA007D5254 /* v8_shell */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = v8_shell; sourceTree = BUILT_PRODUCTS_DIR; };
@ -667,6 +693,10 @@
89A15C680EE4665300B48DEB /* jsregexp-inl.h */,
897FF14E0E719B8F00D62E90 /* jsregexp.cc */,
897FF14F0E719B8F00D62E90 /* jsregexp.h */,
58950D4E0F55514900F3E8BA /* jump-target-arm.cc */,
58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */,
58950D500F55514900F3E8BA /* jump-target.cc */,
58950D510F55514900F3E8BA /* jump-target.h */,
897FF1500E719B8F00D62E90 /* list-inl.h */,
897FF1510E719B8F00D62E90 /* list.h */,
897FF1520E719B8F00D62E90 /* log.cc */,
@ -712,6 +742,10 @@
89A15C7A0EE466D000B48DEB /* regexp-macro-assembler.h */,
8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */,
8944AD0F0F1D4D3A0028D560 /* regexp-stack.h */,
58950D520F55514900F3E8BA /* register-allocator-arm.cc */,
58950D530F55514900F3E8BA /* register-allocator-ia32.cc */,
58950D540F55514900F3E8BA /* register-allocator.cc */,
58950D550F55514900F3E8BA /* register-allocator.h */,
897FF16F0E719B8F00D62E90 /* rewriter.cc */,
897FF1700E719B8F00D62E90 /* rewriter.h */,
897FF1710E719B8F00D62E90 /* runtime.cc */,
@ -762,6 +796,12 @@
897FF19E0E719B8F00D62E90 /* v8threads.h */,
897FF19F0E719B8F00D62E90 /* variables.cc */,
897FF1A00E719B8F00D62E90 /* variables.h */,
58950D560F55514900F3E8BA /* virtual-frame-arm.cc */,
58950D570F55514900F3E8BA /* virtual-frame-arm.h */,
58950D580F55514900F3E8BA /* virtual-frame-ia32.cc */,
58950D590F55514900F3E8BA /* virtual-frame-ia32.h */,
58950D5A0F55514900F3E8BA /* virtual-frame.cc */,
58950D5B0F55514900F3E8BA /* virtual-frame.h */,
897FF1A10E719B8F00D62E90 /* zone-inl.h */,
897FF1A20E719B8F00D62E90 /* zone.cc */,
897FF1A30E719B8F00D62E90 /* zone.h */,
@ -1056,6 +1096,8 @@
89A88E0D0E71A66E0043BA31 /* ic.cc in Sources */,
89A15C850EE4678B00B48DEB /* interpreter-irregexp.cc in Sources */,
89A88E0E0E71A66F0043BA31 /* jsregexp.cc in Sources */,
58950D5E0F55519800F3E8BA /* jump-target.cc in Sources */,
58950D5F0F55519D00F3E8BA /* jump-target-ia32.cc in Sources */,
8900116C0E71CA2300F91F35 /* libraries.cc in Sources */,
89A88E0F0E71A6740043BA31 /* log.cc in Sources */,
89A88E100E71A6770043BA31 /* macro-assembler-ia32.cc in Sources */,
@ -1072,6 +1114,8 @@
89A15C8A0EE467D100B48DEB /* regexp-macro-assembler-tracer.cc in Sources */,
89A15C810EE4674900B48DEB /* regexp-macro-assembler.cc in Sources */,
8944AD100F1D4D500028D560 /* regexp-stack.cc in Sources */,
58950D620F5551AF00F3E8BA /* register-allocator-ia32.cc in Sources */,
58950D630F5551AF00F3E8BA /* register-allocator.cc in Sources */,
89A88E190E71A6970043BA31 /* rewriter.cc in Sources */,
89A88E1A0E71A69B0043BA31 /* runtime.cc in Sources */,
89A88E1B0E71A69D0043BA31 /* scanner.cc in Sources */,
@ -1093,6 +1137,8 @@
89A88E2B0E71A6D10043BA31 /* v8.cc in Sources */,
89A88E2C0E71A6D20043BA31 /* v8threads.cc in Sources */,
89A88E2D0E71A6D50043BA31 /* variables.cc in Sources */,
58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */,
58950D670F5551C400F3E8BA /* virtual-frame-ia32.cc in Sources */,
89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
@ -1147,6 +1193,8 @@
89F23C600E78D5B2006B2466 /* ic.cc in Sources */,
890A13FE0EE9C47F00E49346 /* interpreter-irregexp.cc in Sources */,
89F23C610E78D5B2006B2466 /* jsregexp.cc in Sources */,
58950D600F5551A300F3E8BA /* jump-target.cc in Sources */,
58950D610F5551A400F3E8BA /* jump-target-arm.cc in Sources */,
89F23C620E78D5B2006B2466 /* libraries.cc in Sources */,
89F23C630E78D5B2006B2466 /* log.cc in Sources */,
89F23C9E0E78D5FD006B2466 /* macro-assembler-arm.cc in Sources */,
@ -1163,6 +1211,8 @@
890A14030EE9C4B500E49346 /* regexp-macro-assembler-tracer.cc in Sources */,
890A14040EE9C4B700E49346 /* regexp-macro-assembler.cc in Sources */,
8944AD110F1D4D570028D560 /* regexp-stack.cc in Sources */,
58950D640F5551B500F3E8BA /* register-allocator.cc in Sources */,
58950D650F5551B600F3E8BA /* register-allocator-arm.cc in Sources */,
89F23C6D0E78D5B2006B2466 /* rewriter.cc in Sources */,
89F23C6E0E78D5B2006B2466 /* runtime.cc in Sources */,
89F23C6F0E78D5B2006B2466 /* scanner.cc in Sources */,
@ -1185,6 +1235,8 @@
89F23C7F0E78D5B2006B2466 /* v8.cc in Sources */,
89F23C800E78D5B2006B2466 /* v8threads.cc in Sources */,
89F23C810E78D5B2006B2466 /* variables.cc in Sources */,
58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */,
58950D690F5551CE00F3E8BA /* virtual-frame-arm.cc in Sources */,
89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;

@ -480,6 +480,18 @@
RelativePath="..\..\src\interpreter-irregexp.h"
>
</File>
<File
RelativePath="..\..\src\jump-target.h"
>
</File>
<File
RelativePath="..\..\src\jump-target.cc"
>
</File>
<File
RelativePath="..\..\src\jump-target-ia32.cc"
>
</File>
<File
RelativePath="..\..\src\jsregexp-inl.h"
>
@ -644,6 +656,18 @@
RelativePath="..\..\src\regexp-stack.cc"
>
</File>
<File
RelativePath="..\..\src\register-allocator.h"
>
</File>
<File
RelativePath="..\..\src\register-allocator.cc"
>
</File>
<File
RelativePath="..\..\src\register-allocator-ia32.cc"
>
</File>
<File
RelativePath="..\..\src\rewriter.cc"
>
@ -808,6 +832,22 @@
RelativePath="..\..\src\variables.h"
>
</File>
<File
RelativePath="..\..\src\virtual-frame.h"
>
</File>
<File
RelativePath="..\..\src\virtual-frame-ia32.h"
>
</File>
<File
RelativePath="..\..\src\virtual-frame.cc"
>
</File>
<File
RelativePath="..\..\src\virtual-frame-ia32.cc"
>
</File>
<File
RelativePath="..\..\src\zone-inl.h"
>

@ -484,6 +484,18 @@
RelativePath="..\..\src\interpreter-irregexp.h"
>
</File>
<File
RelativePath="..\..\src\jump-target.h"
>
</File>
<File
RelativePath="..\..\src\jump-target.cc"
>
</File>
<File
RelativePath="..\..\src\jump-target-arm.cc"
>
</File>
<File
RelativePath="..\..\src\jsregexp-inl.h"
>
@ -648,6 +660,18 @@
RelativePath="..\..\src\regexp-stack.cc"
>
</File>
<File
RelativePath="..\..\src\register-allocator.h"
>
</File>
<File
RelativePath="..\..\src\register-allocator.cc"
>
</File>
<File
RelativePath="..\..\src\register-allocator-arm.cc"
>
</File>
<File
RelativePath="..\..\src\rewriter.cc"
>
@ -820,6 +844,22 @@
RelativePath="..\..\src\variables.h"
>
</File>
<File
RelativePath="..\..\src\virtual-frame.h"
>
</File>
<File
RelativePath="..\..\src\virtual-frame-arm.h"
>
</File>
<File
RelativePath="..\..\src\virtual-frame.cc"
>
</File>
<File
RelativePath="..\..\src\virtual-frame-arm.cc"
>
</File>
<File
RelativePath="..\..\src\zone-inl.h"
>