Cosmetic changes ("set up" is a verb, "setup" is a noun).
Review URL: http://codereview.chromium.org/9139051

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10399 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent daf1020f13
commit b3e0761e38
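The change is mechanical: "setup" the noun stays, "set up" the verb replaces it in comments, and identifiers named Setup/HasBeenSetup become SetUp/HasBeenSetUp. A minimal sketch of the convention follows, using a hypothetical Widget class that is not part of this change:

    class Widget {
     public:
      Widget() : set_up_(false) {}

      // Verb phrase: the method performs the action, so SetUp(), not Setup().
      bool SetUp() {
        set_up_ = true;
        return true;
      }

      // Reads as "has been set up" -- a question about the completed action.
      bool HasBeenSetUp() const { return set_up_; }

     private:
      bool set_up_;
    };

The same sweep also normalizes pointer declarations from "Type *name" to "Type* name", the style used elsewhere in the codebase.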
@@ -317,7 +317,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
 own_buffer_ = false;
 }

-// Setup buffer pointers.
+// Set up buffer pointers.
 ASSERT(buffer_ != NULL);
 pc_ = buffer_;
 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -349,7 +349,7 @@ void Assembler::GetCode(CodeDesc* desc) {
 CheckConstPool(true, false);
 ASSERT(num_pending_reloc_info_ == 0);

-// Setup code descriptor.
+// Set up code descriptor.
 desc->buffer = buffer_;
 desc->buffer_size = buffer_size_;
 desc->instr_size = pc_offset();
@@ -2446,7 +2446,7 @@ void Assembler::GrowBuffer() {
 }
 CHECK_GT(desc.buffer_size, 0); // no overflow

-// Setup new buffer.
+// Set up new buffer.
 desc.buffer = NewArray<byte>(desc.buffer_size);

 desc.instr_size = pc_offset();

@@ -333,7 +333,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
 r5,
 call_generic_code);
 __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
-// Setup return value, remove receiver from stack and return.
+// Set up return value, remove receiver from stack and return.
 __ mov(r0, r2);
 __ add(sp, sp, Operand(kPointerSize));
 __ Jump(lr);
@@ -376,7 +376,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
 true,
 call_generic_code);
 __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
-// Setup return value, remove receiver and argument from stack and return.
+// Set up return value, remove receiver and argument from stack and return.
 __ mov(r0, r3);
 __ add(sp, sp, Operand(2 * kPointerSize));
 __ Jump(lr);
@@ -951,10 +951,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
 // sp[4]: number of arguments (smi-tagged)
 __ ldr(r3, MemOperand(sp, 4 * kPointerSize));

-// Setup pointer to last argument.
+// Set up pointer to last argument.
 __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));

-// Setup number of arguments for function call below
+// Set up number of arguments for function call below
 __ mov(r0, Operand(r3, LSR, kSmiTagSize));

 // Copy arguments and receiver to the expression stack.
@@ -156,13 +156,13 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
 // Load the function from the stack.
 __ ldr(r3, MemOperand(sp, 0));

-// Setup the object header.
+// Set up the object header.
 __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
 __ mov(r2, Operand(Smi::FromInt(length)));
 __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));

-// Setup the fixed slots.
+// Set up the fixed slots.
 __ mov(r1, Operand(Smi::FromInt(0)));
 __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
 __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
@@ -207,7 +207,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
 // Load the serialized scope info from the stack.
 __ ldr(r1, MemOperand(sp, 1 * kPointerSize));

-// Setup the object header.
+// Set up the object header.
 __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
 __ mov(r2, Operand(Smi::FromInt(length)));
@@ -229,7 +229,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
 __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
 __ bind(&after_sentinel);

-// Setup the fixed slots.
+// Set up the fixed slots.
 __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
 __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
 __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
@@ -3842,7 +3842,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
 FrameScope scope(masm, StackFrame::MANUAL);
 __ EnterExitFrame(save_doubles_);

-// Setup argc and the builtin function in callee-saved registers.
+// Set up argc and the builtin function in callee-saved registers.
 __ mov(r4, Operand(r0));
 __ mov(r5, Operand(r1));

@@ -3919,7 +3919,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
 // r2: receiver
 // r3: argc

-// Setup argv in r4.
+// Set up argv in r4.
 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
 if (CpuFeatures::IsSupported(VFP3)) {
 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
@@ -3942,7 +3942,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
 __ ldr(r5, MemOperand(r5));
 __ Push(r8, r7, r6, r5);

-// Setup frame pointer for the frame to be pushed.
+// Set up frame pointer for the frame to be pushed.
 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

 // If this is the outermost JS call, set js_entry_sp value.
@@ -4402,7 +4402,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
 __ str(r3, FieldMemOperand(r0, i));
 }

-// Setup the callee in-object property.
+// Set up the callee in-object property.
 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
 __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
 const int kCalleeOffset = JSObject::kHeaderSize +
@@ -4415,7 +4415,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
 Heap::kArgumentsLengthIndex * kPointerSize;
 __ str(r2, FieldMemOperand(r0, kLengthOffset));

-// Setup the elements pointer in the allocated arguments object.
+// Set up the elements pointer in the allocated arguments object.
 // If we allocated a parameter map, r4 will point there, otherwise
 // it will point to the backing store.
 __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
@@ -4583,7 +4583,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
 // Get the parameters pointer from the stack.
 __ ldr(r2, MemOperand(sp, 1 * kPointerSize));

-// Setup the elements pointer in the allocated arguments object and
+// Set up the elements pointer in the allocated arguments object and
 // initialize the header in the elements fixed array.
 __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
@@ -4595,7 +4595,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {

 // Copy the fixed array slots.
 Label loop;
-// Setup r4 to point to the first array slot.
+// Set up r4 to point to the first array slot.
 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
 __ bind(&loop);
 // Pre-decrement r2 with kPointerSize on each iteration.
@@ -5210,7 +5210,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
 // of the original receiver from the call site).
 __ bind(&non_function);
 __ str(r1, MemOperand(sp, argc_ * kPointerSize));
-__ mov(r0, Operand(argc_)); // Setup the number of arguments.
+__ mov(r0, Operand(argc_)); // Set up the number of arguments.
 __ mov(r2, Operand(0, RelocInfo::NONE));
 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
 __ SetCallKind(r5, CALL_AS_METHOD);
@@ -41,7 +41,7 @@
 namespace v8 {
 namespace internal {

-void CPU::Setup() {
+void CPU::SetUp() {
 CpuFeatures::Probe();
 }

@@ -319,7 +319,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
 output_[0] = input_;
 output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
 } else {
-// Setup the frame pointer and the context pointer.
+// Set up the frame pointer and the context pointer.
 output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
 output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));

@@ -1009,7 +1009,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
 __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));

-// Setup the four remaining stack slots.
+// Set up the four remaining stack slots.
 __ push(r0); // Map.
 __ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
 __ mov(r0, Operand(Smi::FromInt(0)));

@@ -2906,7 +2906,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
 __ Call(ip);

-// Setup deoptimization.
+// Set up deoptimization.
 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

 // Restore context.

@@ -412,7 +412,7 @@ class LDeferredCode: public ZoneObject {
 virtual void Generate() = 0;
 virtual LInstruction* instr() = 0;

-void SetExit(Label *exit) { external_exit_ = exit; }
+void SetExit(Label* exit) { external_exit_ = exit; }
 Label* entry() { return &entry_; }
 Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
 int instruction_index() const { return instruction_index_; }
@@ -817,12 +817,12 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {


 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
-// Setup the frame structure on the stack.
+// Set up the frame structure on the stack.
 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
 Push(lr, fp);
-mov(fp, Operand(sp)); // Setup new frame pointer.
+mov(fp, Operand(sp)); // Set up new frame pointer.
 // Reserve room for saved entry sp and code object.
 sub(sp, sp, Operand(2 * kPointerSize));
 if (emit_debug_code()) {

@@ -508,7 +508,7 @@ class MacroAssembler: public Assembler {
 // ---------------------------------------------------------------------------
 // JavaScript invokes

-// Setup call kind marking in ecx. The method takes ecx as an
+// Set up call kind marking in ecx. The method takes ecx as an
 // explicit first parameter to make the code more readable at the
 // call sites.
 void SetCallKind(Register dst, CallKind kind);

@@ -741,7 +741,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
 isolate_->set_simulator_i_cache(i_cache_);
 }
 Initialize(isolate);
-// Setup simulator support first. Some of this information is needed to
+// Set up simulator support first. Some of this information is needed to
 // setup the architecture state.
 size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
 stack_ = reinterpret_cast<char*>(malloc(stack_size));
@@ -750,7 +750,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
 break_pc_ = NULL;
 break_instr_ = 0;

-// Setup architecture state.
+// Set up architecture state.
 // All registers are initialized to zero to start with.
 for (int i = 0; i < num_registers; i++) {
 registers_[i] = 0;
@@ -3324,7 +3324,7 @@ void Simulator::Execute() {
 int32_t Simulator::Call(byte* entry, int argument_count, ...) {
 va_list parameters;
 va_start(parameters, argument_count);
-// Setup arguments
+// Set up arguments

 // First four arguments passed in registers.
 ASSERT(argument_count >= 4);
@@ -3367,7 +3367,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
 int32_t r10_val = get_register(r10);
 int32_t r11_val = get_register(r11);

-// Setup the callee-saved registers with a known value. To be able to check
+// Set up the callee-saved registers with a known value. To be able to check
 // that they are preserved properly across JS execution.
 int32_t callee_saved_value = icount_;
 set_register(r4, callee_saved_value);

@@ -1144,7 +1144,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
 __ EnterExitFrame(false, kApiStackSpace);

 // Create AccessorInfo instance on the stack above the exit frame with
-// scratch2 (internal::Object **args_) as the data.
+// scratch2 (internal::Object** args_) as the data.
 __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
 __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&

@@ -2405,7 +2405,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
 __ str(r3, MemOperand(sp, argc * kPointerSize));
 }

-// Setup the context (function already in r1).
+// Set up the context (function already in r1).
 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

 // Jump to the cached code (tail call).

@@ -371,7 +371,7 @@ class RelocInfo BASE_EMBEDDED {
 // routines expect to access these pointers indirectly. The following
 // location provides a place for these pointers to exist natually
 // when accessed via the Iterator.
-Object *reconstructed_obj_ptr_;
+Object* reconstructed_obj_ptr_;
 // External-reference pointers are also split across instruction-pairs
 // in mips, but are accessed via indirect pointers. This location
 // provides a place for that pointer to exist naturally. Its address
@@ -35,7 +35,7 @@
 namespace v8 {
 namespace internal {

-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
 Atomic32 old_value,
 Atomic32 new_value) {
 Atomic32 prev_value;
@@ -49,7 +49,7 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
 return prev_value;
 }

-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
 Atomic32 new_value) {
 Atomic32 old_value;
 do {
@@ -59,12 +59,12 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
 return old_value;
 }

-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
 Atomic32 increment) {
 return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
 }

-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
 Atomic32 increment) {
 return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
 }
@@ -73,7 +73,7 @@ inline void MemoryBarrier() {
 OSMemoryBarrier();
 }

-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
 Atomic32 old_value,
 Atomic32 new_value) {
 Atomic32 prev_value;
@@ -87,7 +87,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
 return prev_value;
 }

-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
 Atomic32 old_value,
 Atomic32 new_value) {
 return Acquire_CompareAndSwap(ptr, old_value, new_value);
@@ -97,12 +97,12 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
 *ptr = value;
 }

-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
 *ptr = value;
 MemoryBarrier();
 }

-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
 MemoryBarrier();
 *ptr = value;
 }
@@ -111,13 +111,13 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
 return *ptr;
 }

-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
 Atomic32 value = *ptr;
 MemoryBarrier();
 return value;
 }

-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
 MemoryBarrier();
 return *ptr;
 }
@@ -126,7 +126,7 @@ inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {

 // 64-bit implementation on 64-bit platform

-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
 Atomic64 old_value,
 Atomic64 new_value) {
 Atomic64 prev_value;
@@ -140,7 +140,7 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
 return prev_value;
 }

-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
 Atomic64 new_value) {
 Atomic64 old_value;
 do {
@@ -150,17 +150,17 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
 return old_value;
 }

-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
 Atomic64 increment) {
 return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
 }

-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
 Atomic64 increment) {
 return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
 }

-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
 Atomic64 old_value,
 Atomic64 new_value) {
 Atomic64 prev_value;
@@ -174,7 +174,7 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
 return prev_value;
 }

-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
 Atomic64 old_value,
 Atomic64 new_value) {
 // The lib kern interface does not distinguish between
@@ -186,12 +186,12 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
 *ptr = value;
 }

-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
 *ptr = value;
 MemoryBarrier();
 }

-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
 MemoryBarrier();
 *ptr = value;
 }
@@ -200,13 +200,13 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
 return *ptr;
 }

-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
 Atomic64 value = *ptr;
 MemoryBarrier();
 return value;
 }

-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
 MemoryBarrier();
 return *ptr;
 }
@@ -264,7 +264,7 @@ inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
 old_value, new_value);
 }

-inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
 NoBarrier_Store(
 reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
 }
@@ -279,7 +279,7 @@ inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
 reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
 }

-inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
 return NoBarrier_Load(
 reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
 }
@@ -835,7 +835,7 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
 factory()->LookupAsciiSymbol("global"),
 inner_global,
 attributes);
-// Setup the reference from the global object to the builtins object.
+// Set up the reference from the global object to the builtins object.
 JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global);
 TransferNamedProperties(inner_global_from_snapshot, inner_global);
 TransferIndexedProperties(inner_global_from_snapshot, inner_global);
@@ -1360,7 +1360,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
 if (cache != NULL) cache->Add(name, function_info);
 }

-// Setup the function context. Conceptually, we should clone the
+// Set up the function context. Conceptually, we should clone the
 // function before overwriting the context but since we're in a
 // single-threaded environment it is not strictly necessary.
 ASSERT(top_context->IsGlobalContext());
@@ -1447,7 +1447,7 @@ bool Genesis::InstallNatives() {
 builtins->set_global_context(*global_context());
 builtins->set_global_receiver(*builtins);

-// Setup the 'global' properties of the builtins object. The
+// Set up the 'global' properties of the builtins object. The
 // 'global' property that refers to the global object is the only
 // way to get from code running in the builtins context to the
 // global object.
@@ -1459,7 +1459,7 @@ bool Genesis::InstallNatives() {
 JSObject::SetLocalPropertyIgnoreAttributes(
 builtins, global_symbol, global_obj, attributes));

-// Setup the reference from the global object to the builtins object.
+// Set up the reference from the global object to the builtins object.
 JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);

 // Create a bridge function that has context in the global context.
@@ -1683,7 +1683,7 @@ bool Genesis::InstallNatives() {
 InstallNativeFunctions();

 // Store the map for the string prototype after the natives has been compiled
-// and the String function has been setup.
+// and the String function has been set up.
 Handle<JSFunction> string_function(global_context()->string_function());
 ASSERT(JSObject::cast(
 string_function->initial_map()->prototype())->HasFastProperties());

@@ -88,7 +88,7 @@ class SourceCodeCache BASE_EMBEDDED {
 // context.
 class Bootstrapper {
 public:
-// Requires: Heap::Setup has been called.
+// Requires: Heap::SetUp has been called.
 void Initialize(bool create_heap_objects);
 void TearDown();

@@ -1719,7 +1719,7 @@ void Builtins::InitBuiltinFunctionTable() {
 #undef DEF_FUNCTION_PTR_A
 }

-void Builtins::Setup(bool create_heap_objects) {
+void Builtins::SetUp(bool create_heap_objects) {
 ASSERT(!initialized_);
 Isolate* isolate = Isolate::Current();
 Heap* heap = isolate->heap();

@@ -265,7 +265,7 @@ class Builtins {

 // Generate all builtin code objects. Should be called once during
 // isolate initialization.
-void Setup(bool create_heap_objects);
+void SetUp(bool create_heap_objects);
 void TearDown();

 // Garbage collection support.

@@ -493,7 +493,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
 NoBarrier_Store(&is_profiling_, true);
 processor_->Start();
 // Enumerate stuff we already have in the heap.
-if (isolate->heap()->HasBeenSetup()) {
+if (isolate->heap()->HasBeenSetUp()) {
 if (!FLAG_prof_browser_mode) {
 bool saved_log_code_flag = FLAG_log_code;
 FLAG_log_code = true;
@@ -562,7 +562,7 @@ void CpuProfiler::StopProcessor() {
 }


-void CpuProfiler::Setup() {
+void CpuProfiler::SetUp() {
 Isolate* isolate = Isolate::Current();
 if (isolate->cpu_profiler() == NULL) {
 isolate->set_cpu_profiler(new CpuProfiler());
@@ -204,7 +204,7 @@ namespace internal {
 // TODO(isolates): isolatify this class.
 class CpuProfiler {
 public:
-static void Setup();
+static void SetUp();
 static void TearDown();

 static void StartProfiling(const char* title);
@@ -230,11 +230,11 @@ class CpuProfiler {
 Code* code, String* name);
 static void CodeCreateEvent(Logger::LogEventsAndTags tag,
 Code* code,
-SharedFunctionInfo *shared,
+SharedFunctionInfo* shared,
 String* name);
 static void CodeCreateEvent(Logger::LogEventsAndTags tag,
 Code* code,
-SharedFunctionInfo *shared,
+SharedFunctionInfo* shared,
 String* source, int line);
 static void CodeCreateEvent(Logger::LogEventsAndTags tag,
 Code* code, int args_count);

@@ -53,7 +53,7 @@ namespace internal {
 class CPU : public AllStatic {
 public:
 // Initializes the cpu architecture support. Called once at VM startup.
-static void Setup();
+static void SetUp();

 static bool SupportsCrankshaft();

@@ -169,7 +169,7 @@ void RemoteDebugger::Run() {
 bool ok;

 // Make sure that socket support is initialized.
-ok = i::Socket::Setup();
+ok = i::Socket::SetUp();
 if (!ok) {
 printf("Unable to initialize socket support %d\n", i::Socket::LastError());
 return;

@@ -1547,7 +1547,7 @@ DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
 }
 }

-// Setup the VM for stepping.
+// Set up the VM for stepping.
 this.exec_state_.prepareStep(action, count);
 }

@@ -682,7 +682,7 @@ void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) {
 }


-void Debug::Setup(bool create_heap_objects) {
+void Debug::SetUp(bool create_heap_objects) {
 ThreadInit();
 if (create_heap_objects) {
 // Get code to handle debug break on return.
@@ -1213,7 +1213,7 @@ void Debug::ClearAllBreakPoints() {

 void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
 PrepareForBreakPoints();
-// Make sure the function has setup the debug info.
+// Make sure the function has set up the debug info.
 if (!EnsureDebugInfo(shared)) {
 // Return if we failed to retrieve the debug info.
 return;
@@ -3065,7 +3065,7 @@ bool Debugger::StartAgent(const char* name, int port,
 v8::Debug::DebugBreak();
 }

-if (Socket::Setup()) {
+if (Socket::SetUp()) {
 if (agent_ == NULL) {
 agent_ = new DebuggerAgent(name, port);
 agent_->Start();

@@ -224,7 +224,7 @@ class DebugInfoListNode {
 // DebugInfo.
 class Debug {
 public:
-void Setup(bool create_heap_objects);
+void SetUp(bool create_heap_objects);
 bool Load();
 void Unload();
 bool IsLoaded() { return !debug_context_.is_null(); }
@@ -705,7 +705,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
 // Allocate the function
 Handle<JSFunction> function = NewFunction(name, the_hole_value());

-// Setup the code pointer in both the shared function info and in
+// Set up the code pointer in both the shared function info and in
 // the function itself.
 function->shared()->set_code(*code);
 function->set_code(*code);
@@ -736,7 +736,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
 // Allocate the function.
 Handle<JSFunction> function = NewFunction(name, prototype);

-// Setup the code pointer in both the shared function info and in
+// Set up the code pointer in both the shared function info and in
 // the function itself.
 function->shared()->set_code(*code);
 function->set_code(*code);

@@ -485,7 +485,7 @@ Code* ExitFrame::unchecked_code() const {


 void ExitFrame::ComputeCallerState(State* state) const {
-// Setup the caller state.
+// Set up the caller state.
 state->sp = caller_sp();
 state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
 state->pc_address

@@ -1178,7 +1178,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
 }
 ExitFinallyBlock(); // Return to the calling code.

-// Setup try handler.
+// Set up try handler.
 __ bind(&try_entry);
 __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER, stmt->index());
 { TryFinally try_body(this, &finally_entry);
@@ -1284,7 +1284,7 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit(


 bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
-Expression *sub_expr;
+Expression* sub_expr;
 Handle<String> check;
 if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
 EmitLiteralCompareTypeof(expr, sub_expr, check);
@@ -1556,23 +1556,23 @@ class DebugLineSection : public DebugSection {

 class UnwindInfoSection : public DebugSection {
 public:
-explicit UnwindInfoSection(CodeDescription *desc);
-virtual bool WriteBody(Writer *w);
+explicit UnwindInfoSection(CodeDescription* desc);
+virtual bool WriteBody(Writer* w);

-int WriteCIE(Writer *w);
-void WriteFDE(Writer *w, int);
+int WriteCIE(Writer* w);
+void WriteFDE(Writer* w, int);

-void WriteFDEStateOnEntry(Writer *w);
-void WriteFDEStateAfterRBPPush(Writer *w);
-void WriteFDEStateAfterRBPSet(Writer *w);
-void WriteFDEStateAfterRBPPop(Writer *w);
+void WriteFDEStateOnEntry(Writer* w);
+void WriteFDEStateAfterRBPPush(Writer* w);
+void WriteFDEStateAfterRBPSet(Writer* w);
+void WriteFDEStateAfterRBPPop(Writer* w);

-void WriteLength(Writer *w,
+void WriteLength(Writer* w,
 Writer::Slot<uint32_t>* length_slot,
 int initial_position);

 private:
-CodeDescription *desc_;
+CodeDescription* desc_;

 // DWARF3 Specification, Table 7.23
 enum CFIInstructions {
@@ -1623,7 +1623,7 @@ class UnwindInfoSection : public DebugSection {
 };


-void UnwindInfoSection::WriteLength(Writer *w,
+void UnwindInfoSection::WriteLength(Writer* w,
 Writer::Slot<uint32_t>* length_slot,
 int initial_position) {
 uint32_t align = (w->position() - initial_position) % kPointerSize;
@@ -1639,7 +1639,7 @@ void UnwindInfoSection::WriteLength(Writer *w,
 }


-UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
+UnwindInfoSection::UnwindInfoSection(CodeDescription* desc)
 #ifdef __ELF
 : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1),
 #else
@@ -1648,7 +1648,7 @@ UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
 #endif
 desc_(desc) { }

-int UnwindInfoSection::WriteCIE(Writer *w) {
+int UnwindInfoSection::WriteCIE(Writer* w) {
 Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
 uint32_t cie_position = w->position();

@@ -1668,7 +1668,7 @@ int UnwindInfoSection::WriteCIE(Writer *w) {
 }


-void UnwindInfoSection::WriteFDE(Writer *w, int cie_position) {
+void UnwindInfoSection::WriteFDE(Writer* w, int cie_position) {
 // The only FDE for this function. The CFA is the current RBP.
 Writer::Slot<uint32_t> fde_length_slot = w->CreateSlotHere<uint32_t>();
 int fde_position = w->position();
@@ -1686,7 +1686,7 @@ void UnwindInfoSection::WriteFDE(Writer *w, int cie_position) {
 }


-void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) {
+void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) {
 // The first state, just after the control has been transferred to the the
 // function.

@@ -1713,7 +1713,7 @@ void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) {
 }


-void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) {
+void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer* w) {
 // The second state, just after RBP has been pushed.

 // RBP / CFA for this function is now the current RSP, so just set the
@@ -1734,7 +1734,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) {
 }


-void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) {
+void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer* w) {
 // The third state, after the RBP has been set.

 // The CFA can now directly be set to RBP.
@@ -1749,7 +1749,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) {
 }


-void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) {
+void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) {
 // The fourth (final) state. The RBP has been popped (just before issuing a
 // return).

@@ -1769,7 +1769,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) {
 }


-bool UnwindInfoSection::WriteBody(Writer *w) {
+bool UnwindInfoSection::WriteBody(Writer* w) {
 uint32_t cie_position = WriteCIE(w);
 WriteFDE(w, cie_position);
 return true;
@@ -1810,8 +1810,8 @@ extern "C" {
 struct JITDescriptor {
 uint32_t version_;
 uint32_t action_flag_;
-JITCodeEntry *relevant_entry_;
-JITCodeEntry *first_entry_;
+JITCodeEntry* relevant_entry_;
+JITCodeEntry* first_entry_;
 };

 // GDB will place breakpoint into this function.
@@ -1998,7 +1998,7 @@ void GDBJITInterface::AddCode(Handle<String> name,
 }
 }

-static void AddUnwindInfo(CodeDescription *desc) {
+static void AddUnwindInfo(CodeDescription* desc) {
 #ifdef V8_TARGET_ARCH_X64
 if (desc->tag() == GDBJITInterface::FUNCTION) {
 // To avoid propagating unwinding information through
@@ -463,7 +463,7 @@ MaybeObject* Heap::PrepareForCompare(String* str) {


 int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
-ASSERT(HasBeenSetup());
+ASSERT(HasBeenSetUp());
 int amount = amount_of_external_allocated_memory_ + change_in_bytes;
 if (change_in_bytes >= 0) {
 // Avoid overflow.

@@ -51,7 +51,7 @@ void HeapProfiler::ResetSnapshots() {
 }


-void HeapProfiler::Setup() {
+void HeapProfiler::SetUp() {
 Isolate* isolate = Isolate::Current();
 if (isolate->heap_profiler() == NULL) {
 isolate->set_heap_profiler(new HeapProfiler());

@@ -48,7 +48,7 @@ class HeapSnapshotsCollection;
 // to generate .hp files for use by the GHC/Valgrind tool hp2ps.
 class HeapProfiler {
 public:
-static void Setup();
+static void SetUp();
 static void TearDown();

 static HeapSnapshot* TakeSnapshot(const char* name,
src/heap.cc
@@ -176,7 +176,7 @@ Heap::Heap()


 intptr_t Heap::Capacity() {
-if (!HasBeenSetup()) return 0;
+if (!HasBeenSetUp()) return 0;

 return new_space_.Capacity() +
 old_pointer_space_->Capacity() +
@@ -188,7 +188,7 @@ intptr_t Heap::Capacity() {


 intptr_t Heap::CommittedMemory() {
-if (!HasBeenSetup()) return 0;
+if (!HasBeenSetUp()) return 0;

 return new_space_.CommittedMemory() +
 old_pointer_space_->CommittedMemory() +
@@ -200,14 +200,14 @@ intptr_t Heap::CommittedMemory() {
 }

 intptr_t Heap::CommittedMemoryExecutable() {
-if (!HasBeenSetup()) return 0;
+if (!HasBeenSetUp()) return 0;

 return isolate()->memory_allocator()->SizeExecutable();
 }


 intptr_t Heap::Available() {
-if (!HasBeenSetup()) return 0;
+if (!HasBeenSetUp()) return 0;

 return new_space_.Available() +
 old_pointer_space_->Available() +
@@ -218,7 +218,7 @@ intptr_t Heap::Available() {
 }


-bool Heap::HasBeenSetup() {
+bool Heap::HasBeenSetUp() {
 return old_pointer_space_ != NULL &&
 old_data_space_ != NULL &&
 code_space_ != NULL &&
@@ -3807,7 +3807,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
 }
 Map* new_map = Map::cast(obj);

-// Setup the global object as a normalized object.
+// Set up the global object as a normalized object.
 global->set_map(new_map);
 global->map()->clear_instance_descriptors();
 global->set_properties(dictionary);
@@ -4740,7 +4740,7 @@ bool Heap::IdleGlobalGC() {
 #ifdef DEBUG

 void Heap::Print() {
-if (!HasBeenSetup()) return;
+if (!HasBeenSetUp()) return;
 isolate()->PrintStack();
 AllSpaces spaces;
 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
@@ -4805,7 +4805,7 @@ bool Heap::Contains(HeapObject* value) {

 bool Heap::Contains(Address addr) {
 if (OS::IsOutsideAllocatedSpace(addr)) return false;
-return HasBeenSetup() &&
+return HasBeenSetUp() &&
 (new_space_.ToSpaceContains(addr) ||
 old_pointer_space_->Contains(addr) ||
 old_data_space_->Contains(addr) ||
@@ -4823,7 +4823,7 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {

 bool Heap::InSpace(Address addr, AllocationSpace space) {
 if (OS::IsOutsideAllocatedSpace(addr)) return false;
-if (!HasBeenSetup()) return false;
+if (!HasBeenSetUp()) return false;

 switch (space) {
 case NEW_SPACE:
@@ -4848,7 +4848,7 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {

 #ifdef DEBUG
 void Heap::Verify() {
-ASSERT(HasBeenSetup());
+ASSERT(HasBeenSetUp());

 store_buffer()->Verify();

@@ -5275,7 +5275,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
 bool Heap::ConfigureHeap(int max_semispace_size,
 intptr_t max_old_gen_size,
 intptr_t max_executable_size) {
-if (HasBeenSetup()) return false;
+if (HasBeenSetUp()) return false;

 if (max_semispace_size > 0) {
 if (max_semispace_size < Page::kPageSize) {
@@ -5564,7 +5564,7 @@ class HeapDebugUtils {

 #endif

-bool Heap::Setup(bool create_heap_objects) {
+bool Heap::SetUp(bool create_heap_objects) {
 #ifdef DEBUG
 allocation_timeout_ = FLAG_gc_interval;
 debug_utils_ = new HeapDebugUtils(this);
@@ -5594,12 +5594,12 @@ bool Heap::Setup(bool create_heap_objects) {

 MarkMapPointersAsEncoded(false);

-// Setup memory allocator.
-if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
+// Set up memory allocator.
+if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
 return false;

-// Setup new space.
-if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
+// Set up new space.
+if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
 return false;
 }

@@ -5610,7 +5610,7 @@ bool Heap::Setup(bool create_heap_objects) {
 OLD_POINTER_SPACE,
 NOT_EXECUTABLE);
 if (old_pointer_space_ == NULL) return false;
-if (!old_pointer_space_->Setup()) return false;
+if (!old_pointer_space_->SetUp()) return false;

 // Initialize old data space.
 old_data_space_ =
@@ -5619,14 +5619,14 @@ bool Heap::Setup(bool create_heap_objects) {
 OLD_DATA_SPACE,
 NOT_EXECUTABLE);
 if (old_data_space_ == NULL) return false;
-if (!old_data_space_->Setup()) return false;
+if (!old_data_space_->SetUp()) return false;

 // Initialize the code space, set its maximum capacity to the old
 // generation size. It needs executable memory.
 // On 64-bit platform(s), we put all code objects in a 2 GB range of
 // virtual address space, so that they can call each other with near calls.
 if (code_range_size_ > 0) {
-if (!isolate_->code_range()->Setup(code_range_size_)) {
+if (!isolate_->code_range()->SetUp(code_range_size_)) {
 return false;
 }
 }
@@ -5634,7 +5634,7 @@ bool Heap::Setup(bool create_heap_objects) {
 code_space_ =
 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
 if (code_space_ == NULL) return false;
-if (!code_space_->Setup()) return false;
+if (!code_space_->SetUp()) return false;

 // Initialize map space.
 map_space_ = new MapSpace(this,
@@ -5642,21 +5642,21 @@ bool Heap::Setup(bool create_heap_objects) {
 FLAG_max_map_space_pages,
 MAP_SPACE);
 if (map_space_ == NULL) return false;
-if (!map_space_->Setup()) return false;
+if (!map_space_->SetUp()) return false;

 // Initialize global property cell space.
 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
 if (cell_space_ == NULL) return false;
-if (!cell_space_->Setup()) return false;
+if (!cell_space_->SetUp()) return false;

 // The large object code space may contain code or data. We set the memory
 // to be non-executable here for safety, but this means we need to enable it
 // explicitly when allocating large code objects.
 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
 if (lo_space_ == NULL) return false;
-if (!lo_space_->Setup()) return false;
+if (!lo_space_->SetUp()) return false;

-// Setup the seed that is used to randomize the string hash function.
+// Set up the seed that is used to randomize the string hash function.
 ASSERT(hash_seed() == 0);
 if (FLAG_randomize_hashes) {
 if (FLAG_hash_seed == 0) {
@@ -5681,7 +5681,7 @@ bool Heap::Setup(bool create_heap_objects) {
 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
 LOG(isolate_, IntPtrTEvent("heap-available", Available()));

-store_buffer()->Setup();
+store_buffer()->SetUp();

 return true;
 }
src/heap.h
@@ -434,7 +434,7 @@ class ExternalStringTable {
 class Heap {
 public:
 // Configure heap size before setup. Return false if the heap has been
-// setup already.
+// set up already.
 bool ConfigureHeap(int max_semispace_size,
 intptr_t max_old_gen_size,
 intptr_t max_executable_size);
@@ -443,7 +443,7 @@ class Heap {
 // Initializes the global object heap. If create_heap_objects is true,
 // also creates the basic non-mutable objects.
 // Returns whether it succeeded.
-bool Setup(bool create_heap_objects);
+bool SetUp(bool create_heap_objects);

 // Destroys all memory allocated by the heap.
 void TearDown();
@@ -453,8 +453,8 @@ class Heap {
 // jslimit_/real_jslimit_ variable in the StackGuard.
 void SetStackLimits();

-// Returns whether Setup has been called.
-bool HasBeenSetup();
+// Returns whether SetUp has been called.
+bool HasBeenSetUp();

 // Returns the maximum amount of memory reserved for the heap. For
 // the young generation, we reserve 4 times the amount needed for a
@@ -1914,7 +1914,7 @@ class Heap {
 PromotionQueue promotion_queue_;

 // Flag is set when the heap has been configured. The heap can be repeatedly
-// configured through the API until it is setup.
+// configured through the API until it is set up.
 bool configured_;

 ExternalStringTable external_string_table_;
@@ -2305,7 +2305,7 @@ HGraph* HGraphBuilder::CreateGraph() {
 Bailout("function with illegal redeclaration");
 return NULL;
 }
-SetupScope(scope);
+SetUpScope(scope);

 // Add an edge to the body entry. This is warty: the graph's start
 // environment will be used by the Lithium translation as the initial
@@ -2469,7 +2469,7 @@ HInstruction* HGraphBuilder::PreProcessCall(HCall<V>* call) {
 }


-void HGraphBuilder::SetupScope(Scope* scope) {
+void HGraphBuilder::SetUpScope(Scope* scope) {
 HConstant* undefined_constant = new(zone()) HConstant(
 isolate()->factory()->undefined_value(), Representation::Tagged());
 AddInstruction(undefined_constant);

@@ -870,7 +870,7 @@ class HGraphBuilder: public AstVisitor {
 Representation rep);
 static Representation ToRepresentation(TypeInfo info);

-void SetupScope(Scope* scope);
+void SetUpScope(Scope* scope);
 virtual void VisitStatements(ZoneList<Statement*>* statements);

 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
@@ -350,7 +350,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
 }
 #endif

-// Setup buffer pointers.
+// Set up buffer pointers.
 ASSERT(buffer_ != NULL);
 pc_ = buffer_;
 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -377,7 +377,7 @@ void Assembler::GetCode(CodeDesc* desc) {
 // Finalize code (at this point overflow() may be true, but the gap ensures
 // that we are still not overlapping instructions and relocation info).
 ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
-// Setup code descriptor.
+// Set up code descriptor.
 desc->buffer = buffer_;
 desc->buffer_size = buffer_size_;
 desc->instr_size = pc_offset();
@@ -2457,7 +2457,7 @@ void Assembler::GrowBuffer() {
 V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
 }

-// Setup new buffer.
+// Set up new buffer.
 desc.buffer = NewArray<byte>(desc.buffer_size);
 desc.instr_size = pc_offset();
 desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());

@@ -333,7 +333,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
 __ push(ebx);
 __ push(ebx);

-// Setup pointer to last argument.
+// Set up pointer to last argument.
 __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));

 // Copy arguments and receiver to the expression stack.

@@ -128,14 +128,14 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
 // Get the function from the stack.
 __ mov(ecx, Operand(esp, 1 * kPointerSize));

-// Setup the object header.
+// Set up the object header.
 Factory* factory = masm->isolate()->factory();
 __ mov(FieldOperand(eax, HeapObject::kMapOffset),
 factory->function_context_map());
 __ mov(FieldOperand(eax, Context::kLengthOffset),
 Immediate(Smi::FromInt(length)));

-// Setup the fixed slots.
+// Set up the fixed slots.
 __ Set(ebx, Immediate(0)); // Set to NULL.
 __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
 __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
@@ -179,7 +179,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
 // Get the serialized scope info from the stack.
 __ mov(ebx, Operand(esp, 2 * kPointerSize));

-// Setup the object header.
+// Set up the object header.
 Factory* factory = masm->isolate()->factory();
 __ mov(FieldOperand(eax, HeapObject::kMapOffset),
 factory->block_context_map());
@@ -202,7 +202,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
 __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
 __ bind(&after_sentinel);

-// Setup the fixed slots.
+// Set up the fixed slots.
 __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
 __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
 __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
@@ -3379,7 +3379,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
 __ mov(FieldOperand(eax, i), edx);
 }

-// Setup the callee in-object property.
+// Set up the callee in-object property.
 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
 __ mov(edx, Operand(esp, 4 * kPointerSize));
 __ mov(FieldOperand(eax, JSObject::kHeaderSize +
@@ -3392,7 +3392,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
 Heap::kArgumentsLengthIndex * kPointerSize),
 ecx);

-// Setup the elements pointer in the allocated arguments object.
+// Set up the elements pointer in the allocated arguments object.
 // If we allocated a parameter map, edi will point there, otherwise to the
 // backing store.
 __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
@@ -3571,7 +3571,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
 // Get the parameters pointer from the stack.
 __ mov(edx, Operand(esp, 2 * kPointerSize));

-// Setup the elements pointer in the allocated arguments object and
+// Set up the elements pointer in the allocated arguments object and
 // initialize the header in the elements fixed array.
 __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
 __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
@@ -4950,7 +4950,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
 Label invoke, handler_entry, exit;
 Label not_outermost_js, not_outermost_js_2;

-// Setup frame.
+// Set up frame.
 __ push(ebp);
 __ mov(ebp, esp);
@@ -41,7 +41,7 @@
 namespace v8 {
 namespace internal {

-void CPU::Setup() {
+void CPU::SetUp() {
 CpuFeatures::Probe();
 }

@@ -406,7 +406,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
 output_[0] = input_;
 output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
 } else {
-// Setup the frame pointer and the context pointer.
+// Set up the frame pointer and the context pointer.
 // All OSR stack frames are dynamically aligned to an 8-byte boundary.
 int frame_pointer = input_->GetRegister(ebp.code());
 if ((frame_pointer & 0x4) == 0) {

@@ -967,7 +967,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
 __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));

-// Setup the four remaining stack slots.
+// Set up the four remaining stack slots.
 __ push(eax); // Map.
 __ push(edx); // Enumeration cache.
 __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));

@@ -4189,7 +4189,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
 DeoptimizeIf(not_equal, instr->environment());
 }

-// Setup the parameters to the stub/runtime call.
+// Set up the parameters to the stub/runtime call.
 __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
 __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
@@ -4297,7 +4297,7 @@ void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
 Handle<FixedArray> constant_properties =
 instr->hydrogen()->constant_properties();

-// Setup the parameters to the stub/runtime call.
+// Set up the parameters to the stub/runtime call.
 __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
 __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));

@@ -385,7 +385,7 @@ class LDeferredCode: public ZoneObject {
 virtual void Generate() = 0;
 virtual LInstruction* instr() = 0;

-void SetExit(Label *exit) { external_exit_ = exit; }
+void SetExit(Label* exit) { external_exit_ = exit; }
 Label* entry() { return &entry_; }
 Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
 int instruction_index() const { return instruction_index_; }

@@ -251,7 +251,7 @@ class MacroAssembler: public Assembler {
 // ---------------------------------------------------------------------------
 // JavaScript invokes

-// Setup call kind marking in ecx. The method takes ecx as an
+// Set up call kind marking in ecx. The method takes ecx as an
 // explicit first parameter to make the code more readable at the
 // call sites.
 void SetCallKind(Register dst, CallKind kind);

@@ -2330,7 +2330,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
 __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
 }

-// Setup the context (function already in edi).
+// Set up the context (function already in edi).
 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

 // Jump to the cached code (tail call).
@@ -38,11 +38,11 @@ namespace internal {
 //============================================================================
 // The Inspector.

-void Inspector::DumpObjectType(FILE* out, Object *obj, bool print_more) {
+void Inspector::DumpObjectType(FILE* out, Object* obj, bool print_more) {
 // Dump the object pointer.
 OS::FPrint(out, "%p:", reinterpret_cast<void*>(obj));
 if (obj->IsHeapObject()) {
-HeapObject *hobj = HeapObject::cast(obj);
+HeapObject* hobj = HeapObject::cast(obj);
 OS::FPrint(out, " size %d :", hobj->Size());
 }

@@ -41,14 +41,14 @@ namespace internal {

 class Inspector {
 public:
-static void DumpObjectType(FILE* out, Object *obj, bool print_more);
-static void DumpObjectType(FILE* out, Object *obj) {
+static void DumpObjectType(FILE* out, Object* obj, bool print_more);
+static void DumpObjectType(FILE* out, Object* obj) {
 DumpObjectType(out, obj, false);
 }
-static void DumpObjectType(Object *obj, bool print_more) {
+static void DumpObjectType(Object* obj, bool print_more) {
 DumpObjectType(stdout, obj, print_more);
 }
-static void DumpObjectType(Object *obj) {
+static void DumpObjectType(Object* obj) {
 DumpObjectType(stdout, obj, false);
 }
 };
@ -1751,10 +1751,10 @@ bool Isolate::Init(Deserializer* des) {
|
||||
regexp_stack_->isolate_ = this;
|
||||
|
||||
// Enable logging before setting up the heap
|
||||
logger_->Setup();
|
||||
logger_->SetUp();
|
||||
|
||||
CpuProfiler::Setup();
|
||||
HeapProfiler::Setup();
|
||||
CpuProfiler::SetUp();
|
||||
HeapProfiler::SetUp();
|
||||
|
||||
// Initialize other runtime facilities
|
||||
#if defined(USE_SIMULATOR)
|
||||
@ -1771,10 +1771,10 @@ bool Isolate::Init(Deserializer* des) {
|
||||
stack_guard_.InitThread(lock);
|
||||
}
|
||||
|
||||
// Setup the object heap.
|
||||
// SetUp the object heap.
|
||||
const bool create_heap_objects = (des == NULL);
|
||||
ASSERT(!heap_.HasBeenSetup());
|
||||
if (!heap_.Setup(create_heap_objects)) {
|
||||
ASSERT(!heap_.HasBeenSetUp());
|
||||
if (!heap_.SetUp(create_heap_objects)) {
|
||||
V8::SetFatalError();
|
||||
return false;
|
||||
}
|
||||
@ -1782,7 +1782,7 @@ bool Isolate::Init(Deserializer* des) {
|
||||
InitializeThreadLocal();
|
||||
|
||||
bootstrapper_->Initialize(create_heap_objects);
|
||||
builtins_.Setup(create_heap_objects);
|
||||
builtins_.SetUp(create_heap_objects);
|
||||
|
||||
// Only preallocate on the first initialization.
|
||||
if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
|
||||
@ -1801,7 +1801,7 @@ bool Isolate::Init(Deserializer* des) {
|
||||
}
|
||||
|
||||
#ifdef ENABLE_DEBUGGER_SUPPORT
|
||||
debug_->Setup(create_heap_objects);
|
||||
debug_->SetUp(create_heap_objects);
|
||||
#endif
|
||||
stub_cache_->Initialize(create_heap_objects);
|
||||
|
||||
@ -1822,7 +1822,7 @@ bool Isolate::Init(Deserializer* des) {
|
||||
|
||||
deoptimizer_data_ = new DeoptimizerData;
|
||||
runtime_profiler_ = new RuntimeProfiler(this);
|
||||
runtime_profiler_->Setup();
|
||||
runtime_profiler_->SetUp();
|
||||
|
||||
// If we are deserializing, log non-function code objects and compiled
|
||||
// functions found in the snapshot.
|
||||
|
@ -49,13 +49,13 @@ namespace internal {
|
||||
|
||||
#define DEFINE_OPERAND_CACHE(name, type) \
|
||||
name name::cache[name::kNumCachedOperands]; \
|
||||
void name::SetupCache() { \
|
||||
void name::SetUpCache() { \
|
||||
for (int i = 0; i < kNumCachedOperands; i++) { \
|
||||
cache[i].ConvertTo(type, i); \
|
||||
} \
|
||||
} \
|
||||
static bool name##_initialize() { \
|
||||
name::SetupCache(); \
|
||||
name::SetUpCache(); \
|
||||
return true; \
|
||||
} \
|
||||
static bool name##_cache_initialized = name##_initialize();
|
||||
|
@ -265,7 +265,7 @@ class LConstantOperand: public LOperand {
|
||||
return reinterpret_cast<LConstantOperand*>(op);
|
||||
}
|
||||
|
||||
static void SetupCache();
|
||||
static void SetUpCache();
|
||||
|
||||
private:
|
||||
static const int kNumCachedOperands = 128;
|
||||
@ -300,7 +300,7 @@ class LStackSlot: public LOperand {
|
||||
return reinterpret_cast<LStackSlot*>(op);
|
||||
}
|
||||
|
||||
static void SetupCache();
|
||||
static void SetUpCache();
|
||||
|
||||
private:
|
||||
static const int kNumCachedOperands = 128;
|
||||
@ -324,7 +324,7 @@ class LDoubleStackSlot: public LOperand {
|
||||
return reinterpret_cast<LDoubleStackSlot*>(op);
|
||||
}
|
||||
|
||||
static void SetupCache();
|
||||
static void SetUpCache();
|
||||
|
||||
private:
|
||||
static const int kNumCachedOperands = 128;
|
||||
@ -348,7 +348,7 @@ class LRegister: public LOperand {
|
||||
return reinterpret_cast<LRegister*>(op);
|
||||
}
|
||||
|
||||
static void SetupCache();
|
||||
static void SetUpCache();
|
||||
|
||||
private:
|
||||
static const int kNumCachedOperands = 16;
|
||||
@ -372,7 +372,7 @@ class LDoubleRegister: public LOperand {
|
||||
return reinterpret_cast<LDoubleRegister*>(op);
|
||||
}
|
||||
|
||||
static void SetupCache();
|
||||
static void SetUpCache();
|
||||
|
||||
private:
|
||||
static const int kNumCachedOperands = 16;
|
||||
|
@ -1228,7 +1228,7 @@ class RelocInfoBuffer {
|
||||
V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer");
|
||||
}
|
||||
|
||||
// Setup new buffer.
|
||||
// Set up new buffer.
|
||||
byte* new_buffer = NewArray<byte>(new_buffer_size);
|
||||
|
||||
// Copy the data.
|
||||
|
@ -59,7 +59,7 @@ void LiveObjectList::IterateElements(ObjectVisitor* v) {
|
||||
}
|
||||
|
||||
|
||||
void LiveObjectList::ProcessNonLive(HeapObject *obj) {
|
||||
void LiveObjectList::ProcessNonLive(HeapObject* obj) {
|
||||
// Only do work if we have at least one list to process.
|
||||
if (last()) DoProcessNonLive(obj);
|
||||
}
|
||||
@ -93,7 +93,7 @@ LiveObjectList* LiveObjectList::FindLolForId(int id,
|
||||
template <typename T>
|
||||
inline LiveObjectList::Element*
|
||||
LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key) {
|
||||
LiveObjectList *lol = last();
|
||||
LiveObjectList* lol = last();
|
||||
while (lol != NULL) {
|
||||
Element* elements = lol->elements_;
|
||||
for (int i = 0; i < lol->obj_count_; i++) {
|
||||
|
@ -165,7 +165,7 @@ const char* GetObjectTypeDesc(HeapObject* heap_obj) {
|
||||
}
|
||||
|
||||
|
||||
bool IsOfType(LiveObjectType type, HeapObject *obj) {
|
||||
bool IsOfType(LiveObjectType type, HeapObject* obj) {
|
||||
// Note: there are types that are more general (e.g. JSObject) that would
|
||||
// have passed the Is##type_() test for more specialized types (e.g.
|
||||
// JSFunction). If we find a more specialized match but we're looking for
|
||||
@ -211,7 +211,7 @@ static AllocationSpace FindSpaceFor(String* space_str) {
|
||||
}
|
||||
|
||||
|
||||
static bool InSpace(AllocationSpace space, HeapObject *heap_obj) {
|
||||
static bool InSpace(AllocationSpace space, HeapObject* heap_obj) {
|
||||
Heap* heap = ISOLATE->heap();
|
||||
if (space != LO_SPACE) {
|
||||
return heap->InSpace(heap_obj, space);
|
||||
@ -498,7 +498,7 @@ static void GenerateObjectDesc(HeapObject* obj,
|
||||
length);
|
||||
|
||||
} else if (obj->IsString()) {
|
||||
String *str = String::cast(obj);
|
||||
String* str = String::cast(obj);
|
||||
// Only grab up to 160 chars in case they are double byte.
|
||||
// We'll only dump 80 of them after we compact them.
|
||||
const int kMaxCharToDump = 80;
|
||||
@ -842,7 +842,7 @@ class LiveObjectSummary {
|
||||
bool found_root_;
|
||||
bool found_weak_root_;
|
||||
|
||||
LolFilter *filter_;
|
||||
LolFilter* filter_;
|
||||
};
|
||||
|
||||
|
||||
@ -857,8 +857,8 @@ class SummaryWriter {
|
||||
// A summary writer for filling in a summary of lol lists and diffs.
|
||||
class LolSummaryWriter: public SummaryWriter {
|
||||
public:
|
||||
LolSummaryWriter(LiveObjectList *older_lol,
|
||||
LiveObjectList *newer_lol)
|
||||
LolSummaryWriter(LiveObjectList* older_lol,
|
||||
LiveObjectList* newer_lol)
|
||||
: older_(older_lol), newer_(newer_lol) {
|
||||
}
|
||||
|
||||
@ -944,7 +944,7 @@ LiveObjectList::~LiveObjectList() {
|
||||
int LiveObjectList::GetTotalObjCountAndSize(int* size_p) {
|
||||
int size = 0;
|
||||
int count = 0;
|
||||
LiveObjectList *lol = this;
|
||||
LiveObjectList* lol = this;
|
||||
do {
|
||||
// Only compute total size if requested i.e. when size_p is not null.
|
||||
if (size_p != NULL) {
|
||||
@ -1183,7 +1183,7 @@ MaybeObject* LiveObjectList::Capture() {
|
||||
// only time we'll actually delete the lol is when we Reset() or if the lol is
|
||||
// invisible, and its element count reaches 0.
|
||||
bool LiveObjectList::Delete(int id) {
|
||||
LiveObjectList *lol = last();
|
||||
LiveObjectList* lol = last();
|
||||
while (lol != NULL) {
|
||||
if (lol->id() == id) {
|
||||
break;
|
||||
@ -1246,8 +1246,8 @@ MaybeObject* LiveObjectList::Dump(int older_id,
|
||||
newer_id = temp;
|
||||
}
|
||||
|
||||
LiveObjectList *newer_lol = FindLolForId(newer_id, last());
|
||||
LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
|
||||
LiveObjectList* newer_lol = FindLolForId(newer_id, last());
|
||||
LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
|
||||
|
||||
// If the id is defined, and we can't find a LOL for it, then we have an
|
||||
// invalid id.
|
||||
@ -1365,8 +1365,8 @@ MaybeObject* LiveObjectList::Summarize(int older_id,
|
||||
newer_id = temp;
|
||||
}
|
||||
|
||||
LiveObjectList *newer_lol = FindLolForId(newer_id, last());
|
||||
LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
|
||||
LiveObjectList* newer_lol = FindLolForId(newer_id, last());
|
||||
LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
|
||||
|
||||
// If the id is defined, and we can't find a LOL for it, then we have an
|
||||
// invalid id.
|
||||
@ -1626,7 +1626,7 @@ MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) {
|
||||
|
||||
// Deletes all captured lols.
|
||||
void LiveObjectList::Reset() {
|
||||
LiveObjectList *lol = last();
|
||||
LiveObjectList* lol = last();
|
||||
// Just delete the last. Each lol will delete it's prev automatically.
|
||||
delete lol;
|
||||
|
||||
@ -1715,8 +1715,8 @@ class LolVisitor: public ObjectVisitor {
|
||||
|
||||
inline bool AddRootRetainerIfFound(const LolVisitor& visitor,
|
||||
LolFilter* filter,
|
||||
LiveObjectSummary *summary,
|
||||
void (*SetRootFound)(LiveObjectSummary *s),
|
||||
LiveObjectSummary* summary,
|
||||
void (*SetRootFound)(LiveObjectSummary* s),
|
||||
int start,
|
||||
int dump_limit,
|
||||
int* total_count,
|
||||
@ -1762,12 +1762,12 @@ inline bool AddRootRetainerIfFound(const LolVisitor& visitor,
|
||||
}
|
||||
|
||||
|
||||
inline void SetFoundRoot(LiveObjectSummary *summary) {
|
||||
inline void SetFoundRoot(LiveObjectSummary* summary) {
|
||||
summary->set_found_root();
|
||||
}
|
||||
|
||||
|
||||
inline void SetFoundWeakRoot(LiveObjectSummary *summary) {
|
||||
inline void SetFoundWeakRoot(LiveObjectSummary* summary) {
|
||||
summary->set_found_weak_root();
|
||||
}
|
||||
|
||||
@ -1779,7 +1779,7 @@ int LiveObjectList::GetRetainers(Handle<HeapObject> target,
|
||||
int dump_limit,
|
||||
int* total_count,
|
||||
LolFilter* filter,
|
||||
LiveObjectSummary *summary,
|
||||
LiveObjectSummary* summary,
|
||||
JSFunction* arguments_function,
|
||||
Handle<Object> error) {
|
||||
HandleScope scope;
|
||||
@ -2267,7 +2267,7 @@ Object* LiveObjectList::GetPath(int obj_id1,
|
||||
}
|
||||
|
||||
|
||||
void LiveObjectList::DoProcessNonLive(HeapObject *obj) {
|
||||
void LiveObjectList::DoProcessNonLive(HeapObject* obj) {
|
||||
// We should only be called if we have at least one lol to search.
|
||||
ASSERT(last() != NULL);
|
||||
Element* element = last()->Find(obj);
|
||||
@ -2284,7 +2284,7 @@ void LiveObjectList::IterateElementsPrivate(ObjectVisitor* v) {
|
||||
int count = lol->obj_count_;
|
||||
for (int i = 0; i < count; i++) {
|
||||
HeapObject** p = &elements[i].obj_;
|
||||
v->VisitPointer(reinterpret_cast<Object **>(p));
|
||||
v->VisitPointer(reinterpret_cast<Object** >(p));
|
||||
}
|
||||
lol = lol->prev_;
|
||||
}
|
||||
@ -2389,11 +2389,11 @@ void LiveObjectList::GCEpiloguePrivate() {
|
||||
PurgeDuplicates();
|
||||
|
||||
// After the GC, sweep away all free'd Elements and compact.
|
||||
LiveObjectList *prev = NULL;
|
||||
LiveObjectList *next = NULL;
|
||||
LiveObjectList* prev = NULL;
|
||||
LiveObjectList* next = NULL;
|
||||
|
||||
// Iterating from the youngest lol to the oldest lol.
|
||||
for (LiveObjectList *lol = last(); lol; lol = prev) {
|
||||
for (LiveObjectList* lol = last(); lol; lol = prev) {
|
||||
Element* elements = lol->elements_;
|
||||
prev = lol->prev(); // Save the prev.
|
||||
|
||||
@ -2446,7 +2446,7 @@ void LiveObjectList::GCEpiloguePrivate() {
|
||||
const int kMaxUnusedSpace = 64;
|
||||
if (diff > kMaxUnusedSpace) { // Threshold for shrinking.
|
||||
// Shrink the list.
|
||||
Element *new_elements = NewArray<Element>(new_count);
|
||||
Element* new_elements = NewArray<Element>(new_count);
|
||||
memcpy(new_elements, elements, new_count * sizeof(Element));
|
||||
|
||||
DeleteArray<Element>(elements);
|
||||
|
@ -77,7 +77,7 @@ class LiveObjectList {
|
||||
inline static void GCEpilogue();
|
||||
inline static void GCPrologue();
|
||||
inline static void IterateElements(ObjectVisitor* v);
|
||||
inline static void ProcessNonLive(HeapObject *obj);
|
||||
inline static void ProcessNonLive(HeapObject* obj);
|
||||
inline static void UpdateReferencesForScavengeGC();
|
||||
|
||||
// Note: LOLs can be listed by calling Dump(0, <lol id>), and 2 LOLs can be
|
||||
@ -125,7 +125,7 @@ class LiveObjectList {
|
||||
static void GCEpiloguePrivate();
|
||||
static void IterateElementsPrivate(ObjectVisitor* v);
|
||||
|
||||
static void DoProcessNonLive(HeapObject *obj);
|
||||
static void DoProcessNonLive(HeapObject* obj);
|
||||
|
||||
static int CompareElement(const Element* a, const Element* b);
|
||||
|
||||
@ -138,7 +138,7 @@ class LiveObjectList {
|
||||
int dump_limit,
|
||||
int* total_count,
|
||||
LolFilter* filter,
|
||||
LiveObjectSummary *summary,
|
||||
LiveObjectSummary* summary,
|
||||
JSFunction* arguments_function,
|
||||
Handle<Object> error);
|
||||
|
||||
@ -151,7 +151,7 @@ class LiveObjectList {
|
||||
bool is_tracking_roots);
|
||||
|
||||
static bool NeedLOLProcessing() { return (last() != NULL); }
|
||||
static void NullifyNonLivePointer(HeapObject **p) {
|
||||
static void NullifyNonLivePointer(HeapObject** p) {
|
||||
// Mask out the low bit that marks this as a heap object. We'll use this
|
||||
// cleared bit as an indicator that this pointer needs to be collected.
|
||||
//
|
||||
@ -202,7 +202,7 @@ class LiveObjectList {
|
||||
int id_;
|
||||
int capacity_;
|
||||
int obj_count_;
|
||||
Element *elements_;
|
||||
Element* elements_;
|
||||
|
||||
// Statics for managing all the lists.
|
||||
static uint32_t next_element_id_;
|
||||
|
@ -1615,7 +1615,7 @@ void Logger::LogAccessorCallbacks() {
|
||||
}
|
||||
|
||||
|
||||
bool Logger::Setup() {
|
||||
bool Logger::SetUp() {
|
||||
// Tests and EnsureInitialize() can call this twice in a row. It's harmless.
|
||||
if (is_initialized_) return true;
|
||||
is_initialized_ = true;
|
||||
@ -1708,9 +1708,9 @@ FILE* Logger::TearDown() {
|
||||
|
||||
|
||||
void Logger::EnableSlidingStateWindow() {
|
||||
// If the ticker is NULL, Logger::Setup has not been called yet. In
|
||||
// If the ticker is NULL, Logger::SetUp has not been called yet. In
|
||||
// that case, we set the sliding_state_window flag so that the
|
||||
// sliding window computation will be started when Logger::Setup is
|
||||
// sliding window computation will be started when Logger::SetUp is
|
||||
// called.
|
||||
if (ticker_ == NULL) {
|
||||
FLAG_sliding_state_window = true;
|
||||
|
@ -150,14 +150,14 @@ class Logger {
|
||||
#undef DECLARE_ENUM
|
||||
|
||||
// Acquires resources for logging if the right flags are set.
|
||||
bool Setup();
|
||||
bool SetUp();
|
||||
|
||||
void EnsureTickerStarted();
|
||||
void EnsureTickerStopped();
|
||||
|
||||
Sampler* sampler();
|
||||
|
||||
// Frees resources acquired in Setup.
|
||||
// Frees resources acquired in SetUp.
|
||||
// When a temporary file is used for the log, returns its stream descriptor,
|
||||
// leaving the file open.
|
||||
FILE* TearDown();
|
||||
@ -411,7 +411,7 @@ class Logger {
|
||||
NameMap* address_to_name_map_;
|
||||
|
||||
// Guards against multiple calls to TearDown() that can happen in some tests.
|
||||
// 'true' between Setup() and TearDown().
|
||||
// 'true' between SetUp() and TearDown().
|
||||
bool is_initialized_;
|
||||
|
||||
// Support for 'incremental addresses' in compressed logs:
|
||||
|
@ -133,7 +133,7 @@ Object* RelocInfo::target_object() {
|
||||
}
|
||||
|
||||
|
||||
Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
|
||||
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
|
||||
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
|
||||
return Handle<Object>(reinterpret_cast<Object**>(
|
||||
Assembler::target_address_at(pc_)));
|
||||
|
@ -301,7 +301,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
|
||||
own_buffer_ = false;
|
||||
}
|
||||
|
||||
// Setup buffer pointers.
|
||||
// Set up buffer pointers.
|
||||
ASSERT(buffer_ != NULL);
|
||||
pc_ = buffer_;
|
||||
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
|
||||
@ -337,7 +337,7 @@ Assembler::~Assembler() {
|
||||
|
||||
void Assembler::GetCode(CodeDesc* desc) {
|
||||
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
|
||||
// Setup code descriptor.
|
||||
// Set up code descriptor.
|
||||
desc->buffer = buffer_;
|
||||
desc->buffer_size = buffer_size_;
|
||||
desc->instr_size = pc_offset();
|
||||
@ -1970,7 +1970,7 @@ void Assembler::GrowBuffer() {
|
||||
}
|
||||
CHECK_GT(desc.buffer_size, 0); // No overflow.
|
||||
|
||||
// Setup new buffer.
|
||||
// Set up new buffer.
|
||||
desc.buffer = NewArray<byte>(desc.buffer_size);
|
||||
|
||||
desc.instr_size = pc_offset();
|
||||
|
@ -339,7 +339,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
|
||||
t1,
|
||||
call_generic_code);
|
||||
__ IncrementCounter(counters->array_function_native(), 1, a3, t0);
|
||||
// Setup return value, remove receiver from stack and return.
|
||||
// Set up return value, remove receiver from stack and return.
|
||||
__ mov(v0, a2);
|
||||
__ Addu(sp, sp, Operand(kPointerSize));
|
||||
__ Ret();
|
||||
@ -382,7 +382,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
|
||||
call_generic_code);
|
||||
__ IncrementCounter(counters->array_function_native(), 1, a2, t0);
|
||||
|
||||
// Setup return value, remove receiver and argument from stack and return.
|
||||
// Set up return value, remove receiver and argument from stack and return.
|
||||
__ mov(v0, a3);
|
||||
__ Addu(sp, sp, Operand(2 * kPointerSize));
|
||||
__ Ret();
|
||||
@ -981,10 +981,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
// sp[4]: number of arguments (smi-tagged)
|
||||
__ lw(a3, MemOperand(sp, 4 * kPointerSize));
|
||||
|
||||
// Setup pointer to last argument.
|
||||
// Set up pointer to last argument.
|
||||
__ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
|
||||
|
||||
// Setup number of arguments for function call below.
|
||||
// Set up number of arguments for function call below.
|
||||
__ srl(a0, a3, kSmiTagSize);
|
||||
|
||||
// Copy arguments and receiver to the expression stack.
|
||||
|
@ -157,13 +157,13 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
|
||||
// Load the function from the stack.
|
||||
__ lw(a3, MemOperand(sp, 0));
|
||||
|
||||
// Setup the object header.
|
||||
// Set up the object header.
|
||||
__ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
|
||||
__ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
|
||||
__ li(a2, Operand(Smi::FromInt(length)));
|
||||
__ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
|
||||
|
||||
// Setup the fixed slots.
|
||||
// Set up the fixed slots.
|
||||
__ li(a1, Operand(Smi::FromInt(0)));
|
||||
__ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
|
||||
__ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
|
||||
@ -208,7 +208,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
|
||||
// Load the serialized scope info from the stack.
|
||||
__ lw(a1, MemOperand(sp, 1 * kPointerSize));
|
||||
|
||||
// Setup the object header.
|
||||
// Set up the object header.
|
||||
__ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
|
||||
__ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
|
||||
__ li(a2, Operand(Smi::FromInt(length)));
|
||||
@ -229,7 +229,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
|
||||
__ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
|
||||
__ bind(&after_sentinel);
|
||||
|
||||
// Setup the fixed slots.
|
||||
// Set up the fixed slots.
|
||||
__ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
|
||||
__ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
|
||||
__ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
|
||||
@ -4005,7 +4005,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
||||
FrameScope scope(masm, StackFrame::MANUAL);
|
||||
__ EnterExitFrame(save_doubles_);
|
||||
|
||||
// Setup argc and the builtin function in callee-saved registers.
|
||||
// Set up argc and the builtin function in callee-saved registers.
|
||||
__ mov(s0, a0);
|
||||
__ mov(s2, a1);
|
||||
|
||||
@ -4097,7 +4097,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
||||
isolate)));
|
||||
__ lw(t0, MemOperand(t0));
|
||||
__ Push(t3, t2, t1, t0);
|
||||
// Setup frame pointer for the frame to be pushed.
|
||||
// Set up frame pointer for the frame to be pushed.
|
||||
__ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
|
||||
|
||||
// Registers:
|
||||
@ -4584,7 +4584,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
|
||||
__ sw(a3, FieldMemOperand(v0, i));
|
||||
}
|
||||
|
||||
// Setup the callee in-object property.
|
||||
// Set up the callee in-object property.
|
||||
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
|
||||
__ lw(a3, MemOperand(sp, 2 * kPointerSize));
|
||||
const int kCalleeOffset = JSObject::kHeaderSize +
|
||||
@ -4597,7 +4597,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
|
||||
Heap::kArgumentsLengthIndex * kPointerSize;
|
||||
__ sw(a2, FieldMemOperand(v0, kLengthOffset));
|
||||
|
||||
// Setup the elements pointer in the allocated arguments object.
|
||||
// Set up the elements pointer in the allocated arguments object.
|
||||
// If we allocated a parameter map, t0 will point there, otherwise
|
||||
// it will point to the backing store.
|
||||
__ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
|
||||
@ -4774,7 +4774,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
|
||||
// Get the parameters pointer from the stack.
|
||||
__ lw(a2, MemOperand(sp, 1 * kPointerSize));
|
||||
|
||||
// Setup the elements pointer in the allocated arguments object and
|
||||
// Set up the elements pointer in the allocated arguments object and
|
||||
// initialize the header in the elements fixed array.
|
||||
__ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
|
||||
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
|
||||
@ -4786,7 +4786,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
|
||||
|
||||
// Copy the fixed array slots.
|
||||
Label loop;
|
||||
// Setup t0 to point to the first array slot.
|
||||
// Set up t0 to point to the first array slot.
|
||||
__ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
||||
__ bind(&loop);
|
||||
// Pre-decrement a2 with kPointerSize on each iteration.
|
||||
@ -5425,7 +5425,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
|
||||
// of the original receiver from the call site).
|
||||
__ bind(&non_function);
|
||||
__ sw(a1, MemOperand(sp, argc_ * kPointerSize));
|
||||
__ li(a0, Operand(argc_)); // Setup the number of arguments.
|
||||
__ li(a0, Operand(argc_)); // Set up the number of arguments.
|
||||
__ mov(a2, zero_reg);
|
||||
__ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
|
||||
__ SetCallKind(t1, CALL_AS_METHOD);
|
||||
|
@ -125,7 +125,7 @@ class Registers {
|
||||
|
||||
struct RegisterAlias {
|
||||
int reg;
|
||||
const char *name;
|
||||
const char* name;
|
||||
};
|
||||
|
||||
static const int32_t kMaxValue = 0x7fffffff;
|
||||
@ -147,7 +147,7 @@ class FPURegisters {
|
||||
|
||||
struct RegisterAlias {
|
||||
int creg;
|
||||
const char *name;
|
||||
const char* name;
|
||||
};
|
||||
|
||||
private:
|
||||
|
@ -47,7 +47,7 @@ namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
|
||||
void CPU::Setup() {
|
||||
void CPU::SetUp() {
|
||||
CpuFeatures::Probe();
|
||||
}
|
||||
|
||||
|
@ -326,7 +326,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
|
||||
output_[0] = input_;
|
||||
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
|
||||
} else {
|
||||
// Setup the frame pointer and the context pointer.
|
||||
// Set up the frame pointer and the context pointer.
|
||||
output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
|
||||
output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
|
||||
|
||||
|
@ -1017,7 +1017,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
|
||||
__ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset));
|
||||
__ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
|
||||
|
||||
// Setup the four remaining stack slots.
|
||||
// Set up the four remaining stack slots.
|
||||
__ push(v0); // Map.
|
||||
__ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset));
|
||||
__ li(a0, Operand(Smi::FromInt(0)));
|
||||
|
@ -2794,7 +2794,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
|
||||
__ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
|
||||
__ Call(at);
|
||||
|
||||
// Setup deoptimization.
|
||||
// Set up deoptimization.
|
||||
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
|
||||
|
||||
// Restore context.
|
||||
|
@ -423,7 +423,7 @@ class LDeferredCode: public ZoneObject {
|
||||
virtual void Generate() = 0;
|
||||
virtual LInstruction* instr() = 0;
|
||||
|
||||
void SetExit(Label *exit) { external_exit_ = exit; }
|
||||
void SetExit(Label* exit) { external_exit_ = exit; }
|
||||
Label* entry() { return &entry_; }
|
||||
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
|
||||
int instruction_index() const { return instruction_index_; }
|
||||
|
@ -4279,7 +4279,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
|
||||
|
||||
void MacroAssembler::EnterExitFrame(bool save_doubles,
|
||||
int stack_space) {
|
||||
// Setup the frame structure on the stack.
|
||||
// Set up the frame structure on the stack.
|
||||
STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
|
||||
STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
|
||||
STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
|
||||
@ -4297,7 +4297,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
|
||||
addiu(sp, sp, -4 * kPointerSize);
|
||||
sw(ra, MemOperand(sp, 3 * kPointerSize));
|
||||
sw(fp, MemOperand(sp, 2 * kPointerSize));
|
||||
addiu(fp, sp, 2 * kPointerSize); // Setup new frame pointer.
|
||||
addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
|
||||
|
||||
if (emit_debug_code()) {
|
||||
sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
|
||||
|
@ -799,7 +799,7 @@ class MacroAssembler: public Assembler {
|
||||
// -------------------------------------------------------------------------
|
||||
// JavaScript invokes.
|
||||
|
||||
// Setup call kind marking in t1. The method takes t1 as an
|
||||
// Set up call kind marking in t1. The method takes t1 as an
|
||||
// explicit first parameter to make the code more readable at the
|
||||
// call sites.
|
||||
void SetCallKind(Register dst, CallKind kind);
|
||||
|
@ -888,7 +888,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
|
||||
isolate_->set_simulator_i_cache(i_cache_);
|
||||
}
|
||||
Initialize(isolate);
|
||||
// Setup simulator support first. Some of this information is needed to
|
||||
// Set up simulator support first. Some of this information is needed to
|
||||
// setup the architecture state.
|
||||
stack_ = reinterpret_cast<char*>(malloc(stack_size_));
|
||||
pc_modified_ = false;
|
||||
@ -897,7 +897,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
|
||||
break_pc_ = NULL;
|
||||
break_instr_ = 0;
|
||||
|
||||
// Setup architecture state.
|
||||
// Set up architecture state.
|
||||
// All registers are initialized to zero to start with.
|
||||
for (int i = 0; i < kNumSimuRegisters; i++) {
|
||||
registers_[i] = 0;
|
||||
@ -1944,7 +1944,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
|
||||
// Next pc
|
||||
int32_t next_pc = 0;
|
||||
|
||||
// Setup the variables if needed before executing the instruction.
|
||||
// Set up the variables if needed before executing the instruction.
|
||||
ConfigureTypeRegister(instr,
|
||||
alu_out,
|
||||
i64hilo,
|
||||
@ -2711,7 +2711,7 @@ void Simulator::Execute() {
|
||||
int32_t Simulator::Call(byte* entry, int argument_count, ...) {
|
||||
va_list parameters;
|
||||
va_start(parameters, argument_count);
|
||||
// Setup arguments.
|
||||
// Set up arguments.
|
||||
|
||||
// First four arguments passed in registers.
|
||||
ASSERT(argument_count >= 4);
|
||||
@ -2758,7 +2758,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
|
||||
int32_t sp_val = get_register(sp);
|
||||
int32_t fp_val = get_register(fp);
|
||||
|
||||
// Setup the callee-saved registers with a known value. To be able to check
|
||||
// Set up the callee-saved registers with a known value. To be able to check
|
||||
// that they are preserved properly across JS execution.
|
||||
int32_t callee_saved_value = icount_;
|
||||
set_register(s0, callee_saved_value);
|
||||
|
@ -1173,7 +1173,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
|
||||
__ EnterExitFrame(false, kApiStackSpace);
|
||||
|
||||
// Create AccessorInfo instance on the stack above the exit frame with
|
||||
// scratch2 (internal::Object **args_) as the data.
|
||||
// scratch2 (internal::Object** args_) as the data.
|
||||
__ sw(a2, MemOperand(sp, kPointerSize));
|
||||
// a2 (second argument - see note above) = AccessorInfo&
|
||||
__ Addu(a2, sp, kPointerSize);
|
||||
@ -2430,7 +2430,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
|
||||
__ sw(a3, MemOperand(sp, argc * kPointerSize));
|
||||
}
|
||||
|
||||
// Setup the context (function already in r1).
|
||||
// Set up the context (function already in r1).
|
||||
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
|
||||
|
||||
// Jump to the cached code (tail call).
|
||||
|
@ -1186,8 +1186,8 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
|
||||
|
||||
if (directive_prologue) {
|
||||
// A shot at a directive.
|
||||
ExpressionStatement *e_stat;
|
||||
Literal *literal;
|
||||
ExpressionStatement* e_stat;
|
||||
Literal* literal;
|
||||
// Still processing directive prologue?
|
||||
if ((e_stat = stat->AsExpressionStatement()) != NULL &&
|
||||
(literal = e_stat->expression()->AsLiteral()) != NULL &&
|
||||
@ -1562,7 +1562,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
|
||||
|
||||
// TODO(1240846): It's weird that native function declarations are
|
||||
// introduced dynamically when we meet their declarations, whereas
|
||||
// other functions are setup when entering the surrounding scope.
|
||||
// other functions are set up when entering the surrounding scope.
|
||||
SharedFunctionInfoLiteral* lit =
|
||||
new(zone()) SharedFunctionInfoLiteral(isolate(), shared);
|
||||
VariableProxy* var = Declare(name, VAR, NULL, true, CHECK_OK);
|
||||
@ -3607,7 +3607,7 @@ void ObjectLiteralPropertyChecker::CheckProperty(
|
||||
|
||||
ASSERT(property != NULL);
|
||||
|
||||
Literal *lit = property->key();
|
||||
Literal* lit = property->key();
|
||||
Handle<Object> handle = lit->handle();
|
||||
|
||||
uint32_t hash;
|
||||
|
@ -61,7 +61,7 @@ double ceiling(double x) {
|
||||
static Mutex* limit_mutex = NULL;
|
||||
|
||||
|
||||
void OS::Setup() {
|
||||
void OS::SetUp() {
|
||||
// Seed the random number generator.
|
||||
// Convert the current time to a 64-bit integer first, before converting it
|
||||
// to an unsigned. Going directly can cause an overflow and the seed to be
|
||||
@ -290,7 +290,7 @@ void OS::LogSharedLibraryAddresses() {
|
||||
}
|
||||
LOG(isolate, SharedLibraryEvent(lib_name, start, end));
|
||||
} else {
|
||||
// Entry not describing executable data. Skip to end of line to setup
|
||||
// Entry not describing executable data. Skip to end of line to set up
|
||||
// reading the next entry.
|
||||
do {
|
||||
c = getc(fp);
|
||||
|
@ -79,7 +79,7 @@ double ceiling(double x) {
|
||||
static Mutex* limit_mutex = NULL;
|
||||
|
||||
|
||||
void OS::Setup() {
|
||||
void OS::SetUp() {
|
||||
// Seed the random number generator.
|
||||
// Convert the current time to a 64-bit integer first, before converting it
|
||||
// to an unsigned. Going directly can cause an overflow and the seed to be
|
||||
|
@ -78,7 +78,7 @@ double ceiling(double x) {
|
||||
static Mutex* limit_mutex = NULL;
|
||||
|
||||
|
||||
void OS::Setup() {
|
||||
void OS::SetUp() {
|
||||
// Seed the random number generator. We preserve microsecond resolution.
|
||||
uint64_t seed = Ticks() ^ (getpid() << 16);
|
||||
srandom(static_cast<unsigned int>(seed));
|
||||
@ -512,7 +512,7 @@ void OS::LogSharedLibraryAddresses() {
|
||||
}
|
||||
LOG(isolate, SharedLibraryEvent(lib_name, start, end));
|
||||
} else {
|
||||
// Entry not describing executable data. Skip to end of line to setup
|
||||
// Entry not describing executable data. Skip to end of line to set up
|
||||
// reading the next entry.
|
||||
do {
|
||||
c = getc(fp);
|
||||
|
@ -93,7 +93,7 @@ double ceiling(double x) {
|
||||
static Mutex* limit_mutex = NULL;
|
||||
|
||||
|
||||
void OS::Setup() {
|
||||
void OS::SetUp() {
|
||||
// Seed the random number generator. We preserve microsecond resolution.
|
||||
uint64_t seed = Ticks() ^ (getpid() << 16);
|
||||
srandom(static_cast<unsigned int>(seed));
|
||||
|
@ -56,7 +56,7 @@ double modulo(double x, double y) {
|
||||
|
||||
|
||||
// Initialize OS class early in the V8 startup.
|
||||
void OS::Setup() {
|
||||
void OS::SetUp() {
|
||||
// Seed the random number generator.
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
@ -99,7 +99,7 @@ static void* GetRandomMmapAddr() {
|
||||
}
|
||||
|
||||
|
||||
void OS::Setup() {
|
||||
void OS::SetUp() {
|
||||
// Seed the random number generator. We preserve microsecond resolution.
|
||||
uint64_t seed = Ticks() ^ (getpid() << 16);
|
||||
srandom(static_cast<unsigned int>(seed));
|
||||
@ -312,7 +312,7 @@ void OS::LogSharedLibraryAddresses() {
|
||||
}
|
||||
LOG(isolate, SharedLibraryEvent(lib_name, start, end));
|
||||
} else {
|
||||
// Entry not describing executable data. Skip to end of line to setup
|
||||
// Entry not describing executable data. Skip to end of line to set up
|
||||
// reading the next entry.
|
||||
do {
|
||||
c = getc(fp);
|
||||
|
@ -461,7 +461,7 @@ bool POSIXSocket::SetReuseAddress(bool reuse_address) {
|
||||
}
|
||||
|
||||
|
||||
bool Socket::Setup() {
|
||||
bool Socket::SetUp() {
|
||||
// Nothing to do on POSIX.
|
||||
return true;
|
||||
}
|
||||
|
@ -89,7 +89,7 @@ double ceiling(double x) {
|
||||
|
||||
|
||||
static Mutex* limit_mutex = NULL;
|
||||
void OS::Setup() {
|
||||
void OS::SetUp() {
|
||||
// Seed the random number generator.
|
||||
// Convert the current time to a 64-bit integer first, before converting it
|
||||
// to an unsigned. Going directly will cause an overflow and the seed to be
|
||||
|
@ -528,7 +528,7 @@ char* Time::LocalTimezone() {
|
||||
}
|
||||
|
||||
|
||||
void OS::Setup() {
|
||||
void OS::SetUp() {
|
||||
// Seed the random number generator.
|
||||
// Convert the current time to a 64-bit integer first, before converting it
|
||||
// to an unsigned. Going directly can cause an overflow and the seed to be
|
||||
@ -1825,7 +1825,7 @@ bool Win32Socket::SetReuseAddress(bool reuse_address) {
|
||||
}
|
||||
|
||||
|
||||
bool Socket::Setup() {
|
||||
bool Socket::SetUp() {
|
||||
// Initialize Winsock32
|
||||
int err;
|
||||
WSADATA winsock_data;
|
||||
|
@ -109,7 +109,7 @@ class Socket;
|
||||
class OS {
|
||||
public:
|
||||
// Initializes the platform OS support. Called once at VM startup.
|
||||
static void Setup();
|
||||
static void SetUp();
|
||||
|
||||
// Returns the accumulated user time for thread. This routine
|
||||
// can be used for profiling. The implementation should
|
||||
@ -477,7 +477,7 @@ class Thread {
|
||||
PlatformData* data() { return data_; }
|
||||
|
||||
private:
|
||||
void set_name(const char *name);
|
||||
void set_name(const char* name);
|
||||
|
||||
PlatformData* data_;
|
||||
|
||||
@ -593,7 +593,7 @@ class Socket {
|
||||
|
||||
virtual bool IsValid() const = 0;
|
||||
|
||||
static bool Setup();
|
||||
static bool SetUp();
|
||||
static int LastError();
|
||||
static uint16_t HToN(uint16_t value);
|
||||
static uint16_t NToH(uint16_t value);
|
||||
|
@ -630,7 +630,7 @@ class PreParser {
|
||||
|
||||
void SetStrictModeViolation(i::Scanner::Location,
|
||||
const char* type,
|
||||
bool *ok);
|
||||
bool* ok);
|
||||
|
||||
void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
|
||||
|
||||
|
@ -904,7 +904,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
|
||||
entry++;
|
||||
}
|
||||
|
||||
for (const Address *stack_pos = sample.stack,
|
||||
for (const Address* stack_pos = sample.stack,
|
||||
*stack_end = stack_pos + sample.frames_count;
|
||||
stack_pos != stack_end;
|
||||
++stack_pos) {
|
||||
@ -1595,7 +1595,7 @@ Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) {
|
||||
}
|
||||
|
||||
|
||||
HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
|
||||
HeapEntry* const HeapEntriesMap::kHeapEntryPlaceholder =
|
||||
reinterpret_cast<HeapEntry*>(1);
|
||||
|
||||
HeapEntriesMap::HeapEntriesMap()
|
||||
@ -1724,16 +1724,16 @@ void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
|
||||
}
|
||||
|
||||
|
||||
HeapObject *const V8HeapExplorer::kInternalRootObject =
|
||||
HeapObject* const V8HeapExplorer::kInternalRootObject =
|
||||
reinterpret_cast<HeapObject*>(
|
||||
static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
|
||||
HeapObject *const V8HeapExplorer::kGcRootsObject =
|
||||
HeapObject* const V8HeapExplorer::kGcRootsObject =
|
||||
reinterpret_cast<HeapObject*>(
|
||||
static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
|
||||
HeapObject *const V8HeapExplorer::kFirstGcSubrootObject =
|
||||
HeapObject* const V8HeapExplorer::kFirstGcSubrootObject =
|
||||
reinterpret_cast<HeapObject*>(
|
||||
static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
|
||||
HeapObject *const V8HeapExplorer::kLastGcSubrootObject =
|
||||
HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
|
||||
reinterpret_cast<HeapObject*>(
|
||||
static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
|
||||
|
||||
|
@ -834,7 +834,7 @@ class HeapEntriesMap {
|
||||
int total_children_count() { return total_children_count_; }
|
||||
int total_retainers_count() { return total_retainers_count_; }
|
||||
|
||||
static HeapEntry *const kHeapEntryPlaceholder;
|
||||
static HeapEntry* const kHeapEntryPlaceholder;
|
||||
|
||||
private:
|
||||
struct EntryInfo {
|
||||
|
@ -65,7 +65,7 @@ Atomic32 RuntimeProfiler::state_ = 0;
|
||||
Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
|
||||
|
||||
#ifdef DEBUG
|
||||
bool RuntimeProfiler::has_been_globally_setup_ = false;
|
||||
bool RuntimeProfiler::has_been_globally_set_up_ = false;
|
||||
#endif
|
||||
bool RuntimeProfiler::enabled_ = false;
|
||||
|
||||
@ -82,10 +82,10 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
|
||||
|
||||
|
||||
void RuntimeProfiler::GlobalSetup() {
|
||||
ASSERT(!has_been_globally_setup_);
|
||||
ASSERT(!has_been_globally_set_up_);
|
||||
enabled_ = V8::UseCrankshaft() && FLAG_opt;
|
||||
#ifdef DEBUG
|
||||
has_been_globally_setup_ = true;
|
||||
has_been_globally_set_up_ = true;
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -245,8 +245,8 @@ void RuntimeProfiler::NotifyTick() {
|
||||
}
|
||||
|
||||
|
||||
void RuntimeProfiler::Setup() {
|
||||
ASSERT(has_been_globally_setup_);
|
||||
void RuntimeProfiler::SetUp() {
|
||||
ASSERT(has_been_globally_set_up_);
|
||||
ClearSampleBuffer();
|
||||
// If the ticker hasn't already started, make sure to do so to get
|
||||
// the ticks for the runtime profiler.
|
||||
|
@ -46,7 +46,7 @@ class RuntimeProfiler {
|
||||
static void GlobalSetup();
|
||||
|
||||
static inline bool IsEnabled() {
|
||||
ASSERT(has_been_globally_setup_);
|
||||
ASSERT(has_been_globally_set_up_);
|
||||
return enabled_;
|
||||
}
|
||||
|
||||
@ -54,7 +54,7 @@ class RuntimeProfiler {
|
||||
|
||||
void NotifyTick();
|
||||
|
||||
void Setup();
|
||||
void SetUp();
|
||||
void Reset();
|
||||
void TearDown();
|
||||
|
||||
@ -126,7 +126,7 @@ class RuntimeProfiler {
|
||||
static Semaphore* semaphore_;
|
||||
|
||||
#ifdef DEBUG
|
||||
static bool has_been_globally_setup_;
|
||||
static bool has_been_globally_set_up_;
|
||||
#endif
|
||||
static bool enabled_;
|
||||
};
|
||||
|
@ -132,7 +132,7 @@ CodeRange::CodeRange(Isolate* isolate)
|
||||
}
|
||||
|
||||
|
||||
bool CodeRange::Setup(const size_t requested) {
|
||||
bool CodeRange::SetUp(const size_t requested) {
|
||||
ASSERT(code_range_ == NULL);
|
||||
|
||||
code_range_ = new VirtualMemory(requested);
|
||||
@ -268,7 +268,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
|
||||
}
|
||||
|
||||
|
||||
bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
|
||||
bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
|
||||
capacity_ = RoundUp(capacity, Page::kPageSize);
|
||||
capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
|
||||
ASSERT_GE(capacity_, capacity_executable_);
|
||||
@ -671,12 +671,12 @@ PagedSpace::PagedSpace(Heap* heap,
|
||||
}
|
||||
|
||||
|
||||
bool PagedSpace::Setup() {
|
||||
bool PagedSpace::SetUp() {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
bool PagedSpace::HasBeenSetup() {
|
||||
bool PagedSpace::HasBeenSetUp() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -874,9 +874,9 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
|
||||
// NewSpace implementation
|
||||
|
||||
|
||||
bool NewSpace::Setup(int reserved_semispace_capacity,
|
||||
bool NewSpace::SetUp(int reserved_semispace_capacity,
|
||||
int maximum_semispace_capacity) {
|
||||
// Setup new space based on the preallocated memory block defined by
|
||||
// Set up new space based on the preallocated memory block defined by
|
||||
// start and size. The provided space is divided into two semi-spaces.
|
||||
// To support fast containment testing in the new space, the size of
|
||||
// this chunk must be a power of two and it must be aligned to its size.
|
||||
@ -895,7 +895,7 @@ bool NewSpace::Setup(int reserved_semispace_capacity,
|
||||
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
|
||||
ASSERT(IsPowerOf2(maximum_semispace_capacity));
|
||||
|
||||
// Allocate and setup the histogram arrays if necessary.
|
||||
// Allocate and set up the histogram arrays if necessary.
|
||||
allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
|
||||
promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
|
||||
|
||||
@ -909,12 +909,12 @@ bool NewSpace::Setup(int reserved_semispace_capacity,
|
||||
2 * heap()->ReservedSemiSpaceSize());
|
||||
ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
|
||||
|
||||
if (!to_space_.Setup(chunk_base_,
|
||||
if (!to_space_.SetUp(chunk_base_,
|
||||
initial_semispace_capacity,
|
||||
maximum_semispace_capacity)) {
|
||||
return false;
|
||||
}
|
||||
if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity,
|
||||
if (!from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
|
||||
initial_semispace_capacity,
|
||||
maximum_semispace_capacity)) {
|
||||
return false;
|
||||
@ -1149,7 +1149,7 @@ void NewSpace::Verify() {
|
||||
// -----------------------------------------------------------------------------
|
||||
// SemiSpace implementation
|
||||
|
||||
bool SemiSpace::Setup(Address start,
|
||||
bool SemiSpace::SetUp(Address start,
|
||||
int initial_capacity,
|
||||
int maximum_capacity) {
|
||||
// Creates a space in the young generation. The constructor does not
|
||||
@ -2411,7 +2411,7 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap,
|
||||
objects_size_(0) {}
|
||||
|
||||
|
||||
bool LargeObjectSpace::Setup() {
|
||||
bool LargeObjectSpace::SetUp() {
|
||||
first_page_ = NULL;
|
||||
size_ = 0;
|
||||
page_count_ = 0;
|
||||
@ -2431,7 +2431,7 @@ void LargeObjectSpace::TearDown() {
|
||||
space, kAllocationActionFree, page->size());
|
||||
heap()->isolate()->memory_allocator()->Free(page);
|
||||
}
|
||||
Setup();
|
||||
SetUp();
|
||||
}
|
||||
|
||||
|
||||
|
src/spaces.h
@ -815,7 +815,7 @@ class CodeRange {
// Reserves a range of virtual memory, but does not commit any of it.
// Can only be called once, at heap initialization time.
// Returns false on failure.
bool Setup(const size_t requested_size);
bool SetUp(const size_t requested_size);

// Frees the range of virtual memory, and frees the data structures used to
// manage it.

@ -943,7 +943,7 @@ class MemoryAllocator {

// Initializes its internal bookkeeping structures.
// Max capacity of the total space and executable memory limit.
bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

void TearDown();

@ -1419,11 +1419,11 @@ class PagedSpace : public Space {
// the memory allocator's initial chunk) if possible. If the block of
// addresses is not big enough to contain a single page-aligned page, a
// fresh chunk will be allocated.
bool Setup();
bool SetUp();

// Returns true if the space has been successfully set up and not
// subsequently torn down.
bool HasBeenSetup();
bool HasBeenSetUp();

// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.

@ -1821,14 +1821,14 @@ class SemiSpace : public Space {
current_page_(NULL) { }

// Sets up the semispace using the given chunk.
bool Setup(Address start, int initial_capacity, int maximum_capacity);
bool SetUp(Address start, int initial_capacity, int maximum_capacity);

// Tear down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
void TearDown();

// True if the space has been set up but not torn down.
bool HasBeenSetup() { return start_ != NULL; }
bool HasBeenSetUp() { return start_ != NULL; }

// Grow the semispace to the new capacity. The new capacity
// requested must be larger than the current capacity and less than

@ -2067,15 +2067,15 @@ class NewSpace : public Space {
inline_allocation_limit_step_(0) {}

// Sets up the new space using the given chunk.
bool Setup(int reserved_semispace_size_, int max_semispace_size);
bool SetUp(int reserved_semispace_size_, int max_semispace_size);

// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
void TearDown();

// True if the space has been set up but not torn down.
bool HasBeenSetup() {
return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
bool HasBeenSetUp() {
return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
}

// Flip the pair of spaces.

@ -2474,7 +2474,7 @@ class LargeObjectSpace : public Space {
virtual ~LargeObjectSpace() {}

// Initializes internal data structures.
bool Setup();
bool SetUp();

// Releases internal resources, frees objects in this space.
void TearDown();

@ -55,7 +55,7 @@ StoreBuffer::StoreBuffer(Heap* heap)
}


void StoreBuffer::Setup() {
void StoreBuffer::SetUp() {
virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
uintptr_t start_as_int =
reinterpret_cast<uintptr_t>(virtual_memory_->address());

@ -54,7 +54,7 @@ class StoreBuffer {

inline Address TopAddress();

void Setup();
void SetUp();
void TearDown();

// This is used by the mutator to enter addresses into the store buffer.
src/v8.cc
@ -47,7 +47,7 @@ static Mutex* init_once_mutex = OS::CreateMutex();
static bool init_once_called = false;

bool V8::is_running_ = false;
bool V8::has_been_setup_ = false;
bool V8::has_been_set_up_ = false;
bool V8::has_been_disposed_ = false;
bool V8::has_fatal_error_ = false;
bool V8::use_crankshaft_ = true;

@ -82,7 +82,7 @@ bool V8::Initialize(Deserializer* des) {
if (isolate->IsInitialized()) return true;

is_running_ = true;
has_been_setup_ = true;
has_been_set_up_ = true;
has_fatal_error_ = false;
has_been_disposed_ = false;

@ -100,7 +100,7 @@ void V8::TearDown() {
Isolate* isolate = Isolate::Current();
ASSERT(isolate->IsDefaultIsolate());

if (!has_been_setup_ || has_been_disposed_) return;
if (!has_been_set_up_ || has_been_disposed_) return;
isolate->TearDown();

is_running_ = false;

@ -239,8 +239,8 @@ void V8::InitializeOncePerProcess() {
if (init_once_called) return;
init_once_called = true;

// Setup the platform OS support.
OS::Setup();
// Set up the platform OS support.
OS::SetUp();

use_crankshaft_ = FLAG_crankshaft;

@ -248,7 +248,7 @@ void V8::InitializeOncePerProcess() {
use_crankshaft_ = false;
}

CPU::Setup();
CPU::SetUp();
if (!CPU::SupportsCrankshaft()) {
use_crankshaft_ = false;
}
src/v8.h
@ -118,7 +118,7 @@ class V8 : public AllStatic {
// True if engine is currently running
static bool is_running_;
// True if V8 has ever been run
static bool has_been_setup_;
static bool has_been_set_up_;
// True if error has been signaled for current engine
// (reset to false if engine is restarted)
static bool has_fatal_error_;

@ -262,7 +262,7 @@ Object* RelocInfo::target_object() {
}


Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
if (rmode_ == EMBEDDED_OBJECT) {
return Memory::Object_Handle_at(pc_);

@ -383,7 +383,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
}
#endif

// Setup buffer pointers.
// Set up buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);

@ -412,7 +412,7 @@ void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
// Setup code descriptor.
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();

@ -502,7 +502,7 @@ void Assembler::GrowBuffer() {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}

// Setup new buffer.
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size =

@ -337,7 +337,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(rbx);
__ push(rbx);

// Setup pointer to last argument.
// Set up pointer to last argument.
__ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));

// Copy arguments and receiver to the expression stack.

@ -1198,7 +1198,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// Both registers are preserved by this code so no need to differentiate between
// a construct call and a normal call.
static void ArrayNativeCode(MacroAssembler* masm,
Label *call_generic_code) {
Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
has_non_smi_element;

@ -124,12 +124,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Get the function from the stack.
__ movq(rcx, Operand(rsp, 1 * kPointerSize));

// Setup the object header.
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));

// Setup the fixed slots.
// Set up the fixed slots.
__ Set(rbx, 0); // Set to NULL.
__ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
__ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);

@ -173,7 +173,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Get the serialized scope info from the stack.
__ movq(rbx, Operand(rsp, 2 * kPointerSize));

// Setup the object header.
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));

@ -194,7 +194,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);

// Setup the fixed slots.
// Set up the fixed slots.
__ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
__ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
__ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);

@ -2399,7 +2399,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ movq(FieldOperand(rax, i), rdx);
}

// Setup the callee in-object property.
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ movq(rdx, Operand(rsp, 3 * kPointerSize));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +

@ -2414,7 +2414,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);

// Setup the elements pointer in the allocated arguments object.
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
__ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));

@ -2621,7 +2621,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the parameters pointer from the stack.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));

// Setup the elements pointer in the allocated arguments object and
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);

@ -3942,7 +3942,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label not_outermost_js, not_outermost_js_2;
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
// Setup frame.
// Set up frame.
__ push(rbp);
__ movq(rbp, rsp);

@ -5081,7 +5081,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ ret(3 * kPointerSize);

__ bind(&make_two_character_string);
// Setup registers for allocating the two character string.
// Set up registers for allocating the two character string.
__ movzxwq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
__ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
__ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);

@ -41,7 +41,7 @@
namespace v8 {
namespace internal {

void CPU::Setup() {
void CPU::SetUp() {
CpuFeatures::Probe();
}

@ -314,7 +314,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
} else {
// Setup the frame pointer and the context pointer.
// Set up the frame pointer and the context pointer.
output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));

@ -967,7 +967,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
__ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));

// Setup the four remaining stack slots.
// Set up the four remaining stack slots.
__ push(rax); // Map.
__ push(rdx); // Enumeration cache.
__ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));