From b3e0761e380e8a59406664bbc108aaf5cecff9ef Mon Sep 17 00:00:00 2001 From: "erik.corry@gmail.com" Date: Fri, 13 Jan 2012 13:09:52 +0000 Subject: [PATCH] Cosmetic changes ("set up" is a verb, "setup" is a noun). Review URL: http://codereview.chromium.org/9139051 git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10399 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- src/arm/assembler-arm.cc | 6 ++-- src/arm/builtins-arm.cc | 8 ++--- src/arm/code-stubs-arm.cc | 24 ++++++------- src/arm/cpu-arm.cc | 2 +- src/arm/deoptimizer-arm.cc | 2 +- src/arm/full-codegen-arm.cc | 2 +- src/arm/lithium-codegen-arm.cc | 2 +- src/arm/lithium-codegen-arm.h | 2 +- src/arm/macro-assembler-arm.cc | 4 +-- src/arm/macro-assembler-arm.h | 2 +- src/arm/simulator-arm.cc | 8 ++--- src/arm/stub-cache-arm.cc | 4 +-- src/assembler.h | 2 +- src/atomicops_internals_x86_macosx.h | 44 ++++++++++++------------ src/bootstrapper.cc | 10 +++--- src/bootstrapper.h | 2 +- src/builtins.cc | 2 +- src/builtins.h | 2 +- src/cpu-profiler.cc | 4 +-- src/cpu-profiler.h | 6 ++-- src/cpu.h | 2 +- src/d8-debug.cc | 2 +- src/debug-debugger.js | 2 +- src/debug.cc | 6 ++-- src/debug.h | 2 +- src/factory.cc | 4 +-- src/frames.cc | 2 +- src/full-codegen.cc | 4 +-- src/gdb-jit.cc | 44 ++++++++++++------------ src/heap-inl.h | 2 +- src/heap-profiler.cc | 2 +- src/heap-profiler.h | 2 +- src/heap.cc | 50 ++++++++++++++-------------- src/heap.h | 10 +++--- src/hydrogen.cc | 4 +-- src/hydrogen.h | 2 +- src/ia32/assembler-ia32.cc | 6 ++-- src/ia32/builtins-ia32.cc | 2 +- src/ia32/code-stubs-ia32.cc | 16 ++++----- src/ia32/cpu-ia32.cc | 2 +- src/ia32/deoptimizer-ia32.cc | 2 +- src/ia32/full-codegen-ia32.cc | 2 +- src/ia32/lithium-codegen-ia32.cc | 4 +-- src/ia32/lithium-codegen-ia32.h | 2 +- src/ia32/macro-assembler-ia32.h | 2 +- src/ia32/stub-cache-ia32.cc | 2 +- src/inspector.cc | 4 +-- src/inspector.h | 8 ++--- src/isolate.cc | 18 +++++----- src/lithium-allocator.cc | 4 +-- src/lithium.h | 10 +++--- src/liveedit.cc | 2 
+- src/liveobjectlist-inl.h | 4 +-- src/liveobjectlist.cc | 48 +++++++++++++------------- src/liveobjectlist.h | 10 +++--- src/log.cc | 6 ++-- src/log.h | 6 ++-- src/mips/assembler-mips-inl.h | 2 +- src/mips/assembler-mips.cc | 6 ++-- src/mips/builtins-mips.cc | 8 ++--- src/mips/code-stubs-mips.cc | 22 ++++++------ src/mips/constants-mips.h | 4 +-- src/mips/cpu-mips.cc | 2 +- src/mips/deoptimizer-mips.cc | 2 +- src/mips/full-codegen-mips.cc | 2 +- src/mips/lithium-codegen-mips.cc | 2 +- src/mips/lithium-codegen-mips.h | 2 +- src/mips/macro-assembler-mips.cc | 4 +-- src/mips/macro-assembler-mips.h | 2 +- src/mips/simulator-mips.cc | 10 +++--- src/mips/stub-cache-mips.cc | 4 +-- src/parser.cc | 8 ++--- src/platform-cygwin.cc | 4 +-- src/platform-freebsd.cc | 2 +- src/platform-linux.cc | 4 +-- src/platform-macos.cc | 2 +- src/platform-nullos.cc | 2 +- src/platform-openbsd.cc | 4 +-- src/platform-posix.cc | 2 +- src/platform-solaris.cc | 2 +- src/platform-win32.cc | 4 +-- src/platform.h | 6 ++-- src/preparser.h | 2 +- src/profile-generator.cc | 12 +++---- src/profile-generator.h | 2 +- src/runtime-profiler.cc | 10 +++--- src/runtime-profiler.h | 6 ++-- src/spaces.cc | 24 ++++++------- src/spaces.h | 20 +++++------ src/store-buffer.cc | 2 +- src/store-buffer.h | 2 +- src/v8.cc | 12 +++---- src/v8.h | 2 +- src/x64/assembler-x64-inl.h | 2 +- src/x64/assembler-x64.cc | 6 ++-- src/x64/builtins-x64.cc | 4 +-- src/x64/code-stubs-x64.cc | 18 +++++----- src/x64/cpu-x64.cc | 2 +- src/x64/deoptimizer-x64.cc | 2 +- src/x64/full-codegen-x64.cc | 2 +- src/x64/lithium-codegen-x64.cc | 6 ++-- src/x64/lithium-codegen-x64.h | 2 +- src/x64/macro-assembler-x64.cc | 6 ++-- src/x64/macro-assembler-x64.h | 2 +- src/x64/stub-cache-x64.cc | 2 +- test/cctest/cctest.h | 2 +- test/cctest/test-alloc.cc | 4 +-- test/cctest/test-api.cc | 18 +++++----- test/cctest/test-assembler-x64.cc | 14 ++++---- test/cctest/test-cpu-profiler.cc | 4 +-- test/cctest/test-debug.cc | 16 ++++----- 
test/cctest/test-disasm-arm.cc | 14 ++++---- test/cctest/test-disasm-mips.cc | 6 ++-- test/cctest/test-platform-linux.cc | 2 +- test/cctest/test-platform-win32.cc | 2 +- test/cctest/test-sockets.cc | 2 +- test/cctest/test-spaces.cc | 18 +++++----- test/cctest/test-utils.cc | 2 +- 118 files changed, 401 insertions(+), 401 deletions(-) diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc index 329493a340..25922361a2 100644 --- a/src/arm/assembler-arm.cc +++ b/src/arm/assembler-arm.cc @@ -317,7 +317,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) own_buffer_ = false; } - // Setup buffer pointers. + // Set up buffer pointers. ASSERT(buffer_ != NULL); pc_ = buffer_; reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); @@ -349,7 +349,7 @@ void Assembler::GetCode(CodeDesc* desc) { CheckConstPool(true, false); ASSERT(num_pending_reloc_info_ == 0); - // Setup code descriptor. + // Set up code descriptor. desc->buffer = buffer_; desc->buffer_size = buffer_size_; desc->instr_size = pc_offset(); @@ -2446,7 +2446,7 @@ void Assembler::GrowBuffer() { } CHECK_GT(desc.buffer_size, 0); // no overflow - // Setup new buffer. + // Set up new buffer. desc.buffer = NewArray(desc.buffer_size); desc.instr_size = pc_offset(); diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc index c8a4b213fb..2a650a44a5 100644 --- a/src/arm/builtins-arm.cc +++ b/src/arm/builtins-arm.cc @@ -333,7 +333,7 @@ static void ArrayNativeCode(MacroAssembler* masm, r5, call_generic_code); __ IncrementCounter(counters->array_function_native(), 1, r3, r4); - // Setup return value, remove receiver from stack and return. + // Set up return value, remove receiver from stack and return. 
__ mov(r0, r2); __ add(sp, sp, Operand(kPointerSize)); __ Jump(lr); @@ -376,7 +376,7 @@ static void ArrayNativeCode(MacroAssembler* masm, true, call_generic_code); __ IncrementCounter(counters->array_function_native(), 1, r2, r4); - // Setup return value, remove receiver and argument from stack and return. + // Set up return value, remove receiver and argument from stack and return. __ mov(r0, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Jump(lr); @@ -951,10 +951,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // sp[4]: number of arguments (smi-tagged) __ ldr(r3, MemOperand(sp, 4 * kPointerSize)); - // Setup pointer to last argument. + // Set up pointer to last argument. __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); - // Setup number of arguments for function call below + // Set up number of arguments for function call below __ mov(r0, Operand(r3, LSR, kSmiTagSize)); // Copy arguments and receiver to the expression stack. diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc index 2e1291e309..ff1f29dacf 100644 --- a/src/arm/code-stubs-arm.cc +++ b/src/arm/code-stubs-arm.cc @@ -156,13 +156,13 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Load the function from the stack. __ ldr(r3, MemOperand(sp, 0)); - // Setup the object header. + // Set up the object header. __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex); __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); __ mov(r2, Operand(Smi::FromInt(length))); __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); - // Setup the fixed slots. + // Set up the fixed slots. __ mov(r1, Operand(Smi::FromInt(0))); __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX))); __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); @@ -207,7 +207,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { // Load the serialized scope info from the stack. 
__ ldr(r1, MemOperand(sp, 1 * kPointerSize)); - // Setup the object header. + // Set up the object header. __ LoadRoot(r2, Heap::kBlockContextMapRootIndex); __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); __ mov(r2, Operand(Smi::FromInt(length))); @@ -229,7 +229,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX)); __ bind(&after_sentinel); - // Setup the fixed slots. + // Set up the fixed slots. __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX)); __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX)); __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX)); @@ -3842,7 +3842,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame(save_doubles_); - // Setup argc and the builtin function in callee-saved registers. + // Set up argc and the builtin function in callee-saved registers. __ mov(r4, Operand(r0)); __ mov(r5, Operand(r1)); @@ -3919,7 +3919,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // r2: receiver // r3: argc - // Setup argv in r4. + // Set up argv in r4. int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; if (CpuFeatures::IsSupported(VFP3)) { offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; @@ -3942,7 +3942,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ ldr(r5, MemOperand(r5)); __ Push(r8, r7, r6, r5); - // Setup frame pointer for the frame to be pushed. + // Set up frame pointer for the frame to be pushed. __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); // If this is the outermost JS call, set js_entry_sp value. @@ -4402,7 +4402,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ str(r3, FieldMemOperand(r0, i)); } - // Setup the callee in-object property. + // Set up the callee in-object property. 
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); const int kCalleeOffset = JSObject::kHeaderSize + @@ -4415,7 +4415,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { Heap::kArgumentsLengthIndex * kPointerSize; __ str(r2, FieldMemOperand(r0, kLengthOffset)); - // Setup the elements pointer in the allocated arguments object. + // Set up the elements pointer in the allocated arguments object. // If we allocated a parameter map, r4 will point there, otherwise // it will point to the backing store. __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); @@ -4583,7 +4583,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Get the parameters pointer from the stack. __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); - // Setup the elements pointer in the allocated arguments object and + // Set up the elements pointer in the allocated arguments object and // initialize the header in the elements fixed array. __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict)); __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); @@ -4595,7 +4595,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Copy the fixed array slots. Label loop; - // Setup r4 to point to the first array slot. + // Set up r4 to point to the first array slot. __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ bind(&loop); // Pre-decrement r2 with kPointerSize on each iteration. @@ -5210,7 +5210,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // of the original receiver from the call site). __ bind(&non_function); __ str(r1, MemOperand(sp, argc_ * kPointerSize)); - __ mov(r0, Operand(argc_)); // Setup the number of arguments. + __ mov(r0, Operand(argc_)); // Set up the number of arguments. 
__ mov(r2, Operand(0, RelocInfo::NONE)); __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); __ SetCallKind(r5, CALL_AS_METHOD); diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc index 51cfeb6c87..7b08ed8c2f 100644 --- a/src/arm/cpu-arm.cc +++ b/src/arm/cpu-arm.cc @@ -41,7 +41,7 @@ namespace v8 { namespace internal { -void CPU::Setup() { +void CPU::SetUp() { CpuFeatures::Probe(); } diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc index 042cfe8d39..3689a9f6b6 100644 --- a/src/arm/deoptimizer-arm.cc +++ b/src/arm/deoptimizer-arm.cc @@ -319,7 +319,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { output_[0] = input_; output_[0]->SetPc(reinterpret_cast(from_)); } else { - // Setup the frame pointer and the context pointer. + // Set up the frame pointer and the context pointer. output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code())); output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code())); diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc index 7e9a889116..38999a8e36 100644 --- a/src/arm/full-codegen-arm.cc +++ b/src/arm/full-codegen-arm.cc @@ -1009,7 +1009,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset)); __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset)); - // Setup the four remaining stack slots. + // Set up the four remaining stack slots. __ push(r0); // Map. __ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset)); __ mov(r0, Operand(Smi::FromInt(0))); diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc index 8b96f9b292..b5ed517087 100644 --- a/src/arm/lithium-codegen-arm.cc +++ b/src/arm/lithium-codegen-arm.cc @@ -2906,7 +2906,7 @@ void LCodeGen::CallKnownFunction(Handle function, __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); __ Call(ip); - // Setup deoptimization. + // Set up deoptimization. 
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); // Restore context. diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h index e3cc7309da..00823e1638 100644 --- a/src/arm/lithium-codegen-arm.h +++ b/src/arm/lithium-codegen-arm.h @@ -412,7 +412,7 @@ class LDeferredCode: public ZoneObject { virtual void Generate() = 0; virtual LInstruction* instr() = 0; - void SetExit(Label *exit) { external_exit_ = exit; } + void SetExit(Label* exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } int instruction_index() const { return instruction_index_; } diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc index 8d4431cb73..fcb82994fb 100644 --- a/src/arm/macro-assembler-arm.cc +++ b/src/arm/macro-assembler-arm.cc @@ -817,12 +817,12 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { - // Setup the frame structure on the stack. + // Set up the frame structure on the stack. ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); Push(lr, fp); - mov(fp, Operand(sp)); // Setup new frame pointer. + mov(fp, Operand(sp)); // Set up new frame pointer. // Reserve room for saved entry sp and code object. sub(sp, sp, Operand(2 * kPointerSize)); if (emit_debug_code()) { diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h index a20ba79fba..4b55a3b064 100644 --- a/src/arm/macro-assembler-arm.h +++ b/src/arm/macro-assembler-arm.h @@ -508,7 +508,7 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // JavaScript invokes - // Setup call kind marking in ecx. The method takes ecx as an + // Set up call kind marking in ecx. 
The method takes ecx as an // explicit first parameter to make the code more readable at the // call sites. void SetCallKind(Register dst, CallKind kind); diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc index 0525529fde..1ae172c008 100644 --- a/src/arm/simulator-arm.cc +++ b/src/arm/simulator-arm.cc @@ -741,7 +741,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { isolate_->set_simulator_i_cache(i_cache_); } Initialize(isolate); - // Setup simulator support first. Some of this information is needed to + // Set up simulator support first. Some of this information is needed to // setup the architecture state. size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack stack_ = reinterpret_cast(malloc(stack_size)); @@ -750,7 +750,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { break_pc_ = NULL; break_instr_ = 0; - // Setup architecture state. + // Set up architecture state. // All registers are initialized to zero to start with. for (int i = 0; i < num_registers; i++) { registers_[i] = 0; @@ -3324,7 +3324,7 @@ void Simulator::Execute() { int32_t Simulator::Call(byte* entry, int argument_count, ...) { va_list parameters; va_start(parameters, argument_count); - // Setup arguments + // Set up arguments // First four arguments passed in registers. ASSERT(argument_count >= 4); @@ -3367,7 +3367,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) { int32_t r10_val = get_register(r10); int32_t r11_val = get_register(r11); - // Setup the callee-saved registers with a known value. To be able to check + // Set up the callee-saved registers with a known value. To be able to check // that they are preserved properly across JS execution. 
int32_t callee_saved_value = icount_; set_register(r4, callee_saved_value); diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc index 9ab4783442..c3a82ff934 100644 --- a/src/arm/stub-cache-arm.cc +++ b/src/arm/stub-cache-arm.cc @@ -1144,7 +1144,7 @@ void StubCompiler::GenerateLoadCallback(Handle object, __ EnterExitFrame(false, kApiStackSpace); // Create AccessorInfo instance on the stack above the exit frame with - // scratch2 (internal::Object **args_) as the data. + // scratch2 (internal::Object** args_) as the data. __ str(scratch2, MemOperand(sp, 1 * kPointerSize)); __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo& @@ -2405,7 +2405,7 @@ Handle CallStubCompiler::CompileCallGlobal( __ str(r3, MemOperand(sp, argc * kPointerSize)); } - // Setup the context (function already in r1). + // Set up the context (function already in r1). __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); // Jump to the cached code (tail call). diff --git a/src/assembler.h b/src/assembler.h index cec20fca07..8c705a84b4 100644 --- a/src/assembler.h +++ b/src/assembler.h @@ -371,7 +371,7 @@ class RelocInfo BASE_EMBEDDED { // routines expect to access these pointers indirectly. The following // location provides a place for these pointers to exist natually // when accessed via the Iterator. - Object *reconstructed_obj_ptr_; + Object* reconstructed_obj_ptr_; // External-reference pointers are also split across instruction-pairs // in mips, but are accessed via indirect pointers. This location // provides a place for that pointer to exist naturally. 
Its address diff --git a/src/atomicops_internals_x86_macosx.h b/src/atomicops_internals_x86_macosx.h index 2bac006bdc..bfb02b3851 100644 --- a/src/atomicops_internals_x86_macosx.h +++ b/src/atomicops_internals_x86_macosx.h @@ -35,7 +35,7 @@ namespace v8 { namespace internal { -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr, +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value; @@ -49,7 +49,7 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr, return prev_value; } -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr, +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { Atomic32 old_value; do { @@ -59,12 +59,12 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr, return old_value; } -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr, +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) { return OSAtomicAdd32(increment, const_cast(ptr)); } -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr, +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) { return OSAtomicAdd32Barrier(increment, const_cast(ptr)); } @@ -73,7 +73,7 @@ inline void MemoryBarrier() { OSMemoryBarrier(); } -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value; @@ -87,7 +87,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, return prev_value; } -inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr, +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { return Acquire_CompareAndSwap(ptr, old_value, new_value); @@ -97,12 +97,12 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } -inline void 
Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) { +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; MemoryBarrier(); } -inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) { +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { MemoryBarrier(); *ptr = value; } @@ -111,13 +111,13 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } -inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) { +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { Atomic32 value = *ptr; MemoryBarrier(); return value; } -inline Atomic32 Release_Load(volatile const Atomic32 *ptr) { +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { MemoryBarrier(); return *ptr; } @@ -126,7 +126,7 @@ inline Atomic32 Release_Load(volatile const Atomic32 *ptr) { // 64-bit implementation on 64-bit platform -inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev_value; @@ -140,7 +140,7 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, return prev_value; } -inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr, +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { Atomic64 old_value; do { @@ -150,17 +150,17 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr, return old_value; } -inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr, +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) { return OSAtomicAdd64(increment, const_cast(ptr)); } -inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr, +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) { return OSAtomicAdd64Barrier(increment, const_cast(ptr)); } -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr, +inline Atomic64 
Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev_value; @@ -174,7 +174,7 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr, return prev_value; } -inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr, +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { // The lib kern interface does not distinguish between @@ -186,12 +186,12 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) { +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; MemoryBarrier(); } -inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) { +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { MemoryBarrier(); *ptr = value; } @@ -200,13 +200,13 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; } -inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) { +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { Atomic64 value = *ptr; MemoryBarrier(); return value; } -inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { MemoryBarrier(); return *ptr; } @@ -264,7 +264,7 @@ inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, old_value, new_value); } -inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) { +inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) { NoBarrier_Store( reinterpret_cast(ptr), value); } @@ -279,7 +279,7 @@ inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { reinterpret_cast(ptr), value); } -inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) { +inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) { return NoBarrier_Load( reinterpret_cast(ptr)); } diff --git a/src/bootstrapper.cc 
b/src/bootstrapper.cc index fe59383b9c..752b220e5b 100644 --- a/src/bootstrapper.cc +++ b/src/bootstrapper.cc @@ -835,7 +835,7 @@ void Genesis::HookUpInnerGlobal(Handle inner_global) { factory()->LookupAsciiSymbol("global"), inner_global, attributes); - // Setup the reference from the global object to the builtins object. + // Set up the reference from the global object to the builtins object. JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global); TransferNamedProperties(inner_global_from_snapshot, inner_global); TransferIndexedProperties(inner_global_from_snapshot, inner_global); @@ -1360,7 +1360,7 @@ bool Genesis::CompileScriptCached(Vector name, if (cache != NULL) cache->Add(name, function_info); } - // Setup the function context. Conceptually, we should clone the + // Set up the function context. Conceptually, we should clone the // function before overwriting the context but since we're in a // single-threaded environment it is not strictly necessary. ASSERT(top_context->IsGlobalContext()); @@ -1447,7 +1447,7 @@ bool Genesis::InstallNatives() { builtins->set_global_context(*global_context()); builtins->set_global_receiver(*builtins); - // Setup the 'global' properties of the builtins object. The + // Set up the 'global' properties of the builtins object. The // 'global' property that refers to the global object is the only // way to get from code running in the builtins context to the // global object. @@ -1459,7 +1459,7 @@ bool Genesis::InstallNatives() { JSObject::SetLocalPropertyIgnoreAttributes( builtins, global_symbol, global_obj, attributes)); - // Setup the reference from the global object to the builtins object. + // Set up the reference from the global object to the builtins object. JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins); // Create a bridge function that has context in the global context. 
@@ -1683,7 +1683,7 @@ bool Genesis::InstallNatives() { InstallNativeFunctions(); // Store the map for the string prototype after the natives has been compiled - // and the String function has been setup. + // and the String function has been set up. Handle string_function(global_context()->string_function()); ASSERT(JSObject::cast( string_function->initial_map()->prototype())->HasFastProperties()); diff --git a/src/bootstrapper.h b/src/bootstrapper.h index abf61b9fe5..101c2e1b1f 100644 --- a/src/bootstrapper.h +++ b/src/bootstrapper.h @@ -88,7 +88,7 @@ class SourceCodeCache BASE_EMBEDDED { // context. class Bootstrapper { public: - // Requires: Heap::Setup has been called. + // Requires: Heap::SetUp has been called. void Initialize(bool create_heap_objects); void TearDown(); diff --git a/src/builtins.cc b/src/builtins.cc index 916799499f..69e5161ce5 100644 --- a/src/builtins.cc +++ b/src/builtins.cc @@ -1719,7 +1719,7 @@ void Builtins::InitBuiltinFunctionTable() { #undef DEF_FUNCTION_PTR_A } -void Builtins::Setup(bool create_heap_objects) { +void Builtins::SetUp(bool create_heap_objects) { ASSERT(!initialized_); Isolate* isolate = Isolate::Current(); Heap* heap = isolate->heap(); diff --git a/src/builtins.h b/src/builtins.h index 6a84f2ddd1..f20d97df5b 100644 --- a/src/builtins.h +++ b/src/builtins.h @@ -265,7 +265,7 @@ class Builtins { // Generate all builtin code objects. Should be called once during // isolate initialization. - void Setup(bool create_heap_objects); + void SetUp(bool create_heap_objects); void TearDown(); // Garbage collection support. diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc index d74c034ac5..2bd62ad390 100644 --- a/src/cpu-profiler.cc +++ b/src/cpu-profiler.cc @@ -493,7 +493,7 @@ void CpuProfiler::StartProcessorIfNotStarted() { NoBarrier_Store(&is_profiling_, true); processor_->Start(); // Enumerate stuff we already have in the heap. 
- if (isolate->heap()->HasBeenSetup()) { + if (isolate->heap()->HasBeenSetUp()) { if (!FLAG_prof_browser_mode) { bool saved_log_code_flag = FLAG_log_code; FLAG_log_code = true; @@ -562,7 +562,7 @@ void CpuProfiler::StopProcessor() { } -void CpuProfiler::Setup() { +void CpuProfiler::SetUp() { Isolate* isolate = Isolate::Current(); if (isolate->cpu_profiler() == NULL) { isolate->set_cpu_profiler(new CpuProfiler()); diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h index a71c0e0ab4..3f4fec5f45 100644 --- a/src/cpu-profiler.h +++ b/src/cpu-profiler.h @@ -204,7 +204,7 @@ namespace internal { // TODO(isolates): isolatify this class. class CpuProfiler { public: - static void Setup(); + static void SetUp(); static void TearDown(); static void StartProfiling(const char* title); @@ -230,11 +230,11 @@ class CpuProfiler { Code* code, String* name); static void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, - SharedFunctionInfo *shared, + SharedFunctionInfo* shared, String* name); static void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, - SharedFunctionInfo *shared, + SharedFunctionInfo* shared, String* source, int line); static void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, int args_count); diff --git a/src/cpu.h b/src/cpu.h index 2525484a01..247af71aa3 100644 --- a/src/cpu.h +++ b/src/cpu.h @@ -53,7 +53,7 @@ namespace internal { class CPU : public AllStatic { public: // Initializes the cpu architecture support. Called once at VM startup. - static void Setup(); + static void SetUp(); static bool SupportsCrankshaft(); diff --git a/src/d8-debug.cc b/src/d8-debug.cc index 8fbc876dab..1cbc0b39a0 100644 --- a/src/d8-debug.cc +++ b/src/d8-debug.cc @@ -169,7 +169,7 @@ void RemoteDebugger::Run() { bool ok; // Make sure that socket support is initialized. 
- ok = i::Socket::Setup(); + ok = i::Socket::SetUp(); if (!ok) { printf("Unable to initialize socket support %d\n", i::Socket::LastError()); return; diff --git a/src/debug-debugger.js b/src/debug-debugger.js index 8cbe0b362c..120a297007 100644 --- a/src/debug-debugger.js +++ b/src/debug-debugger.js @@ -1547,7 +1547,7 @@ DebugCommandProcessor.prototype.continueRequest_ = function(request, response) { } } - // Setup the VM for stepping. + // Set up the VM for stepping. this.exec_state_.prepareStep(action, count); } diff --git a/src/debug.cc b/src/debug.cc index babe78d5c1..7ac834647e 100644 --- a/src/debug.cc +++ b/src/debug.cc @@ -682,7 +682,7 @@ void ScriptCache::HandleWeakScript(v8::Persistent obj, void* data) { } -void Debug::Setup(bool create_heap_objects) { +void Debug::SetUp(bool create_heap_objects) { ThreadInit(); if (create_heap_objects) { // Get code to handle debug break on return. @@ -1213,7 +1213,7 @@ void Debug::ClearAllBreakPoints() { void Debug::FloodWithOneShot(Handle shared) { PrepareForBreakPoints(); - // Make sure the function has setup the debug info. + // Make sure the function has set up the debug info. if (!EnsureDebugInfo(shared)) { // Return if we failed to retrieve the debug info. return; @@ -3065,7 +3065,7 @@ bool Debugger::StartAgent(const char* name, int port, v8::Debug::DebugBreak(); } - if (Socket::Setup()) { + if (Socket::SetUp()) { if (agent_ == NULL) { agent_ = new DebuggerAgent(name, port); agent_->Start(); diff --git a/src/debug.h b/src/debug.h index 05a6b59d53..582aadae8d 100644 --- a/src/debug.h +++ b/src/debug.h @@ -224,7 +224,7 @@ class DebugInfoListNode { // DebugInfo. 
class Debug { public: - void Setup(bool create_heap_objects); + void SetUp(bool create_heap_objects); bool Load(); void Unload(); bool IsLoaded() { return !debug_context_.is_null(); } diff --git a/src/factory.cc b/src/factory.cc index 84c157311b..630d9456a0 100644 --- a/src/factory.cc +++ b/src/factory.cc @@ -705,7 +705,7 @@ Handle Factory::NewFunction(Handle name, // Allocate the function Handle function = NewFunction(name, the_hole_value()); - // Setup the code pointer in both the shared function info and in + // Set up the code pointer in both the shared function info and in // the function itself. function->shared()->set_code(*code); function->set_code(*code); @@ -736,7 +736,7 @@ Handle Factory::NewFunctionWithPrototype(Handle name, // Allocate the function. Handle function = NewFunction(name, prototype); - // Setup the code pointer in both the shared function info and in + // Set up the code pointer in both the shared function info and in // the function itself. function->shared()->set_code(*code); function->set_code(*code); diff --git a/src/frames.cc b/src/frames.cc index 2e363fd26a..3a46e0869f 100644 --- a/src/frames.cc +++ b/src/frames.cc @@ -485,7 +485,7 @@ Code* ExitFrame::unchecked_code() const { void ExitFrame::ComputeCallerState(State* state) const { - // Setup the caller state. + // Set up the caller state. state->sp = caller_sp(); state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset); state->pc_address diff --git a/src/full-codegen.cc b/src/full-codegen.cc index 04086d483d..5c7a23d54d 100644 --- a/src/full-codegen.cc +++ b/src/full-codegen.cc @@ -1178,7 +1178,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) { } ExitFinallyBlock(); // Return to the calling code. - // Setup try handler. + // Set up try handler. 
__ bind(&try_entry); __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER, stmt->index()); { TryFinally try_body(this, &finally_entry); @@ -1284,7 +1284,7 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit( bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) { - Expression *sub_expr; + Expression* sub_expr; Handle check; if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) { EmitLiteralCompareTypeof(expr, sub_expr, check); diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc index b386bed177..4192222f90 100644 --- a/src/gdb-jit.cc +++ b/src/gdb-jit.cc @@ -1556,23 +1556,23 @@ class DebugLineSection : public DebugSection { class UnwindInfoSection : public DebugSection { public: - explicit UnwindInfoSection(CodeDescription *desc); - virtual bool WriteBody(Writer *w); + explicit UnwindInfoSection(CodeDescription* desc); + virtual bool WriteBody(Writer* w); - int WriteCIE(Writer *w); - void WriteFDE(Writer *w, int); + int WriteCIE(Writer* w); + void WriteFDE(Writer* w, int); - void WriteFDEStateOnEntry(Writer *w); - void WriteFDEStateAfterRBPPush(Writer *w); - void WriteFDEStateAfterRBPSet(Writer *w); - void WriteFDEStateAfterRBPPop(Writer *w); + void WriteFDEStateOnEntry(Writer* w); + void WriteFDEStateAfterRBPPush(Writer* w); + void WriteFDEStateAfterRBPSet(Writer* w); + void WriteFDEStateAfterRBPPop(Writer* w); - void WriteLength(Writer *w, + void WriteLength(Writer* w, Writer::Slot* length_slot, int initial_position); private: - CodeDescription *desc_; + CodeDescription* desc_; // DWARF3 Specification, Table 7.23 enum CFIInstructions { @@ -1623,7 +1623,7 @@ class UnwindInfoSection : public DebugSection { }; -void UnwindInfoSection::WriteLength(Writer *w, +void UnwindInfoSection::WriteLength(Writer* w, Writer::Slot* length_slot, int initial_position) { uint32_t align = (w->position() - initial_position) % kPointerSize; @@ -1639,7 +1639,7 @@ void UnwindInfoSection::WriteLength(Writer *w, } 
-UnwindInfoSection::UnwindInfoSection(CodeDescription *desc) +UnwindInfoSection::UnwindInfoSection(CodeDescription* desc) #ifdef __ELF : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1), #else @@ -1648,7 +1648,7 @@ UnwindInfoSection::UnwindInfoSection(CodeDescription *desc) #endif desc_(desc) { } -int UnwindInfoSection::WriteCIE(Writer *w) { +int UnwindInfoSection::WriteCIE(Writer* w) { Writer::Slot cie_length_slot = w->CreateSlotHere(); uint32_t cie_position = w->position(); @@ -1668,7 +1668,7 @@ int UnwindInfoSection::WriteCIE(Writer *w) { } -void UnwindInfoSection::WriteFDE(Writer *w, int cie_position) { +void UnwindInfoSection::WriteFDE(Writer* w, int cie_position) { // The only FDE for this function. The CFA is the current RBP. Writer::Slot fde_length_slot = w->CreateSlotHere(); int fde_position = w->position(); @@ -1686,7 +1686,7 @@ void UnwindInfoSection::WriteFDE(Writer *w, int cie_position) { } -void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) { +void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) { // The first state, just after the control has been transferred to the the // function. @@ -1713,7 +1713,7 @@ void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) { } -void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) { +void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer* w) { // The second state, just after RBP has been pushed. // RBP / CFA for this function is now the current RSP, so just set the @@ -1734,7 +1734,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) { } -void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) { +void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer* w) { // The third state, after the RBP has been set. // The CFA can now directly be set to RBP. @@ -1749,7 +1749,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) { } -void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) { +void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) { // The fourth (final) state. 
The RBP has been popped (just before issuing a // return). @@ -1769,7 +1769,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) { } -bool UnwindInfoSection::WriteBody(Writer *w) { +bool UnwindInfoSection::WriteBody(Writer* w) { uint32_t cie_position = WriteCIE(w); WriteFDE(w, cie_position); return true; @@ -1810,8 +1810,8 @@ extern "C" { struct JITDescriptor { uint32_t version_; uint32_t action_flag_; - JITCodeEntry *relevant_entry_; - JITCodeEntry *first_entry_; + JITCodeEntry* relevant_entry_; + JITCodeEntry* first_entry_; }; // GDB will place breakpoint into this function. @@ -1998,7 +1998,7 @@ void GDBJITInterface::AddCode(Handle name, } } -static void AddUnwindInfo(CodeDescription *desc) { +static void AddUnwindInfo(CodeDescription* desc) { #ifdef V8_TARGET_ARCH_X64 if (desc->tag() == GDBJITInterface::FUNCTION) { // To avoid propagating unwinding information through diff --git a/src/heap-inl.h b/src/heap-inl.h index ef6e58ed0b..4d98fbad10 100644 --- a/src/heap-inl.h +++ b/src/heap-inl.h @@ -463,7 +463,7 @@ MaybeObject* Heap::PrepareForCompare(String* str) { int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) { - ASSERT(HasBeenSetup()); + ASSERT(HasBeenSetUp()); int amount = amount_of_external_allocated_memory_ + change_in_bytes; if (change_in_bytes >= 0) { // Avoid overflow. diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc index 46c63c27c8..8be6f27685 100644 --- a/src/heap-profiler.cc +++ b/src/heap-profiler.cc @@ -51,7 +51,7 @@ void HeapProfiler::ResetSnapshots() { } -void HeapProfiler::Setup() { +void HeapProfiler::SetUp() { Isolate* isolate = Isolate::Current(); if (isolate->heap_profiler() == NULL) { isolate->set_heap_profiler(new HeapProfiler()); diff --git a/src/heap-profiler.h b/src/heap-profiler.h index b1bc91c307..ef5c4f4b4a 100644 --- a/src/heap-profiler.h +++ b/src/heap-profiler.h @@ -48,7 +48,7 @@ class HeapSnapshotsCollection; // to generate .hp files for use by the GHC/Valgrind tool hp2ps. 
class HeapProfiler { public: - static void Setup(); + static void SetUp(); static void TearDown(); static HeapSnapshot* TakeSnapshot(const char* name, diff --git a/src/heap.cc b/src/heap.cc index 2abb48d9fc..bda2b7065d 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -176,7 +176,7 @@ Heap::Heap() intptr_t Heap::Capacity() { - if (!HasBeenSetup()) return 0; + if (!HasBeenSetUp()) return 0; return new_space_.Capacity() + old_pointer_space_->Capacity() + @@ -188,7 +188,7 @@ intptr_t Heap::Capacity() { intptr_t Heap::CommittedMemory() { - if (!HasBeenSetup()) return 0; + if (!HasBeenSetUp()) return 0; return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() + @@ -200,14 +200,14 @@ intptr_t Heap::CommittedMemory() { } intptr_t Heap::CommittedMemoryExecutable() { - if (!HasBeenSetup()) return 0; + if (!HasBeenSetUp()) return 0; return isolate()->memory_allocator()->SizeExecutable(); } intptr_t Heap::Available() { - if (!HasBeenSetup()) return 0; + if (!HasBeenSetUp()) return 0; return new_space_.Available() + old_pointer_space_->Available() + @@ -218,7 +218,7 @@ intptr_t Heap::Available() { } -bool Heap::HasBeenSetup() { +bool Heap::HasBeenSetUp() { return old_pointer_space_ != NULL && old_data_space_ != NULL && code_space_ != NULL && @@ -3807,7 +3807,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) { } Map* new_map = Map::cast(obj); - // Setup the global object as a normalized object. + // Set up the global object as a normalized object. 
global->set_map(new_map); global->map()->clear_instance_descriptors(); global->set_properties(dictionary); @@ -4740,7 +4740,7 @@ bool Heap::IdleGlobalGC() { #ifdef DEBUG void Heap::Print() { - if (!HasBeenSetup()) return; + if (!HasBeenSetUp()) return; isolate()->PrintStack(); AllSpaces spaces; for (Space* space = spaces.next(); space != NULL; space = spaces.next()) @@ -4805,7 +4805,7 @@ bool Heap::Contains(HeapObject* value) { bool Heap::Contains(Address addr) { if (OS::IsOutsideAllocatedSpace(addr)) return false; - return HasBeenSetup() && + return HasBeenSetUp() && (new_space_.ToSpaceContains(addr) || old_pointer_space_->Contains(addr) || old_data_space_->Contains(addr) || @@ -4823,7 +4823,7 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) { bool Heap::InSpace(Address addr, AllocationSpace space) { if (OS::IsOutsideAllocatedSpace(addr)) return false; - if (!HasBeenSetup()) return false; + if (!HasBeenSetUp()) return false; switch (space) { case NEW_SPACE: @@ -4848,7 +4848,7 @@ bool Heap::InSpace(Address addr, AllocationSpace space) { #ifdef DEBUG void Heap::Verify() { - ASSERT(HasBeenSetup()); + ASSERT(HasBeenSetUp()); store_buffer()->Verify(); @@ -5275,7 +5275,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) { bool Heap::ConfigureHeap(int max_semispace_size, intptr_t max_old_gen_size, intptr_t max_executable_size) { - if (HasBeenSetup()) return false; + if (HasBeenSetUp()) return false; if (max_semispace_size > 0) { if (max_semispace_size < Page::kPageSize) { @@ -5564,7 +5564,7 @@ class HeapDebugUtils { #endif -bool Heap::Setup(bool create_heap_objects) { +bool Heap::SetUp(bool create_heap_objects) { #ifdef DEBUG allocation_timeout_ = FLAG_gc_interval; debug_utils_ = new HeapDebugUtils(this); @@ -5594,12 +5594,12 @@ bool Heap::Setup(bool create_heap_objects) { MarkMapPointersAsEncoded(false); - // Setup memory allocator. 
- if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize())) + // Set up memory allocator. + if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize())) return false; - // Setup new space. - if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) { + // Set up new space. + if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) { return false; } @@ -5610,7 +5610,7 @@ bool Heap::Setup(bool create_heap_objects) { OLD_POINTER_SPACE, NOT_EXECUTABLE); if (old_pointer_space_ == NULL) return false; - if (!old_pointer_space_->Setup()) return false; + if (!old_pointer_space_->SetUp()) return false; // Initialize old data space. old_data_space_ = @@ -5619,14 +5619,14 @@ bool Heap::Setup(bool create_heap_objects) { OLD_DATA_SPACE, NOT_EXECUTABLE); if (old_data_space_ == NULL) return false; - if (!old_data_space_->Setup()) return false; + if (!old_data_space_->SetUp()) return false; // Initialize the code space, set its maximum capacity to the old // generation size. It needs executable memory. // On 64-bit platform(s), we put all code objects in a 2 GB range of // virtual address space, so that they can call each other with near calls. if (code_range_size_ > 0) { - if (!isolate_->code_range()->Setup(code_range_size_)) { + if (!isolate_->code_range()->SetUp(code_range_size_)) { return false; } } @@ -5634,7 +5634,7 @@ bool Heap::Setup(bool create_heap_objects) { code_space_ = new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE); if (code_space_ == NULL) return false; - if (!code_space_->Setup()) return false; + if (!code_space_->SetUp()) return false; // Initialize map space. map_space_ = new MapSpace(this, @@ -5642,21 +5642,21 @@ bool Heap::Setup(bool create_heap_objects) { FLAG_max_map_space_pages, MAP_SPACE); if (map_space_ == NULL) return false; - if (!map_space_->Setup()) return false; + if (!map_space_->SetUp()) return false; // Initialize global property cell space. 
cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE); if (cell_space_ == NULL) return false; - if (!cell_space_->Setup()) return false; + if (!cell_space_->SetUp()) return false; // The large object code space may contain code or data. We set the memory // to be non-executable here for safety, but this means we need to enable it // explicitly when allocating large code objects. lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE); if (lo_space_ == NULL) return false; - if (!lo_space_->Setup()) return false; + if (!lo_space_->SetUp()) return false; - // Setup the seed that is used to randomize the string hash function. + // Set up the seed that is used to randomize the string hash function. ASSERT(hash_seed() == 0); if (FLAG_randomize_hashes) { if (FLAG_hash_seed == 0) { @@ -5681,7 +5681,7 @@ bool Heap::Setup(bool create_heap_objects) { LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); LOG(isolate_, IntPtrTEvent("heap-available", Available())); - store_buffer()->Setup(); + store_buffer()->SetUp(); return true; } diff --git a/src/heap.h b/src/heap.h index a04c6dfa6b..cb7f12acb9 100644 --- a/src/heap.h +++ b/src/heap.h @@ -434,7 +434,7 @@ class ExternalStringTable { class Heap { public: // Configure heap size before setup. Return false if the heap has been - // setup already. + // set up already. bool ConfigureHeap(int max_semispace_size, intptr_t max_old_gen_size, intptr_t max_executable_size); @@ -443,7 +443,7 @@ class Heap { // Initializes the global object heap. If create_heap_objects is true, // also creates the basic non-mutable objects. // Returns whether it succeeded. - bool Setup(bool create_heap_objects); + bool SetUp(bool create_heap_objects); // Destroys all memory allocated by the heap. void TearDown(); @@ -453,8 +453,8 @@ class Heap { // jslimit_/real_jslimit_ variable in the StackGuard. void SetStackLimits(); - // Returns whether Setup has been called. 
- bool HasBeenSetup(); + // Returns whether SetUp has been called. + bool HasBeenSetUp(); // Returns the maximum amount of memory reserved for the heap. For // the young generation, we reserve 4 times the amount needed for a @@ -1914,7 +1914,7 @@ class Heap { PromotionQueue promotion_queue_; // Flag is set when the heap has been configured. The heap can be repeatedly - // configured through the API until it is setup. + // configured through the API until it is set up. bool configured_; ExternalStringTable external_string_table_; diff --git a/src/hydrogen.cc b/src/hydrogen.cc index 6062c3662d..47dcc80536 100644 --- a/src/hydrogen.cc +++ b/src/hydrogen.cc @@ -2305,7 +2305,7 @@ HGraph* HGraphBuilder::CreateGraph() { Bailout("function with illegal redeclaration"); return NULL; } - SetupScope(scope); + SetUpScope(scope); // Add an edge to the body entry. This is warty: the graph's start // environment will be used by the Lithium translation as the initial @@ -2469,7 +2469,7 @@ HInstruction* HGraphBuilder::PreProcessCall(HCall* call) { } -void HGraphBuilder::SetupScope(Scope* scope) { +void HGraphBuilder::SetUpScope(Scope* scope) { HConstant* undefined_constant = new(zone()) HConstant( isolate()->factory()->undefined_value(), Representation::Tagged()); AddInstruction(undefined_constant); diff --git a/src/hydrogen.h b/src/hydrogen.h index ded1356d18..9705859066 100644 --- a/src/hydrogen.h +++ b/src/hydrogen.h @@ -870,7 +870,7 @@ class HGraphBuilder: public AstVisitor { Representation rep); static Representation ToRepresentation(TypeInfo info); - void SetupScope(Scope* scope); + void SetUpScope(Scope* scope); virtual void VisitStatements(ZoneList* statements); #define DECLARE_VISIT(type) virtual void Visit##type(type* node); diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc index 7a5a191644..bb050b63f9 100644 --- a/src/ia32/assembler-ia32.cc +++ b/src/ia32/assembler-ia32.cc @@ -350,7 +350,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int 
buffer_size) } #endif - // Setup buffer pointers. + // Set up buffer pointers. ASSERT(buffer_ != NULL); pc_ = buffer_; reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); @@ -377,7 +377,7 @@ void Assembler::GetCode(CodeDesc* desc) { // Finalize code (at this point overflow() may be true, but the gap ensures // that we are still not overlapping instructions and relocation info). ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap. - // Setup code descriptor. + // Set up code descriptor. desc->buffer = buffer_; desc->buffer_size = buffer_size_; desc->instr_size = pc_offset(); @@ -2457,7 +2457,7 @@ void Assembler::GrowBuffer() { V8::FatalProcessOutOfMemory("Assembler::GrowBuffer"); } - // Setup new buffer. + // Set up new buffer. desc.buffer = NewArray(desc.buffer_size); desc.instr_size = pc_offset(); desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos()); diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc index 55f66f1df8..4666311af6 100644 --- a/src/ia32/builtins-ia32.cc +++ b/src/ia32/builtins-ia32.cc @@ -333,7 +333,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ push(ebx); __ push(ebx); - // Setup pointer to last argument. + // Set up pointer to last argument. __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset)); // Copy arguments and receiver to the expression stack. diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc index c51e2fecbb..b654390c2b 100644 --- a/src/ia32/code-stubs-ia32.cc +++ b/src/ia32/code-stubs-ia32.cc @@ -128,14 +128,14 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Get the function from the stack. __ mov(ecx, Operand(esp, 1 * kPointerSize)); - // Setup the object header. + // Set up the object header. 
Factory* factory = masm->isolate()->factory(); __ mov(FieldOperand(eax, HeapObject::kMapOffset), factory->function_context_map()); __ mov(FieldOperand(eax, Context::kLengthOffset), Immediate(Smi::FromInt(length))); - // Setup the fixed slots. + // Set up the fixed slots. __ Set(ebx, Immediate(0)); // Set to NULL. __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx); __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi); @@ -179,7 +179,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { // Get the serialized scope info from the stack. __ mov(ebx, Operand(esp, 2 * kPointerSize)); - // Setup the object header. + // Set up the object header. Factory* factory = masm->isolate()->factory(); __ mov(FieldOperand(eax, HeapObject::kMapOffset), factory->block_context_map()); @@ -202,7 +202,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX)); __ bind(&after_sentinel); - // Setup the fixed slots. + // Set up the fixed slots. __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx); __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi); __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx); @@ -3379,7 +3379,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ mov(FieldOperand(eax, i), edx); } - // Setup the callee in-object property. + // Set up the callee in-object property. STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); __ mov(edx, Operand(esp, 4 * kPointerSize)); __ mov(FieldOperand(eax, JSObject::kHeaderSize + @@ -3392,7 +3392,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { Heap::kArgumentsLengthIndex * kPointerSize), ecx); - // Setup the elements pointer in the allocated arguments object. + // Set up the elements pointer in the allocated arguments object. // If we allocated a parameter map, edi will point there, otherwise to the // backing store. 
__ lea(edi, Operand(eax, Heap::kArgumentsObjectSize)); @@ -3571,7 +3571,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Get the parameters pointer from the stack. __ mov(edx, Operand(esp, 2 * kPointerSize)); - // Setup the elements pointer in the allocated arguments object and + // Set up the elements pointer in the allocated arguments object and // initialize the header in the elements fixed array. __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict)); __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi); @@ -4950,7 +4950,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { Label invoke, handler_entry, exit; Label not_outermost_js, not_outermost_js_2; - // Setup frame. + // Set up frame. __ push(ebp); __ mov(ebp, esp); diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc index 57e66df9e3..9eabb2a969 100644 --- a/src/ia32/cpu-ia32.cc +++ b/src/ia32/cpu-ia32.cc @@ -41,7 +41,7 @@ namespace v8 { namespace internal { -void CPU::Setup() { +void CPU::SetUp() { CpuFeatures::Probe(); } diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc index 98c240079e..292315d10c 100644 --- a/src/ia32/deoptimizer-ia32.cc +++ b/src/ia32/deoptimizer-ia32.cc @@ -406,7 +406,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { output_[0] = input_; output_[0]->SetPc(reinterpret_cast(from_)); } else { - // Setup the frame pointer and the context pointer. + // Set up the frame pointer and the context pointer. // All OSR stack frames are dynamically aligned to an 8-byte boundary. 
int frame_pointer = input_->GetRegister(ebp.code()); if ((frame_pointer & 0x4) == 0) { diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc index 6e2391110b..4f3274436a 100644 --- a/src/ia32/full-codegen-ia32.cc +++ b/src/ia32/full-codegen-ia32.cc @@ -967,7 +967,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset)); __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset)); - // Setup the four remaining stack slots. + // Set up the four remaining stack slots. __ push(eax); // Map. __ push(edx); // Enumeration cache. __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset)); diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc index 98ef28d3ac..8d412fdb53 100644 --- a/src/ia32/lithium-codegen-ia32.cc +++ b/src/ia32/lithium-codegen-ia32.cc @@ -4189,7 +4189,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { DeoptimizeIf(not_equal, instr->environment()); } - // Setup the parameters to the stub/runtime call. + // Set up the parameters to the stub/runtime call. __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ push(FieldOperand(eax, JSFunction::kLiteralsOffset)); __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); @@ -4297,7 +4297,7 @@ void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) { Handle constant_properties = instr->hydrogen()->constant_properties(); - // Setup the parameters to the stub/runtime call. + // Set up the parameters to the stub/runtime call. 
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ push(FieldOperand(eax, JSFunction::kLiteralsOffset)); __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h index 4b226e14ae..d86d48cd8c 100644 --- a/src/ia32/lithium-codegen-ia32.h +++ b/src/ia32/lithium-codegen-ia32.h @@ -385,7 +385,7 @@ class LDeferredCode: public ZoneObject { virtual void Generate() = 0; virtual LInstruction* instr() = 0; - void SetExit(Label *exit) { external_exit_ = exit; } + void SetExit(Label* exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } int instruction_index() const { return instruction_index_; } diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h index 3588859baa..c969a6f71b 100644 --- a/src/ia32/macro-assembler-ia32.h +++ b/src/ia32/macro-assembler-ia32.h @@ -251,7 +251,7 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // JavaScript invokes - // Setup call kind marking in ecx. The method takes ecx as an + // Set up call kind marking in ecx. The method takes ecx as an // explicit first parameter to make the code more readable at the // call sites. void SetCallKind(Register dst, CallKind kind); diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc index ca8883640f..0da51c857e 100644 --- a/src/ia32/stub-cache-ia32.cc +++ b/src/ia32/stub-cache-ia32.cc @@ -2330,7 +2330,7 @@ Handle CallStubCompiler::CompileCallGlobal( __ mov(Operand(esp, (argc + 1) * kPointerSize), edx); } - // Setup the context (function already in edi). + // Set up the context (function already in edi). __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); // Jump to the cached code (tail call). 
diff --git a/src/inspector.cc b/src/inspector.cc index 8fb80f1a22..833d338439 100644 --- a/src/inspector.cc +++ b/src/inspector.cc @@ -38,11 +38,11 @@ namespace internal { //============================================================================ // The Inspector. -void Inspector::DumpObjectType(FILE* out, Object *obj, bool print_more) { +void Inspector::DumpObjectType(FILE* out, Object* obj, bool print_more) { // Dump the object pointer. OS::FPrint(out, "%p:", reinterpret_cast(obj)); if (obj->IsHeapObject()) { - HeapObject *hobj = HeapObject::cast(obj); + HeapObject* hobj = HeapObject::cast(obj); OS::FPrint(out, " size %d :", hobj->Size()); } diff --git a/src/inspector.h b/src/inspector.h index e328bcdfa5..6962e21f4f 100644 --- a/src/inspector.h +++ b/src/inspector.h @@ -41,14 +41,14 @@ namespace internal { class Inspector { public: - static void DumpObjectType(FILE* out, Object *obj, bool print_more); - static void DumpObjectType(FILE* out, Object *obj) { + static void DumpObjectType(FILE* out, Object* obj, bool print_more); + static void DumpObjectType(FILE* out, Object* obj) { DumpObjectType(out, obj, false); } - static void DumpObjectType(Object *obj, bool print_more) { + static void DumpObjectType(Object* obj, bool print_more) { DumpObjectType(stdout, obj, print_more); } - static void DumpObjectType(Object *obj) { + static void DumpObjectType(Object* obj) { DumpObjectType(stdout, obj, false); } }; diff --git a/src/isolate.cc b/src/isolate.cc index ec1e2fe2b6..35e9e284f9 100644 --- a/src/isolate.cc +++ b/src/isolate.cc @@ -1751,10 +1751,10 @@ bool Isolate::Init(Deserializer* des) { regexp_stack_->isolate_ = this; // Enable logging before setting up the heap - logger_->Setup(); + logger_->SetUp(); - CpuProfiler::Setup(); - HeapProfiler::Setup(); + CpuProfiler::SetUp(); + HeapProfiler::SetUp(); // Initialize other runtime facilities #if defined(USE_SIMULATOR) @@ -1771,10 +1771,10 @@ bool Isolate::Init(Deserializer* des) { stack_guard_.InitThread(lock); } - 
// Setup the object heap. + // Set up the object heap. const bool create_heap_objects = (des == NULL); - ASSERT(!heap_.HasBeenSetup()); - if (!heap_.Setup(create_heap_objects)) { + ASSERT(!heap_.HasBeenSetUp()); + if (!heap_.SetUp(create_heap_objects)) { V8::SetFatalError(); return false; } @@ -1782,7 +1782,7 @@ InitializeThreadLocal(); bootstrapper_->Initialize(create_heap_objects); - builtins_.Setup(create_heap_objects); + builtins_.SetUp(create_heap_objects); // Only preallocate on the first initialization. if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) { @@ -1801,7 +1801,7 @@ } #ifdef ENABLE_DEBUGGER_SUPPORT - debug_->Setup(create_heap_objects); + debug_->SetUp(create_heap_objects); #endif stub_cache_->Initialize(create_heap_objects); @@ -1822,7 +1822,7 @@ deoptimizer_data_ = new DeoptimizerData; runtime_profiler_ = new RuntimeProfiler(this); - runtime_profiler_->Setup(); + runtime_profiler_->SetUp(); // If we are deserializing, log non-function code objects and compiled // functions found in the snapshot. 
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc index c4d8b1e5b7..1601dcf14e 100644 --- a/src/lithium-allocator.cc +++ b/src/lithium-allocator.cc @@ -49,13 +49,13 @@ namespace internal { #define DEFINE_OPERAND_CACHE(name, type) \ name name::cache[name::kNumCachedOperands]; \ - void name::SetupCache() { \ + void name::SetUpCache() { \ for (int i = 0; i < kNumCachedOperands; i++) { \ cache[i].ConvertTo(type, i); \ } \ } \ static bool name##_initialize() { \ - name::SetupCache(); \ + name::SetUpCache(); \ return true; \ } \ static bool name##_cache_initialized = name##_initialize(); diff --git a/src/lithium.h b/src/lithium.h index 48a533c57e..3253520090 100644 --- a/src/lithium.h +++ b/src/lithium.h @@ -265,7 +265,7 @@ class LConstantOperand: public LOperand { return reinterpret_cast(op); } - static void SetupCache(); + static void SetUpCache(); private: static const int kNumCachedOperands = 128; @@ -300,7 +300,7 @@ class LStackSlot: public LOperand { return reinterpret_cast(op); } - static void SetupCache(); + static void SetUpCache(); private: static const int kNumCachedOperands = 128; @@ -324,7 +324,7 @@ class LDoubleStackSlot: public LOperand { return reinterpret_cast(op); } - static void SetupCache(); + static void SetUpCache(); private: static const int kNumCachedOperands = 128; @@ -348,7 +348,7 @@ class LRegister: public LOperand { return reinterpret_cast(op); } - static void SetupCache(); + static void SetUpCache(); private: static const int kNumCachedOperands = 16; @@ -372,7 +372,7 @@ class LDoubleRegister: public LOperand { return reinterpret_cast(op); } - static void SetupCache(); + static void SetUpCache(); private: static const int kNumCachedOperands = 16; diff --git a/src/liveedit.cc b/src/liveedit.cc index a03670f2e0..5ff8ff9d3b 100644 --- a/src/liveedit.cc +++ b/src/liveedit.cc @@ -1228,7 +1228,7 @@ class RelocInfoBuffer { V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer"); } - // Setup new buffer. + // Set up new buffer. 
byte* new_buffer = NewArray(new_buffer_size); // Copy the data. diff --git a/src/liveobjectlist-inl.h b/src/liveobjectlist-inl.h index f742de3a03..2bc2296e29 100644 --- a/src/liveobjectlist-inl.h +++ b/src/liveobjectlist-inl.h @@ -59,7 +59,7 @@ void LiveObjectList::IterateElements(ObjectVisitor* v) { } -void LiveObjectList::ProcessNonLive(HeapObject *obj) { +void LiveObjectList::ProcessNonLive(HeapObject* obj) { // Only do work if we have at least one list to process. if (last()) DoProcessNonLive(obj); } @@ -93,7 +93,7 @@ LiveObjectList* LiveObjectList::FindLolForId(int id, template inline LiveObjectList::Element* LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key) { - LiveObjectList *lol = last(); + LiveObjectList* lol = last(); while (lol != NULL) { Element* elements = lol->elements_; for (int i = 0; i < lol->obj_count_; i++) { diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc index 408e2a3160..436204e3fa 100644 --- a/src/liveobjectlist.cc +++ b/src/liveobjectlist.cc @@ -165,7 +165,7 @@ const char* GetObjectTypeDesc(HeapObject* heap_obj) { } -bool IsOfType(LiveObjectType type, HeapObject *obj) { +bool IsOfType(LiveObjectType type, HeapObject* obj) { // Note: there are types that are more general (e.g. JSObject) that would // have passed the Is##type_() test for more specialized types (e.g. // JSFunction). If we find a more specialized match but we're looking for @@ -211,7 +211,7 @@ static AllocationSpace FindSpaceFor(String* space_str) { } -static bool InSpace(AllocationSpace space, HeapObject *heap_obj) { +static bool InSpace(AllocationSpace space, HeapObject* heap_obj) { Heap* heap = ISOLATE->heap(); if (space != LO_SPACE) { return heap->InSpace(heap_obj, space); @@ -498,7 +498,7 @@ static void GenerateObjectDesc(HeapObject* obj, length); } else if (obj->IsString()) { - String *str = String::cast(obj); + String* str = String::cast(obj); // Only grab up to 160 chars in case they are double byte. 
// We'll only dump 80 of them after we compact them. const int kMaxCharToDump = 80; @@ -842,7 +842,7 @@ class LiveObjectSummary { bool found_root_; bool found_weak_root_; - LolFilter *filter_; + LolFilter* filter_; }; @@ -857,8 +857,8 @@ class SummaryWriter { // A summary writer for filling in a summary of lol lists and diffs. class LolSummaryWriter: public SummaryWriter { public: - LolSummaryWriter(LiveObjectList *older_lol, - LiveObjectList *newer_lol) + LolSummaryWriter(LiveObjectList* older_lol, + LiveObjectList* newer_lol) : older_(older_lol), newer_(newer_lol) { } @@ -944,7 +944,7 @@ LiveObjectList::~LiveObjectList() { int LiveObjectList::GetTotalObjCountAndSize(int* size_p) { int size = 0; int count = 0; - LiveObjectList *lol = this; + LiveObjectList* lol = this; do { // Only compute total size if requested i.e. when size_p is not null. if (size_p != NULL) { @@ -1183,7 +1183,7 @@ MaybeObject* LiveObjectList::Capture() { // only time we'll actually delete the lol is when we Reset() or if the lol is // invisible, and its element count reaches 0. bool LiveObjectList::Delete(int id) { - LiveObjectList *lol = last(); + LiveObjectList* lol = last(); while (lol != NULL) { if (lol->id() == id) { break; @@ -1246,8 +1246,8 @@ MaybeObject* LiveObjectList::Dump(int older_id, newer_id = temp; } - LiveObjectList *newer_lol = FindLolForId(newer_id, last()); - LiveObjectList *older_lol = FindLolForId(older_id, newer_lol); + LiveObjectList* newer_lol = FindLolForId(newer_id, last()); + LiveObjectList* older_lol = FindLolForId(older_id, newer_lol); // If the id is defined, and we can't find a LOL for it, then we have an // invalid id. 
@@ -1365,8 +1365,8 @@ MaybeObject* LiveObjectList::Summarize(int older_id, newer_id = temp; } - LiveObjectList *newer_lol = FindLolForId(newer_id, last()); - LiveObjectList *older_lol = FindLolForId(older_id, newer_lol); + LiveObjectList* newer_lol = FindLolForId(newer_id, last()); + LiveObjectList* older_lol = FindLolForId(older_id, newer_lol); // If the id is defined, and we can't find a LOL for it, then we have an // invalid id. @@ -1626,7 +1626,7 @@ MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) { // Deletes all captured lols. void LiveObjectList::Reset() { - LiveObjectList *lol = last(); + LiveObjectList* lol = last(); // Just delete the last. Each lol will delete it's prev automatically. delete lol; @@ -1715,8 +1715,8 @@ class LolVisitor: public ObjectVisitor { inline bool AddRootRetainerIfFound(const LolVisitor& visitor, LolFilter* filter, - LiveObjectSummary *summary, - void (*SetRootFound)(LiveObjectSummary *s), + LiveObjectSummary* summary, + void (*SetRootFound)(LiveObjectSummary* s), int start, int dump_limit, int* total_count, @@ -1762,12 +1762,12 @@ inline bool AddRootRetainerIfFound(const LolVisitor& visitor, } -inline void SetFoundRoot(LiveObjectSummary *summary) { +inline void SetFoundRoot(LiveObjectSummary* summary) { summary->set_found_root(); } -inline void SetFoundWeakRoot(LiveObjectSummary *summary) { +inline void SetFoundWeakRoot(LiveObjectSummary* summary) { summary->set_found_weak_root(); } @@ -1779,7 +1779,7 @@ int LiveObjectList::GetRetainers(Handle target, int dump_limit, int* total_count, LolFilter* filter, - LiveObjectSummary *summary, + LiveObjectSummary* summary, JSFunction* arguments_function, Handle error) { HandleScope scope; @@ -2267,7 +2267,7 @@ Object* LiveObjectList::GetPath(int obj_id1, } -void LiveObjectList::DoProcessNonLive(HeapObject *obj) { +void LiveObjectList::DoProcessNonLive(HeapObject* obj) { // We should only be called if we have at least one lol to search. 
ASSERT(last() != NULL); Element* element = last()->Find(obj); @@ -2284,7 +2284,7 @@ void LiveObjectList::IterateElementsPrivate(ObjectVisitor* v) { int count = lol->obj_count_; for (int i = 0; i < count; i++) { HeapObject** p = &elements[i].obj_; - v->VisitPointer(reinterpret_cast(p)); + v->VisitPointer(reinterpret_cast(p)); } lol = lol->prev_; } @@ -2389,11 +2389,11 @@ void LiveObjectList::GCEpiloguePrivate() { PurgeDuplicates(); // After the GC, sweep away all free'd Elements and compact. - LiveObjectList *prev = NULL; - LiveObjectList *next = NULL; + LiveObjectList* prev = NULL; + LiveObjectList* next = NULL; // Iterating from the youngest lol to the oldest lol. - for (LiveObjectList *lol = last(); lol; lol = prev) { + for (LiveObjectList* lol = last(); lol; lol = prev) { Element* elements = lol->elements_; prev = lol->prev(); // Save the prev. @@ -2446,7 +2446,7 @@ void LiveObjectList::GCEpiloguePrivate() { const int kMaxUnusedSpace = 64; if (diff > kMaxUnusedSpace) { // Threshold for shrinking. // Shrink the list. 
- Element *new_elements = NewArray(new_count); + Element* new_elements = NewArray(new_count); memcpy(new_elements, elements, new_count * sizeof(Element)); DeleteArray(elements); diff --git a/src/liveobjectlist.h b/src/liveobjectlist.h index 65470d7ad9..1aa9196051 100644 --- a/src/liveobjectlist.h +++ b/src/liveobjectlist.h @@ -77,7 +77,7 @@ class LiveObjectList { inline static void GCEpilogue(); inline static void GCPrologue(); inline static void IterateElements(ObjectVisitor* v); - inline static void ProcessNonLive(HeapObject *obj); + inline static void ProcessNonLive(HeapObject* obj); inline static void UpdateReferencesForScavengeGC(); // Note: LOLs can be listed by calling Dump(0, ), and 2 LOLs can be @@ -125,7 +125,7 @@ class LiveObjectList { static void GCEpiloguePrivate(); static void IterateElementsPrivate(ObjectVisitor* v); - static void DoProcessNonLive(HeapObject *obj); + static void DoProcessNonLive(HeapObject* obj); static int CompareElement(const Element* a, const Element* b); @@ -138,7 +138,7 @@ class LiveObjectList { int dump_limit, int* total_count, LolFilter* filter, - LiveObjectSummary *summary, + LiveObjectSummary* summary, JSFunction* arguments_function, Handle error); @@ -151,7 +151,7 @@ class LiveObjectList { bool is_tracking_roots); static bool NeedLOLProcessing() { return (last() != NULL); } - static void NullifyNonLivePointer(HeapObject **p) { + static void NullifyNonLivePointer(HeapObject** p) { // Mask out the low bit that marks this as a heap object. We'll use this // cleared bit as an indicator that this pointer needs to be collected. // @@ -202,7 +202,7 @@ class LiveObjectList { int id_; int capacity_; int obj_count_; - Element *elements_; + Element* elements_; // Statics for managing all the lists. 
static uint32_t next_element_id_; diff --git a/src/log.cc b/src/log.cc index eab26392e3..5e82872da1 100644 --- a/src/log.cc +++ b/src/log.cc @@ -1615,7 +1615,7 @@ void Logger::LogAccessorCallbacks() { } -bool Logger::Setup() { +bool Logger::SetUp() { // Tests and EnsureInitialize() can call this twice in a row. It's harmless. if (is_initialized_) return true; is_initialized_ = true; @@ -1708,9 +1708,9 @@ FILE* Logger::TearDown() { void Logger::EnableSlidingStateWindow() { - // If the ticker is NULL, Logger::Setup has not been called yet. In + // If the ticker is NULL, Logger::SetUp has not been called yet. In // that case, we set the sliding_state_window flag so that the - // sliding window computation will be started when Logger::Setup is + // sliding window computation will be started when Logger::SetUp is // called. if (ticker_ == NULL) { FLAG_sliding_state_window = true; diff --git a/src/log.h b/src/log.h index 677dada03a..86bcad69aa 100644 --- a/src/log.h +++ b/src/log.h @@ -150,14 +150,14 @@ class Logger { #undef DECLARE_ENUM // Acquires resources for logging if the right flags are set. - bool Setup(); + bool SetUp(); void EnsureTickerStarted(); void EnsureTickerStopped(); Sampler* sampler(); - // Frees resources acquired in Setup. + // Frees resources acquired in SetUp. // When a temporary file is used for the log, returns its stream descriptor, // leaving the file open. FILE* TearDown(); @@ -411,7 +411,7 @@ class Logger { NameMap* address_to_name_map_; // Guards against multiple calls to TearDown() that can happen in some tests. - // 'true' between Setup() and TearDown(). + // 'true' between SetUp() and TearDown(). 
bool is_initialized_; // Support for 'incremental addresses' in compressed logs: diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h index 2ba9760e2a..0788e73ef6 100644 --- a/src/mips/assembler-mips-inl.h +++ b/src/mips/assembler-mips-inl.h @@ -133,7 +133,7 @@ Object* RelocInfo::target_object() { } -Handle RelocInfo::target_object_handle(Assembler *origin) { +Handle RelocInfo::target_object_handle(Assembler* origin) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); return Handle(reinterpret_cast( Assembler::target_address_at(pc_))); diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc index e933181d41..85b6ed802a 100644 --- a/src/mips/assembler-mips.cc +++ b/src/mips/assembler-mips.cc @@ -301,7 +301,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) own_buffer_ = false; } - // Setup buffer pointers. + // Set up buffer pointers. ASSERT(buffer_ != NULL); pc_ = buffer_; reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); @@ -337,7 +337,7 @@ Assembler::~Assembler() { void Assembler::GetCode(CodeDesc* desc) { ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap. - // Setup code descriptor. + // Set up code descriptor. desc->buffer = buffer_; desc->buffer_size = buffer_size_; desc->instr_size = pc_offset(); @@ -1970,7 +1970,7 @@ void Assembler::GrowBuffer() { } CHECK_GT(desc.buffer_size, 0); // No overflow. - // Setup new buffer. + // Set up new buffer. desc.buffer = NewArray(desc.buffer_size); desc.instr_size = pc_offset(); diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc index 754132e435..9e108c9d1f 100644 --- a/src/mips/builtins-mips.cc +++ b/src/mips/builtins-mips.cc @@ -339,7 +339,7 @@ static void ArrayNativeCode(MacroAssembler* masm, t1, call_generic_code); __ IncrementCounter(counters->array_function_native(), 1, a3, t0); - // Setup return value, remove receiver from stack and return. + // Set up return value, remove receiver from stack and return. 
__ mov(v0, a2); __ Addu(sp, sp, Operand(kPointerSize)); __ Ret(); @@ -382,7 +382,7 @@ static void ArrayNativeCode(MacroAssembler* masm, call_generic_code); __ IncrementCounter(counters->array_function_native(), 1, a2, t0); - // Setup return value, remove receiver and argument from stack and return. + // Set up return value, remove receiver and argument from stack and return. __ mov(v0, a3); __ Addu(sp, sp, Operand(2 * kPointerSize)); __ Ret(); @@ -981,10 +981,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // sp[4]: number of arguments (smi-tagged) __ lw(a3, MemOperand(sp, 4 * kPointerSize)); - // Setup pointer to last argument. + // Set up pointer to last argument. __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); - // Setup number of arguments for function call below. + // Set up number of arguments for function call below. __ srl(a0, a3, kSmiTagSize); // Copy arguments and receiver to the expression stack. diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc index ea8c51235c..f021d166e4 100644 --- a/src/mips/code-stubs-mips.cc +++ b/src/mips/code-stubs-mips.cc @@ -157,13 +157,13 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Load the function from the stack. __ lw(a3, MemOperand(sp, 0)); - // Setup the object header. + // Set up the object header. __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex); __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); __ li(a2, Operand(Smi::FromInt(length))); __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset)); - // Setup the fixed slots. + // Set up the fixed slots. __ li(a1, Operand(Smi::FromInt(0))); __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX))); __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX))); @@ -208,7 +208,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { // Load the serialized scope info from the stack. 
__ lw(a1, MemOperand(sp, 1 * kPointerSize)); - // Setup the object header. + // Set up the object header. __ LoadRoot(a2, Heap::kBlockContextMapRootIndex); __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); __ li(a2, Operand(Smi::FromInt(length))); @@ -229,7 +229,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX)); __ bind(&after_sentinel); - // Setup the fixed slots. + // Set up the fixed slots. __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX)); __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX)); __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX)); @@ -4005,7 +4005,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame(save_doubles_); - // Setup argc and the builtin function in callee-saved registers. + // Set up argc and the builtin function in callee-saved registers. __ mov(s0, a0); __ mov(s2, a1); @@ -4097,7 +4097,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { isolate))); __ lw(t0, MemOperand(t0)); __ Push(t3, t2, t1, t0); - // Setup frame pointer for the frame to be pushed. + // Set up frame pointer for the frame to be pushed. __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); // Registers: @@ -4584,7 +4584,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ sw(a3, FieldMemOperand(v0, i)); } - // Setup the callee in-object property. + // Set up the callee in-object property. STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); __ lw(a3, MemOperand(sp, 2 * kPointerSize)); const int kCalleeOffset = JSObject::kHeaderSize + @@ -4597,7 +4597,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { Heap::kArgumentsLengthIndex * kPointerSize; __ sw(a2, FieldMemOperand(v0, kLengthOffset)); - // Setup the elements pointer in the allocated arguments object. + // Set up the elements pointer in the allocated arguments object. 
// If we allocated a parameter map, t0 will point there, otherwise // it will point to the backing store. __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize)); @@ -4774,7 +4774,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Get the parameters pointer from the stack. __ lw(a2, MemOperand(sp, 1 * kPointerSize)); - // Setup the elements pointer in the allocated arguments object and + // Set up the elements pointer in the allocated arguments object and // initialize the header in the elements fixed array. __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict)); __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); @@ -4786,7 +4786,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Copy the fixed array slots. Label loop; - // Setup t0 to point to the first array slot. + // Set up t0 to point to the first array slot. __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ bind(&loop); // Pre-decrement a2 with kPointerSize on each iteration. @@ -5425,7 +5425,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // of the original receiver from the call site). __ bind(&non_function); __ sw(a1, MemOperand(sp, argc_ * kPointerSize)); - __ li(a0, Operand(argc_)); // Setup the number of arguments. + __ li(a0, Operand(argc_)); // Set up the number of arguments. 
__ mov(a2, zero_reg); __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION); __ SetCallKind(t1, CALL_AS_METHOD); diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h index 4f486c1c04..210becb449 100644 --- a/src/mips/constants-mips.h +++ b/src/mips/constants-mips.h @@ -125,7 +125,7 @@ class Registers { struct RegisterAlias { int reg; - const char *name; + const char* name; }; static const int32_t kMaxValue = 0x7fffffff; @@ -147,7 +147,7 @@ class FPURegisters { struct RegisterAlias { int creg; - const char *name; + const char* name; }; private: diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc index 26e95fb24c..a1e062c803 100644 --- a/src/mips/cpu-mips.cc +++ b/src/mips/cpu-mips.cc @@ -47,7 +47,7 @@ namespace v8 { namespace internal { -void CPU::Setup() { +void CPU::SetUp() { CpuFeatures::Probe(); } diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc index 8717f14c1c..44af3d7b29 100644 --- a/src/mips/deoptimizer-mips.cc +++ b/src/mips/deoptimizer-mips.cc @@ -326,7 +326,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { output_[0] = input_; output_[0]->SetPc(reinterpret_cast(from_)); } else { - // Setup the frame pointer and the context pointer. + // Set up the frame pointer and the context pointer. output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code())); output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code())); diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc index 1e950e5f51..7394077f0a 100644 --- a/src/mips/full-codegen-mips.cc +++ b/src/mips/full-codegen-mips.cc @@ -1017,7 +1017,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset)); __ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset)); - // Setup the four remaining stack slots. + // Set up the four remaining stack slots. __ push(v0); // Map. 
__ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset)); __ li(a0, Operand(Smi::FromInt(0))); diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc index 7c94730f12..de6400708f 100644 --- a/src/mips/lithium-codegen-mips.cc +++ b/src/mips/lithium-codegen-mips.cc @@ -2794,7 +2794,7 @@ void LCodeGen::CallKnownFunction(Handle function, __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); __ Call(at); - // Setup deoptimization. + // Set up deoptimization. RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); // Restore context. diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h index 2bdb52782a..2a54681990 100644 --- a/src/mips/lithium-codegen-mips.h +++ b/src/mips/lithium-codegen-mips.h @@ -423,7 +423,7 @@ class LDeferredCode: public ZoneObject { virtual void Generate() = 0; virtual LInstruction* instr() = 0; - void SetExit(Label *exit) { external_exit_ = exit; } + void SetExit(Label* exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } int instruction_index() const { return instruction_index_; } diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc index 98b4121ed9..87c9ce06e9 100644 --- a/src/mips/macro-assembler-mips.cc +++ b/src/mips/macro-assembler-mips.cc @@ -4279,7 +4279,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { - // Setup the frame structure on the stack. + // Set up the frame structure on the stack. 
STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement); STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset); STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset); @@ -4297,7 +4297,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, addiu(sp, sp, -4 * kPointerSize); sw(ra, MemOperand(sp, 3 * kPointerSize)); sw(fp, MemOperand(sp, 2 * kPointerSize)); - addiu(fp, sp, 2 * kPointerSize); // Setup new frame pointer. + addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer. if (emit_debug_code()) { sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h index 500ea74896..eb9cf6e574 100644 --- a/src/mips/macro-assembler-mips.h +++ b/src/mips/macro-assembler-mips.h @@ -799,7 +799,7 @@ class MacroAssembler: public Assembler { // ------------------------------------------------------------------------- // JavaScript invokes. - // Setup call kind marking in t1. The method takes t1 as an + // Set up call kind marking in t1. The method takes t1 as an // explicit first parameter to make the code more readable at the // call sites. void SetCallKind(Register dst, CallKind kind); diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc index f70775d86b..191c2cafd5 100644 --- a/src/mips/simulator-mips.cc +++ b/src/mips/simulator-mips.cc @@ -888,7 +888,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { isolate_->set_simulator_i_cache(i_cache_); } Initialize(isolate); - // Setup simulator support first. Some of this information is needed to + // Set up simulator support first. Some of this information is needed to // setup the architecture state. stack_ = reinterpret_cast(malloc(stack_size_)); pc_modified_ = false; @@ -897,7 +897,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { break_pc_ = NULL; break_instr_ = 0; - // Setup architecture state. + // Set up architecture state. 
// All registers are initialized to zero to start with. for (int i = 0; i < kNumSimuRegisters; i++) { registers_[i] = 0; @@ -1944,7 +1944,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) { // Next pc int32_t next_pc = 0; - // Setup the variables if needed before executing the instruction. + // Set up the variables if needed before executing the instruction. ConfigureTypeRegister(instr, alu_out, i64hilo, @@ -2711,7 +2711,7 @@ void Simulator::Execute() { int32_t Simulator::Call(byte* entry, int argument_count, ...) { va_list parameters; va_start(parameters, argument_count); - // Setup arguments. + // Set up arguments. // First four arguments passed in registers. ASSERT(argument_count >= 4); @@ -2758,7 +2758,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) { int32_t sp_val = get_register(sp); int32_t fp_val = get_register(fp); - // Setup the callee-saved registers with a known value. To be able to check + // Set up the callee-saved registers with a known value. To be able to check // that they are preserved properly across JS execution. int32_t callee_saved_value = icount_; set_register(s0, callee_saved_value); diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc index a94e277a50..bf01861a61 100644 --- a/src/mips/stub-cache-mips.cc +++ b/src/mips/stub-cache-mips.cc @@ -1173,7 +1173,7 @@ void StubCompiler::GenerateLoadCallback(Handle object, __ EnterExitFrame(false, kApiStackSpace); // Create AccessorInfo instance on the stack above the exit frame with - // scratch2 (internal::Object **args_) as the data. + // scratch2 (internal::Object** args_) as the data. __ sw(a2, MemOperand(sp, kPointerSize)); // a2 (second argument - see note above) = AccessorInfo& __ Addu(a2, sp, kPointerSize); @@ -2430,7 +2430,7 @@ Handle CallStubCompiler::CompileCallGlobal( __ sw(a3, MemOperand(sp, argc * kPointerSize)); } - // Setup the context (function already in r1). + // Set up the context (function already in r1). 
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); // Jump to the cached code (tail call). diff --git a/src/parser.cc b/src/parser.cc index 6531f10704..777436ee04 100644 --- a/src/parser.cc +++ b/src/parser.cc @@ -1186,8 +1186,8 @@ void* Parser::ParseSourceElements(ZoneList* processor, if (directive_prologue) { // A shot at a directive. - ExpressionStatement *e_stat; - Literal *literal; + ExpressionStatement* e_stat; + Literal* literal; // Still processing directive prologue? if ((e_stat = stat->AsExpressionStatement()) != NULL && (literal = e_stat->expression()->AsLiteral()) != NULL && @@ -1562,7 +1562,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) { // TODO(1240846): It's weird that native function declarations are // introduced dynamically when we meet their declarations, whereas - // other functions are setup when entering the surrounding scope. + // other functions are set up when entering the surrounding scope. SharedFunctionInfoLiteral* lit = new(zone()) SharedFunctionInfoLiteral(isolate(), shared); VariableProxy* var = Declare(name, VAR, NULL, true, CHECK_OK); @@ -3607,7 +3607,7 @@ void ObjectLiteralPropertyChecker::CheckProperty( ASSERT(property != NULL); - Literal *lit = property->key(); + Literal* lit = property->key(); Handle handle = lit->handle(); uint32_t hash; diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc index a72f5da4b7..9b34de91ce 100644 --- a/src/platform-cygwin.cc +++ b/src/platform-cygwin.cc @@ -61,7 +61,7 @@ double ceiling(double x) { static Mutex* limit_mutex = NULL; -void OS::Setup() { +void OS::SetUp() { // Seed the random number generator. // Convert the current time to a 64-bit integer first, before converting it // to an unsigned. Going directly can cause an overflow and the seed to be @@ -290,7 +290,7 @@ void OS::LogSharedLibraryAddresses() { } LOG(isolate, SharedLibraryEvent(lib_name, start, end)); } else { - // Entry not describing executable data. 
Skip to end of line to setup + // Entry not describing executable data. Skip to end of line to set up // reading the next entry. do { c = getc(fp); diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc index 20bd837931..7d0d8d026d 100644 --- a/src/platform-freebsd.cc +++ b/src/platform-freebsd.cc @@ -79,7 +79,7 @@ double ceiling(double x) { static Mutex* limit_mutex = NULL; -void OS::Setup() { +void OS::SetUp() { // Seed the random number generator. // Convert the current time to a 64-bit integer first, before converting it // to an unsigned. Going directly can cause an overflow and the seed to be diff --git a/src/platform-linux.cc b/src/platform-linux.cc index e72d095b0a..a3cdc031ef 100644 --- a/src/platform-linux.cc +++ b/src/platform-linux.cc @@ -78,7 +78,7 @@ double ceiling(double x) { static Mutex* limit_mutex = NULL; -void OS::Setup() { +void OS::SetUp() { // Seed the random number generator. We preserve microsecond resolution. uint64_t seed = Ticks() ^ (getpid() << 16); srandom(static_cast(seed)); @@ -512,7 +512,7 @@ void OS::LogSharedLibraryAddresses() { } LOG(isolate, SharedLibraryEvent(lib_name, start, end)); } else { - // Entry not describing executable data. Skip to end of line to setup + // Entry not describing executable data. Skip to end of line to set up // reading the next entry. do { c = getc(fp); diff --git a/src/platform-macos.cc b/src/platform-macos.cc index 6e5d29da2f..417fb11ae1 100644 --- a/src/platform-macos.cc +++ b/src/platform-macos.cc @@ -93,7 +93,7 @@ double ceiling(double x) { static Mutex* limit_mutex = NULL; -void OS::Setup() { +void OS::SetUp() { // Seed the random number generator. We preserve microsecond resolution. 
uint64_t seed = Ticks() ^ (getpid() << 16); srandom(static_cast(seed)); diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc index 8c2a8633d7..094f950f72 100644 --- a/src/platform-nullos.cc +++ b/src/platform-nullos.cc @@ -56,7 +56,7 @@ double modulo(double x, double y) { // Initialize OS class early in the V8 startup. -void OS::Setup() { +void OS::SetUp() { // Seed the random number generator. UNIMPLEMENTED(); } diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc index 772d08b587..6f582d4341 100644 --- a/src/platform-openbsd.cc +++ b/src/platform-openbsd.cc @@ -99,7 +99,7 @@ static void* GetRandomMmapAddr() { } -void OS::Setup() { +void OS::SetUp() { // Seed the random number generator. We preserve microsecond resolution. uint64_t seed = Ticks() ^ (getpid() << 16); srandom(static_cast(seed)); @@ -312,7 +312,7 @@ void OS::LogSharedLibraryAddresses() { } LOG(isolate, SharedLibraryEvent(lib_name, start, end)); } else { - // Entry not describing executable data. Skip to end of line to setup + // Entry not describing executable data. Skip to end of line to set up // reading the next entry. do { c = getc(fp); diff --git a/src/platform-posix.cc b/src/platform-posix.cc index 08417ff9bc..34fd5c4498 100644 --- a/src/platform-posix.cc +++ b/src/platform-posix.cc @@ -461,7 +461,7 @@ bool POSIXSocket::SetReuseAddress(bool reuse_address) { } -bool Socket::Setup() { +bool Socket::SetUp() { // Nothing to do on POSIX. return true; } diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc index 035d394453..d55ea8913b 100644 --- a/src/platform-solaris.cc +++ b/src/platform-solaris.cc @@ -89,7 +89,7 @@ double ceiling(double x) { static Mutex* limit_mutex = NULL; -void OS::Setup() { +void OS::SetUp() { // Seed the random number generator. // Convert the current time to a 64-bit integer first, before converting it // to an unsigned. 
Going directly will cause an overflow and the seed to be diff --git a/src/platform-win32.cc b/src/platform-win32.cc index 822f360642..c439ab91a0 100644 --- a/src/platform-win32.cc +++ b/src/platform-win32.cc @@ -528,7 +528,7 @@ char* Time::LocalTimezone() { } -void OS::Setup() { +void OS::SetUp() { // Seed the random number generator. // Convert the current time to a 64-bit integer first, before converting it // to an unsigned. Going directly can cause an overflow and the seed to be @@ -1825,7 +1825,7 @@ bool Win32Socket::SetReuseAddress(bool reuse_address) { } -bool Socket::Setup() { +bool Socket::SetUp() { // Initialize Winsock32 int err; WSADATA winsock_data; diff --git a/src/platform.h b/src/platform.h index 127f788f9d..e93a0543e3 100644 --- a/src/platform.h +++ b/src/platform.h @@ -109,7 +109,7 @@ class Socket; class OS { public: // Initializes the platform OS support. Called once at VM startup. - static void Setup(); + static void SetUp(); // Returns the accumulated user time for thread. This routine // can be used for profiling. 
The implementation should @@ -477,7 +477,7 @@ class Thread { PlatformData* data() { return data_; } private: - void set_name(const char *name); + void set_name(const char* name); PlatformData* data_; @@ -593,7 +593,7 @@ class Socket { virtual bool IsValid() const = 0; - static bool Setup(); + static bool SetUp(); static int LastError(); static uint16_t HToN(uint16_t value); static uint16_t NToH(uint16_t value); diff --git a/src/preparser.h b/src/preparser.h index fc8a4a0ca8..f17bac2eac 100644 --- a/src/preparser.h +++ b/src/preparser.h @@ -630,7 +630,7 @@ class PreParser { void SetStrictModeViolation(i::Scanner::Location, const char* type, - bool *ok); + bool* ok); void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok); diff --git a/src/profile-generator.cc b/src/profile-generator.cc index 8ac4ab57ad..852e28ddb1 100644 --- a/src/profile-generator.cc +++ b/src/profile-generator.cc @@ -904,7 +904,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) { entry++; } - for (const Address *stack_pos = sample.stack, + for (const Address* stack_pos = sample.stack, *stack_end = stack_pos + sample.frames_count; stack_pos != stack_end; ++stack_pos) { @@ -1595,7 +1595,7 @@ Handle HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) { } -HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder = +HeapEntry* const HeapEntriesMap::kHeapEntryPlaceholder = reinterpret_cast(1); HeapEntriesMap::HeapEntriesMap() @@ -1724,16 +1724,16 @@ void HeapObjectsSet::SetTag(Object* obj, const char* tag) { } -HeapObject *const V8HeapExplorer::kInternalRootObject = +HeapObject* const V8HeapExplorer::kInternalRootObject = reinterpret_cast( static_cast(HeapObjectsMap::kInternalRootObjectId)); -HeapObject *const V8HeapExplorer::kGcRootsObject = +HeapObject* const V8HeapExplorer::kGcRootsObject = reinterpret_cast( static_cast(HeapObjectsMap::kGcRootsObjectId)); -HeapObject *const V8HeapExplorer::kFirstGcSubrootObject = +HeapObject* const 
V8HeapExplorer::kFirstGcSubrootObject = reinterpret_cast( static_cast(HeapObjectsMap::kGcRootsFirstSubrootId)); -HeapObject *const V8HeapExplorer::kLastGcSubrootObject = +HeapObject* const V8HeapExplorer::kLastGcSubrootObject = reinterpret_cast( static_cast(HeapObjectsMap::kFirstAvailableObjectId)); diff --git a/src/profile-generator.h b/src/profile-generator.h index 51f2882428..aefe1a0f60 100644 --- a/src/profile-generator.h +++ b/src/profile-generator.h @@ -834,7 +834,7 @@ class HeapEntriesMap { int total_children_count() { return total_children_count_; } int total_retainers_count() { return total_retainers_count_; } - static HeapEntry *const kHeapEntryPlaceholder; + static HeapEntry* const kHeapEntryPlaceholder; private: struct EntryInfo { diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc index eaa6e15603..f89d98529b 100644 --- a/src/runtime-profiler.cc +++ b/src/runtime-profiler.cc @@ -65,7 +65,7 @@ Atomic32 RuntimeProfiler::state_ = 0; Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0); #ifdef DEBUG -bool RuntimeProfiler::has_been_globally_setup_ = false; +bool RuntimeProfiler::has_been_globally_set_up_ = false; #endif bool RuntimeProfiler::enabled_ = false; @@ -82,10 +82,10 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate) void RuntimeProfiler::GlobalSetup() { - ASSERT(!has_been_globally_setup_); + ASSERT(!has_been_globally_set_up_); enabled_ = V8::UseCrankshaft() && FLAG_opt; #ifdef DEBUG - has_been_globally_setup_ = true; + has_been_globally_set_up_ = true; #endif } @@ -245,8 +245,8 @@ void RuntimeProfiler::NotifyTick() { } -void RuntimeProfiler::Setup() { - ASSERT(has_been_globally_setup_); +void RuntimeProfiler::SetUp() { + ASSERT(has_been_globally_set_up_); ClearSampleBuffer(); // If the ticker hasn't already started, make sure to do so to get // the ticks for the runtime profiler. 
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h index 15c209713e..d35b5df847 100644 --- a/src/runtime-profiler.h +++ b/src/runtime-profiler.h @@ -46,7 +46,7 @@ class RuntimeProfiler { static void GlobalSetup(); static inline bool IsEnabled() { - ASSERT(has_been_globally_setup_); + ASSERT(has_been_globally_set_up_); return enabled_; } @@ -54,7 +54,7 @@ class RuntimeProfiler { void NotifyTick(); - void Setup(); + void SetUp(); void Reset(); void TearDown(); @@ -126,7 +126,7 @@ class RuntimeProfiler { static Semaphore* semaphore_; #ifdef DEBUG - static bool has_been_globally_setup_; + static bool has_been_globally_set_up_; #endif static bool enabled_; }; diff --git a/src/spaces.cc b/src/spaces.cc index a2b8d43306..c8e94ddbe5 100644 --- a/src/spaces.cc +++ b/src/spaces.cc @@ -132,7 +132,7 @@ CodeRange::CodeRange(Isolate* isolate) } -bool CodeRange::Setup(const size_t requested) { +bool CodeRange::SetUp(const size_t requested) { ASSERT(code_range_ == NULL); code_range_ = new VirtualMemory(requested); @@ -268,7 +268,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate) } -bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) { +bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { capacity_ = RoundUp(capacity, Page::kPageSize); capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); ASSERT_GE(capacity_, capacity_executable_); @@ -671,12 +671,12 @@ PagedSpace::PagedSpace(Heap* heap, } -bool PagedSpace::Setup() { +bool PagedSpace::SetUp() { return true; } -bool PagedSpace::HasBeenSetup() { +bool PagedSpace::HasBeenSetUp() { return true; } @@ -874,9 +874,9 @@ void PagedSpace::Verify(ObjectVisitor* visitor) { // NewSpace implementation -bool NewSpace::Setup(int reserved_semispace_capacity, +bool NewSpace::SetUp(int reserved_semispace_capacity, int maximum_semispace_capacity) { - // Setup new space based on the preallocated memory block defined by + // Set up new space based on the preallocated 
memory block defined by // start and size. The provided space is divided into two semi-spaces. // To support fast containment testing in the new space, the size of // this chunk must be a power of two and it must be aligned to its size. @@ -895,7 +895,7 @@ bool NewSpace::Setup(int reserved_semispace_capacity, ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); ASSERT(IsPowerOf2(maximum_semispace_capacity)); - // Allocate and setup the histogram arrays if necessary. + // Allocate and set up the histogram arrays if necessary. allocated_histogram_ = NewArray(LAST_TYPE + 1); promoted_histogram_ = NewArray(LAST_TYPE + 1); @@ -909,12 +909,12 @@ bool NewSpace::Setup(int reserved_semispace_capacity, 2 * heap()->ReservedSemiSpaceSize()); ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); - if (!to_space_.Setup(chunk_base_, + if (!to_space_.SetUp(chunk_base_, initial_semispace_capacity, maximum_semispace_capacity)) { return false; } - if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity, + if (!from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, initial_semispace_capacity, maximum_semispace_capacity)) { return false; @@ -1149,7 +1149,7 @@ void NewSpace::Verify() { // ----------------------------------------------------------------------------- // SemiSpace implementation -bool SemiSpace::Setup(Address start, +bool SemiSpace::SetUp(Address start, int initial_capacity, int maximum_capacity) { // Creates a space in the young generation. 
The constructor does not @@ -2411,7 +2411,7 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap, objects_size_(0) {} -bool LargeObjectSpace::Setup() { +bool LargeObjectSpace::SetUp() { first_page_ = NULL; size_ = 0; page_count_ = 0; @@ -2431,7 +2431,7 @@ void LargeObjectSpace::TearDown() { space, kAllocationActionFree, page->size()); heap()->isolate()->memory_allocator()->Free(page); } - Setup(); + SetUp(); } diff --git a/src/spaces.h b/src/spaces.h index c646aaa9bd..41c3ef929f 100644 --- a/src/spaces.h +++ b/src/spaces.h @@ -815,7 +815,7 @@ class CodeRange { // Reserves a range of virtual memory, but does not commit any of it. // Can only be called once, at heap initialization time. // Returns false on failure. - bool Setup(const size_t requested_size); + bool SetUp(const size_t requested_size); // Frees the range of virtual memory, and frees the data structures used to // manage it. @@ -943,7 +943,7 @@ class MemoryAllocator { // Initializes its internal bookkeeping structures. // Max capacity of the total space and executable memory limit. - bool Setup(intptr_t max_capacity, intptr_t capacity_executable); + bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); void TearDown(); @@ -1419,11 +1419,11 @@ class PagedSpace : public Space { // the memory allocator's initial chunk) if possible. If the block of // addresses is not big enough to contain a single page-aligned page, a // fresh chunk will be allocated. - bool Setup(); + bool SetUp(); // Returns true if the space has been successfully set up and not // subsequently torn down. - bool HasBeenSetup(); + bool HasBeenSetUp(); // Cleans up the space, frees all pages in this space except those belonging // to the initial chunk, uncommits addresses in the initial chunk. @@ -1821,14 +1821,14 @@ class SemiSpace : public Space { current_page_(NULL) { } // Sets up the semispace using the given chunk. 
- bool Setup(Address start, int initial_capacity, int maximum_capacity); + bool SetUp(Address start, int initial_capacity, int maximum_capacity); // Tear down the space. Heap memory was not allocated by the space, so it // is not deallocated here. void TearDown(); // True if the space has been set up but not torn down. - bool HasBeenSetup() { return start_ != NULL; } + bool HasBeenSetUp() { return start_ != NULL; } // Grow the semispace to the new capacity. The new capacity // requested must be larger than the current capacity and less than @@ -2067,15 +2067,15 @@ class NewSpace : public Space { inline_allocation_limit_step_(0) {} // Sets up the new space using the given chunk. - bool Setup(int reserved_semispace_size_, int max_semispace_size); + bool SetUp(int reserved_semispace_size_, int max_semispace_size); // Tears down the space. Heap memory was not allocated by the space, so it // is not deallocated here. void TearDown(); // True if the space has been set up but not torn down. - bool HasBeenSetup() { - return to_space_.HasBeenSetup() && from_space_.HasBeenSetup(); + bool HasBeenSetUp() { + return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp(); } // Flip the pair of spaces. @@ -2474,7 +2474,7 @@ class LargeObjectSpace : public Space { virtual ~LargeObjectSpace() {} // Initializes internal data structures. - bool Setup(); + bool SetUp(); // Releases internal resources, frees objects in this space. 
void TearDown(); diff --git a/src/store-buffer.cc b/src/store-buffer.cc index 0f1fed0286..9022b3be83 100644 --- a/src/store-buffer.cc +++ b/src/store-buffer.cc @@ -55,7 +55,7 @@ StoreBuffer::StoreBuffer(Heap* heap) } -void StoreBuffer::Setup() { +void StoreBuffer::SetUp() { virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3); uintptr_t start_as_int = reinterpret_cast(virtual_memory_->address()); diff --git a/src/store-buffer.h b/src/store-buffer.h index 204fa3ff4e..951a9ca2bc 100644 --- a/src/store-buffer.h +++ b/src/store-buffer.h @@ -54,7 +54,7 @@ class StoreBuffer { inline Address TopAddress(); - void Setup(); + void SetUp(); void TearDown(); // This is used by the mutator to enter addresses into the store buffer. diff --git a/src/v8.cc b/src/v8.cc index c882d86f8d..36ee221948 100644 --- a/src/v8.cc +++ b/src/v8.cc @@ -47,7 +47,7 @@ static Mutex* init_once_mutex = OS::CreateMutex(); static bool init_once_called = false; bool V8::is_running_ = false; -bool V8::has_been_setup_ = false; +bool V8::has_been_set_up_ = false; bool V8::has_been_disposed_ = false; bool V8::has_fatal_error_ = false; bool V8::use_crankshaft_ = true; @@ -82,7 +82,7 @@ bool V8::Initialize(Deserializer* des) { if (isolate->IsInitialized()) return true; is_running_ = true; - has_been_setup_ = true; + has_been_set_up_ = true; has_fatal_error_ = false; has_been_disposed_ = false; @@ -100,7 +100,7 @@ void V8::TearDown() { Isolate* isolate = Isolate::Current(); ASSERT(isolate->IsDefaultIsolate()); - if (!has_been_setup_ || has_been_disposed_) return; + if (!has_been_set_up_ || has_been_disposed_) return; isolate->TearDown(); is_running_ = false; @@ -239,8 +239,8 @@ void V8::InitializeOncePerProcess() { if (init_once_called) return; init_once_called = true; - // Setup the platform OS support. - OS::Setup(); + // Set up the platform OS support. 
+ OS::SetUp(); use_crankshaft_ = FLAG_crankshaft; @@ -248,7 +248,7 @@ void V8::InitializeOncePerProcess() { use_crankshaft_ = false; } - CPU::Setup(); + CPU::SetUp(); if (!CPU::SupportsCrankshaft()) { use_crankshaft_ = false; } diff --git a/src/v8.h b/src/v8.h index b9a3e05386..adfdb3ea88 100644 --- a/src/v8.h +++ b/src/v8.h @@ -118,7 +118,7 @@ class V8 : public AllStatic { // True if engine is currently running static bool is_running_; // True if V8 has ever been run - static bool has_been_setup_; + static bool has_been_set_up_; // True if error has been signaled for current engine // (reset to false if engine is restarted) static bool has_fatal_error_; diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h index ab387d6d0a..141d092d8e 100644 --- a/src/x64/assembler-x64-inl.h +++ b/src/x64/assembler-x64-inl.h @@ -262,7 +262,7 @@ Object* RelocInfo::target_object() { } -Handle RelocInfo::target_object_handle(Assembler *origin) { +Handle RelocInfo::target_object_handle(Assembler* origin) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); if (rmode_ == EMBEDDED_OBJECT) { return Memory::Object_Handle_at(pc_); diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc index ca3bece5f0..eb8d7d4d99 100644 --- a/src/x64/assembler-x64.cc +++ b/src/x64/assembler-x64.cc @@ -383,7 +383,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) } #endif - // Setup buffer pointers. + // Set up buffer pointers. ASSERT(buffer_ != NULL); pc_ = buffer_; reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); @@ -412,7 +412,7 @@ void Assembler::GetCode(CodeDesc* desc) { // Finalize code (at this point overflow() may be true, but the gap ensures // that we are still not overlapping instructions and relocation info). ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap. - // Setup code descriptor. + // Set up code descriptor. 
desc->buffer = buffer_; desc->buffer_size = buffer_size_; desc->instr_size = pc_offset(); @@ -502,7 +502,7 @@ void Assembler::GrowBuffer() { V8::FatalProcessOutOfMemory("Assembler::GrowBuffer"); } - // Setup new buffer. + // Set up new buffer. desc.buffer = NewArray(desc.buffer_size); desc.instr_size = pc_offset(); desc.reloc_size = diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc index ef63c7a27b..4833e03c8e 100644 --- a/src/x64/builtins-x64.cc +++ b/src/x64/builtins-x64.cc @@ -337,7 +337,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ push(rbx); __ push(rbx); - // Setup pointer to last argument. + // Set up pointer to last argument. __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset)); // Copy arguments and receiver to the expression stack. @@ -1198,7 +1198,7 @@ static void AllocateJSArray(MacroAssembler* masm, // Both registers are preserved by this code so no need to differentiate between // a construct call and a normal call. static void ArrayNativeCode(MacroAssembler* masm, - Label *call_generic_code) { + Label* call_generic_code) { Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array, has_non_smi_element; diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc index 3cd6740b68..03a5170bae 100644 --- a/src/x64/code-stubs-x64.cc +++ b/src/x64/code-stubs-x64.cc @@ -124,12 +124,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Get the function from the stack. __ movq(rcx, Operand(rsp, 1 * kPointerSize)); - // Setup the object header. + // Set up the object header. __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex); __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); - // Setup the fixed slots. + // Set up the fixed slots. __ Set(rbx, 0); // Set to NULL. 
__ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx); __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi); @@ -173,7 +173,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { // Get the serialized scope info from the stack. __ movq(rbx, Operand(rsp, 2 * kPointerSize)); - // Setup the object header. + // Set up the object header. __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex); __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); @@ -194,7 +194,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX)); __ bind(&after_sentinel); - // Setup the fixed slots. + // Set up the fixed slots. __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx); __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi); __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx); @@ -2399,7 +2399,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ movq(FieldOperand(rax, i), rdx); } - // Setup the callee in-object property. + // Set up the callee in-object property. STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); __ movq(rdx, Operand(rsp, 3 * kPointerSize)); __ movq(FieldOperand(rax, JSObject::kHeaderSize + @@ -2414,7 +2414,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { Heap::kArgumentsLengthIndex * kPointerSize), rcx); - // Setup the elements pointer in the allocated arguments object. + // Set up the elements pointer in the allocated arguments object. // If we allocated a parameter map, edi will point there, otherwise to the // backing store. __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize)); @@ -2621,7 +2621,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Get the parameters pointer from the stack. 
__ movq(rdx, Operand(rsp, 2 * kPointerSize)); - // Setup the elements pointer in the allocated arguments object and + // Set up the elements pointer in the allocated arguments object and // initialize the header in the elements fixed array. __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict)); __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); @@ -3942,7 +3942,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { Label not_outermost_js, not_outermost_js_2; { // NOLINT. Scope block confuses linter. MacroAssembler::NoRootArrayScope uninitialized_root_register(masm); - // Setup frame. + // Set up frame. __ push(rbp); __ movq(rbp, rsp); @@ -5081,7 +5081,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ ret(3 * kPointerSize); __ bind(&make_two_character_string); - // Setup registers for allocating the two character string. + // Set up registers for allocating the two character string. __ movzxwq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize)); __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime); __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx); diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc index ae5045f0df..69e77eee9d 100644 --- a/src/x64/cpu-x64.cc +++ b/src/x64/cpu-x64.cc @@ -41,7 +41,7 @@ namespace v8 { namespace internal { -void CPU::Setup() { +void CPU::SetUp() { CpuFeatures::Probe(); } diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc index d684ad713f..a5a171a2a8 100644 --- a/src/x64/deoptimizer-x64.cc +++ b/src/x64/deoptimizer-x64.cc @@ -314,7 +314,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { output_[0] = input_; output_[0]->SetPc(reinterpret_cast(from_)); } else { - // Setup the frame pointer and the context pointer. + // Set up the frame pointer and the context pointer. 
output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code())); output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code())); diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc index 24df20ba76..eeef0e94e8 100644 --- a/src/x64/full-codegen-x64.cc +++ b/src/x64/full-codegen-x64.cc @@ -967,7 +967,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset)); __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset)); - // Setup the four remaining stack slots. + // Set up the four remaining stack slots. __ push(rax); // Map. __ push(rdx); // Enumeration cache. __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset)); diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc index c9db6875d6..392c74dc23 100644 --- a/src/x64/lithium-codegen-x64.cc +++ b/src/x64/lithium-codegen-x64.cc @@ -2641,7 +2641,7 @@ void LCodeGen::CallKnownFunction(Handle function, __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset)); } - // Setup deoptimization. + // Set up deoptimization. RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0); // Restore context. @@ -3911,7 +3911,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { DeoptimizeIf(not_equal, instr->environment()); } - // Setup the parameters to the stub/runtime call. + // Set up the parameters to the stub/runtime call. __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); __ push(FieldOperand(rax, JSFunction::kLiteralsOffset)); __ Push(Smi::FromInt(instr->hydrogen()->literal_index())); @@ -4012,7 +4012,7 @@ void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) { Handle constant_properties = instr->hydrogen()->constant_properties(); - // Setup the parameters to the stub/runtime call. + // Set up the parameters to the stub/runtime call. 
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); __ push(FieldOperand(rax, JSFunction::kLiteralsOffset)); __ Push(Smi::FromInt(instr->hydrogen()->literal_index())); diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h index bbdc21e0c8..2890c530b7 100644 --- a/src/x64/lithium-codegen-x64.h +++ b/src/x64/lithium-codegen-x64.h @@ -376,7 +376,7 @@ class LDeferredCode: public ZoneObject { virtual void Generate() = 0; virtual LInstruction* instr() = 0; - void SetExit(Label *exit) { external_exit_ = exit; } + void SetExit(Label* exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } int instruction_index() const { return instruction_index_; } diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc index 617ba5a9bd..9c12731e98 100644 --- a/src/x64/macro-assembler-x64.cc +++ b/src/x64/macro-assembler-x64.cc @@ -495,7 +495,7 @@ void MacroAssembler::Abort(const char* msg) { // from the real pointer as a smi. intptr_t p1 = reinterpret_cast(msg); intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; - // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag. + // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag. ASSERT(reinterpret_cast(p0)->IsSmi()); #ifdef DEBUG if (msg != NULL) { @@ -3231,7 +3231,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { void MacroAssembler::EnterExitFramePrologue(bool save_rax) { - // Setup the frame structure on the stack. + // Set up the frame structure on the stack. // All constants are relative to the frame pointer of the exit frame. 
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); @@ -3291,7 +3291,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) { EnterExitFramePrologue(true); - // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame, + // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame, // so it must be retained across the C-call. int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; lea(r15, Operand(rbp, r14, times_pointer_size, offset)); diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h index 858a982e55..8596852db4 100644 --- a/src/x64/macro-assembler-x64.h +++ b/src/x64/macro-assembler-x64.h @@ -328,7 +328,7 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // JavaScript invokes - // Setup call kind marking in rcx. The method takes rcx as an + // Set up call kind marking in rcx. The method takes rcx as an // explicit first parameter to make the code more readable at the // call sites. void SetCallKind(Register dst, CallKind kind); diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc index a938787e3c..3633fbbcee 100644 --- a/src/x64/stub-cache-x64.cc +++ b/src/x64/stub-cache-x64.cc @@ -2181,7 +2181,7 @@ Handle CallStubCompiler::CompileCallGlobal( __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx); } - // Setup the context (function already in rdi). + // Set up the context (function already in rdi). __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); // Jump to the cached code (tail call). 
diff --git a/test/cctest/cctest.h b/test/cctest/cctest.h index c04d893c10..0b93562216 100644 --- a/test/cctest/cctest.h +++ b/test/cctest/cctest.h @@ -104,7 +104,7 @@ class ApiTestFuzzer: public v8::internal::Thread { FOURTH_PART, LAST_PART = FOURTH_PART }; - static void Setup(PartOfTest part); + static void SetUp(PartOfTest part); static void RunAllTests(); static void TearDown(); // This method switches threads if we are running the Threading test. diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc index 899c9021ff..c654dfa8bb 100644 --- a/test/cctest/test-alloc.cc +++ b/test/cctest/test-alloc.cc @@ -203,10 +203,10 @@ class Block { TEST(CodeRange) { const int code_range_size = 32*MB; - OS::Setup(); + OS::SetUp(); Isolate::Current()->InitializeLoggingAndCounters(); CodeRange* code_range = new CodeRange(Isolate::Current()); - code_range->Setup(code_range_size); + code_range->SetUp(code_range_size); int current_allocated = 0; int total_allocated = 0; List blocks(1000); diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc index 0c191c34c8..79746c606a 100644 --- a/test/cctest/test-api.cc +++ b/test/cctest/test-api.cc @@ -7951,7 +7951,7 @@ THREADED_TEST(CrossEval) { other->SetSecurityToken(token); current->SetSecurityToken(token); - // Setup reference from current to other. + // Set up reference from current to other. current->Global()->Set(v8_str("other"), other->Global()); // Check that new variables are introduced in other context. @@ -8031,7 +8031,7 @@ THREADED_TEST(EvalInDetachedGlobal) { v8::Persistent context0 = Context::New(); v8::Persistent context1 = Context::New(); - // Setup function in context0 that uses eval from context0. + // Set up function in context0 that uses eval from context0. context0->Enter(); v8::Handle fun = CompileRun("var x = 42;" @@ -8069,7 +8069,7 @@ THREADED_TEST(CrossLazyLoad) { other->SetSecurityToken(token); current->SetSecurityToken(token); - // Setup reference from current to other. 
+ // Set up reference from current to other. current->Global()->Set(v8_str("other"), other->Global()); // Trigger lazy loading in other context. @@ -10180,7 +10180,7 @@ void ApiTestFuzzer::Run() { static unsigned linear_congruential_generator; -void ApiTestFuzzer::Setup(PartOfTest part) { +void ApiTestFuzzer::SetUp(PartOfTest part) { linear_congruential_generator = i::FLAG_testing_prng_seed; fuzzing_ = true; int count = RegisterThreadedTest::count(); @@ -10244,25 +10244,25 @@ void ApiTestFuzzer::TearDown() { // Lets not be needlessly self-referential. TEST(Threading) { - ApiTestFuzzer::Setup(ApiTestFuzzer::FIRST_PART); + ApiTestFuzzer::SetUp(ApiTestFuzzer::FIRST_PART); ApiTestFuzzer::RunAllTests(); ApiTestFuzzer::TearDown(); } TEST(Threading2) { - ApiTestFuzzer::Setup(ApiTestFuzzer::SECOND_PART); + ApiTestFuzzer::SetUp(ApiTestFuzzer::SECOND_PART); ApiTestFuzzer::RunAllTests(); ApiTestFuzzer::TearDown(); } TEST(Threading3) { - ApiTestFuzzer::Setup(ApiTestFuzzer::THIRD_PART); + ApiTestFuzzer::SetUp(ApiTestFuzzer::THIRD_PART); ApiTestFuzzer::RunAllTests(); ApiTestFuzzer::TearDown(); } TEST(Threading4) { - ApiTestFuzzer::Setup(ApiTestFuzzer::FOURTH_PART); + ApiTestFuzzer::SetUp(ApiTestFuzzer::FOURTH_PART); ApiTestFuzzer::RunAllTests(); ApiTestFuzzer::TearDown(); } @@ -12121,7 +12121,7 @@ THREADED_TEST(GetCallingContext) { callback_templ->GetFunction()); calling_context0->Exit(); - // Expose context0 in context1 and setup a function that calls the + // Expose context0 in context1 and set up a function that calls the // callback function. calling_context1->Enter(); calling_context1->Global()->Set(v8_str("context0"), diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc index 959cf3fe52..d81923fa5c 100644 --- a/test/cctest/test-assembler-x64.cc +++ b/test/cctest/test-assembler-x64.cc @@ -99,7 +99,7 @@ static void InitializeVM() { TEST(AssemblerX64ReturnOperation) { - OS::Setup(); + OS::SetUp(); // Allocate an executable page of memory. 
size_t actual_size; byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, @@ -121,7 +121,7 @@ TEST(AssemblerX64ReturnOperation) { } TEST(AssemblerX64StackOperations) { - OS::Setup(); + OS::SetUp(); // Allocate an executable page of memory. size_t actual_size; byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, @@ -153,7 +153,7 @@ TEST(AssemblerX64StackOperations) { } TEST(AssemblerX64ArithmeticOperations) { - OS::Setup(); + OS::SetUp(); // Allocate an executable page of memory. size_t actual_size; byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, @@ -175,7 +175,7 @@ TEST(AssemblerX64ArithmeticOperations) { } TEST(AssemblerX64ImulOperation) { - OS::Setup(); + OS::SetUp(); // Allocate an executable page of memory. size_t actual_size; byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, @@ -203,7 +203,7 @@ TEST(AssemblerX64ImulOperation) { } TEST(AssemblerX64MemoryOperands) { - OS::Setup(); + OS::SetUp(); // Allocate an executable page of memory. size_t actual_size; byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, @@ -237,7 +237,7 @@ TEST(AssemblerX64MemoryOperands) { } TEST(AssemblerX64ControlFlow) { - OS::Setup(); + OS::SetUp(); // Allocate an executable page of memory. size_t actual_size; byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, @@ -266,7 +266,7 @@ TEST(AssemblerX64ControlFlow) { } TEST(AssemblerX64LoopImmediates) { - OS::Setup(); + OS::SetUp(); // Allocate an executable page of memory. 
size_t actual_size; byte* buffer = static_cast(OS::Allocate(Assembler::kMinimalBufferSize, diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc index f567a0f770..b10e6889ec 100644 --- a/test/cctest/test-cpu-profiler.cc +++ b/test/cctest/test-cpu-profiler.cc @@ -216,7 +216,7 @@ TEST(TickEvents) { TEST(CrashIfStoppingLastNonExistentProfile) { InitializeVM(); TestSetup test_setup; - CpuProfiler::Setup(); + CpuProfiler::SetUp(); CpuProfiler::StartProfiling("1"); CpuProfiler::StopProfiling("2"); CpuProfiler::StartProfiling("1"); @@ -268,7 +268,7 @@ TEST(Issue1398) { TEST(DeleteAllCpuProfiles) { InitializeVM(); TestSetup test_setup; - CpuProfiler::Setup(); + CpuProfiler::SetUp(); CHECK_EQ(0, CpuProfiler::GetProfilesCount()); CpuProfiler::DeleteAllProfiles(); CHECK_EQ(0, CpuProfiler::GetProfilesCount()); diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc index 8543a37720..4fa7afa099 100644 --- a/test/cctest/test-debug.cc +++ b/test/cctest/test-debug.cc @@ -2743,7 +2743,7 @@ TEST(DebugStepKeyedLoadLoop) { v8::Handle args[kArgc] = { a }; foo->Call(env->Global(), kArgc, args); - // Setup break point and step through the function. + // Set up break point and step through the function. SetBreakPoint(foo, 3); step_action = StepNext; break_point_hit_count = 0; @@ -2790,7 +2790,7 @@ TEST(DebugStepKeyedStoreLoop) { v8::Handle args[kArgc] = { a }; foo->Call(env->Global(), kArgc, args); - // Setup break point and step through the function. + // Set up break point and step through the function. SetBreakPoint(foo, 3); step_action = StepNext; break_point_hit_count = 0; @@ -2834,7 +2834,7 @@ TEST(DebugStepNamedLoadLoop) { // Call function without any break points to ensure inlining is in place. foo->Call(env->Global(), 0, NULL); - // Setup break point and step through the function. + // Set up break point and step through the function. 
SetBreakPoint(foo, 4); step_action = StepNext; break_point_hit_count = 0; @@ -2869,7 +2869,7 @@ static void DoDebugStepNamedStoreLoop(int expected) { // Call function without any break points to ensure inlining is in place. foo->Call(env->Global(), 0, NULL); - // Setup break point and step through the function. + // Set up break point and step through the function. SetBreakPoint(foo, 3); step_action = StepNext; break_point_hit_count = 0; @@ -5709,7 +5709,7 @@ void HostDispatchV8Thread::Run() { v8::HandleScope scope; DebugLocalContext env; - // Setup message and host dispatch handlers. + // Set up message and host dispatch handlers. v8::Debug::SetMessageHandler2(HostDispatchMessageHandler); v8::Debug::SetHostDispatchHandler(HostDispatchDispatchHandler, 10 /* ms */); @@ -5797,7 +5797,7 @@ void DebugMessageDispatchV8Thread::Run() { v8::HandleScope scope; DebugLocalContext env; - // Setup debug message dispatch handler. + // Set up debug message dispatch handler. v8::Debug::SetDebugMessageDispatchHandler(DebugMessageHandler); CompileRun("var y = 1 + 2;\n"); @@ -5851,7 +5851,7 @@ TEST(DebuggerAgent) { bool ok; // Initialize the socket library. - i::Socket::Setup(); + i::Socket::SetUp(); // Test starting and stopping the agent without any client connection. debugger->StartAgent("test", kPort1); @@ -5949,7 +5949,7 @@ TEST(DebuggerAgentProtocolOverflowHeader) { OS::SNPrintF(i::Vector(port_str, kPortBufferLen), "%d", kPort); // Initialize the socket library. - i::Socket::Setup(); + i::Socket::SetUp(); // Create a socket server to receive a debugger agent message. 
DebuggerAgentProtocolServerThread* server = diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc index 032e6bc0fc..0e9432d95d 100644 --- a/test/cctest/test-disasm-arm.cc +++ b/test/cctest/test-disasm-arm.cc @@ -69,10 +69,10 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) { } -// Setup V8 to a state where we can at least run the assembler and +// Set up V8 to a state where we can at least run the assembler and // disassembler. Declare the variables and allocate the data structures used // in the rest of the macros. -#define SETUP() \ +#define SET_UP() \ InitializeVM(); \ v8::HandleScope scope; \ byte *buffer = reinterpret_cast(malloc(4*1024)); \ @@ -102,7 +102,7 @@ if (failure) { \ TEST(Type0) { - SETUP(); + SET_UP(); COMPARE(and_(r0, r1, Operand(r2)), "e0010002 and r0, r1, r2"); @@ -329,7 +329,7 @@ TEST(Type0) { TEST(Type1) { - SETUP(); + SET_UP(); COMPARE(and_(r0, r1, Operand(0x00000000)), "e2010000 and r0, r1, #0"); @@ -358,7 +358,7 @@ TEST(Type1) { TEST(Type3) { - SETUP(); + SET_UP(); if (CpuFeatures::IsSupported(ARMv7)) { COMPARE(ubfx(r0, r1, 5, 10), @@ -413,7 +413,7 @@ TEST(Type3) { TEST(Vfp) { - SETUP(); + SET_UP(); if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); @@ -546,7 +546,7 @@ TEST(Vfp) { TEST(LoadStore) { - SETUP(); + SET_UP(); COMPARE(ldrb(r0, MemOperand(r1)), "e5d10000 ldrb r0, [r1, #+0]"); diff --git a/test/cctest/test-disasm-mips.cc b/test/cctest/test-disasm-mips.cc index 5ad99d7a39..8eadc6483b 100644 --- a/test/cctest/test-disasm-mips.cc +++ b/test/cctest/test-disasm-mips.cc @@ -71,10 +71,10 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) { } -// Setup V8 to a state where we can at least run the assembler and +// Set up V8 to a state where we can at least run the assembler and // disassembler. Declare the variables and allocate the data structures used // in the rest of the macros. 
-#define SETUP() \ +#define SET_UP() \ InitializeVM(); \ v8::HandleScope scope; \ byte *buffer = reinterpret_cast(malloc(4*1024)); \ @@ -104,7 +104,7 @@ if (failure) { \ TEST(Type0) { - SETUP(); + SET_UP(); COMPARE(addu(a0, a1, a2), "00a62021 addu a0, a1, a2"); diff --git a/test/cctest/test-platform-linux.cc b/test/cctest/test-platform-linux.cc index 756b9473c9..2a8d497850 100644 --- a/test/cctest/test-platform-linux.cc +++ b/test/cctest/test-platform-linux.cc @@ -67,7 +67,7 @@ TEST(BusyLock) { TEST(VirtualMemory) { - OS::Setup(); + OS::SetUp(); VirtualMemory* vm = new VirtualMemory(1 * MB); CHECK(vm->IsReserved()); void* block_addr = vm->address(); diff --git a/test/cctest/test-platform-win32.cc b/test/cctest/test-platform-win32.cc index 9bd0014c6f..36b30aaceb 100644 --- a/test/cctest/test-platform-win32.cc +++ b/test/cctest/test-platform-win32.cc @@ -13,7 +13,7 @@ using namespace ::v8::internal; TEST(VirtualMemory) { - OS::Setup(); + OS::SetUp(); VirtualMemory* vm = new VirtualMemory(1 * MB); CHECK(vm->IsReserved()); void* block_addr = vm->address(); diff --git a/test/cctest/test-sockets.cc b/test/cctest/test-sockets.cc index 4af55dbe9b..ad7354002f 100644 --- a/test/cctest/test-sockets.cc +++ b/test/cctest/test-sockets.cc @@ -129,7 +129,7 @@ TEST(Socket) { bool ok; // Initialize socket support. - ok = Socket::Setup(); + ok = Socket::SetUp(); CHECK(ok); // Send and receive some data. 
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc index ee60086ed2..6e495bc169 100644 --- a/test/cctest/test-spaces.cc +++ b/test/cctest/test-spaces.cc @@ -125,14 +125,14 @@ class TestMemoryAllocatorScope { TEST(MemoryAllocator) { - OS::Setup(); + OS::SetUp(); Isolate* isolate = Isolate::Current(); isolate->InitializeLoggingAndCounters(); Heap* heap = isolate->heap(); CHECK(isolate->heap()->ConfigureHeapDefault()); MemoryAllocator* memory_allocator = new MemoryAllocator(isolate); - CHECK(memory_allocator->Setup(heap->MaxReserved(), + CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize())); int total_pages = 0; @@ -175,21 +175,21 @@ TEST(MemoryAllocator) { TEST(NewSpace) { - OS::Setup(); + OS::SetUp(); Isolate* isolate = Isolate::Current(); isolate->InitializeLoggingAndCounters(); Heap* heap = isolate->heap(); CHECK(heap->ConfigureHeapDefault()); MemoryAllocator* memory_allocator = new MemoryAllocator(isolate); - CHECK(memory_allocator->Setup(heap->MaxReserved(), + CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize())); TestMemoryAllocatorScope test_scope(isolate, memory_allocator); NewSpace new_space(heap); - CHECK(new_space.Setup(HEAP->ReservedSemiSpaceSize(), + CHECK(new_space.SetUp(HEAP->ReservedSemiSpaceSize(), HEAP->ReservedSemiSpaceSize())); - CHECK(new_space.HasBeenSetup()); + CHECK(new_space.HasBeenSetUp()); while (new_space.Available() >= Page::kMaxHeapObjectSize) { Object* obj = @@ -204,13 +204,13 @@ TEST(NewSpace) { TEST(OldSpace) { - OS::Setup(); + OS::SetUp(); Isolate* isolate = Isolate::Current(); isolate->InitializeLoggingAndCounters(); Heap* heap = isolate->heap(); CHECK(heap->ConfigureHeapDefault()); MemoryAllocator* memory_allocator = new MemoryAllocator(isolate); - CHECK(memory_allocator->Setup(heap->MaxReserved(), + CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize())); TestMemoryAllocatorScope test_scope(isolate, memory_allocator); @@ -220,7 +220,7 @@ 
TEST(OldSpace) { NOT_EXECUTABLE); CHECK(s != NULL); - CHECK(s->Setup()); + CHECK(s->SetUp()); while (s->Available() > 0) { s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked(); diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc index e4f70df409..df8ff72e4f 100644 --- a/test/cctest/test-utils.cc +++ b/test/cctest/test-utils.cc @@ -105,7 +105,7 @@ void TestMemCopy(Vector src, TEST(MemCopy) { v8::V8::Initialize(); - OS::Setup(); + OS::SetUp(); const int N = OS::kMinComplexMemCopy + 128; Vector buffer1 = Vector::New(N); Vector buffer2 = Vector::New(N);