Fix common misspellings

Bug: chromium:750830
Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_chromium_rel_ng;master.tryserver.v8:v8_linux_noi18n_rel_ng
Change-Id: Icab7b5a1c469d5e77d04df8bfca8319784e92af4
Reviewed-on: https://chromium-review.googlesource.com/595655
Commit-Queue: Julien Brianceau <jbriance@cisco.com>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Daniel Ehrenberg <littledan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47072}
This commit is contained in:
Julien Brianceau 2017-08-02 10:23:36 +02:00 committed by Commit Bot
parent 31173f92e5
commit b41f857b9e
140 changed files with 204 additions and 204 deletions

View File

@ -790,7 +790,7 @@ Plan.prototype.execute = function () {
* In case 1, the added constraint is stronger than the stay
* constraint and values must propagate down the entire length of the
* chain. In case 2, the added constraint is weaker than the stay
* constraint so it cannot be accomodated. The cost in this case is,
* constraint so it cannot be accommodated. The cost in this case is,
* of course, very low. Typical situations lie somewhere between these
* two extremes.
*/

View File

@ -76,7 +76,7 @@ def distributor():
@memoize()
def platform():
"""
Returns a string representing the platform this build is targetted for.
Returns a string representing the platform this build is targeted for.
Possible values: 'win', 'mac', 'linux', 'ios', 'android'
"""
if 'OS' in gyp_defines():

View File

@ -389,7 +389,7 @@ class V8_EXPORT HeapGraphNode {
kRegExp = 6, // RegExp.
kHeapNumber = 7, // Number stored in the heap.
kNative = 8, // Native object (not from V8 heap).
kSynthetic = 9, // Synthetic object, usualy used for grouping
kSynthetic = 9, // Synthetic object, usually used for grouping
// snapshot items together.
kConsString = 10, // Concatenated string. A pair of pointers to strings.
kSlicedString = 11, // Sliced string. A fragment of another string.
@ -784,7 +784,7 @@ class V8_EXPORT HeapProfiler {
/**
* Returns the sampled profile of allocations allocated (and still live) since
* StartSamplingHeapProfiler was called. The ownership of the pointer is
* transfered to the caller. Returns nullptr if sampling heap profiler is not
* transferred to the caller. Returns nullptr if sampling heap profiler is not
* active.
*/
AllocationProfile* GetAllocationProfile();

View File

@ -6538,7 +6538,7 @@ struct JitCodeEvent {
struct line_info_t {
// PC offset
size_t offset;
// Code postion
// Code position
size_t pos;
// The position type.
PositionType position_type;
@ -7746,7 +7746,7 @@ typedef bool (*EntropySource)(unsigned char* buffer, size_t length);
* ReturnAddressLocationResolver is used as a callback function when v8 is
* resolving the location of a return address on the stack. Profilers that
* change the return address on the stack can use this to resolve the stack
* location to whereever the profiler stashed the original return address.
* location to wherever the profiler stashed the original return address.
*
* \param return_addr_location A location on stack where a machine
* return address resides.

View File

@ -305,7 +305,7 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) {
JsHttpRequestProcessor::~JsHttpRequestProcessor() {
// Dispose the persistent handles. When noone else has any
// Dispose the persistent handles. When no one else has any
// references to the objects stored in the handles they will be
// automatically reclaimed.
context_.Reset();

View File

@ -946,7 +946,7 @@ class FrameFunctionIterator {
}
}
// Iterate through functions until the first occurence of 'function'.
// Iterate through functions until the first occurrence of 'function'.
// Returns true if 'function' is found, and false if the iterator ends
// without finding it.
bool Find(JSFunction* function) {

View File

@ -1053,7 +1053,7 @@ void Assembler::next(Label* L) {
DCHECK(L->is_linked());
int link = target_at(L->pos());
if (link == L->pos()) {
// Branch target points to the same instuction. This is the end of the link
// Branch target points to the same instruction. This is the end of the link
// chain.
L->Unuse();
} else {
@ -1361,7 +1361,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
DCHECK(offset_8 >= 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
} else if (x.shift_imm_ != 0) {
// Scaled register offsets are not supported, compute the offset seperately
// Scaled register offsets are not supported, compute the offset separately
// to a scratch register.
UseScratchRegisterScope temps(this);
// Allow re-using rd for load instructions if possible.

View File

@ -866,7 +866,7 @@ class Assembler : public AssemblerBase {
// Distance between start of patched debug break slot and the emitted address
// to jump to.
// Patched debug break slot code is:
// ldr ip, [pc, #0] @ emited address and start
// ldr ip, [pc, #0] @ emitted address and start
// blx ip
static constexpr int kPatchDebugBreakSlotAddressOffset = 2 * kInstrSize;

View File

@ -726,7 +726,7 @@ void Decoder::Format(Instruction* instr, const char* format) {
// The disassembler may end up decoding data inlined in the code. We do not want
// it to crash if the data does not ressemble any known instruction.
// it to crash if the data does not resemble any known instruction.
#define VERIFY(condition) \
if(!(condition)) { \
Unknown(instr); \
@ -2602,7 +2602,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
} else if (instruction_bits == kCodeAgeJumpInstruction) {
// The code age prologue has a constant immediatly following the jump
// The code age prologue has a constant immediately following the jump
// instruction.
Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
DecodeType2(instr);

View File

@ -5833,7 +5833,7 @@ void Simulator::Execute() {
}
} else {
// FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
// we reach the particular instuction count.
// we reach the particular instruction count.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;

View File

@ -444,7 +444,7 @@ class Simulator {
};
StopCountAndDesc watched_stops_[kNumOfWatchedStops];
// Syncronization primitives. See ARM DDI 0406C.b, A2.9.
// Synchronization primitives. See ARM DDI 0406C.b, A2.9.
enum class MonitorAccess {
Open,
Exclusive,

View File

@ -171,7 +171,7 @@ typedef uint16_t float16;
V_(ImmAddSub, 21, 10, Bits) \
V_(ShiftAddSub, 23, 22, Bits) \
\
/* Add/substract extend */ \
/* Add/subtract extend */ \
V_(ImmExtendShift, 12, 10, Bits) \
V_(ExtendMode, 15, 13, Bits) \
\

View File

@ -2455,7 +2455,7 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result,
// The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
// representable using a double, so if the result is one of those then we know
// that saturation occured, and we need to manually handle the conversion.
// that saturation occurred, and we need to manually handle the conversion.
//
// It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
// 1 will cause signed overflow.

View File

@ -2390,7 +2390,7 @@ class MacroAssembler : public TurboAssembler {
};
// Use this scope when you need a one-to-one mapping bewteen methods and
// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts the number of instructions
// emitted is what you specified when creating the scope.

View File

@ -373,7 +373,7 @@ class RelocInfo {
NUMBER_OF_MODES,
NONE32, // never recorded 32-bit value
NONE64, // never recorded 64-bit value
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explictly by
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
// code aging.
FIRST_REAL_RELOC_MODE = CODE_TARGET,

View File

@ -333,7 +333,7 @@ FromCheckedNumeric(const internal::CheckedNumeric<int64_t> value);
// checks and returns the result.
V8_BASE_EXPORT int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
// SignedSaturatedSub64(lhs, rhs) substracts |lhs| by |rhs|,
// SignedSaturatedSub64(lhs, rhs) subtracts |lhs| by |rhs|,
// checks and returns the result.
V8_BASE_EXPORT int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);

View File

@ -221,14 +221,14 @@ class CPUInfo final {
delete[] data_;
}
// Extract the content of a the first occurence of a given field in
// Extract the content of the first occurrence of a given field in
// the content of the cpuinfo file and return it as a heap-allocated
// string that must be freed by the caller using delete[].
// Return NULL if not found.
char* ExtractField(const char* field) const {
DCHECK(field != NULL);
// Look for first field occurence, and ensure it starts the line.
// Look for first field occurrence, and ensure it starts the line.
size_t fieldlen = strlen(field);
char* p = data_;
for (;;) {

View File

@ -25,7 +25,7 @@ namespace base {
// the Draft Technical Report on C++ Library Extensions (TR1)).
//
// base::hash is implemented by calling the hash_value function. The namespace
// isn't specified so that it can detect overloads via argument dependant
// isn't specified so that it can detect overloads via argument dependent
// lookup. So if there is a free function hash_value in the same namespace as a
// custom type, it will get called.
//

View File

@ -2401,7 +2401,7 @@ double cbrt(double x) {
* error of about 1 in 16. Adding a bias of -0.03306235651 to the
* (e%3+m)/3 term reduces the error to about 1 in 32. With the IEEE
* floating point representation, for finite positive normal values,
* ordinary integer divison of the value in bits magically gives
* ordinary integer division of the value in bits magically gives
* almost exactly the RHS of the above provided we first subtract the
* exponent bias (1023 for doubles) and later add it back. We do the
* subtraction virtually to keep e >= 0 so that ordinary integer

View File

@ -237,7 +237,7 @@ class V8_BASE_EXPORT OS {
static void StrNCpy(char* dest, int length, const char* src, size_t n);
// Support for the profiler. Can do nothing, in which case ticks
// occuring in shared libraries will not be properly accounted for.
// occurring in shared libraries will not be properly accounted for.
struct SharedLibraryAddress {
SharedLibraryAddress(const std::string& library_path, uintptr_t start,
uintptr_t end)

View File

@ -764,7 +764,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
&runtime);
// We need to be conservative and start with holey because the builtins
// that create output arrays aren't gauranteed to be called for every
// that create output arrays aren't guaranteed to be called for every
// element in the input array (maybe the callback deletes an element).
const ElementsKind elements_kind =
GetHoleyElementsKind(GetInitialFastElementsKind());

View File

@ -619,7 +619,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register args_count = scratch1;
Register return_pc = scratch2;
// Get the arguments + reciever count.
// Get the arguments + receiver count.
__ mov(args_count,
Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ mov(args_count,

View File

@ -195,7 +195,7 @@ static int PrintFunctionSource(CompilationInfo* info,
}
// Print information for the given inlining: which function was inlined and
// where the inlining occured.
// where the inlining occurred.
static void PrintInlinedFunctionInfo(
CompilationInfo* info, int source_id, int inlining_id,
const CompilationInfo::InlinedFunctionHolder& h) {

View File

@ -424,7 +424,7 @@ int BytecodeAnalysis::GetLoopOffsetFor(int offset) const {
if (loop_end_to_header == end_to_header_.end()) {
return -1;
}
// If the header preceeds the offset, this is the loop
// If the header precedes the offset, this is the loop
//
// .> header <--loop_end_to_header
// |

View File

@ -467,7 +467,7 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
return g->UseUniqueSlot(input);
case FrameStateInputKind::kAny:
// Currently deopts "wrap" other operations, so the deopt's inputs
// are potentially needed untill the end of the deoptimising code.
// are potentially needed until the end of the deoptimising code.
return g->UseAnyAtEnd(input);
}
}

View File

@ -573,7 +573,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
value, effect, control);
// Check {value} map agains the {property_cell} map.
// Check {value} map against the {property_cell} map.
effect =
graph()->NewNode(simplified()->CheckMaps(
CheckMapsFlag::kNone,
@ -2289,12 +2289,12 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
DCHECK_EQ(0, receiver_maps->size());
// See if we can infer a concrete type for the {receiver}.
if (InferReceiverMaps(receiver, effect, receiver_maps)) {
// We can assume that the {receiver} still has the infered {receiver_maps}.
// We can assume that the {receiver} still has the inferred {receiver_maps}.
return true;
}
// Try to extract some maps from the {nexus}.
if (nexus.ExtractMaps(receiver_maps) != 0) {
// Try to filter impossible candidates based on infered root map.
// Try to filter impossible candidates based on inferred root map.
Handle<Map> receiver_map;
if (InferReceiverRootMap(receiver).ToHandle(&receiver_map)) {
receiver_maps->erase(

View File

@ -19,7 +19,7 @@ class Factory;
namespace compiler {
// Foward declarations.
// Forward declarations.
class CommonOperatorBuilder;
struct FieldAccess;
class Graph;

View File

@ -1081,7 +1081,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt8(2));
break;
case kMipsCmpS:
// Psuedo-instruction used for FP cmp/branch. No opcode emitted here.
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
case kMipsAddS:
// TODO(plind): add special case: combine mult & add.
@ -1131,7 +1131,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(1));
break;
case kMipsCmpD:
// Psuedo-instruction used for FP cmp/branch. No opcode emitted here.
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
case kMipsAddPair:
__ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
@ -2915,7 +2915,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
Condition cc = kNoCondition;
// MIPS does not have condition code flags, so compare and branch are
// implemented differently than on the other arch's. The compare operations
// emit mips psuedo-instructions, which are checked and handled here.
// emit mips pseudo-instructions, which are checked and handled here.
if (instr->arch_opcode() == kMipsTst) {
cc = FlagsConditionToConditionTst(condition);

View File

@ -1283,7 +1283,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64CmpS:
// Psuedo-instruction used for FP cmp/branch. No opcode emitted here.
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
case kMips64AddS:
// TODO(plind): add special case: combine mult & add.
@ -1337,7 +1337,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(1));
break;
case kMips64CmpD:
// Psuedo-instruction used for FP cmp/branch. No opcode emitted here.
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
case kMips64AddD:
// TODO(plind): add special case: combine mult & add.
@ -3022,7 +3022,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Condition cc = kNoCondition;
// MIPS does not have condition code flags, so compare and branch are
// implemented differently than on the other arch's. The compare operations
// emit mips psuedo-instructions, which are handled here by branch
// emit mips pseudo-instructions, which are handled here by branch
// instructions that do the actual comparison. Essential that the input
// registers to compare pseudo-op are not modified before this branch op, as
// they are tested here.

View File

@ -45,7 +45,7 @@ class V8_EXPORT_PRIVATE MoveOptimizer final {
const Instruction* LastInstruction(const InstructionBlock* block) const;
// Consolidate common moves appearing accross all predecessors of a block.
// Consolidate common moves appearing across all predecessors of a block.
void OptimizeMerge(InstructionBlock* block);
void FinalizeMoves(Instruction* instr);

View File

@ -2234,7 +2234,7 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
// block.
int phi_vreg = phi->virtual_register();
live->Remove(phi_vreg);
// Select a hint from a predecessor block that preceeds this block in the
// Select a hint from a predecessor block that precedes this block in the
// rpo order. In order of priority:
// - Avoid hints from deferred blocks.
// - Prefer hints from allocated (or explicit) operands.

View File

@ -1396,7 +1396,7 @@ class ScheduleLateNodeVisitor {
block->loop_depth(), min_block->id().ToInt());
// Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
// into enclosing loop pre-headers until they would preceed their schedule
// into enclosing loop pre-headers until they would precede their schedule
// early position.
BasicBlock* hoist_block = GetHoistBlock(block);
if (hoist_block &&

View File

@ -489,7 +489,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
// NOTE: The code for computing the value may seem a bit complex at
// first glance. It is structured to use 32-bit multiply-and-add
// loops as long as possible to avoid loosing precision.
// loops as long as possible to avoid losing precision.
double v = 0.0;
bool done = false;

View File

@ -359,7 +359,7 @@ static Local<Value> GetStdout(Isolate* isolate, int child_fd,
// we don't get here before the child has closed stdout and most programs don't
// do that before they exit.
//
// We're disabling usage of waitid in Mac OS X because it doens't work for us:
// We're disabling usage of waitid in Mac OS X because it doesn't work for us:
// a parent process hangs on waiting while a child process is already a zombie.
// See http://code.google.com/p/v8/issues/detail?id=401.
#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__) \

View File

@ -1792,7 +1792,7 @@ namespace {
v8::Local<v8::Context> GetDebugEventContext(Isolate* isolate) {
Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
// Isolate::context() may have been NULL when "script collected" event
// occured.
// occurred.
if (context.is_null()) return v8::Local<v8::Context>();
Handle<Context> native_context(context->native_context());
return v8::Utils::ToLocal(native_context);

View File

@ -29,7 +29,7 @@ var Debug = {};
// comments.
var sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
// Debug events which can occour in the V8 JavaScript engine. These originate
// Debug events which can occur in the V8 JavaScript engine. These originate
// from the API include file debug.h.
Debug.DebugEvent = { Break: 1,
Exception: 2,

View File

@ -3976,7 +3976,7 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
int object_index = slot->object_index();
TranslatedState::ObjectPosition pos = object_positions_[object_index];
// Make sure the duplicate is refering to a previous object.
// Make sure the duplicate is referring to a previous object.
CHECK(pos.frame_index_ < frame_index ||
(pos.frame_index_ == frame_index &&
pos.value_index_ < *value_index - 1));

View File

@ -2265,7 +2265,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
return Just(false);
} else {
// Seach for The Hole in HOLEY_DOUBLE_ELEMENTS
// Search for The Hole in HOLEY_DOUBLE_ELEMENTS
DCHECK_EQ(Subclass::kind(), HOLEY_DOUBLE_ELEMENTS);
auto elements = FixedDoubleArray::cast(receiver->elements());

View File

@ -134,7 +134,7 @@ class FeedbackVector : public HeapObject {
DECL_ACCESSORS(shared_function_info, SharedFunctionInfo)
// [optimized_code_cell]: WeakCell containing optimized code or a Smi marker
// definining optimization behaviour.
// defining optimization behaviour.
DECL_ACCESSORS(optimized_code_cell, Object)
// [length]: The length of the feedback vector (not including the header, i.e.

View File

@ -153,7 +153,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
mutex_.Pointer()->Lock();
if (node->interrupted_) {
// An interrupt occured while the mutex_ was unlocked. Don't wait yet.
// An interrupt occurred while the mutex_ was unlocked. Don't wait yet.
continue;
}

View File

@ -115,7 +115,7 @@ GCIdleTimeAction GCIdleTimeHandler::NothingOrDone(double idle_time_in_ms) {
// a full GC.
// (2) If the context disposal rate is high and we cannot perform a full GC,
// we do nothing until the context disposal rate becomes lower.
// (3) If the new space is almost full and we can affort a scavenge or if the
// (3) If the new space is almost full and we can afford a scavenge or if the
// next scavenge will very likely take long, then a scavenge is performed.
// (4) If sweeping is in progress and we received a large enough idle time
// request, we finalize sweeping here.

View File

@ -493,7 +493,7 @@ Address MemoryAllocator::AllocateAlignedMemory(
if (base == NULL) {
// Failed to commit the body. Release the mapping and any partially
// commited regions inside it.
// committed regions inside it.
reservation.Release();
size_.Decrement(reserve_size);
return NULL;

View File

@ -2714,7 +2714,7 @@ class NewSpace : public Space {
void RecordAllocation(HeapObject* obj);
void RecordPromotion(HeapObject* obj);
// Return whether the operation succeded.
// Return whether the operation succeeded.
bool CommitFromSpaceIfNeeded() {
if (from_space_.is_committed()) return true;
return from_space_.Commit();

View File

@ -44,7 +44,7 @@ class StoreBuffer {
// method takes a lock.
void MoveEntriesToRememberedSet(int index);
// This method ensures that all used store buffer entries are transfered to
// This method ensures that all used store buffer entries are transferred to
// the remembered set.
void MoveAllEntriesToRememberedSet();

View File

@ -1804,7 +1804,7 @@ class Assembler : public AssemblerBase {
byte escape1, byte escape2, byte opcode);
void vinstr(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2,
SIMDPrefix pp, LeadingOpcode m, VexW w);
// Most BMI instructions are similiar.
// Most BMI instructions are similar.
void bmi1(byte op, Register reg, Register vreg, const Operand& rm);
void bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg,
const Operand& rm);

View File

@ -8,7 +8,7 @@ pfeldman@chromium.org
yangguo@chromium.org
# Changes to remote debugging protocol require devtools review to
# ensure backwards compatibility and committment to maintain.
# ensure backwards compatibility and commitment to maintain.
per-file js_protocol.json=set noparent
per-file js_protocol.json=dgozman@chromium.org
per-file js_protocol.json=pfeldman@chromium.org

View File

@ -530,7 +530,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
void WriteSwitch(BytecodeNode* node, BytecodeJumpTable* label);
// Not implemented as the illegal bytecode is used inside internally
// to indicate a bytecode field is not valid or an error has occured
// to indicate a bytecode field is not valid or an error has occurred
// during bytecode generation.
BytecodeArrayBuilder& Illegal();

View File

@ -1008,7 +1008,7 @@ void ReportBootstrappingException(Handle<Object> exception,
// We are bootstrapping and caught an error where the location is set
// and we have a script for the location.
// In this case we could have an extension (or an internal error
// somewhere) and we print out the line number at which the error occured
// somewhere) and we print out the line number at which the error occurred
// to the console for easier debugging.
int line_number =
location->script()->GetLineNumber(location->start_pos()) + 1;

View File

@ -952,7 +952,7 @@ function InnerArraySort(array, length, comparefn) {
obj[i] = UNDEFINED;
}
for (i = length - num_holes; i < length; i++) {
// For compatability with Webkit, do not expose elements in the prototype.
// For compatibility with Webkit, do not expose elements in the prototype.
if (i in %object_get_prototype_of(obj)) {
obj[i] = UNDEFINED;
} else {

View File

@ -748,7 +748,7 @@ function canonicalizeLanguageTag(localeID) {
// ECMA 402 6.2.3
var tag = %CanonicalizeLanguageTag(localeString);
// TODO(jshin): This should not happen because the structual validity
// TODO(jshin): This should not happen because the structural validity
// is already checked. If that's the case, remove this.
if (tag === 'invalid-tag') {
throw %make_range_error(kInvalidLanguageTag, localeString);
@ -797,7 +797,7 @@ function initializeLocaleList(locales) {
}
/**
* Check the structual Validity of the language tag per ECMA 402 6.2.2:
* Check the structural Validity of the language tag per ECMA 402 6.2.2:
* - Well-formed per RFC 5646 2.1
* - There are no duplicate variant subtags
* - There are no duplicate singleton (extension) subtags

View File

@ -404,7 +404,7 @@ void Assembler::CheckForEmitInForbiddenSlot() {
CheckBuffer();
}
if (IsPrevInstrCompactBranch()) {
// Nop instruction to preceed a CTI in forbidden slot:
// Nop instruction to precede a CTI in forbidden slot:
Instr nop = SPECIAL | SLL;
*reinterpret_cast<Instr*>(pc_) = nop;
pc_ += kInstrSize;
@ -417,7 +417,7 @@ void Assembler::CheckForEmitInForbiddenSlot() {
void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
if (IsPrevInstrCompactBranch()) {
if (Instruction::IsForbiddenAfterBranchInstr(x)) {
// Nop instruction to preceed a CTI in forbidden slot:
// Nop instruction to precede a CTI in forbidden slot:
Instr nop = SPECIAL | SLL;
*reinterpret_cast<Instr*>(pc_) = nop;
pc_ += kInstrSize;

View File

@ -575,7 +575,7 @@ class Operand BASE_EMBEDDED {
};
// On MIPS we have only one adressing mode with base_reg + offset.
// On MIPS we have only one addressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
@ -2288,7 +2288,7 @@ class Assembler : public AssemblerBase {
// - space for labels.
//
// Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
// Space for trampoline slots preceeds space for labels. Each label is of one
// Space for trampoline slots precedes space for labels. Each label is of one
// instruction size, so total amount for labels is equal to
// label_count * kInstrSize.
class Trampoline {

View File

@ -556,7 +556,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (!IsMipsArchVariant(kMips32r6)) {
__ c(OLT, D, f12, f14);
__ Movt(v0, t0);
// Use previous check to store conditionally to v0 oposite condition
// Use previous check to store conditionally to v0 opposite condition
// (GREATER). If rhs is equal to lhs, this will be corrected in next
// check.
__ Movf(v0, t1);
@ -1693,7 +1693,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
// Test if less (unordered case is already handled).
__ BranchF(&fpu_lt, NULL, lt, f0, f2);
// Otherwise it's greater, so just fall thru, and return.
// Otherwise it's greater, so just fall through, and return.
DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(GREATER));

View File

@ -208,7 +208,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// one word at a time. Set a2 to count how many bytes we have to copy
// after all the word chunks are copied and a3 to the dst pointer after
// all the word chunks have been copied. We will loop, incrementing a0
// and a1 untill a0 equals a3.
// and a1 until a0 equals a3.
__ bind(&chk1w);
__ andi(a2, t8, loadstore_chunk - 1);
__ beq(a2, t8, &lastb);

View File

@ -1724,7 +1724,7 @@ void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs,
FPURegister scratch) {
// In FP64Mode we do convertion from long.
// In FP64Mode we do conversion from long.
if (IsFp64Mode()) {
mtc1(rs, scratch);
Mthc1(zero_reg, scratch);
@ -3532,7 +3532,7 @@ int TurboAssembler::CallSize(Register target, int16_t offset, Condition cond,
}
// Note: To call gcc-compiled C code on mips, you must call thru t9.
// Note: To call gcc-compiled C code on mips, you must call through t9.
void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
DCHECK(is_int16(offset));
@ -3572,7 +3572,7 @@ void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
#endif
}
// Note: To call gcc-compiled C code on mips, you must call thru t9.
// Note: To call gcc-compiled C code on mips, you must call through t9.
void TurboAssembler::Call(Register target, Register base, int16_t offset,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
@ -5216,7 +5216,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
// The stack must be allign to 0 modulo 8 for stores with sdc1.
// The stack must be aligned to 0 modulo 8 for stores with sdc1.
DCHECK(kDoubleSize == frame_alignment);
if (frame_alignment > 0) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));

View File

@ -487,7 +487,7 @@ class TurboAssembler : public Assembler {
Register scratch);
void PrepareCallCFunction(int num_reg_arguments, Register scratch);
// Arguments 1-4 are placed in registers a0 thru a3 respectively.
// Arguments 1-4 are placed in registers a0 through a3 respectively.
// Arguments 5..n are stored to stack using following:
// sw(t0, CFunctionArgumentOperand(5));

View File

@ -2542,7 +2542,7 @@ static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
result = a;
} else if (b == a) {
// Handle -0.0 == 0.0 case.
// std::signbit() returns int 0 or 1 so substracting MaxMinKind::kMax
// std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
// negates the result.
result = std::signbit(b) - static_cast<int>(kind) ? b : a;
} else {
@ -5707,7 +5707,7 @@ void Simulator::Execute() {
}
} else {
// FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
// we reach the particular instuction count.
// we reach the particular instruction count.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;

View File

@ -379,7 +379,7 @@ void Assembler::CheckForEmitInForbiddenSlot() {
CheckBuffer();
}
if (IsPrevInstrCompactBranch()) {
// Nop instruction to preceed a CTI in forbidden slot:
// Nop instruction to precede a CTI in forbidden slot:
Instr nop = SPECIAL | SLL;
*reinterpret_cast<Instr*>(pc_) = nop;
pc_ += kInstrSize;
@ -392,7 +392,7 @@ void Assembler::CheckForEmitInForbiddenSlot() {
void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
if (IsPrevInstrCompactBranch()) {
if (Instruction::IsForbiddenAfterBranchInstr(x)) {
// Nop instruction to preceed a CTI in forbidden slot:
// Nop instruction to precede a CTI in forbidden slot:
Instr nop = SPECIAL | SLL;
*reinterpret_cast<Instr*>(pc_) = nop;
pc_ += kInstrSize;

View File

@ -580,7 +580,7 @@ class Operand BASE_EMBEDDED {
};
// On MIPS we have only one adressing mode with base_reg + offset.
// On MIPS we have only one addressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
@ -2338,7 +2338,7 @@ class Assembler : public AssemblerBase {
// - space for labels.
//
// Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
// Space for trampoline slots preceeds space for labels. Each label is of one
// Space for trampoline slots precedes space for labels. Each label is of one
// instruction size, so total amount for labels is equal to
// label_count * kInstrSize.
class Trampoline {

View File

@ -554,7 +554,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (kArchVariant != kMips64r6) {
__ c(OLT, D, f12, f14);
__ Movt(v0, a4);
// Use previous check to store conditionally to v0 oposite condition
// Use previous check to store conditionally to v0 opposite condition
// (GREATER). If rhs is equal to lhs, this will be corrected in next
// check.
__ Movf(v0, a5);
@ -1698,7 +1698,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
// Test if less (unordered case is already handled).
__ BranchF(&fpu_lt, NULL, lt, f0, f2);
// Otherwise it's greater, so just fall thru, and return.
// Otherwise it's greater, so just fall through, and return.
DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(GREATER));

View File

@ -209,7 +209,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// one word at a time. Set a2 to count how many bytes we have to copy
// after all the word chunks are copied and a3 to the dst pointer after
// all the word chunks have been copied. We will loop, incrementing a0
// and a1 untill a0 equals a3.
// and a1 until a0 equals a3.
__ bind(&chk1w);
__ andi(a2, t8, loadstore_chunk - 1);
__ beq(a2, t8, &lastb);

View File

@ -2895,7 +2895,7 @@ void Decoder::DecodeTypeMsa2RF(Instruction* instr) {
// Disassemble the instruction at *instr_ptr into the output buffer.
// All instructions are one word long, except for the simulator
// psuedo-instruction stop(msg). For that one special case, we return
// pseudo-instruction stop(msg). For that one special case, we return
// size larger than one kInstrSize.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);

View File

@ -3895,7 +3895,7 @@ int TurboAssembler::CallSize(Register target, Condition cond, Register rs,
}
// Note: To call gcc-compiled C code on mips, you must call thru t9.
// Note: To call gcc-compiled C code on mips, you must call through t9.
void TurboAssembler::Call(Register target, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bd) {
#ifdef DEBUG

View File

@ -529,7 +529,7 @@ class TurboAssembler : public Assembler {
Register scratch);
void PrepareCallCFunction(int num_reg_arguments, Register scratch);
// Arguments 1-4 are placed in registers a0 thru a3 respectively.
// Arguments 1-4 are placed in registers a0 through a3 respectively.
// Arguments 5..n are stored to stack using following:
// Sw(a4, CFunctionArgumentOperand(5));

View File

@ -2488,7 +2488,7 @@ static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
result = a;
} else if (b == a) {
// Handle -0.0 == 0.0 case.
// std::signbit() returns int 0 or 1 so substracting MaxMinKind::kMax
// std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
// negates the result.
result = std::signbit(b) - static_cast<int>(kind) ? b : a;
} else {
@ -6031,7 +6031,7 @@ void Simulator::Execute() {
}
} else {
// FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
// we reach the particular instuction count.
// we reach the particular instruction count.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;

View File

@ -13762,7 +13762,7 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
// Disable optimization for the shared function info and mark the
// code as non-optimizable. The marker on the shared function info
// is there because we flush non-optimized code thereby loosing the
// is there because we flush non-optimized code thereby losing the
// non-optimizable information for the code. When the code is
// regenerated and set on the shared function info it is marked as
// non-optimizable if optimization is disabled for the shared

View File

@ -509,7 +509,7 @@ class Utf8ChunkSource : public ChunkSource {
if (V8_LIKELY(t < kUtf8Bom)) {
decoded_data[decoded_len++] = static_cast<uc16>(t);
} else if (V8_UNLIKELY(is_at_first_char_ && t == kUtf8Bom)) {
// Skip BOM at the beggining of the stream
// Skip BOM at the beginning of the stream
is_at_first_char_ = false;
} else if (t <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
decoded_data[decoded_len++] = static_cast<uc16>(t);

View File

@ -1675,7 +1675,7 @@ bool Scanner::ScanRegExpPattern() {
// or not, since any '/', '\\' or '[' is guaranteed to not be part
// of the escape sequence.
// TODO(896): At some point, parse RegExps more throughly to capture
// TODO(896): At some point, parse RegExps more thoroughly to capture
// octal esacpes in strict mode.
} else { // Unescaped character.
if (c0_ == '[') in_character_class = true;

View File

@ -871,8 +871,8 @@ class Assembler : public AssemblerBase {
void b(Label* L, LKBit lk = LeaveLK) { b(branch_offset(L), lk); }
inline CRegister cmpi_optimization(CRegister cr) {
// Check whether the branch is preceeded by an optimizable cmpi against 0.
// The cmpi can be deleted if it is also preceeded by an instruction that
// Check whether the branch is preceded by an optimizable cmpi against 0.
// The cmpi can be deleted if it is also preceded by an instruction that
// sets the register used by the compare and supports a dot form.
unsigned int sradi_mask = kOpcodeMask | kExt2OpcodeVariant2Mask;
unsigned int srawi_mask = kOpcodeMask | kExt2OpcodeMask;

View File

@ -345,7 +345,7 @@ void Decoder::Format(Instruction* instr, const char* format) {
// The disassembler may end up decoding data inlined in the code. We do not want
// it to crash if the data does not ressemble any known instruction.
// it to crash if the data does not resemble any known instruction.
#define VERIFY(condition) \
if (!(condition)) { \
Unknown(instr); \

View File

@ -1274,7 +1274,7 @@ class MacroAssembler : public TurboAssembler {
#if V8_TARGET_ARCH_PPC64
// Ensure it is permissable to read/write int value directly from
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);

View File

@ -4059,7 +4059,7 @@ void Simulator::Execute() {
}
} else {
// FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
// we reach the particular instuction count.
// we reach the particular instruction count.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;

View File

@ -402,7 +402,7 @@ class Simulator {
};
StopCountAndDesc watched_stops_[kNumOfWatchedStops];
// Syncronization primitives. See ARM DDI 0406C.b, A2.9.
// Synchronization primitives. See ARM DDI 0406C.b, A2.9.
enum class MonitorAccess {
Open,
Exclusive,

View File

@ -1252,7 +1252,7 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
}
// SET_REGISTER is currently only used for newly introduced loop
// counters. They can have a significant previous value if they
// occour in a loop. TODO(lrn): Propagate this information, so
// occur in a loop. TODO(lrn): Propagate this information, so
// we can set undo_action to IGNORE if we know there is no value to
// restore.
undo_action = RESTORE;

View File

@ -238,7 +238,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder = state->builder();
builder->AddAtom(body);
// For compatability with JSC and ES3, we allow quantifiers after
// For compatibility with JSC and ES3, we allow quantifiers after
// lookaheads, and break in all cases.
break;
}

View File

@ -325,7 +325,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
// Make sure to set the current context to the context before the debugger was
// entered (if the debugger is entered). The reason for switching context here
// is that for some property lookups (accessors and interceptors) callbacks
// into the embedding application can occour, and the embedding application
// into the embedding application can occur, and the embedding application
// could have the assumption that its own native context is the current
// context and not some internal debugger context.
SaveContext save(isolate);

View File

@ -643,7 +643,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
int expected_parts = (compiled_replacement.parts() + 1) * 4 + 1;
ReplacementStringBuilder builder(isolate->heap(), subject, expected_parts);
// Number of parts added by compiled replacement plus preceeding
// Number of parts added by compiled replacement plus preceding
// string and possibly suffix after last match. It is possible for
// all components to use two elements when encoded as two smis.
const int parts_added_per_loop = 2 * (compiled_replacement.parts() + 2);

View File

@ -105,7 +105,7 @@ static bool supportsSTFLE() {
static uint32_t auxv_hwcap = 0;
if (!read_tried) {
// Open the AUXV (auxilliary vector) psuedo-file
// Open the AUXV (auxiliary vector) pseudo-file
int fd = open("/proc/self/auxv", O_RDONLY);
read_tried = true;
@ -1472,7 +1472,7 @@ void Assembler::risbgn(Register dst, Register src, const Operand& startBit,
// ---------------------------
// Move Character Instructions
// ---------------------------
// Move charactor - mem to mem operation
// Move character - mem to mem operation
void Assembler::mvc(const MemOperand& opnd1, const MemOperand& opnd2,
uint32_t length) {
ss_form(MVC, length - 1, opnd1.getBaseRegister(), opnd1.getDisplacement(),

View File

@ -218,7 +218,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
// We need to push lr on stack so that GenerateMakeCodeYoungAgainCommon
// knows where to pick up the return address
//
// Since we can no longer guarentee ip will hold the branch address
// Since we can no longer guarantee ip will hold the branch address
// because of BRASL, use Call so that GenerateMakeCodeYoungAgainCommon
// can calculate the branch address offset
patcher.masm()->nop(); // marker to detect sequence (see IsOld)

View File

@ -472,7 +472,7 @@ void Decoder::Format(Instruction* instr, const char* format) {
}
// The disassembler may end up decoding data inlined in the code. We do not want
// it to crash if the data does not ressemble any known instruction.
// it to crash if the data does not resemble any known instruction.
#define VERIFY(condition) \
if (!(condition)) { \
Unknown(instr); \

View File

@ -1438,7 +1438,7 @@ class MacroAssembler : public TurboAssembler {
void AssertSmi(Register object);
#if V8_TARGET_ARCH_S390X
// Ensure it is permissable to read/write int value directly from
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);

View File

@ -2408,7 +2408,7 @@ void Simulator::PrintStopInfo(uint32_t code) {
#define CheckOverflowForIntSub(src1, src2, type) \
OverflowFromSigned<type>(src1 - src2, src1, src2, false);
// Method for checking overflow on unsigned addtion
// Method for checking overflow on unsigned addition
#define CheckOverflowForUIntAdd(src1, src2) \
((src1) + (src2) < (src1) || (src1) + (src2) < (src2))
@ -2514,7 +2514,7 @@ void Simulator::Execute() {
}
} else {
// FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
// we reach the particular instuction count.
// we reach the particular instruction count.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
@ -4177,7 +4177,7 @@ EVALUATE(STM) {
// Store Multiple 32-bits.
int offset = d2;
// Regs roll around if r3 is less than r1.
// Artifically increase r3 by 16 so we can calculate
// Artificially increase r3 by 16 so we can calculate
// the number of regs stored properly.
if (r3 < r1) r3 += 16;
@ -4259,7 +4259,7 @@ EVALUATE(LM) {
// Store Multiple 32-bits.
int offset = d2;
// Regs roll around if r3 is less than r1.
// Artifically increase r3 by 16 so we can calculate
// Artificially increase r3 by 16 so we can calculate
// the number of regs stored properly.
if (r3 < r1) r3 += 16;
@ -8782,7 +8782,7 @@ EVALUATE(LMG) {
int offset = d2;
// Regs roll around if r3 is less than r1.
// Artifically increase r3 by 16 so we can calculate
// Artificially increase r3 by 16 so we can calculate
// the number of regs stored properly.
if (r3 < r1) r3 += 16;
@ -8948,7 +8948,7 @@ EVALUATE(STMG) {
int offset = d2;
// Regs roll around if r3 is less than r1.
// Artifically increase r3 by 16 so we can calculate
// Artificially increase r3 by 16 so we can calculate
// the number of regs stored properly.
if (r3 < r1) r3 += 16;
@ -9159,7 +9159,7 @@ EVALUATE(STMY) {
int offset = d2;
// Regs roll around if r3 is less than r1.
// Artifically increase r3 by 16 so we can calculate
// Artificially increase r3 by 16 so we can calculate
// the number of regs stored properly.
if (r3 < r1) r3 += 16;
@ -9186,7 +9186,7 @@ EVALUATE(LMY) {
int offset = d2;
// Regs roll around if r3 is less than r1.
// Artifically increase r3 by 16 so we can calculate
// Artificially increase r3 by 16 so we can calculate
// the number of regs stored properly.
if (r3 < r1) r3 += 16;

View File

@ -39,7 +39,7 @@ void AddAndSetEntry(PositionTableEntry& value,
value.is_statement = other.is_statement;
}
// Helper: Substract the offsets from 'other' from 'value'.
// Helper: Subtract the offsets from 'other' from 'value'.
void SubtractFromEntry(PositionTableEntry& value,
const PositionTableEntry& other) {
value.code_offset -= other.code_offset;

View File

@ -3758,7 +3758,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
/* These requests allow control to move from the simulated CPU to the
real CPU, calling an arbitary function.
real CPU, calling an arbitrary function.
Note that the current ThreadId is inserted as the first argument.
So this call:

View File

@ -203,11 +203,11 @@ typedef struct _iJIT_Method_NIDS
typedef struct _LineNumberInfo
{
/* x86 Offset from the begining of the method*/
unsigned int Offset;
/* x86 Offset from the beginning of the method*/
unsigned int Offset;
/* source line number from the begining of the source file */
unsigned int LineNumber;
/* source line number from the beginning of the source file */
unsigned int LineNumber;
} *pLineNumberInfo, LineNumberInfo;
@ -232,7 +232,7 @@ typedef struct _iJIT_Method_Load
/* Line Table size in number of entries - Zero if none */
unsigned int line_number_size;
/* Pointer to the begining of the line numbers info array */
/* Pointer to the beginning of the line numbers info array */
pLineNumberInfo line_number_table;
/* unique class ID */

View File

@ -134,7 +134,7 @@ static JITCodeLineInfo* UntagLineInfo(void* ptr) {
// The parameter str is a mixed pattern which contains the
// function name and some other info. It comes from all the
// Logger::CodeCreateEvent(...) function. This funtion get the
// Logger::CodeCreateEvent(...) function. This function get the
// pure function name from the input parameter.
static char* GetFunctionNameFromMixedName(const char* str, int length) {
int index = 0;

View File

@ -455,7 +455,7 @@ class WasmDecoder : public Decoder {
}
case kExprTry:
case kExprIf: // fall thru
case kExprIf: // fall through
case kExprLoop:
case kExprBlock: {
BlockTypeOperand<true> operand(decoder, pc);

View File

@ -189,9 +189,9 @@ class InterpreterHandle {
return interpreter()->GetThread(0)->GetFrameCount();
}
// Returns true if exited regularly, false if a trap/exception occured and was
// not handled inside this activation. In the latter case, a pending exception
// will have been set on the isolate.
// Returns true if exited regularly, false if a trap/exception occurred and
// was not handled inside this activation. In the latter case, a pending
// exception will have been set on the isolate.
bool Execute(Handle<WasmInstanceObject> instance_object,
Address frame_pointer, uint32_t func_index,
uint8_t* arg_buffer) {

View File

@ -525,9 +525,9 @@ void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
// Then triggers WasmCompiledModule::CompileLazy, taking care of correctly
// patching the call site or indirect function tables.
// Returns either the Code object that has been lazily compiled, or Illegal if
// an error occured. In the latter case, a pending exception has been set, which
// will be triggered when returning from the runtime function, i.e. the Illegal
// builtin will never be called.
// an error occurred. In the latter case, a pending exception has been set,
// which will be triggered when returning from the runtime function, i.e. the
// Illegal builtin will never be called.
Handle<Code> CompileLazy(Isolate* isolate);
// This class orchestrates the lazy compilation of wasm functions. It is

View File

@ -617,11 +617,11 @@ class WasmDebugInfo : public FixedArray {
void PrepareStep(StepAction);
// Execute the specified funtion in the interpreter. Read arguments from
// Execute the specified function in the interpreter. Read arguments from
// arg_buffer.
// The frame_pointer will be used to identify the new activation of the
// interpreter for unwinding and frame inspection.
// Returns true if exited regularly, false if a trap occured. In the latter
// Returns true if exited regularly, false if a trap occurred. In the latter
// case, a pending exception will have been set on the isolate.
bool RunInterpreter(Address frame_pointer, int func_index,
uint8_t* arg_buffer);

View File

@ -4794,7 +4794,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!serializer_enabled() && !emit_debug_code()) {
return;
} else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) {
// Don't record psuedo relocation info for code age sequence mode.
// Don't record pseudo relocation info for code age sequence mode.
return;
}
RelocInfo rinfo(pc_, rmode, data, NULL);

View File

@ -2492,7 +2492,7 @@ class Assembler : public AssemblerBase {
arithmetic_op(0x31, src, dst, size);
}
// Most BMI instructions are similiar.
// Most BMI instructions are similar.
void bmi1q(byte op, Register reg, Register vreg, Register rm);
void bmi1q(byte op, Register reg, Register vreg, const Operand& rm);
void bmi1l(byte op, Register reg, Register vreg, Register rm);

View File

@ -951,7 +951,7 @@ class MacroAssembler : public TurboAssembler {
Label* on_not_smi_result = NULL,
Label::Distance near_jump = Label::kFar);
// Shifts a smi value to the right, shifting in zero bits at the top, and
// returns the unsigned intepretation of the result if that is a smi.
// returns the unsigned interpretation of the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
void SmiShiftLogicalRight(Register dst,
Register src1,
@ -959,7 +959,7 @@ class MacroAssembler : public TurboAssembler {
Label* on_not_smi_result,
Label::Distance near_jump = Label::kFar);
// Shifts a smi value to the right, sign extending the top, and
// returns the signed intepretation of the result. That will always
// returns the signed interpretation of the result. That will always
// be a valid smi value, since it's numerically smaller than the
// original.
// Uses and clobbers rcx, so dst may not be rcx.

View File

@ -25,7 +25,7 @@ class ReverseZoneChunkListIterator;
// collection that
// * needs to grow indefinitely,
// * will mostly grow at the back, but may sometimes grow in front as well
// (preferrably in batches),
// (preferably in batches),
// * needs to have very low overhead,
// * offers forward- and backwards-iteration,
// * offers relatively fast seeking,

View File

@ -171,7 +171,7 @@ Address Zone::NewExpand(size_t size) {
Address result = RoundUp(segment->start(), kAlignmentInBytes);
position_ = result + size;
// Check for address overflow.
// (Should not happen since the segment is guaranteed to accomodate
// (Should not happen since the segment is guaranteed to accommodate
// size bytes + header and alignment padding)
DCHECK(reinterpret_cast<uintptr_t>(position_) >=
reinterpret_cast<uintptr_t>(result));

View File

@ -678,11 +678,11 @@ TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
HeapObject* filler =
HeapObject::FromAddress(array->address() + array->Size());
CHECK(filler->IsFreeSpace());
size_t shrinked = page->ShrinkToHighWaterMark();
size_t should_have_shrinked =
size_t shrunk = page->ShrinkToHighWaterMark();
size_t should_have_shrunk =
RoundDown(static_cast<size_t>(Page::kAllocatableMemory - array->Size()),
base::OS::CommitPageSize());
CHECK_EQ(should_have_shrinked, shrinked);
CHECK_EQ(should_have_shrunk, shrunk);
}
TEST(ShrinkPageToHighWaterMarkNoFiller) {
@ -702,8 +702,8 @@ TEST(ShrinkPageToHighWaterMarkNoFiller) {
CcTest::heap()->old_space()->ResetFreeList();
CcTest::heap()->old_space()->EmptyAllocationInfo();
const size_t shrinked = page->ShrinkToHighWaterMark();
CHECK_EQ(0u, shrinked);
const size_t shrunk = page->ShrinkToHighWaterMark();
CHECK_EQ(0u, shrunk);
}
TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
@ -728,8 +728,8 @@ TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
HeapObject::FromAddress(array->address() + array->Size());
CHECK_EQ(filler->map(), CcTest::heap()->one_pointer_filler_map());
const size_t shrinked = page->ShrinkToHighWaterMark();
CHECK_EQ(0u, shrinked);
const size_t shrunk = page->ShrinkToHighWaterMark();
CHECK_EQ(0u, shrunk);
}
TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
@ -754,8 +754,8 @@ TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
HeapObject::FromAddress(array->address() + array->Size());
CHECK_EQ(filler->map(), CcTest::heap()->two_pointer_filler_map());
const size_t shrinked = page->ShrinkToHighWaterMark();
CHECK_EQ(0u, shrinked);
const size_t shrunk = page->ShrinkToHighWaterMark();
CHECK_EQ(0u, shrunk);
}
} // namespace internal

View File

@ -9724,7 +9724,7 @@ static void UnreachableGetter(
static void UnreachableSetter(Local<String>,
Local<Value>,
const v8::PropertyCallbackInfo<void>&) {
CHECK(false); // This function should nto be called.
CHECK(false); // This function should not be called.
}

View File

@ -4052,7 +4052,7 @@ TEST(NoBreakWhenBootstrapping) {
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate, &extensions);
}
// Check that no DebugBreak events occured during the context creation.
// Check that no DebugBreak events occurred during the context creation.
CHECK_EQ(0, break_point_hit_count);
// Get rid of the debug event listener.
@ -5176,7 +5176,7 @@ TEST(RegExpDebugBreak) {
frame_function_name_source,
"frame_function_name");
// Test RegExp which matches white spaces and comments at the begining of a
// Test RegExp which matches white spaces and comments at the beginning of a
// source line.
const char* script =
"var sourceLineBeginningSkip = /^(?:[ \\v\\h]*(?:\\/\\*.*?\\*\\/)*)*/;\n"
@ -5621,7 +5621,7 @@ static void DebugEventContextChecker(const v8::Debug::EventDetails& details) {
}
// Check that event details contain context where debug event occured.
// Check that event details contain context where debug event occurred.
TEST(DebugEventContext) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);

View File

@ -525,7 +525,7 @@ TEST(SubclassBasicNoInlineNew) {
}
// Creates class hierachy of length matching the |hierarchy_desc| length and
// Creates class hierarchy of length matching the |hierarchy_desc| length and
// with the number of fields at i'th level equal to |hierarchy_desc[i]|.
static void CreateClassHierarchy(const std::vector<int>& hierarchy_desc) {
std::ostringstream os;

View File

@ -702,7 +702,7 @@ TEST(OrderedHashMapDeletion) {
CHECK(!OrderedHashMap::HasKey(isolate, *map, *key2));
CHECK(!OrderedHashMap::HasKey(isolate, *map, *key3));
// Delete non existant key from non new hash table
// Delete non existent key from non new hash table
CHECK(!OrderedHashMap::Delete(isolate, *map, *key3));
Verify(map);
CHECK_EQ(2, map->NumberOfBuckets());
@ -712,7 +712,7 @@ TEST(OrderedHashMapDeletion) {
CHECK(!OrderedHashMap::HasKey(isolate, *map, *key2));
CHECK(!OrderedHashMap::HasKey(isolate, *map, *key3));
// Delete non existant key from non empty hash table
// Delete non existent key from non empty hash table
map = OrderedHashMap::Shrink(map);
map = OrderedHashMap::Add(map, key1, value);
Verify(map);
@ -852,7 +852,7 @@ TEST(OrderedHashSetDeletion) {
CHECK(!OrderedHashSet::HasKey(isolate, *set, *key2));
CHECK(!OrderedHashSet::HasKey(isolate, *set, *key3));
// Delete non existant key from non new hash table
// Delete non existent key from non new hash table
CHECK(!OrderedHashSet::Delete(isolate, *set, *key3));
Verify(set);
CHECK_EQ(2, set->NumberOfBuckets());
@ -862,7 +862,7 @@ TEST(OrderedHashSetDeletion) {
CHECK(!OrderedHashSet::HasKey(isolate, *set, *key2));
CHECK(!OrderedHashSet::HasKey(isolate, *set, *key3));
// Delete non existant key from non empty hash table
// Delete non existent key from non empty hash table
set = OrderedHashSet::Shrink(set);
set = OrderedHashSet::Add(set, key1);
Verify(set);

Some files were not shown because too many files have changed in this diff Show More