Rename ASSERT* to DCHECK*.

This way we don't clash with the ASSERT* macros defined by GoogleTest, and we
are one step closer to being able to replace our homegrown base/ with base/
from Chrome.

R=jochen@chromium.org, svenpanne@chromium.org

Review URL: https://codereview.chromium.org/430503007

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22812 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
This commit is contained in:
parent f86ef7c9ef
commit d07a2eb806
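The change below is mechanical. A minimal sketch of the pattern applied on
every touched line (illustrative only; the actual macro definitions are not
part of this diff, and the variable names here are made up):

    // Before: V8's homegrown macros collided with GoogleTest's ASSERT_*
    // family (ASSERT_EQ, ASSERT_TRUE, ...), which blocked sharing base/.
    ASSERT(isolate != NULL);
    ASSERT_EQ(expected, actual);

    // After: Chrome-style debug-only checks, with no name clash.
    DCHECK(isolate != NULL);
    DCHECK_EQ(expected, actual);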
@@ -165,7 +165,7 @@ Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
                                         Handle<Object> value) {
   if (value->IsNumber() || !value->IsJSValue()) return value;
   Handle<JSValue> wrapper = Handle<JSValue>::cast(value);
-  ASSERT(wrapper->GetIsolate()->native_context()->number_function()->
+  DCHECK(wrapper->GetIsolate()->native_context()->number_function()->
       has_initial_map());
   if (wrapper->map() == isolate->number_function()->initial_map()) {
     return handle(wrapper->value(), isolate);
@@ -554,10 +554,10 @@ void Accessors::ScriptLineEndsGetter(
   Handle<Script> script(
       Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
   Script::InitLineEnds(script);
-  ASSERT(script->line_ends()->IsFixedArray());
+  DCHECK(script->line_ends()->IsFixedArray());
   Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
   // We do not want anyone to modify this array from JS.
-  ASSERT(*line_ends == isolate->heap()->empty_fixed_array() ||
+  DCHECK(*line_ends == isolate->heap()->empty_fixed_array() ||
          line_ends->map() == isolate->heap()->fixed_cow_array_map());
   Handle<JSArray> js_array =
       isolate->factory()->NewJSArrayWithElements(line_ends);
@@ -859,7 +859,7 @@ static Handle<Object> SetFunctionPrototype(Isolate* isolate,
   }
 
   JSFunction::SetPrototype(function, value);
-  ASSERT(function->prototype() == *value);
+  DCHECK(function->prototype() == *value);
 
   if (is_observed && !old_value->SameValue(*value)) {
     JSObject::EnqueueChangeRecord(
@@ -877,7 +877,7 @@ Handle<Object> Accessors::FunctionGetPrototype(Handle<JSFunction> function) {
 
 Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
                                                Handle<Object> prototype) {
-  ASSERT(function->should_have_prototype());
+  DCHECK(function->should_have_prototype());
   Isolate* isolate = function->GetIsolate();
   return SetFunctionPrototype(isolate, function, prototype);
 }
@@ -1095,7 +1095,7 @@ Handle<Object> GetFunctionArguments(Isolate* isolate,
     Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
 
     // Copy the parameters to the arguments object.
-    ASSERT(array->length() == length);
+    DCHECK(array->length() == length);
     for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
     arguments->set_elements(*array);
 
@@ -1196,7 +1196,7 @@ class FrameFunctionIterator {
     if (frame_iterator_.done()) return;
     JavaScriptFrame* frame = frame_iterator_.frame();
     frame->GetFunctions(&functions_);
-    ASSERT(functions_.length() > 0);
+    DCHECK(functions_.length() > 0);
     frame_iterator_.Advance();
     index_ = functions_.length() - 1;
   }
@@ -1304,7 +1304,7 @@ static void ModuleGetExport(
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
   Context* context = Context::cast(instance->context());
-  ASSERT(context->IsModuleContext());
+  DCHECK(context->IsModuleContext());
   int slot = info.Data()->Int32Value();
   Object* value = context->get(slot);
   Isolate* isolate = instance->GetIsolate();
@@ -1325,7 +1325,7 @@ static void ModuleSetExport(
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
   Context* context = Context::cast(instance->context());
-  ASSERT(context->IsModuleContext());
+  DCHECK(context->IsModuleContext());
   int slot = info.Data()->Int32Value();
   Object* old_value = context->get(slot);
   if (old_value->IsTheHole()) {
@@ -20,7 +20,7 @@ Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() {
              static_cast<void*>(*scope_site));
     }
   } else {
-    ASSERT(!current().is_null());
+    DCHECK(!current().is_null());
     scope_site = isolate()->factory()->NewAllocationSite();
     if (FLAG_trace_creation_allocation_sites) {
       PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n",
@@ -31,7 +31,7 @@ Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() {
     current()->set_nested_site(*scope_site);
     update_current_site(*scope_site);
   }
-  ASSERT(!scope_site.is_null());
+  DCHECK(!scope_site.is_null());
   return scope_site;
 }
 
@@ -75,7 +75,7 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
       // Advance current site
      Object* nested_site = current()->nested_site();
      // Something is wrong if we advance to the end of the list here.
-      ASSERT(nested_site->IsAllocationSite());
+      DCHECK(nested_site->IsAllocationSite());
       update_current_site(AllocationSite::cast(nested_site));
     }
     return Handle<AllocationSite>(*current(), isolate());
@@ -85,7 +85,7 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
                                Handle<JSObject> object) {
     // This assert ensures that we are pointing at the right sub-object in a
     // recursive walk of a nested literal.
-    ASSERT(object.is_null() || *object == scope_site->transition_info());
+    DCHECK(object.is_null() || *object == scope_site->transition_info());
   }
 
   bool ShouldCreateMemento(Handle<JSObject> object);
@@ -228,8 +228,8 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
   // Mark the new block as FreeSpace to make sure the heap is iterable
   // while we are capturing stack trace.
   FreeListNode::FromAddress(addr)->set_size(heap, size);
-  ASSERT_EQ(HeapObject::FromAddress(addr)->Size(), size);
-  ASSERT(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
+  DCHECK_EQ(HeapObject::FromAddress(addr)->Size(), size);
+  DCHECK(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
 
   Isolate* isolate = heap->isolate();
   int length = 0;
@@ -83,7 +83,7 @@ char* StrNDup(const char* str, int n) {
 
 
 void* AlignedAlloc(size_t size, size_t alignment) {
-  ASSERT(IsPowerOf2(alignment) && alignment >= V8_ALIGNOF(void*));  // NOLINT
+  DCHECK(IsPowerOf2(alignment) && alignment >= V8_ALIGNOF(void*));  // NOLINT
   void* ptr;
 #if V8_OS_WIN
   ptr = _aligned_malloc(size, alignment);
--- a/src/api.cc
+++ b/src/api.cc
@@ -51,7 +51,7 @@
 #define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
 
 #define ENTER_V8(isolate) \
-  ASSERT((isolate)->IsInitialized()); \
+  DCHECK((isolate)->IsInitialized()); \
   i::VMState<i::OTHER> __state__((isolate))
 
 namespace v8 {
@@ -65,7 +65,7 @@ namespace v8 {
 
 #define EXCEPTION_PREAMBLE(isolate) \
   (isolate)->handle_scope_implementer()->IncrementCallDepth(); \
-  ASSERT(!(isolate)->external_caught_exception()); \
+  DCHECK(!(isolate)->external_caught_exception()); \
   bool has_pending_exception = false
 
 
@@ -255,7 +255,7 @@ int StartupDataDecompressor::Decompress() {
                                 compressed_data[i].compressed_size);
       if (result != 0) return result;
     } else {
-      ASSERT_EQ(0, compressed_data[i].raw_size);
+      DCHECK_EQ(0, compressed_data[i].raw_size);
     }
     compressed_data[i].data = decompressed;
   }
@@ -325,24 +325,24 @@ void V8::GetCompressedStartupData(StartupData* compressed_data) {
 
 void V8::SetDecompressedStartupData(StartupData* decompressed_data) {
 #ifdef COMPRESS_STARTUP_DATA_BZ2
-  ASSERT_EQ(i::Snapshot::raw_size(), decompressed_data[kSnapshot].raw_size);
+  DCHECK_EQ(i::Snapshot::raw_size(), decompressed_data[kSnapshot].raw_size);
   i::Snapshot::set_raw_data(
       reinterpret_cast<const i::byte*>(decompressed_data[kSnapshot].data));
 
-  ASSERT_EQ(i::Snapshot::context_raw_size(),
+  DCHECK_EQ(i::Snapshot::context_raw_size(),
             decompressed_data[kSnapshotContext].raw_size);
   i::Snapshot::set_context_raw_data(
       reinterpret_cast<const i::byte*>(
           decompressed_data[kSnapshotContext].data));
 
-  ASSERT_EQ(i::Natives::GetRawScriptsSize(),
+  DCHECK_EQ(i::Natives::GetRawScriptsSize(),
             decompressed_data[kLibraries].raw_size);
   i::Vector<const char> libraries_source(
       decompressed_data[kLibraries].data,
       decompressed_data[kLibraries].raw_size);
   i::Natives::SetRawScriptsSource(libraries_source);
 
-  ASSERT_EQ(i::ExperimentalNatives::GetRawScriptsSize(),
+  DCHECK_EQ(i::ExperimentalNatives::GetRawScriptsSize(),
             decompressed_data[kExperimentalLibraries].raw_size);
   i::Vector<const char> exp_libraries_source(
       decompressed_data[kExperimentalLibraries].data,
@@ -502,7 +502,7 @@ bool SetResourceConstraints(Isolate* v8_isolate,
   if (semi_space_size != 0 || old_space_size != 0 ||
       max_executable_size != 0 || code_range_size != 0) {
     // After initialization it's too late to change Heap constraints.
-    ASSERT(!isolate->IsInitialized());
+    DCHECK(!isolate->IsInitialized());
     bool result = isolate->heap()->ConfigureHeap(semi_space_size,
                                                  old_space_size,
                                                  max_executable_size,
@@ -611,7 +611,7 @@ i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
 
 i::Object** HandleScope::CreateHandle(i::HeapObject* heap_object,
                                       i::Object* value) {
-  ASSERT(heap_object->IsHeapObject());
+  DCHECK(heap_object->IsHeapObject());
   return i::HandleScope::CreateHandle(heap_object->GetIsolate(), value);
 }
 
@@ -714,7 +714,7 @@ void Context::SetEmbedderData(int index, v8::Handle<Value> value) {
   if (data.is_null()) return;
   i::Handle<i::Object> val = Utils::OpenHandle(*value);
   data->set(index, *val);
-  ASSERT_EQ(*Utils::OpenHandle(*value),
+  DCHECK_EQ(*Utils::OpenHandle(*value),
             *Utils::OpenHandle(*GetEmbedderData(index)));
 }
 
@@ -731,7 +731,7 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
   const char* location = "v8::Context::SetAlignedPointerInEmbedderData()";
   i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
   data->set(index, EncodeAlignedAsSmi(value, location));
-  ASSERT_EQ(value, GetAlignedPointerFromEmbedderData(index));
+  DCHECK_EQ(value, GetAlignedPointerFromEmbedderData(index));
 }
 
 
@@ -768,8 +768,8 @@ int NeanderArray::length() {
 
 
 i::Object* NeanderArray::get(int offset) {
-  ASSERT(0 <= offset);
-  ASSERT(offset < length());
+  DCHECK(0 <= offset);
+  DCHECK(offset < length());
   return obj_.get(offset + 1);
 }
 
@@ -851,11 +851,11 @@ void Template::SetAccessorProperty(
     v8::PropertyAttribute attribute,
     v8::AccessControl access_control) {
   // TODO(verwaest): Remove |access_control|.
-  ASSERT_EQ(v8::DEFAULT, access_control);
+  DCHECK_EQ(v8::DEFAULT, access_control);
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ENTER_V8(isolate);
-  ASSERT(!name.IsEmpty());
-  ASSERT(!getter.IsEmpty() || !setter.IsEmpty());
+  DCHECK(!name.IsEmpty());
+  DCHECK(!getter.IsEmpty() || !setter.IsEmpty());
   i::HandleScope scope(isolate);
   const int kSize = 5;
   v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
@@ -1714,7 +1714,7 @@ Local<UnboundScript> ScriptCompiler::CompileUnbound(
 
   i::ScriptData* script_data = NULL;
   if (options == kConsumeParserCache || options == kConsumeCodeCache) {
-    ASSERT(source->cached_data);
+    DCHECK(source->cached_data);
     // ScriptData takes care of pointer-aligning the data.
     script_data = new i::ScriptData(source->cached_data->data,
                                     source->cached_data->length);
@@ -1833,7 +1833,7 @@ v8::TryCatch::TryCatch()
 
 
 v8::TryCatch::~TryCatch() {
-  ASSERT(isolate_ == i::Isolate::Current());
+  DCHECK(isolate_ == i::Isolate::Current());
   if (rethrow_) {
     v8::Isolate* isolate = reinterpret_cast<Isolate*>(isolate_);
     v8::HandleScope scope(isolate);
@@ -1849,7 +1849,7 @@ v8::TryCatch::~TryCatch() {
     isolate_->UnregisterTryCatchHandler(this);
     v8::internal::SimulatorStack::UnregisterCTryCatch();
     reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc);
-    ASSERT(!isolate_->thread_local_top()->rethrowing_message_);
+    DCHECK(!isolate_->thread_local_top()->rethrowing_message_);
   } else {
     if (HasCaught() && isolate_->has_scheduled_exception()) {
       // If an exception was caught but is still scheduled because no API call
@@ -1886,7 +1886,7 @@ v8::Handle<v8::Value> v8::TryCatch::ReThrow() {
 
 
 v8::Local<Value> v8::TryCatch::Exception() const {
-  ASSERT(isolate_ == i::Isolate::Current());
+  DCHECK(isolate_ == i::Isolate::Current());
   if (HasCaught()) {
     // Check for out of memory exception.
     i::Object* exception = reinterpret_cast<i::Object*>(exception_);
@@ -1898,7 +1898,7 @@ v8::Local<Value> v8::TryCatch::Exception() const {
 
 
 v8::Local<Value> v8::TryCatch::StackTrace() const {
-  ASSERT(isolate_ == i::Isolate::Current());
+  DCHECK(isolate_ == i::Isolate::Current());
   if (HasCaught()) {
     i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
     if (!raw_obj->IsJSObject()) return v8::Local<Value>();
@@ -1922,9 +1922,9 @@ v8::Local<Value> v8::TryCatch::StackTrace() const {
 
 
 v8::Local<v8::Message> v8::TryCatch::Message() const {
-  ASSERT(isolate_ == i::Isolate::Current());
+  DCHECK(isolate_ == i::Isolate::Current());
   i::Object* message = reinterpret_cast<i::Object*>(message_obj_);
-  ASSERT(message->IsJSMessageObject() || message->IsTheHole());
+  DCHECK(message->IsJSMessageObject() || message->IsTheHole());
   if (HasCaught() && !message->IsTheHole()) {
     return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_));
   } else {
@@ -1934,7 +1934,7 @@ v8::Local<v8::Message> v8::TryCatch::Message() const {
 
 
 void v8::TryCatch::Reset() {
-  ASSERT(isolate_ == i::Isolate::Current());
+  DCHECK(isolate_ == i::Isolate::Current());
   i::Object* the_hole = isolate_->heap()->the_hole_value();
   exception_ = the_hole;
   message_obj_ = the_hole;
@@ -2286,14 +2286,14 @@ Local<Value> JSON::Parse(Local<String> json_string) {
 
 bool Value::FullIsUndefined() const {
   bool result = Utils::OpenHandle(this)->IsUndefined();
-  ASSERT_EQ(result, QuickIsUndefined());
+  DCHECK_EQ(result, QuickIsUndefined());
   return result;
 }
 
 
 bool Value::FullIsNull() const {
   bool result = Utils::OpenHandle(this)->IsNull();
-  ASSERT_EQ(result, QuickIsNull());
+  DCHECK_EQ(result, QuickIsNull());
   return result;
 }
 
@@ -2315,7 +2315,7 @@ bool Value::IsFunction() const {
 
 bool Value::FullIsString() const {
   bool result = Utils::OpenHandle(this)->IsString();
-  ASSERT_EQ(result, QuickIsString());
+  DCHECK_EQ(result, QuickIsString());
   return result;
 }
 
@@ -3469,7 +3469,7 @@ void Object::SetAccessorProperty(Local<String> name,
                                  PropertyAttribute attribute,
                                  AccessControl settings) {
   // TODO(verwaest): Remove |settings|.
-  ASSERT_EQ(v8::DEFAULT, settings);
+  DCHECK_EQ(v8::DEFAULT, settings);
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::SetAccessorProperty()", return);
   ENTER_V8(isolate);
@@ -3956,7 +3956,7 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
     has_pending_exception = !i::Execution::Call(
         isolate, fun, obj, argc, args).ToHandle(&returned);
     EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
-    ASSERT(!delegate->IsUndefined());
+    DCHECK(!delegate->IsUndefined());
     return Utils::ToLocal(scope.CloseAndEscape(returned));
   }
   return Local<v8::Object>();
@@ -4345,7 +4345,7 @@ class Utf8LengthHelper : public i::AllStatic {
                                   uint8_t leaf_state) {
     bool edge_surrogate = StartsWithSurrogate(leaf_state);
     if (!(*state & kLeftmostEdgeIsCalculated)) {
-      ASSERT(!(*state & kLeftmostEdgeIsSurrogate));
+      DCHECK(!(*state & kLeftmostEdgeIsSurrogate));
       *state |= kLeftmostEdgeIsCalculated
           | (edge_surrogate ? kLeftmostEdgeIsSurrogate : 0);
     } else if (EndsWithSurrogate(*state) && edge_surrogate) {
@@ -4363,7 +4363,7 @@ class Utf8LengthHelper : public i::AllStatic {
                                    uint8_t leaf_state) {
     bool edge_surrogate = EndsWithSurrogate(leaf_state);
     if (!(*state & kRightmostEdgeIsCalculated)) {
-      ASSERT(!(*state & kRightmostEdgeIsSurrogate));
+      DCHECK(!(*state & kRightmostEdgeIsSurrogate));
       *state |= (kRightmostEdgeIsCalculated
           | (edge_surrogate ? kRightmostEdgeIsSurrogate : 0));
     } else if (edge_surrogate && StartsWithSurrogate(*state)) {
@@ -4379,7 +4379,7 @@ class Utf8LengthHelper : public i::AllStatic {
   static inline void MergeTerminal(int* length,
                                    uint8_t state,
                                    uint8_t* state_out) {
-    ASSERT((state & kLeftmostEdgeIsCalculated) &&
+    DCHECK((state & kLeftmostEdgeIsCalculated) &&
            (state & kRightmostEdgeIsCalculated));
     if (EndsWithSurrogate(state) && StartsWithSurrogate(state)) {
       *length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
@@ -4491,7 +4491,7 @@ class Utf8WriterVisitor {
                                  char* const buffer,
                                  bool replace_invalid_utf8) {
     using namespace unibrow;
-    ASSERT(remaining > 0);
+    DCHECK(remaining > 0);
     // We can't use a local buffer here because Encode needs to modify
     // previous characters in the stream.  We know, however, that
     // exactly one character will be advanced.
@@ -4500,7 +4500,7 @@ class Utf8WriterVisitor {
                                character,
                                last_character,
                                replace_invalid_utf8);
-      ASSERT(written == 1);
+      DCHECK(written == 1);
       return written;
     }
     // Use a scratch buffer to check the required characters.
@@ -4532,7 +4532,7 @@ class Utf8WriterVisitor {
   template<typename Char>
   void Visit(const Char* chars, const int length) {
     using namespace unibrow;
-    ASSERT(!early_termination_);
+    DCHECK(!early_termination_);
     if (length == 0) return;
     // Copy state to stack.
     char* buffer = buffer_;
@@ -4561,7 +4561,7 @@ class Utf8WriterVisitor {
         for (; i < fast_length; i++) {
           buffer +=
               Utf8::EncodeOneByte(buffer, static_cast<uint8_t>(*chars++));
-          ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_);
+          DCHECK(capacity_ == -1 || (buffer - start_) <= capacity_);
         }
       } else {
         for (; i < fast_length; i++) {
@@ -4571,7 +4571,7 @@ class Utf8WriterVisitor {
                                    last_character,
                                    replace_invalid_utf8_);
           last_character = character;
-          ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_);
+          DCHECK(capacity_ == -1 || (buffer - start_) <= capacity_);
         }
       }
       // Array is fully written. Exit.
@@ -4583,10 +4583,10 @@ class Utf8WriterVisitor {
         return;
       }
     }
-    ASSERT(!skip_capacity_check_);
+    DCHECK(!skip_capacity_check_);
     // Slow loop. Must check capacity on each iteration.
     int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
-    ASSERT(remaining_capacity >= 0);
+    DCHECK(remaining_capacity >= 0);
     for (; i < length && remaining_capacity > 0; i++) {
       uint16_t character = *chars++;
       // remaining_capacity is <= 3 bytes at this point, so we do not write out
@@ -4733,7 +4733,7 @@ static inline int WriteHelper(const String* string,
   i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
   LOG_API(isolate, "String::Write");
   ENTER_V8(isolate);
-  ASSERT(start >= 0 && length >= -1);
+  DCHECK(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(string);
   isolate->string_tracker()->RecordWrite(str);
   if (options & String::HINT_MANY_WRITES_EXPECTED) {
@@ -4918,7 +4918,7 @@ void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
   if (!InternalFieldOK(obj, index, location)) return;
   i::Handle<i::Object> val = Utils::OpenHandle(*value);
   obj->SetInternalField(index, *val);
-  ASSERT_EQ(value, GetInternalField(index));
+  DCHECK_EQ(value, GetInternalField(index));
 }
 
 
@@ -4935,7 +4935,7 @@ void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
   const char* location = "v8::Object::SetAlignedPointerInInternalField()";
   if (!InternalFieldOK(obj, index, location)) return;
   obj->SetInternalField(index, EncodeAlignedAsSmi(value, location));
-  ASSERT_EQ(value, GetAlignedPointerFromInternalField(index));
+  DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
 }
 
 
@@ -4982,8 +4982,8 @@ void v8::V8::SetReturnAddressLocationResolver(
 
 bool v8::V8::SetFunctionEntryHook(Isolate* ext_isolate,
                                   FunctionEntryHook entry_hook) {
-  ASSERT(ext_isolate != NULL);
-  ASSERT(entry_hook != NULL);
+  DCHECK(ext_isolate != NULL);
+  DCHECK(entry_hook != NULL);
 
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(ext_isolate);
 
@@ -5069,7 +5069,7 @@ void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
 void v8::V8::VisitHandlesForPartialDependence(
     Isolate* exported_isolate, PersistentHandleVisitor* visitor) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
-  ASSERT(isolate == i::Isolate::Current());
+  DCHECK(isolate == i::Isolate::Current());
   i::DisallowHeapAllocation no_allocation;
 
   VisitorAdapter visitor_adapter(visitor);
@@ -5141,8 +5141,8 @@ static i::Handle<i::Context> CreateEnvironment(
 
     // Restore the access check info on the global template.
     if (!global_template.IsEmpty()) {
-      ASSERT(!global_constructor.is_null());
-      ASSERT(!proxy_constructor.is_null());
+      DCHECK(!global_constructor.is_null());
+      DCHECK(!proxy_constructor.is_null());
       global_constructor->set_access_check_info(
           proxy_constructor->access_check_info());
       global_constructor->set_needs_access_check(
@@ -5489,7 +5489,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
 
   bool result = obj->MakeExternal(resource);
   if (result) {
-    ASSERT(obj->IsExternalString());
+    DCHECK(obj->IsExternalString());
     isolate->heap()->external_string_table()->AddString(*obj);
   }
   return result;
@@ -5529,7 +5529,7 @@ bool v8::String::MakeExternal(
 
   bool result = obj->MakeExternal(resource);
   if (result) {
-    ASSERT(obj->IsExternalString());
+    DCHECK(obj->IsExternalString());
     isolate->heap()->external_string_table()->AddString(*obj);
   }
   return result;
@@ -5697,7 +5697,7 @@ void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
   i::Handle<i::FixedArray> date_cache_version =
       i::Handle<i::FixedArray>::cast(i_isolate->eternal_handles()->GetSingleton(
           i::EternalHandles::DATE_CACHE_VERSION));
-  ASSERT_EQ(1, date_cache_version->length());
+  DCHECK_EQ(1, date_cache_version->length());
   CHECK(date_cache_version->get(0)->IsSmi());
   date_cache_version->set(
       0,
@@ -5712,7 +5712,7 @@ static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
   if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g';
   if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
   if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
-  ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
+  DCHECK(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
   return isolate->factory()->InternalizeOneByteString(
       i::Vector<const uint8_t>(flags_buf, num_flags));
 }
@@ -6006,10 +6006,10 @@ Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
   i::Handle<i::JSArrayBuffer> buffer;
   if (obj->IsJSDataView()) {
     i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*obj));
-    ASSERT(data_view->buffer()->IsJSArrayBuffer());
+    DCHECK(data_view->buffer()->IsJSArrayBuffer());
     buffer = i::handle(i::JSArrayBuffer::cast(data_view->buffer()));
   } else {
-    ASSERT(obj->IsJSTypedArray());
+    DCHECK(obj->IsJSTypedArray());
     buffer = i::JSTypedArray::cast(*obj)->GetBuffer();
   }
   return Utils::ToLocal(buffer);
@@ -6040,7 +6040,7 @@ static inline void SetupArrayBufferView(
     i::Handle<i::JSArrayBuffer> buffer,
     size_t byte_offset,
     size_t byte_length) {
-  ASSERT(byte_offset + byte_length <=
+  DCHECK(byte_offset + byte_length <=
          static_cast<size_t>(buffer->byte_length()->Number()));
 
   obj->set_buffer(*buffer);
@@ -6067,7 +6067,7 @@ i::Handle<i::JSTypedArray> NewTypedArray(
       isolate->factory()->NewJSTypedArray(array_type);
   i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
 
-  ASSERT(byte_offset % sizeof(ElementType) == 0);
+  DCHECK(byte_offset % sizeof(ElementType) == 0);
 
   CHECK(length <= (std::numeric_limits<size_t>::max() / sizeof(ElementType)));
   CHECK(length <= static_cast<size_t>(i::Smi::kMaxValue));
@@ -6152,7 +6152,7 @@ Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
   i::Handle<i::Object> symbol =
       i::Object::GetPropertyOrElement(symbols, i_name).ToHandleChecked();
   if (!symbol->IsSymbol()) {
-    ASSERT(symbol->IsUndefined());
+    DCHECK(symbol->IsUndefined());
     symbol = i_isolate->factory()->NewSymbol();
     i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
     i::JSObject::SetProperty(symbols, i_name, symbol, i::STRICT).Assert();
@@ -6172,7 +6172,7 @@ Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
   i::Handle<i::Object> symbol =
       i::Object::GetPropertyOrElement(symbols, i_name).ToHandleChecked();
   if (!symbol->IsSymbol()) {
-    ASSERT(symbol->IsUndefined());
+    DCHECK(symbol->IsUndefined());
     symbol = i_isolate->factory()->NewSymbol();
     i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
     i::JSObject::SetProperty(symbols, i_name, symbol, i::STRICT).Assert();
@@ -6204,7 +6204,7 @@ Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
   i::Handle<i::Object> symbol =
       i::Object::GetPropertyOrElement(privates, i_name).ToHandleChecked();
   if (!symbol->IsSymbol()) {
-    ASSERT(symbol->IsUndefined());
+    DCHECK(symbol->IsUndefined());
     symbol = i_isolate->factory()->NewPrivateSymbol();
     i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
     i::JSObject::SetProperty(privates, i_name, symbol, i::STRICT).Assert();
@@ -6216,7 +6216,7 @@ Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
 
 Local<Number> v8::Number::New(Isolate* isolate, double value) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  ASSERT(internal_isolate->IsInitialized());
+  DCHECK(internal_isolate->IsInitialized());
   if (std::isnan(value)) {
     // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
     value = base::OS::nan_value();
@@ -6229,7 +6229,7 @@ Local<Number> v8::Number::New(Isolate* isolate, double value) {
 
 Local<Integer> v8::Integer::New(Isolate* isolate, int32_t value) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  ASSERT(internal_isolate->IsInitialized());
+  DCHECK(internal_isolate->IsInitialized());
   if (i::Smi::IsValid(value)) {
     return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
                                                       internal_isolate));
@@ -6242,7 +6242,7 @@ Local<Integer> v8::Integer::New(Isolate* isolate, int32_t value) {
 
 Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  ASSERT(internal_isolate->IsInitialized());
+  DCHECK(internal_isolate->IsInitialized());
   bool fits_into_int32_t = (value & (1 << 31)) == 0;
   if (fits_into_int32_t) {
     return Integer::New(isolate, static_cast<int32_t>(value));
@@ -6517,7 +6517,7 @@ void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) {
         i::NEW_SPACE, "Isolate::RequestGarbageCollection",
         kGCCallbackFlagForced);
   } else {
-    ASSERT_EQ(kFullGarbageCollection, type);
+    DCHECK_EQ(kFullGarbageCollection, type);
     reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage(
         i::Heap::kAbortIncrementalMarkingMask,
         "Isolate::RequestGarbageCollection", kGCCallbackFlagForced);
@@ -6569,7 +6569,7 @@ Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
     internal_ = reinterpret_cast<void*>(
         new i::DisallowJavascriptExecution(i_isolate));
   } else {
-    ASSERT_EQ(THROW_ON_FAILURE, on_failure);
+    DCHECK_EQ(THROW_ON_FAILURE, on_failure);
     internal_ = reinterpret_cast<void*>(
         new i::ThrowOnJavascriptExecution(i_isolate));
   }
@@ -7071,7 +7071,7 @@ const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
 void CpuProfile::Delete() {
   i::Isolate* isolate = i::Isolate::Current();
   i::CpuProfiler* profiler = isolate->cpu_profiler();
-  ASSERT(profiler != NULL);
+  DCHECK(profiler != NULL);
   profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
 }
 
@@ -7121,7 +7121,7 @@ int CpuProfile::GetSamplesCount() const {
 
 
 void CpuProfiler::SetSamplingInterval(int us) {
-  ASSERT(us >= 0);
+  DCHECK(us >= 0);
   return reinterpret_cast<i::CpuProfiler*>(this)->set_sampling_interval(
       base::TimeDelta::FromMicroseconds(us));
 }
@@ -7153,7 +7153,7 @@ const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
 void CpuProfiler::SetIdle(bool is_idle) {
   i::Isolate* isolate = reinterpret_cast<i::CpuProfiler*>(this)->isolate();
   i::StateTag state = isolate->current_vm_state();
-  ASSERT(state == i::EXTERNAL || state == i::IDLE);
+  DCHECK(state == i::EXTERNAL || state == i::IDLE);
   if (isolate->js_entry_sp() != NULL) return;
   if (is_idle) {
     isolate->set_current_vm_state(i::IDLE);
@@ -7514,7 +7514,7 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
         (last_handle_before_deferred_block_ <= &block[kHandleBlockSize]) &&
         (last_handle_before_deferred_block_ >= block)) {
       v->VisitPointers(block, last_handle_before_deferred_block_);
-      ASSERT(!found_block_before_deferred);
+      DCHECK(!found_block_before_deferred);
 #ifdef DEBUG
       found_block_before_deferred = true;
 #endif
@@ -7523,7 +7523,7 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
     }
   }
 
-  ASSERT(last_handle_before_deferred_block_ == NULL ||
+  DCHECK(last_handle_before_deferred_block_ == NULL ||
          found_block_before_deferred);
 
   // Iterate over live handles in the last block (if any).
@@ -7563,7 +7563,7 @@ DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
     Object** block_start = blocks_.last();
     Object** block_limit = &block_start[kHandleBlockSize];
     // We should not need to check for SealHandleScope here. Assert this.
-    ASSERT(prev_limit == block_limit ||
+    DCHECK(prev_limit == block_limit ||
            !(block_start <= prev_limit && prev_limit <= block_limit));
     if (prev_limit == block_limit) break;
     deferred->blocks_.Add(blocks_.last());
@@ -7574,17 +7574,17 @@ DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
   // HandleScope stack since BeginDeferredScope was called, but in
   // reverse order.
 
-  ASSERT(prev_limit == NULL || !blocks_.is_empty());
+  DCHECK(prev_limit == NULL || !blocks_.is_empty());
 
-  ASSERT(!blocks_.is_empty() && prev_limit != NULL);
-  ASSERT(last_handle_before_deferred_block_ != NULL);
+  DCHECK(!blocks_.is_empty() && prev_limit != NULL);
+  DCHECK(last_handle_before_deferred_block_ != NULL);
   last_handle_before_deferred_block_ = NULL;
   return deferred;
 }
 
 
 void HandleScopeImplementer::BeginDeferredScope() {
-  ASSERT(last_handle_before_deferred_block_ == NULL);
+  DCHECK(last_handle_before_deferred_block_ == NULL);
   last_handle_before_deferred_block_ = isolate()->handle_scope_data()->next;
 }
 
@@ -7602,9 +7602,9 @@ DeferredHandles::~DeferredHandles() {
 
 
 void DeferredHandles::Iterate(ObjectVisitor* v) {
-  ASSERT(!blocks_.is_empty());
+  DCHECK(!blocks_.is_empty());
 
-  ASSERT((first_block_limit_ >= blocks_.first()) &&
+  DCHECK((first_block_limit_ >= blocks_.first()) &&
          (first_block_limit_ <= &(blocks_.first())[kHandleBlockSize]));
 
   v->VisitPointers(blocks_.first(), first_block_limit_);
--- a/src/api.h
+++ b/src/api.h
@@ -81,13 +81,13 @@ NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
 
 
 v8::internal::Object* NeanderObject::get(int offset) {
-  ASSERT(value()->HasFastObjectElements());
+  DCHECK(value()->HasFastObjectElements());
   return v8::internal::FixedArray::cast(value()->elements())->get(offset);
 }
 
 
 void NeanderObject::set(int offset, v8::internal::Object* value) {
-  ASSERT(value_->HasFastObjectElements());
+  DCHECK(value_->HasFastObjectElements());
   v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
 }
 
@@ -264,7 +264,7 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
 
 template<class From, class To>
 static inline Local<To> Convert(v8::internal::Handle<From> obj) {
-  ASSERT(obj.is_null() || !obj->IsTheHole());
+  DCHECK(obj.is_null() || !obj->IsTheHole());
   return Local<To>(reinterpret_cast<To*>(obj.location()));
 }
 
@@ -325,7 +325,7 @@ inline v8::Local<T> ToApiHandle(
 #define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
   Local<v8::Type##Array> Utils::ToLocal##Type##Array( \
       v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
-    ASSERT(obj->type() == kExternal##Type##Array); \
+    DCHECK(obj->type() == kExternal##Type##Array); \
     return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj); \
   }
 
@@ -534,7 +534,7 @@ class HandleScopeImplementer {
   Isolate* isolate() const { return isolate_; }
 
   void ReturnBlock(Object** block) {
-    ASSERT(block != NULL);
+    DCHECK(block != NULL);
     if (spare_ != NULL) DeleteArray(spare_);
     spare_ = block;
   }
@@ -550,9 +550,9 @@ class HandleScopeImplementer {
   }
 
   void Free() {
-    ASSERT(blocks_.length() == 0);
-    ASSERT(entered_contexts_.length() == 0);
-    ASSERT(saved_contexts_.length() == 0);
+    DCHECK(blocks_.length() == 0);
+    DCHECK(entered_contexts_.length() == 0);
+    DCHECK(saved_contexts_.length() == 0);
     blocks_.Free();
     entered_contexts_.Free();
     saved_contexts_.Free();
@@ -560,7 +560,7 @@ class HandleScopeImplementer {
       DeleteArray(spare_);
       spare_ = NULL;
     }
-    ASSERT(call_depth_ == 0);
+    DCHECK(call_depth_ == 0);
   }
 
   void BeginDeferredScope();
@@ -663,7 +663,7 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
     }
     spare_ = block_start;
   }
-  ASSERT((blocks_.is_empty() && prev_limit == NULL) ||
+  DCHECK((blocks_.is_empty() && prev_limit == NULL) ||
          (!blocks_.is_empty() && prev_limit != NULL));
 }
 
@@ -32,7 +32,7 @@ class Arguments BASE_EMBEDDED {
       : length_(length), arguments_(arguments) { }
 
   Object*& operator[] (int index) {
-    ASSERT(0 <= index && index < length_);
+    DCHECK(0 <= index && index < length_);
     return *(reinterpret_cast<Object**>(reinterpret_cast<intptr_t>(arguments_) -
                                         index * kPointerSize));
   }
@@ -176,8 +176,8 @@ class PropertyCallbackArguments
     values[T::kReturnValueDefaultValueIndex] =
         isolate->heap()->the_hole_value();
     values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
-    ASSERT(values[T::kHolderIndex]->IsHeapObject());
-    ASSERT(values[T::kIsolateIndex]->IsSmi());
+    DCHECK(values[T::kHolderIndex]->IsHeapObject());
+    DCHECK(values[T::kIsolateIndex]->IsSmi());
   }
 
   /*
@@ -248,9 +248,9 @@ class FunctionCallbackArguments
     values[T::kReturnValueDefaultValueIndex] =
         isolate->heap()->the_hole_value();
     values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
-    ASSERT(values[T::kCalleeIndex]->IsJSFunction());
-    ASSERT(values[T::kHolderIndex]->IsHeapObject());
-    ASSERT(values[T::kIsolateIndex]->IsSmi());
+    DCHECK(values[T::kCalleeIndex]->IsJSFunction());
+    DCHECK(values[T::kHolderIndex]->IsHeapObject());
+    DCHECK(values[T::kIsolateIndex]->IsSmi());
   }
 
   /*
@@ -71,8 +71,8 @@ int DwVfpRegister::NumAllocatableRegisters() {
 
 
 int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
-  ASSERT(!reg.is(kDoubleRegZero));
-  ASSERT(!reg.is(kScratchDoubleReg));
+  DCHECK(!reg.is(kDoubleRegZero));
+  DCHECK(!reg.is(kScratchDoubleReg));
   if (reg.code() > kDoubleRegZero.code()) {
     return reg.code() - kNumReservedRegisters;
   }
@@ -81,8 +81,8 @@ int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
 
 
 DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
-  ASSERT(index >= 0 && index < NumAllocatableRegisters());
-  ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+  DCHECK(index >= 0 && index < NumAllocatableRegisters());
+  DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
          kNumReservedRegisters - 1);
   if (index >= kDoubleRegZero.code()) {
     return from_code(index + kNumReservedRegisters);
@@ -103,13 +103,13 @@ void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
 
 
 Address RelocInfo::target_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   return Assembler::target_address_at(pc_, host_);
 }
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
          || rmode_ == EMBEDDED_OBJECT
          || rmode_ == EXTERNAL_REFERENCE);
   if (FLAG_enable_ool_constant_pool ||
@@ -118,14 +118,14 @@ Address RelocInfo::target_address_address() {
     // serializerer and expects the address to reside within the code object.
     return reinterpret_cast<Address>(pc_);
   } else {
-    ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+    DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
     return constant_pool_entry_address();
   }
 }
 
 
 Address RelocInfo::constant_pool_entry_address() {
-  ASSERT(IsInConstantPool());
+  DCHECK(IsInConstantPool());
   return Assembler::constant_pool_entry_address(pc_, host_->constant_pool());
 }
 
@@ -138,7 +138,7 @@ int RelocInfo::target_address_size() {
 void RelocInfo::set_target_address(Address target,
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL && IsCodeTarget(rmode_)) {
@@ -150,13 +150,13 @@ void RelocInfo::set_target_address(Address target,
 
 
 Object* RelocInfo::target_object() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
 }
 
 
 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Handle<Object>(reinterpret_cast<Object**>(
       Assembler::target_address_at(pc_, host_)));
 }
@@ -165,7 +165,7 @@ Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
 void RelocInfo::set_target_object(Object* target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Assembler::set_target_address_at(pc_, host_,
                                    reinterpret_cast<Address>(target),
                                    icache_flush_mode);
@@ -179,13 +179,13 @@ void RelocInfo::set_target_object(Object* target,
 
 
 Address RelocInfo::target_reference() {
-  ASSERT(rmode_ == EXTERNAL_REFERENCE);
+  DCHECK(rmode_ == EXTERNAL_REFERENCE);
   return Assembler::target_address_at(pc_, host_);
 }
 
 
 Address RelocInfo::target_runtime_entry(Assembler* origin) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   return target_address();
 }
 
@@ -193,21 +193,21 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
 void RelocInfo::set_target_runtime_entry(Address target,
                                          WriteBarrierMode write_barrier_mode,
                                          ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   if (target_address() != target)
     set_target_address(target, write_barrier_mode, icache_flush_mode);
 }
 
 
 Handle<Cell> RelocInfo::target_cell_handle() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   Address address = Memory::Address_at(pc_);
   return Handle<Cell>(reinterpret_cast<Cell**>(address));
 }
 
 
 Cell* RelocInfo::target_cell() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   return Cell::FromValueAddress(Memory::Address_at(pc_));
 }
 
@@ -215,7 +215,7 @@ Cell* RelocInfo::target_cell() {
 void RelocInfo::set_target_cell(Cell* cell,
                                 WriteBarrierMode write_barrier_mode,
                                 ICacheFlushMode icache_flush_mode) {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   Address address = cell->address() + Cell::kValueOffset;
   Memory::Address_at(pc_) = address;
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
@@ -237,7 +237,7 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
 
 
 Code* RelocInfo::code_age_stub() {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   return Code::GetCodeFromTargetAddress(
       Memory::Address_at(pc_ +
                          (kNoCodeAgeSequenceLength - Assembler::kInstrSize)));
@@ -246,7 +246,7 @@ Code* RelocInfo::code_age_stub() {
 
 void RelocInfo::set_code_age_stub(Code* stub,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   Memory::Address_at(pc_ +
                      (kNoCodeAgeSequenceLength - Assembler::kInstrSize)) =
       stub->instruction_start();
@@ -256,14 +256,14 @@ void RelocInfo::set_code_age_stub(Code* stub,
 Address RelocInfo::call_address() {
   // The 2 instructions offset assumes patched debug break slot or return
   // sequence.
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
 }
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
   if (host() != NULL) {
@@ -285,14 +285,14 @@ void RelocInfo::set_call_object(Object* target) {
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
 }
 
 
 void RelocInfo::WipeOut() {
-  ASSERT(IsEmbeddedObject(rmode_) ||
+  DCHECK(IsEmbeddedObject(rmode_) ||
          IsCodeTarget(rmode_) ||
          IsRuntimeEntry(rmode_) ||
         IsExternalReference(rmode_));
@@ -446,12 +446,12 @@ Address Assembler::target_address_from_return_address(Address pc) {
     return candidate;
   } else if (IsLdrPpRegOffset(candidate_instr)) {
     candidate = pc - 4 * Assembler::kInstrSize;
-    ASSERT(IsMovW(Memory::int32_at(candidate)) &&
+    DCHECK(IsMovW(Memory::int32_at(candidate)) &&
           IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
     return candidate;
   } else {
     candidate = pc - 3 * Assembler::kInstrSize;
-    ASSERT(IsMovW(Memory::int32_at(candidate)) &&
+    DCHECK(IsMovW(Memory::int32_at(candidate)) &&
           IsMovT(Memory::int32_at(candidate + kInstrSize)));
     return candidate;
   }
@@ -464,8 +464,8 @@ Address Assembler::return_address_from_call_start(Address pc) {
     // Load from constant pool, small section.
     return pc + kInstrSize * 2;
   } else {
-    ASSERT(IsMovW(Memory::int32_at(pc)));
-    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+    DCHECK(IsMovW(Memory::int32_at(pc)));
+    DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
     if (IsLdrPpRegOffset(Memory::int32_at(pc + kInstrSize))) {
       // Load from constant pool, extended section.
       return pc + kInstrSize * 4;
@@ -498,10 +498,10 @@ bool Assembler::is_constant_pool_load(Address pc) {
 Address Assembler::constant_pool_entry_address(
     Address pc, ConstantPoolArray* constant_pool) {
   if (FLAG_enable_ool_constant_pool) {
-    ASSERT(constant_pool != NULL);
+    DCHECK(constant_pool != NULL);
     int cp_offset;
     if (IsMovW(Memory::int32_at(pc))) {
-      ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)) &&
+      DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)) &&
              IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize)));
       // This is an extended constant pool lookup.
       Instruction* movw_instr = Instruction::At(pc);
@@ -510,12 +510,12 @@ Address Assembler::constant_pool_entry_address(
                   movw_instr->ImmedMovwMovtValue();
     } else {
       // This is a small constant pool lookup.
-      ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+      DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
       cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
     }
     return reinterpret_cast<Address>(constant_pool) + cp_offset;
   } else {
-    ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+    DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
     Instr instr = Memory::int32_at(pc);
     return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
   }
@@ -529,7 +529,7 @@ Address Assembler::target_address_at(Address pc,
     return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
   } else {
     // This is an movw_movt immediate load. Return the immediate.
-    ASSERT(IsMovW(Memory::int32_at(pc)) &&
+    DCHECK(IsMovW(Memory::int32_at(pc)) &&
            IsMovT(Memory::int32_at(pc + kInstrSize)));
     Instruction* movw_instr = Instruction::At(pc);
     Instruction* movt_instr = Instruction::At(pc + kInstrSize);
@@ -558,14 +558,14 @@ void Assembler::set_target_address_at(Address pc,
   } else {
     // This is an movw_movt immediate load. Patch the immediate embedded in the
     // instructions.
-    ASSERT(IsMovW(Memory::int32_at(pc)));
-    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+    DCHECK(IsMovW(Memory::int32_at(pc)));
+    DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
     uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
     uint32_t immediate = reinterpret_cast<uint32_t>(target);
     instr_ptr[0] = PatchMovwImmediate(instr_ptr[0], immediate & 0xFFFF);
     instr_ptr[1] = PatchMovwImmediate(instr_ptr[1], immediate >> 16);
-    ASSERT(IsMovW(Memory::int32_at(pc)));
-    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+    DCHECK(IsMovW(Memory::int32_at(pc)));
+    DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
     if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
       CpuFeatures::FlushICache(pc, 2 * kInstrSize);
     }
(One file's diff is suppressed in this view because it is too large.)
@@ -100,17 +100,17 @@ struct Register {
   inline static int NumAllocatableRegisters();
 
   static int ToAllocationIndex(Register reg) {
-    ASSERT(reg.code() < kMaxNumAllocatableRegisters);
+    DCHECK(reg.code() < kMaxNumAllocatableRegisters);
     return reg.code();
   }
 
   static Register FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     return from_code(index);
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "r0",
       "r1",
@@ -136,17 +136,17 @@ struct Register {
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
 
   void set_code(int code) {
     code_ = code;
-    ASSERT(is_valid());
+    DCHECK(is_valid());
   }
 
   // Unfortunately we can't make this private in a struct.
@@ -182,15 +182,15 @@ struct SwVfpRegister {
   bool is_valid() const { return 0 <= code_ && code_ < 32; }
   bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
   void split_code(int* vm, int* m) const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     *m = code_ & 0x1;
     *vm = code_ >> 1;
   }
@@ -232,15 +232,15 @@ struct DwVfpRegister {
   }
   bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
   void split_code(int* vm, int* m) const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     *m = (code_ & 0x10) >> 4;
     *vm = code_ & 0x0F;
   }
@@ -271,21 +271,21 @@ struct LowDwVfpRegister {
   bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
   bool is(LowDwVfpRegister reg) const { return code_ == reg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   SwVfpRegister low() const {
     SwVfpRegister reg;
     reg.code_ = code_ * 2;
 
-    ASSERT(reg.is_valid());
+    DCHECK(reg.is_valid());
     return reg;
   }
   SwVfpRegister high() const {
     SwVfpRegister reg;
     reg.code_ = (code_ * 2) + 1;
 
-    ASSERT(reg.is_valid());
+    DCHECK(reg.is_valid());
     return reg;
   }
 
@@ -307,11 +307,11 @@ struct QwNeonRegister {
   }
   bool is(QwNeonRegister reg) const { return code_ == reg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   void split_code(int* vm, int* m) const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     int encoded_code = code_ << 1;
     *m = (encoded_code & 0x10) >> 4;
     *vm = encoded_code & 0x0F;
@@ -425,11 +425,11 @@ struct CRegister {
   bool is_valid() const { return 0 <= code_ && code_ < 16; }
   bool is(CRegister creg) const { return code_ == creg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
 
@@ -533,7 +533,7 @@ class Operand BASE_EMBEDDED {
   bool must_output_reloc_info(const Assembler* assembler) const;
 
   inline int32_t immediate() const {
-    ASSERT(!rm_.is_valid());
+    DCHECK(!rm_.is_valid());
     return imm32_;
   }
 
@@ -581,12 +581,12 @@ class MemOperand BASE_EMBEDDED {
   }
 
   void set_offset(int32_t offset) {
-    ASSERT(rm_.is(no_reg));
+    DCHECK(rm_.is(no_reg));
     offset_ = offset;
   }
 
   uint32_t offset() const {
-    ASSERT(rm_.is(no_reg));
+    DCHECK(rm_.is(no_reg));
     return offset_;
   }
 
@@ -1353,12 +1353,12 @@ class Assembler : public AssemblerBase {
   // Record the AST id of the CallIC being compiled, so that it can be placed
   // in the relocation information.
   void SetRecordedAstId(TypeFeedbackId ast_id) {
-    ASSERT(recorded_ast_id_.IsNone());
+    DCHECK(recorded_ast_id_.IsNone());
     recorded_ast_id_ = ast_id;
   }
 
   TypeFeedbackId RecordedAstId() {
-    ASSERT(!recorded_ast_id_.IsNone());
+    DCHECK(!recorded_ast_id_.IsNone());
     return recorded_ast_id_;
   }
 
@ -1517,10 +1517,10 @@ class Assembler : public AssemblerBase {
|
||||
// Max pool start (if we need a jump and an alignment).
|
||||
int start = pc_offset() + kInstrSize + 2 * kPointerSize;
|
||||
// Check the constant pool hasn't been blocked for too long.
|
||||
ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
|
||||
DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
|
||||
(start + num_pending_64_bit_reloc_info_ * kDoubleSize <
|
||||
(first_const_pool_32_use_ + kMaxDistToIntPool)));
|
||||
ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
|
||||
DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
|
||||
(start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
|
||||
#endif
|
||||
// Two cases:
|
||||
|
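The rename above is mechanical: every ASSERT* call becomes the corresponding DCHECK* call with the same arguments. DCHECK is a debug-only check: it aborts with file and line information in debug builds and compiles to nothing in release builds, which is also why its argument must be free of side effects. A minimal sketch of the shape such a macro takes — illustrative only, assuming a simple fprintf/abort reporter; the real definitions live in V8's base/ headers and differ in detail:

#include <cstdio>
#include <cstdlib>

// Hypothetical, simplified DCHECK -- not the literal V8 definition.
#ifdef DEBUG
#define DCHECK(condition)                                   \
  do {                                                      \
    if (!(condition)) {                                     \
      std::fprintf(stderr, "%s:%d: Check failed: %s.\n",    \
                   __FILE__, __LINE__, #condition);         \
      std::abort();                                         \
    }                                                       \
  } while (false)
#else
// In release builds the condition is not even evaluated.
#define DCHECK(condition) ((void) 0)
#endif

With this shape, a failing DCHECK(is_valid()) in a debug build aborts with the source location and the stringified condition, while a release build pays no cost at all.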
@ -40,7 +40,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
num_extra_args = 1;
__ push(r1);
} else {
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}

// JumpToExternalReference expects r0 to contain the number of arguments
@ -323,7 +323,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -----------------------------------

// Should never create mementos for api functions.
ASSERT(!is_api_function || !create_memento);
DCHECK(!is_api_function || !create_memento);

Isolate* isolate = masm->isolate();

@ -411,11 +411,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r4: JSObject (not tagged)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r2, MemOperand(r5, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));

// Fill all the in-object properties with the appropriate filler.
@ -424,7 +424,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: object size (in words, including memento if create_memento)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);

if (!is_api_function) {
@ -463,11 +463,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill in memento fields.
// r5: points to the allocated but uninitialized memento.
__ LoadRoot(r6, Heap::kAllocationMementoMapRootIndex);
ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
// Load the AllocationSite
__ ldr(r6, MemOperand(sp, 2 * kPointerSize));
ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
} else {
__ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
@ -522,9 +522,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r5: FixedArray (not tagged)
__ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
__ mov(r2, r5);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r6, MemOperand(r2, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ SmiTag(r0, r3);
__ str(r0, MemOperand(r2, kPointerSize, PostIndex));

@ -535,7 +535,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r4: JSObject
// r5: FixedArray (not tagged)
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(&entry);

@ -377,7 +377,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
ASSERT(param_count == 0 ||
DCHECK(param_count == 0 ||
r0.is(descriptor->GetEnvironmentParameterRegister(
param_count - 1)));
// Push arguments
@ -491,7 +491,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
Register result_reg = destination();
ASSERT(is_truncating());
DCHECK(is_truncating());

int double_offset = offset();
// Account for saved regs if input is sp.
@ -623,7 +623,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// but it just ends up combining harmlessly with the last digit of the
// exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
// the most significant 1 to hit the last bit of the 12 bit sign and exponent.
ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
__ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
__ str(scratch_, FieldMemOperand(the_heap_number_,
@ -754,7 +754,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Label* lhs_not_nan,
Label* slow,
bool strict) {
ASSERT((lhs.is(r0) && rhs.is(r1)) ||
DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));

Label rhs_is_smi;
@ -816,7 +816,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs) {
ASSERT((lhs.is(r0) && rhs.is(r1)) ||
DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));

// If either operand is a JS object or an oddball value, then they are
@ -862,7 +862,7 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
Label* both_loaded_as_doubles,
Label* not_heap_numbers,
Label* slow) {
ASSERT((lhs.is(r0) && rhs.is(r1)) ||
DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));

__ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
@ -885,7 +885,7 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
Register rhs,
Label* possible_strings,
Label* not_both_strings) {
ASSERT((lhs.is(r0) && rhs.is(r1)) ||
DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));

// r2 is object type of rhs.
@ -975,7 +975,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
DCHECK_EQ(0, Smi::FromInt(0));
__ and_(r2, lhs, Operand(rhs));
__ JumpIfNotSmi(r2, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@ -1087,7 +1087,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == lt || cc == le) {
ncr = GREATER;
} else {
ASSERT(cc == gt || cc == ge);  // remaining cases
DCHECK(cc == gt || cc == ge);  // remaining cases
ncr = LESS;
}
__ mov(r0, Operand(Smi::FromInt(ncr)));
@ -1305,7 +1305,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
ASSERT(heapnumber.is(r0));
DCHECK(heapnumber.is(r0));
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret(2);
} else {
@ -1405,7 +1405,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
ASSERT(IsPowerOf2(frame_alignment));
DCHECK(IsPowerOf2(frame_alignment));
__ tst(sp, Operand(frame_alignment_mask));
__ b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort re-entering here.
@ -1681,7 +1681,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());

// Fixed register usage throughout the stub:
const Register object = r0;  // Object (lhs).
@ -1728,7 +1728,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
} else {
ASSERT(HasArgsInRegisters());
DCHECK(HasArgsInRegisters());
// Patch the (relocated) inlined map check.

// The map_load_offset was stored in r5
@ -2697,9 +2697,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// r3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;

ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());

// Load the cache state into r4.
@ -3166,7 +3166,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
@ -3206,7 +3206,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1));
__ tst(code_,
Operand(kSmiTagMask |
((~String::kMaxOneByteCharCode) << kSmiTagSize)));
@ -3613,7 +3613,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,

// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
// Use length_delta as result if it's zero.
__ mov(r0, Operand(length_delta), SetCC);
__ bind(&result_not_equal);
@ -3725,7 +3725,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {


void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMI);
DCHECK(state_ == CompareIC::SMI);
Label miss;
__ orr(r2, r1, r0);
__ JumpIfNotSmi(r2, &miss);
@ -3746,7 +3746,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {


void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::NUMBER);
DCHECK(state_ == CompareIC::NUMBER);

Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
@ -3823,7 +3823,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {


void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
Label miss;

// Registers containing left and right operands respectively.
@ -3849,7 +3849,7 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ cmp(left, right);
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(r0));
DCHECK(right.is(r0));
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
@ -3861,8 +3861,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {


void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::UNIQUE_NAME);
ASSERT(GetCondition() == eq);
DCHECK(state_ == CompareIC::UNIQUE_NAME);
DCHECK(GetCondition() == eq);
Label miss;

// Registers containing left and right operands respectively.
@ -3888,7 +3888,7 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ cmp(left, right);
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(r0));
DCHECK(right.is(r0));
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
@ -3900,7 +3900,7 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {


void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::STRING);
DCHECK(state_ == CompareIC::STRING);
Label miss;

bool equality = Token::IsEqualityOp(op_);
@ -3940,13 +3940,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// because we already know they are not identical. We know they are both
// strings.
if (equality) {
ASSERT(GetCondition() == eq);
DCHECK(GetCondition() == eq);
STATIC_ASSERT(kInternalizedTag == 0);
__ orr(tmp3, tmp1, Operand(tmp2));
__ tst(tmp3, Operand(kIsNotInternalizedMask));
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(r0));
DCHECK(right.is(r0));
__ Ret(eq);
}

@ -3979,7 +3979,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::OBJECT);
DCHECK(state_ == CompareIC::OBJECT);
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
@ -3989,7 +3989,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &miss);

ASSERT(GetCondition() == eq);
DCHECK(GetCondition() == eq);
__ sub(r0, r0, Operand(r1));
__ Ret();

@ -4068,7 +4068,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register properties,
Handle<Name> name,
Register scratch0) {
ASSERT(name->IsUniqueName());
DCHECK(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@ -4085,17 +4085,17 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));

// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
DCHECK(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1));  // index *= 3.

Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
ASSERT_EQ(kSmiTagSize, 1);
DCHECK_EQ(kSmiTagSize, 1);
Register tmp = properties;
__ add(tmp, properties, Operand(index, LSL, 1));
__ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));

ASSERT(!tmp.is(entity_name));
DCHECK(!tmp.is(entity_name));
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
__ cmp(entity_name, tmp);
__ b(eq, done);
@ -4151,10 +4151,10 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
Register name,
Register scratch1,
Register scratch2) {
ASSERT(!elements.is(scratch1));
ASSERT(!elements.is(scratch2));
ASSERT(!name.is(scratch1));
ASSERT(!name.is(scratch2));
DCHECK(!elements.is(scratch1));
DCHECK(!elements.is(scratch2));
DCHECK(!name.is(scratch1));
DCHECK(!name.is(scratch2));

__ AssertName(name);

@ -4173,7 +4173,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
ASSERT(NameDictionary::GetProbeOffset(i) <
DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ add(scratch2, scratch2, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
@ -4181,7 +4181,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));

// Scale the index by multiplying by the element size.
ASSERT(NameDictionary::kEntrySize == 3);
DCHECK(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ add(scratch2, scratch2, Operand(scratch2, LSL, 1));

@ -4199,7 +4199,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,

__ stm(db_w, sp, spill_mask);
if (name.is(r0)) {
ASSERT(!elements.is(r1));
DCHECK(!elements.is(r1));
__ Move(r1, name);
__ Move(r0, elements);
} else {
@ -4255,7 +4255,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
ASSERT(NameDictionary::GetProbeOffset(i) <
DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ add(index, hash, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
@ -4265,10 +4265,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ and_(index, mask, Operand(index, LSR, Name::kHashShift));

// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
DCHECK(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1));  // index *= 3.

ASSERT_EQ(kSmiTagSize, 1);
DCHECK_EQ(kSmiTagSize, 1);
__ add(index, dictionary, Operand(index, LSL, 2));
__ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));

@ -4356,8 +4356,8 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {

// Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
// Will be checked in IncrementalMarking::ActivateGeneratedStub.
ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
PatchBranchIntoNop(masm, 0);
PatchBranchIntoNop(masm, Assembler::kInstrSize);
}
@ -4409,8 +4409,8 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
__ PrepareCallCFunction(argument_count, regs_.scratch0());
Register address =
r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
ASSERT(!address.is(regs_.object()));
ASSERT(!address.is(r0));
DCHECK(!address.is(regs_.object()));
DCHECK(!address.is(r0));
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
__ Move(r1, address);
@ -4616,7 +4616,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// We also save lr, so the count here is one higher than the mask indicates.
const int32_t kNumSavedRegs = 7;

ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved);
DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);

// Save all caller-save registers as this may be called from anywhere.
__ stm(db_w, sp, kSavedRegs | lr.bit());
@ -4632,7 +4632,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ mov(r5, sp);
ASSERT(IsPowerOf2(frame_alignment));
DCHECK(IsPowerOf2(frame_alignment));
__ and_(sp, sp, Operand(-frame_alignment));
}

@ -4696,12 +4696,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// sp[0] - last argument
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
ASSERT(FAST_SMI_ELEMENTS == 0);
ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
ASSERT(FAST_ELEMENTS == 2);
ASSERT(FAST_HOLEY_ELEMENTS == 3);
ASSERT(FAST_DOUBLE_ELEMENTS == 4);
ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
DCHECK(FAST_SMI_ELEMENTS == 0);
DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
DCHECK(FAST_ELEMENTS == 2);
DCHECK(FAST_HOLEY_ELEMENTS == 3);
DCHECK(FAST_DOUBLE_ELEMENTS == 4);
DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);

// is the low bit set? If so, we are holey and that is good.
__ tst(r3, Operand(1));
@ -5020,7 +5020,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);

ASSERT(!api_function_address.is(r0) && !scratch.is(r0));
DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
// r0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize));

@ -180,12 +180,12 @@ class RecordWriteStub: public PlatformCodeStub {

static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
DCHECK(Assembler::IsTstImmediate(masm->instr_at(pos)));
}

static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
DCHECK(Assembler::IsBranch(masm->instr_at(pos)));
}

static Mode GetMode(Code* stub) {
@ -197,13 +197,13 @@ class RecordWriteStub: public PlatformCodeStub {
return INCREMENTAL;
}

ASSERT(Assembler::IsTstImmediate(first_instruction));
DCHECK(Assembler::IsTstImmediate(first_instruction));

if (Assembler::IsBranch(second_instruction)) {
return INCREMENTAL_COMPACTION;
}

ASSERT(Assembler::IsTstImmediate(second_instruction));
DCHECK(Assembler::IsTstImmediate(second_instruction));

return STORE_BUFFER_ONLY;
}
@ -214,21 +214,21 @@ class RecordWriteStub: public PlatformCodeStub {
stub->instruction_size());
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
PatchBranchIntoNop(&masm, 0);
PatchBranchIntoNop(&masm, Assembler::kInstrSize);
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, 0);
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, Assembler::kInstrSize);
break;
}
ASSERT(GetMode(stub) == mode);
DCHECK(GetMode(stub) == mode);
CpuFeatures::FlushICache(stub->instruction_start(),
2 * Assembler::kInstrSize);
}
@ -245,12 +245,12 @@ class RecordWriteStub: public PlatformCodeStub {
: object_(object),
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}

void Save(MacroAssembler* masm) {
ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->push(scratch1_);

@ -65,7 +65,7 @@ UnaryMathFunction CreateExpFunction() {

CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
DCHECK(!RelocInfo::RequiresRelocation(desc));

CpuFeatures::FlushICache(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
@ -225,7 +225,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {

CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
DCHECK(!RelocInfo::RequiresRelocation(desc));

CpuFeatures::FlushICache(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
@ -340,7 +340,7 @@ UnaryMathFunction CreateSqrtFunction() {

CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
DCHECK(!RelocInfo::RequiresRelocation(desc));

CpuFeatures::FlushICache(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
@ -356,14 +356,14 @@ UnaryMathFunction CreateSqrtFunction() {

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
ASSERT(!masm->has_frame());
DCHECK(!masm->has_frame());
masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
ASSERT(masm->has_frame());
DCHECK(masm->has_frame());
masm->set_has_frame(false);
}

@ -382,11 +382,11 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
AllocationSiteMode mode,
Label* allocation_memento_found) {
Register scratch_elements = r4;
ASSERT(!AreAliased(receiver, key, value, target_map,
DCHECK(!AreAliased(receiver, key, value, target_map,
scratch_elements));

if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
DCHECK(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(
receiver, scratch_elements, allocation_memento_found);
}
@ -424,7 +424,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Register scratch2 = r9;

// Verify input registers don't conflict with locals.
ASSERT(!AreAliased(receiver, key, value, target_map,
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, length, array, scratch2));

if (mode == TRACK_ALLOCATION_SITE) {
@ -562,7 +562,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Register scratch = r9;

// Verify input registers don't conflict with locals.
ASSERT(!AreAliased(receiver, key, value, target_map,
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, array, length, scratch));

if (mode == TRACK_ALLOCATION_SITE) {
@ -787,16 +787,16 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Register temp1,
Register temp2,
Register temp3) {
ASSERT(!input.is(result));
ASSERT(!input.is(double_scratch1));
ASSERT(!input.is(double_scratch2));
ASSERT(!result.is(double_scratch1));
ASSERT(!result.is(double_scratch2));
ASSERT(!double_scratch1.is(double_scratch2));
ASSERT(!temp1.is(temp2));
ASSERT(!temp1.is(temp3));
ASSERT(!temp2.is(temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
DCHECK(!input.is(result));
DCHECK(!input.is(double_scratch1));
DCHECK(!input.is(double_scratch2));
DCHECK(!result.is(double_scratch1));
DCHECK(!result.is(double_scratch2));
DCHECK(!double_scratch1.is(double_scratch2));
DCHECK(!temp1.is(temp2));
DCHECK(!temp1.is(temp3));
DCHECK(!temp2.is(temp3));
DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);

Label zero, infinity, done;

@ -827,7 +827,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ vmul(result, result, double_scratch2);
__ vsub(result, result, double_scratch1);
// Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
ASSERT(*reinterpret_cast<double*>
DCHECK(*reinterpret_cast<double*>
(ExternalReference::math_exp_constants(8).address()) == 1);
__ vmov(double_scratch2, 1);
__ vadd(result, result, double_scratch2);
@ -868,7 +868,7 @@ static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif

CodeAgingHelper::CodeAgingHelper() {
ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
@ -894,7 +894,7 @@ bool CodeAgingHelper::IsOld(byte* candidate) const {

bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}


@ -81,7 +81,7 @@ const char* VFPRegisters::names_[kNumVFPRegisters] = {


const char* VFPRegisters::Name(int reg, bool is_double) {
ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
DCHECK((0 <= reg) && (reg < kNumVFPRegisters));
return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
}


@ -19,11 +19,11 @@ const int kConstantPoolMarkerMask = 0xfff000f0;
const int kConstantPoolMarker = 0xe7f000f0;
const int kConstantPoolLengthMaxMask = 0xffff;
inline int EncodeConstantPoolLength(int length) {
ASSERT((length & kConstantPoolLengthMaxMask) == length);
DCHECK((length & kConstantPoolLengthMaxMask) == length);
return ((length & 0xfff0) << 4) | (length & 0xf);
}
inline int DecodeConstantPoolLength(int instr) {
ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
DCHECK((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}

@ -84,7 +84,7 @@ enum Condition {


inline Condition NegateCondition(Condition cond) {
ASSERT(cond != al);
DCHECK(cond != al);
return static_cast<Condition>(cond ^ ne);
}


@ -47,20 +47,20 @@ void BreakLocationIterator::ClearDebugBreakAtReturn() {
// A debug break in the frame exit code is identified by the JS frame exit code
// having been patched with a call instruction.
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}


bool BreakLocationIterator::IsDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
DCHECK(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
return rinfo()->IsPatchedDebugBreakSlotSequence();
}


void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
DCHECK(IsDebugBreakSlot());
// Patch the code changing the debug break slot code from
// mov r2, r2
// mov r2, r2
@ -78,7 +78,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {


void BreakLocationIterator::ClearDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
DCHECK(IsDebugBreakSlot());
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kDebugBreakSlotInstructions);
}
@ -104,9 +104,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
// are stored as a smi causing it to be untouched by GC.
ASSERT((object_regs & ~kJSCallerSaved) == 0);
ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
ASSERT((object_regs & non_object_regs) == 0);
DCHECK((object_regs & ~kJSCallerSaved) == 0);
DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
DCHECK((object_regs & non_object_regs) == 0);
if ((object_regs | non_object_regs) != 0) {
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
@ -267,7 +267,7 @@ void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
__ nop(MacroAssembler::DEBUG_BREAK_NOP);
}
ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
masm->InstructionsGeneratedSince(&check_codesize));
}


@ -65,13 +65,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
deopt_entry,
RelocInfo::NONE32);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
CodePatcher patcher(call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
ASSERT(prev_call_address == NULL ||
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
@ -142,8 +142,8 @@ void Deoptimizer::EntryGenerator::Generate() {
kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;

// Save all allocatable VFP registers before messing with them.
ASSERT(kDoubleRegZero.code() == 14);
ASSERT(kScratchDoubleReg.code() == 15);
DCHECK(kDoubleRegZero.code() == 14);
DCHECK(kScratchDoubleReg.code() == 15);

// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
@ -194,7 +194,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));

// Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
DCHECK(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize));
@ -326,7 +326,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
USE(start);
__ mov(ip, Operand(i));
__ b(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
__ push(ip);
@ -344,7 +344,7 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {


void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
ASSERT(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_ool_constant_pool);
SetFrameSlot(offset, value);
}


@ -299,7 +299,7 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'r');
DCHECK(format[0] == 'r');
if (format[1] == 'n') {  // 'rn: Rn register
int reg = instr->RnValue();
PrintRegister(reg);
@ -322,7 +322,7 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
return 2;
} else if (format[1] == 'l') {
// 'rlist: register list for load and store multiple instructions
ASSERT(STRING_STARTS_WITH(format, "rlist"));
DCHECK(STRING_STARTS_WITH(format, "rlist"));
int rlist = instr->RlistValue();
int reg = 0;
Print("{");
@ -348,7 +348,7 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
// Handle all VFP register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
ASSERT((format[0] == 'S') || (format[0] == 'D'));
DCHECK((format[0] == 'S') || (format[0] == 'D'));

VFPRegPrecision precision =
format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
@ -462,7 +462,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 1;
}
case 'c': {  // 'cond: conditional execution
ASSERT(STRING_STARTS_WITH(format, "cond"));
DCHECK(STRING_STARTS_WITH(format, "cond"));
PrintCondition(instr);
return 4;
}
@ -478,9 +478,9 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
// BFC/BFI:
// Bits 20-16 represent most-significant bit. Covert to width.
width -= lsbit;
ASSERT(width > 0);
DCHECK(width > 0);
}
ASSERT((width + lsbit) <= 32);
DCHECK((width + lsbit) <= 32);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"#%d, #%d", lsbit, width);
return 1;
@ -498,9 +498,9 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
int width = (format[3] - '0') * 10 + (format[4] - '0');
int lsb = (format[6] - '0') * 10 + (format[7] - '0');

ASSERT((width >= 1) && (width <= 32));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width + lsb) <= 32);
DCHECK((width >= 1) && (width <= 32));
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width + lsb) <= 32);

out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",
@ -520,7 +520,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 2;
}
if (format[1] == 'e') {  // 'memop: load/store instructions.
ASSERT(STRING_STARTS_WITH(format, "memop"));
DCHECK(STRING_STARTS_WITH(format, "memop"));
if (instr->HasL()) {
Print("ldr");
} else {
@ -538,7 +538,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 5;
}
// 'msg: for simulator break instructions
ASSERT(STRING_STARTS_WITH(format, "msg"));
DCHECK(STRING_STARTS_WITH(format, "msg"));
byte* str =
reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
@ -548,13 +548,13 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
case 'o': {
if ((format[3] == '1') && (format[4] == '2')) {
// 'off12: 12-bit offset for load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off12"));
DCHECK(STRING_STARTS_WITH(format, "off12"));
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", instr->Offset12Value());
return 5;
} else if (format[3] == '0') {
// 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
DCHECK(STRING_STARTS_WITH(format, "off0to3and8to19"));
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",
(instr->Bits(19, 8) << 4) +
@ -562,13 +562,13 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 15;
}
// 'off8: 8-bit offset for extra load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off8"));
DCHECK(STRING_STARTS_WITH(format, "off8"));
int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs8);
return 4;
}
case 'p': {  // 'pu: P and U bits for load and store instructions
ASSERT(STRING_STARTS_WITH(format, "pu"));
DCHECK(STRING_STARTS_WITH(format, "pu"));
PrintPU(instr);
return 2;
}
@ -578,29 +578,29 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
case 's': {
if (format[1] == 'h') {  // 'shift_op or 'shift_rm or 'shift_sat.
if (format[6] == 'o') {  // 'shift_op
ASSERT(STRING_STARTS_WITH(format, "shift_op"));
DCHECK(STRING_STARTS_WITH(format, "shift_op"));
if (instr->TypeValue() == 0) {
PrintShiftRm(instr);
} else {
ASSERT(instr->TypeValue() == 1);
DCHECK(instr->TypeValue() == 1);
PrintShiftImm(instr);
}
return 8;
} else if (format[6] == 's') {  // 'shift_sat.
ASSERT(STRING_STARTS_WITH(format, "shift_sat"));
DCHECK(STRING_STARTS_WITH(format, "shift_sat"));
PrintShiftSat(instr);
return 9;
} else {  // 'shift_rm
ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
DCHECK(STRING_STARTS_WITH(format, "shift_rm"));
PrintShiftRm(instr);
return 8;
}
} else if (format[1] == 'v') {  // 'svc
ASSERT(STRING_STARTS_WITH(format, "svc"));
DCHECK(STRING_STARTS_WITH(format, "svc"));
PrintSoftwareInterrupt(instr->SvcValue());
return 3;
} else if (format[1] == 'i') {  // 'sign: signed extra loads and stores
ASSERT(STRING_STARTS_WITH(format, "sign"));
DCHECK(STRING_STARTS_WITH(format, "sign"));
if (instr->HasSign()) {
Print("s");
}
@ -613,7 +613,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 1;
}
case 't': {  // 'target: target of branch instructions
ASSERT(STRING_STARTS_WITH(format, "target"));
DCHECK(STRING_STARTS_WITH(format, "target"));
int off = (instr->SImmed24Value() << 2) + 8;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%+d -> %s",

@ -21,7 +21,7 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
ASSERT(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_ool_constant_pool);
return pp;
}

@ -29,13 +29,13 @@ Register JavaScriptFrame::constant_pool_pointer_register() {
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
ASSERT(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_ool_constant_pool);
return pp;
}


Object*& ExitFrame::constant_pool_slot() const {
ASSERT(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_ool_constant_pool);
const int offset = ExitFrameConstants::kConstantPoolOffset;
return Memory::Object_at(fp() + offset);
}

|
||||
}
|
||||
|
||||
~JumpPatchSite() {
|
||||
ASSERT(patch_site_.is_bound() == info_emitted_);
|
||||
DCHECK(patch_site_.is_bound() == info_emitted_);
|
||||
}
|
||||
|
||||
// When initially emitting this ensure that a jump is always generated to skip
|
||||
// the inlined smi code.
|
||||
void EmitJumpIfNotSmi(Register reg, Label* target) {
|
||||
ASSERT(!patch_site_.is_bound() && !info_emitted_);
|
||||
DCHECK(!patch_site_.is_bound() && !info_emitted_);
|
||||
Assembler::BlockConstPoolScope block_const_pool(masm_);
|
||||
__ bind(&patch_site_);
|
||||
__ cmp(reg, Operand(reg));
|
||||
@ -56,7 +56,7 @@ class JumpPatchSite BASE_EMBEDDED {
|
||||
// When initially emitting this ensure that a jump is never generated to skip
|
||||
// the inlined smi code.
|
||||
void EmitJumpIfSmi(Register reg, Label* target) {
|
||||
ASSERT(!patch_site_.is_bound() && !info_emitted_);
|
||||
DCHECK(!patch_site_.is_bound() && !info_emitted_);
|
||||
Assembler::BlockConstPoolScope block_const_pool(masm_);
|
||||
__ bind(&patch_site_);
|
||||
__ cmp(reg, Operand(reg));
|
||||
@ -152,7 +152,7 @@ void FullCodeGenerator::Generate() {
|
||||
{ Comment cmnt(masm_, "[ Allocate locals");
|
||||
int locals_count = info->scope()->num_stack_slots();
|
||||
// Generators allocate locals, if any, in context slots.
|
||||
ASSERT(!info->function()->is_generator() || locals_count == 0);
|
||||
DCHECK(!info->function()->is_generator() || locals_count == 0);
|
||||
if (locals_count > 0) {
|
||||
if (locals_count >= 128) {
|
||||
Label ok;
|
||||
@ -292,9 +292,9 @@ void FullCodeGenerator::Generate() {
|
||||
// constant.
|
||||
if (scope()->is_function_scope() && scope()->function() != NULL) {
|
||||
VariableDeclaration* function = scope()->function();
|
||||
ASSERT(function->proxy()->var()->mode() == CONST ||
|
||||
DCHECK(function->proxy()->var()->mode() == CONST ||
|
||||
function->proxy()->var()->mode() == CONST_LEGACY);
|
||||
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
|
||||
DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
|
||||
VisitVariableDeclaration(function);
|
||||
}
|
||||
VisitDeclarations(scope()->declarations());
|
||||
@ -314,9 +314,9 @@ void FullCodeGenerator::Generate() {
|
||||
}
|
||||
|
||||
{ Comment cmnt(masm_, "[ Body");
|
||||
ASSERT(loop_depth() == 0);
|
||||
DCHECK(loop_depth() == 0);
|
||||
VisitStatements(function()->body());
|
||||
ASSERT(loop_depth() == 0);
|
||||
DCHECK(loop_depth() == 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -363,7 +363,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
|
||||
__ mov(r2, Operand(profiling_counter_));
|
||||
// The mov instruction above can be either 1, 2 or 3 instructions depending
|
||||
// upon whether it is an extended constant pool - insert nop to compensate.
|
||||
ASSERT(masm_->InstructionsGeneratedSince(&start) <= 3);
|
||||
DCHECK(masm_->InstructionsGeneratedSince(&start) <= 3);
|
||||
while (masm_->InstructionsGeneratedSince(&start) != 3) {
|
||||
__ nop();
|
||||
}
|
||||
@ -379,7 +379,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
|
||||
Assembler::BlockConstPoolScope block_const_pool(masm_);
|
||||
Label ok;
|
||||
|
||||
ASSERT(back_edge_target->is_bound());
|
||||
DCHECK(back_edge_target->is_bound());
|
||||
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
|
||||
int weight = Min(kMaxBackEdgeWeight,
|
||||
Max(1, distance / kCodeSizeMultiplier));
|
||||
@ -456,7 +456,7 @@ void FullCodeGenerator::EmitReturnSequence() {
|
||||
#ifdef DEBUG
|
||||
// Check that the size of the code used for returning is large enough
|
||||
// for the debugger's requirements.
|
||||
ASSERT(Assembler::kJSReturnSequenceInstructions <=
|
||||
DCHECK(Assembler::kJSReturnSequenceInstructions <=
|
||||
masm_->InstructionsGeneratedSince(&check_exit_codesize));
|
||||
#endif
|
||||
}
|
||||
@ -464,25 +464,25 @@ void FullCodeGenerator::EmitReturnSequence() {
|
||||
|
||||
|
||||
void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
|
||||
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
|
||||
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
|
||||
}
|
||||
|
||||
|
||||
void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
|
||||
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
|
||||
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
|
||||
codegen()->GetVar(result_register(), var);
|
||||
}
|
||||
|
||||
|
||||
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
|
||||
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
|
||||
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
|
||||
codegen()->GetVar(result_register(), var);
|
||||
__ push(result_register());
|
||||
}
|
||||
|
||||
|
||||
void FullCodeGenerator::TestContext::Plug(Variable* var) const {
|
||||
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
|
||||
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
|
||||
// For simplicity we always test the accumulator register.
|
||||
codegen()->GetVar(result_register(), var);
|
||||
codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
|
||||
@ -547,7 +547,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
|
||||
true,
|
||||
true_label_,
|
||||
false_label_);
|
||||
ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
|
||||
DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
|
||||
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
|
||||
if (false_label_ != fall_through_) __ b(false_label_);
|
||||
} else if (lit->IsTrue() || lit->IsJSObject()) {
|
||||
@ -574,7 +574,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
|
||||
|
||||
void FullCodeGenerator::EffectContext::DropAndPlug(int count,
|
||||
Register reg) const {
|
||||
ASSERT(count > 0);
|
||||
DCHECK(count > 0);
|
||||
__ Drop(count);
|
||||
}
|
||||
|
||||
@ -582,7 +582,7 @@ void FullCodeGenerator::EffectContext::DropAndPlug(int count,
|
||||
void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
|
||||
int count,
|
||||
Register reg) const {
|
||||
ASSERT(count > 0);
|
||||
DCHECK(count > 0);
|
||||
__ Drop(count);
|
||||
__ Move(result_register(), reg);
|
||||
}
|
||||
@ -590,7 +590,7 @@ void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
|
||||
|
||||
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
|
||||
Register reg) const {
|
||||
ASSERT(count > 0);
|
||||
DCHECK(count > 0);
|
||||
if (count > 1) __ Drop(count - 1);
|
||||
__ str(reg, MemOperand(sp, 0));
|
||||
}
|
||||
@ -598,7 +598,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
|
||||
|
||||
void FullCodeGenerator::TestContext::DropAndPlug(int count,
|
||||
Register reg) const {
|
||||
ASSERT(count > 0);
|
||||
DCHECK(count > 0);
|
||||
// For simplicity we always test the accumulator register.
|
||||
__ Drop(count);
|
||||
__ Move(result_register(), reg);
|
||||
@ -609,7 +609,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
|
||||
|
||||
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
|
||||
Label* materialize_false) const {
|
||||
ASSERT(materialize_true == materialize_false);
|
||||
DCHECK(materialize_true == materialize_false);
|
||||
__ bind(materialize_true);
|
||||
}
|
||||
|
||||
@ -643,8 +643,8 @@ void FullCodeGenerator::StackValueContext::Plug(
|
||||
|
||||
void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
|
||||
Label* materialize_false) const {
|
||||
ASSERT(materialize_true == true_label_);
|
||||
ASSERT(materialize_false == false_label_);
|
||||
DCHECK(materialize_true == true_label_);
|
||||
DCHECK(materialize_false == false_label_);
|
||||
}
|
||||
|
||||
|
||||
@ -707,7 +707,7 @@ void FullCodeGenerator::Split(Condition cond,
|
||||
|
||||
|
||||
MemOperand FullCodeGenerator::StackOperand(Variable* var) {
|
||||
ASSERT(var->IsStackAllocated());
|
||||
DCHECK(var->IsStackAllocated());
|
||||
// Offset is negative because higher indexes are at lower addresses.
|
||||
int offset = -var->index() * kPointerSize;
|
||||
// Adjust by a (parameter or local) base offset.
|
||||
@ -721,7 +721,7 @@ MemOperand FullCodeGenerator::StackOperand(Variable* var) {
|
||||
|
||||
|
||||
MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
|
||||
ASSERT(var->IsContextSlot() || var->IsStackAllocated());
|
||||
DCHECK(var->IsContextSlot() || var->IsStackAllocated());
|
||||
if (var->IsContextSlot()) {
|
||||
int context_chain_length = scope()->ContextChainLength(var->scope());
|
||||
__ LoadContext(scratch, context_chain_length);
|
||||
@ -743,10 +743,10 @@ void FullCodeGenerator::SetVar(Variable* var,
|
||||
Register src,
|
||||
Register scratch0,
|
||||
Register scratch1) {
|
||||
ASSERT(var->IsContextSlot() || var->IsStackAllocated());
|
||||
ASSERT(!scratch0.is(src));
|
||||
ASSERT(!scratch0.is(scratch1));
|
||||
ASSERT(!scratch1.is(src));
|
||||
DCHECK(var->IsContextSlot() || var->IsStackAllocated());
|
||||
DCHECK(!scratch0.is(src));
|
||||
DCHECK(!scratch0.is(scratch1));
|
||||
DCHECK(!scratch1.is(src));
|
||||
MemOperand location = VarOperand(var, scratch0);
|
||||
__ str(src, location);
|
||||
|
||||
@ -786,7 +786,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
@ -840,7 +840,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(r2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
ASSERT(IsDeclaredVariableMode(mode));
DCHECK(IsDeclaredVariableMode(mode));
PropertyAttributes attr =
IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
__ mov(r1, Operand(Smi::FromInt(attr)));
@ -920,8 +920,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(

void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
ASSERT(variable->location() == Variable::CONTEXT);
ASSERT(variable->interface()->IsFrozen());
DCHECK(variable->location() == Variable::CONTEXT);
DCHECK(variable->interface()->IsFrozen());

Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
@ -1410,7 +1410,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,

MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Label* slow) {
ASSERT(var->IsContextSlot());
DCHECK(var->IsContextSlot());
Register context = cp;
Register next = r3;
Register temp = r4;
@ -1505,7 +1505,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// always looked up dynamically, i.e. in that case
// var->location() == LOOKUP.
// always holds.
ASSERT(var->scope() != NULL);
DCHECK(var->scope() != NULL);

// Check if the binding really needs an initialization check. The check
// can be skipped in the following situation: we have a LET or CONST
@ -1528,8 +1528,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
skip_init_check = false;
} else {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
DCHECK(proxy->position() != RelocInfo::kNoPosition);
skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@ -1549,7 +1549,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ bind(&done);
} else {
// Uninitalized const bindings outside of harmony mode are unholed.
ASSERT(var->mode() == CONST_LEGACY);
DCHECK(var->mode() == CONST_LEGACY);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
context()->Plug(r0);
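The EmitVariableLoad hunks above encode one small policy worth spelling out: the hole check for a let/const binding can be skipped when the binding is provably initialized before this use. A hedged restatement of that predicate, using the same quantities the quoted code checks (the function wrapper is illustrative, not V8 code):

// Sketch of the skip_init_check predicate from the hunk above.
bool CanSkipInitCheck(VariableMode mode, int initializer_position,
                      int use_position) {
  // Both positions must be valid; the DCHECKs above guarantee that.
  return mode != CONST_LEGACY && initializer_position < use_position;
}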
@ -1688,13 +1688,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
ASSERT(StoreIC::ValueRegister().is(r0));
DCHECK(StoreIC::ValueRegister().is(r0));
__ mov(StoreIC::NameRegister(), Operand(key->value()));
__ ldr(StoreIC::ReceiverRegister(), MemOperand(sp));
CallStoreIC(key->LiteralFeedbackId());
@ -1754,7 +1754,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}

if (expr->has_function()) {
ASSERT(result_saved);
DCHECK(result_saved);
__ ldr(r0, MemOperand(sp));
__ push(r0);
__ CallRuntime(Runtime::kToFastProperties, 1);
@ -1779,7 +1779,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
ASSERT_EQ(2, constant_elements->length());
DCHECK_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
@ -1851,7 +1851,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {


void FullCodeGenerator::VisitAssignment(Assignment* expr) {
ASSERT(expr->target()->IsValidReferenceExpression());
DCHECK(expr->target()->IsValidReferenceExpression());

Comment cmnt(masm_, "[ Assignment");

@ -1981,7 +1981,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {

__ bind(&suspend);
VisitForAccumulatorValue(expr->generator_object());
ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
__ mov(r1, Operand(Smi::FromInt(continuation.pos())));
__ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
__ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
@ -2054,7 +2054,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
const int generator_object_depth = kPointerSize + handler_size;
__ ldr(r0, MemOperand(sp, generator_object_depth));
__ push(r0); // g
ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
__ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
__ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
@ -2219,7 +2219,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(r2);
__ b(&push_operand_holes);
__ bind(&call_resume);
ASSERT(!result_register().is(r1));
DCHECK(!result_register().is(r1));
__ Push(r1, result_register());
__ Push(Smi::FromInt(resume_mode));
__ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
@ -2271,7 +2271,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ pop(r2);
__ mov(r3, Operand(isolate()->factory()->ToBoolean(done)));
__ mov(r4, Operand(isolate()->factory()->empty_fixed_array()));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
DCHECK_EQ(map->instance_size(), 5 * kPointerSize);
__ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
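EmitCreateIteratorResult above fills in a five-slot object, and the DCHECK_EQ on map->instance_size() pins that layout. A hedged sketch of the fields the stores target; the struct and field names are illustrative, with the order following the stores shown in the hunk:

// Illustrative layout implied by the stores in EmitCreateIteratorResult.
struct IteratorResultSketch {
  void* map;         // HeapObject::kMapOffset
  void* properties;  // JSObject::kPropertiesOffset, empty_fixed_array
  void* elements;    // JSObject::kElementsOffset, empty_fixed_array
  void* value;       // the result value (popped into r2 above)
  void* done;        // ToBoolean(done), a heap boolean constant
};
static_assert(sizeof(IteratorResultSketch) == 5 * sizeof(void*),
              "mirrors DCHECK_EQ(map->instance_size(), 5 * kPointerSize)");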
@ -2421,7 +2421,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,


void FullCodeGenerator::EmitAssignment(Expression* expr) {
ASSERT(expr->IsValidReferenceExpression());
DCHECK(expr->IsValidReferenceExpression());

// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@ -2490,14 +2490,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {

} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(r0);
__ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name.
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, r1);
__ ldr(r2, location);
@ -2509,8 +2509,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {

} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
ASSERT(!var->IsLookupSlot());
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
MemOperand location = VarOperand(var, r1);
__ ldr(r3, location);
@ -2534,7 +2534,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@ -2552,8 +2552,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
ASSERT(prop->key()->IsLiteral());
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());

// Record source code position before IC call.
SetSourcePosition(expr->position());
@ -2572,7 +2572,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister());
ASSERT(KeyedStoreIC::ValueRegister().is(r0));
DCHECK(KeyedStoreIC::ValueRegister().is(r0));

Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
@ -2634,7 +2634,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ Push(isolate()->factory()->undefined_value());
} else {
// Load the function from the receiver.
ASSERT(callee->IsProperty());
DCHECK(callee->IsProperty());
__ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@ -2657,7 +2657,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* callee = expr->expression();

// Load the function from the receiver.
ASSERT(callee->IsProperty());
DCHECK(callee->IsProperty());
__ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadIC::NameRegister(), r0);
EmitKeyedPropertyLoad(callee->AsProperty());
@ -2790,7 +2790,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in r0)
// and the object holding it (returned in edx).
ASSERT(!context_register().is(r2));
DCHECK(!context_register().is(r2));
__ mov(r2, Operand(proxy->name()));
__ Push(context_register(), r2);
__ CallRuntime(Runtime::kLoadLookupSlot, 2);
@ -2826,7 +2826,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitKeyedCallWithLoadIC(expr, property->key());
}
} else {
ASSERT(call_type == Call::OTHER_CALL);
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
@ -2839,7 +2839,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {

#ifdef DEBUG
// RecordJSReturnSite should have been called.
ASSERT(expr->return_is_recorded_);
DCHECK(expr->return_is_recorded_);
#endif
}

@ -2873,7 +2873,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
if (FLAG_pretenuring_call_new) {
EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
ASSERT(expr->AllocationSiteFeedbackSlot() ==
DCHECK(expr->AllocationSiteFeedbackSlot() ==
expr->CallNewFeedbackSlot() + 1);
}

@ -2889,7 +2889,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {

void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2910,7 +2910,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {

void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2931,7 +2931,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {

void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2964,7 +2964,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {

void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2986,7 +2986,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {

void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -3011,7 +3011,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -3099,7 +3099,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(

void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -3121,7 +3121,7 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {

void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -3147,7 +3147,7 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {

void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -3169,7 +3169,7 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {

void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -3191,7 +3191,7 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {


void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 0);
DCHECK(expr->arguments()->length() == 0);

Label materialize_true, materialize_false;
Label* if_true = NULL;
@ -3220,7 +3220,7 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {

void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
DCHECK(args->length() == 2);

// Load the two objects into registers and perform the comparison.
VisitForStackValue(args->at(0));
@ -3244,7 +3244,7 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {

void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

// ArgumentsAccessStub expects the key in edx and the formal
// parameter count in r0.
@ -3258,7 +3258,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {


void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 0);
DCHECK(expr->arguments()->length() == 0);

// Get the number of formal parameters.
__ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@ -3278,7 +3278,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {

void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);
Label done, null, function, non_function_constructor;

VisitForAccumulatorValue(args->at(0));
@ -3341,7 +3341,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
DCHECK(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
@ -3354,7 +3354,7 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
DCHECK(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
@ -3366,7 +3366,7 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {

void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0)); // Load the object.

Label done;
@ -3383,8 +3383,8 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {

void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
DCHECK(args->length() == 2);
DCHECK_NE(NULL, args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));

VisitForAccumulatorValue(args->at(0)); // Load the object.
@ -3430,7 +3430,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {

void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
DCHECK_EQ(3, args->length());

Register string = r0;
Register index = r1;
@ -3463,7 +3463,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {

void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
DCHECK_EQ(3, args->length());

Register string = r0;
Register index = r1;
@ -3499,7 +3499,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
MathPowStub stub(isolate(), MathPowStub::ON_STACK);
@ -3510,7 +3510,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {

void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
DCHECK(args->length() == 2);
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
__ pop(r1); // r0 = value. r1 = object.
@ -3538,7 +3538,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {

void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
DCHECK_EQ(args->length(), 1);
// Load the argument into r0 and call the stub.
VisitForAccumulatorValue(args->at(0));

@ -3550,7 +3550,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {

void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));

Label done;
@ -3568,7 +3568,7 @@ void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {

void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));

@ -3613,7 +3613,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {

void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));

@ -3660,7 +3660,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {

void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
DCHECK_EQ(2, args->length());
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));

@ -3673,7 +3673,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {

void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
DCHECK_EQ(2, args->length());
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));

@ -3685,7 +3685,7 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {

void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
DCHECK(args->length() >= 2);

int arg_count = args->length() - 2; // 2 ~ receiver and function.
for (int i = 0; i < arg_count + 1; i++) {
@ -3718,7 +3718,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
DCHECK(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(2));
@ -3731,8 +3731,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {

void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
DCHECK_EQ(2, args->length());
DCHECK_NE(NULL, args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();

Handle<FixedArray> jsfunction_result_caches(
@ -3800,7 +3800,7 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {

void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));

__ AssertString(r0);
@ -3817,7 +3817,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
DCHECK(args->length() == 2);
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(0));
@ -3975,7 +3975,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
DCHECK(result.is(r0));
__ b(&done);

// One-character separator case
@ -4007,7 +4007,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
DCHECK(result.is(r0));
__ b(&done);

// Long separator case (separator is more than one character). Entry is at the
@ -4037,7 +4037,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
DCHECK(result.is(r0));
__ b(&done);

__ bind(&bailout);
@ -4048,7 +4048,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {


void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 0);
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
ExternalReference::debug_is_active_address(isolate());
__ mov(ip, Operand(debug_is_active));
@ -4139,7 +4139,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
ASSERT(strict_mode() == SLOPPY || var->is_this());
DCHECK(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
@ -4154,7 +4154,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
ASSERT(!context_register().is(r2));
DCHECK(!context_register().is(r2));
__ mov(r2, Operand(var->name()));
__ Push(context_register(), r2);
__ CallRuntime(Runtime::kDeleteLookupSlot, 2);
@ -4195,7 +4195,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// for control and plugging the control flow into the context,
// because we need to prepare a pair of extra administrative AST ids
// for the optimizing compiler.
ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
Label materialize_true, materialize_false, done;
VisitForControl(expr->expression(),
&materialize_false,
@ -4232,7 +4232,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {


void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
ASSERT(expr->expression()->IsValidReferenceExpression());
DCHECK(expr->expression()->IsValidReferenceExpression());

Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@ -4251,7 +4251,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {

// Evaluate expression and get value.
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
@ -4412,8 +4412,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {


void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsEffect());
ASSERT(!context()->IsTest());
DCHECK(!context()->IsEffect());
DCHECK(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "[ Global variable");
@ -4646,7 +4646,7 @@ Register FullCodeGenerator::context_register() {


void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));
}

@ -4671,7 +4671,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// code. Fetch it from the context.
__ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
} else {
ASSERT(declaration_scope->is_function_scope());
DCHECK(declaration_scope->is_function_scope());
__ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
__ push(ip);
@ -4682,7 +4682,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// Non-local control flow support.

void FullCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(r1));
DCHECK(!result_register().is(r1));
// Store result register while executing finally block.
__ push(result_register());
// Cook return address in link register to stack (smi encoded Code* delta)
@ -4716,7 +4716,7 @@ void FullCodeGenerator::EnterFinallyBlock() {


void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(r1));
DCHECK(!result_register().is(r1));
// Restore pending message from stack.
__ pop(r1);
ExternalReference pending_message_script =
@ -4782,20 +4782,20 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
static Address GetInterruptImmediateLoadAddress(Address pc) {
Address load_address = pc - 2 * Assembler::kInstrSize;
if (!FLAG_enable_ool_constant_pool) {
ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
} else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) {
// This is an extended constant pool lookup.
load_address -= 2 * Assembler::kInstrSize;
ASSERT(Assembler::IsMovW(Memory::int32_at(load_address)));
ASSERT(Assembler::IsMovT(
DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
DCHECK(Assembler::IsMovT(
Memory::int32_at(load_address + Assembler::kInstrSize)));
} else if (Assembler::IsMovT(Memory::int32_at(load_address))) {
// This is a movw_movt immediate load.
load_address -= Assembler::kInstrSize;
ASSERT(Assembler::IsMovW(Memory::int32_at(load_address)));
DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
} else {
// This is a small constant pool lookup.
ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
}
return load_address;
}
@ -4859,7 +4859,7 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
Isolate* isolate,
Code* unoptimized_code,
Address pc) {
ASSERT(Assembler::IsBlxIp(Memory::int32_at(pc - Assembler::kInstrSize)));
DCHECK(Assembler::IsBlxIp(Memory::int32_at(pc - Assembler::kInstrSize)));

Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
@ -4867,19 +4867,19 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
pc_immediate_load_address, unoptimized_code);

if (Assembler::IsBranch(Assembler::instr_at(branch_address))) {
ASSERT(interrupt_address ==
DCHECK(interrupt_address ==
isolate->builtins()->InterruptCheck()->entry());
return INTERRUPT;
}

ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address)));
DCHECK(Assembler::IsNop(Assembler::instr_at(branch_address)));

if (interrupt_address ==
isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}

ASSERT(interrupt_address ==
DCHECK(interrupt_address ==
isolate->builtins()->OsrAfterStackCheck()->entry());
return OSR_AFTER_STACK_CHECK;
}
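Before the diff moves on to the IC code: GetBackEdgeState above distinguishes three states of a patched back edge. A hedged restatement of that three-way dispatch; the wrapper function is illustrative, while the enum values and builtin names come straight from the hunks above.

// Illustrative summary of GetBackEdgeState's decision tree. An unpatched
// back edge still branches over the interrupt call; a patched one has the
// branch replaced by a nop and the call target swapped for an OSR builtin.
BackEdgeTable::BackEdgeState ClassifyBackEdge(bool branch_still_present,
                                              Address interrupt_address,
                                              Builtins* builtins) {
  if (branch_still_present) return BackEdgeTable::INTERRUPT;
  if (interrupt_address == builtins->OnStackReplacement()->entry()) {
    return BackEdgeTable::ON_STACK_REPLACEMENT;
  }
  return BackEdgeTable::OSR_AFTER_STACK_CHECK;
}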
@ -169,7 +169,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
// objects work as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ cmp(scratch, Operand(JS_OBJECT_TYPE));
__ b(lt, slow);
@ -272,8 +272,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Register receiver = ReceiverRegister();
Register name = NameRegister();
ASSERT(receiver.is(r1));
ASSERT(name.is(r2));
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));

// Probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
@ -288,8 +288,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {

void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r0;
ASSERT(!dictionary.is(ReceiverRegister()));
ASSERT(!dictionary.is(NameRegister()));
DCHECK(!dictionary.is(ReceiverRegister()));
DCHECK(!dictionary.is(NameRegister()));

Label slow;

@ -423,8 +423,8 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// The return address is in lr.
Register receiver = ReceiverRegister();
Register key = NameRegister();
ASSERT(receiver.is(r1));
ASSERT(key.is(r2));
DCHECK(receiver.is(r1));
DCHECK(key.is(r2));

Label slow, notin;
MemOperand mapped_location =
@ -450,9 +450,9 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
Register receiver = ReceiverRegister();
Register key = NameRegister();
Register value = ValueRegister();
ASSERT(receiver.is(r1));
ASSERT(key.is(r2));
ASSERT(value.is(r0));
DCHECK(receiver.is(r1));
DCHECK(key.is(r2));
DCHECK(value.is(r0));

Label slow, notin;
MemOperand mapped_location = GenerateMappedArgumentsLookup(
@ -498,13 +498,13 @@ const Register LoadIC::NameRegister() { return r2; }


const Register LoadIC::SlotRegister() {
ASSERT(FLAG_vector_ics);
DCHECK(FLAG_vector_ics);
return r0;
}


const Register LoadIC::VectorRegister() {
ASSERT(FLAG_vector_ics);
DCHECK(FLAG_vector_ics);
return r3;
}

@ -535,8 +535,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {

Register key = NameRegister();
Register receiver = ReceiverRegister();
ASSERT(key.is(r2));
ASSERT(receiver.is(r1));
DCHECK(key.is(r2));
DCHECK(receiver.is(r1));

Isolate* isolate = masm->isolate();

@ -700,7 +700,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
Register index = NameRegister();
Register scratch = r3;
Register result = r0;
ASSERT(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!scratch.is(receiver) && !scratch.is(index));

StringCharAtGenerator char_at_generator(receiver,
index,
@ -729,8 +729,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
Register key = NameRegister();
Register scratch1 = r3;
Register scratch2 = r4;
ASSERT(!scratch1.is(receiver) && !scratch1.is(key));
ASSERT(!scratch2.is(receiver) && !scratch2.is(key));
DCHECK(!scratch1.is(receiver) && !scratch1.is(key));
DCHECK(!scratch2.is(receiver) && !scratch2.is(key));

// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &slow);
@ -988,9 +988,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Register value = ValueRegister();
Register key = NameRegister();
Register receiver = ReceiverRegister();
ASSERT(receiver.is(r1));
ASSERT(key.is(r2));
ASSERT(value.is(r0));
DCHECK(receiver.is(r1));
DCHECK(key.is(r2));
DCHECK(value.is(r0));
Register receiver_map = r3;
Register elements_map = r6;
Register elements = r9; // Elements array of the receiver.
@ -1078,9 +1078,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Register receiver = ReceiverRegister();
Register name = NameRegister();
ASSERT(receiver.is(r1));
ASSERT(name.is(r2));
ASSERT(ValueRegister().is(r0));
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
DCHECK(ValueRegister().is(r0));

// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
@ -1110,9 +1110,9 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register name = NameRegister();
Register value = ValueRegister();
Register dictionary = r3;
ASSERT(receiver.is(r1));
ASSERT(name.is(r2));
ASSERT(value.is(r0));
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
DCHECK(value.is(r0));

__ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

@ -1218,20 +1218,20 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
CodePatcher patcher(patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
if (check == ENABLE_INLINED_SMI_CHECK) {
ASSERT(Assembler::IsCmpRegister(instr_at_patch));
ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
DCHECK(Assembler::IsCmpRegister(instr_at_patch));
DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(),
Assembler::GetRm(instr_at_patch).code());
patcher.masm()->tst(reg, Operand(kSmiTagMask));
} else {
ASSERT(check == DISABLE_INLINED_SMI_CHECK);
ASSERT(Assembler::IsTstImmediate(instr_at_patch));
DCHECK(check == DISABLE_INLINED_SMI_CHECK);
DCHECK(Assembler::IsTstImmediate(instr_at_patch));
patcher.masm()->cmp(reg, reg);
}
ASSERT(Assembler::IsBranch(branch_instr));
DCHECK(Assembler::IsBranch(branch_instr));
if (Assembler::GetCondition(branch_instr) == eq) {
patcher.EmitCondition(ne);
} else {
ASSERT(Assembler::GetCondition(branch_instr) == ne);
DCHECK(Assembler::GetCondition(branch_instr) == ne);
patcher.EmitCondition(eq);
}
}
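PatchInlinedSmiCode above toggles between cmp reg, reg (always equal, check disabled) and tst reg, #kSmiTagMask (check enabled), flipping the branch condition to match. The tst works because of V8's pointer tagging; a hedged sketch of that invariant, assuming the usual kSmiTag == 0 and kSmiTagMask == 1 on 32-bit ARM:

// Illustrative smi check: small integers carry a 0 tag bit, heap
// pointers a 1, so a single AND classifies a tagged value.
inline bool IsSmiSketch(intptr_t value) {
  return (value & 1) == 0;  // kSmiTagMask == 1, kSmiTag == 0 assumed
}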
@ -24,17 +24,17 @@ void LInstruction::VerifyCall() {
// outputs because all registers are blocked by the calling convention.
// Inputs operands must use a fixed register or use-at-start policy or
// a non-register policy.
ASSERT(Output() == NULL ||
DCHECK(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
for (UseIterator it(this); !it.Done(); it.Advance()) {
LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() ||
DCHECK(operand->HasFixedPolicy() ||
operand->IsUsedAtStart());
}
for (TempIterator it(this); !it.Done(); it.Advance()) {
LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
}
}
#endif
@ -355,7 +355,7 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
}

if (value() == NULL) {
ASSERT(hydrogen()->IsConstantHoleStore() &&
DCHECK(hydrogen()->IsConstantHoleStore() &&
hydrogen()->value()->representation().IsDouble());
stream->Add("<the hole(nan)>");
} else {
@ -391,14 +391,14 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
ASSERT(kind == GENERAL_REGISTERS);
DCHECK(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}


LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
DCHECK(is_unused());
chunk_ = new(zone()) LPlatformChunk(info(), graph());
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
@ -609,7 +609,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,


LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
DCHECK(!instr->HasPointerMap());
instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@ -643,14 +643,14 @@ LUnallocated* LChunkBuilder::TempDoubleRegister() {

LOperand* LChunkBuilder::FixedTemp(Register reg) {
LUnallocated* operand = ToUnallocated(reg);
ASSERT(operand->HasFixedPolicy());
DCHECK(operand->HasFixedPolicy());
return operand;
}


LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
LUnallocated* operand = ToUnallocated(reg);
ASSERT(operand->HasFixedPolicy());
DCHECK(operand->HasFixedPolicy());
return operand;
}

@ -679,8 +679,8 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->left());

HValue* right_value = instr->right();
@ -721,9 +721,9 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,

LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
DCHECK(instr->representation().IsDouble());
DCHECK(instr->left()->representation().IsDouble());
DCHECK(instr->right()->representation().IsDouble());
if (op == Token::MOD) {
LOperand* left = UseFixedDouble(instr->left(), d0);
LOperand* right = UseFixedDouble(instr->right(), d1);
@ -742,8 +742,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
DCHECK(left->representation().IsTagged());
DCHECK(right->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0);
@ -754,7 +754,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,


void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
ASSERT(is_building());
DCHECK(is_building());
current_block_ = block;
next_block_ = next_block;
if (block->IsStartBlock()) {
@ -763,13 +763,13 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
} else if (block->predecessors()->length() == 1) {
// We have a single predecessor => copy environment and outgoing
// argument count from the predecessor.
ASSERT(block->phis()->length() == 0);
DCHECK(block->phis()->length() == 0);
HBasicBlock* pred = block->predecessors()->at(0);
HEnvironment* last_environment = pred->last_environment();
ASSERT(last_environment != NULL);
DCHECK(last_environment != NULL);
// Only copy the environment, if it is later used again.
if (pred->end()->SecondSuccessor() == NULL) {
ASSERT(pred->end()->FirstSuccessor() == block);
DCHECK(pred->end()->FirstSuccessor() == block);
} else {
if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
@ -777,7 +777,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
}
}
block->UpdateEnvironment(last_environment);
ASSERT(pred->argument_count() >= 0);
DCHECK(pred->argument_count() >= 0);
argument_count_ = pred->argument_count();
} else {
// We are at a state join => process phis.
@ -829,7 +829,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
ASSERT(!current->OperandAt(0)->IsControlInstruction());
DCHECK(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
@ -852,7 +852,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}

argument_count_ += current->argument_delta();
ASSERT(argument_count_ >= 0);
DCHECK(argument_count_ >= 0);

if (instr != NULL) {
AddInstruction(instr, current);
@ -894,7 +894,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LUnallocated* operand = LUnallocated::cast(it.Current());
if (operand->HasFixedPolicy()) ++fixed;
}
ASSERT(fixed == 0 || used_at_start == 0);
DCHECK(fixed == 0 || used_at_start == 0);
}
#endif

@ -958,7 +958,7 @@ LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {


LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
return new(zone()) LCmpMapAndBranch(value, temp);
@ -1171,8 +1171,8 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {


LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseFixedDouble(instr->value(), d0);
return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), d0), instr);
}
@ -1186,8 +1186,8 @@ LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {


LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
@ -1263,9 +1263,9 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {

LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));

LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
@ -1277,9 +1277,9 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {


LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->representation().IsSmiOrInteger32());
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
@ -1295,9 +1295,9 @@ LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {


LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->representation().IsInteger32());
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
@ -1312,9 +1312,9 @@ LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {


LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->representation().IsSmiOrInteger32());
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp =
@ -1365,9 +1365,9 @@ LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {


LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->representation().IsInteger32());
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LOperand* temp =
@ -1385,9 +1385,9 @@ LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {


LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->representation().IsSmiOrInteger32());
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp =
@ -1409,9 +1409,9 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {


LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->representation().IsSmiOrInteger32());
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegisterAtStart(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
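DoDivByPowerOf2I and DoModByPowerOf2I above exist because dividing by a constant power of two reduces to shifts. The wrinkle is rounding: integer division truncates toward zero, so negative dividends need a bias before the arithmetic shift. A hedged sketch of the scalar trick these lithium instructions stand in for (the helpers are illustrative, not V8 code):

// Truncating division by 2^k via shift, valid for 0 < k < 31.
int32_t DivByPowerOf2(int32_t x, int k) {
  int32_t bias = (x >> 31) & ((1 << k) - 1);  // 2^k - 1 if x < 0, else 0
  return (x + bias) >> k;                     // rounds toward zero
}

int32_t ModByPowerOf2(int32_t x, int k) {
  return x - (DivByPowerOf2(x, k) << k);      // remainder takes x's sign
}

For example, DivByPowerOf2(-5, 1) biases -5 to -4 before shifting, giving -2 rather than the floor result -3.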
@ -1425,9 +1425,9 @@ LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {


LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->representation().IsSmiOrInteger32());
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
@ -1440,9 +1440,9 @@ LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {


LInstruction* LChunkBuilder::DoModI(HMod* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->representation().IsSmiOrInteger32());
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp =
@ -1478,8 +1478,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {

LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
HValue* left = instr->BetterLeftOperand();
HValue* right = instr->BetterRightOperand();
LOperand* left_op;
@ -1548,8 +1548,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {

LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));

if (instr->left()->IsConstant()) {
// If lhs is constant, do reverse subtraction instead.
@ -1577,9 +1577,9 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {


LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->representation().IsSmiOrInteger32());
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));

// Note: The lhs of the subtraction becomes the rhs of the
// reverse-subtraction.
@ -1616,8 +1616,8 @@ LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {

LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
|
||||
@ -1627,9 +1627,9 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
|
||||
}
|
||||
return result;
|
||||
} else if (instr->representation().IsExternal()) {
|
||||
ASSERT(instr->left()->representation().IsExternal());
|
||||
ASSERT(instr->right()->representation().IsInteger32());
|
||||
ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
|
||||
DCHECK(instr->left()->representation().IsExternal());
|
||||
DCHECK(instr->right()->representation().IsInteger32());
|
||||
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
|
||||
LOperand* left = UseRegisterAtStart(instr->left());
|
||||
LOperand* right = UseOrConstantAtStart(instr->right());
|
||||
LAddI* add = new(zone()) LAddI(left, right);
|
||||
@ -1641,7 +1641,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
|
||||
}
|
||||
|
||||
if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
|
||||
ASSERT(!instr->left()->IsMul() || !instr->left()->HasOneUse());
|
||||
DCHECK(!instr->left()->IsMul() || !instr->left()->HasOneUse());
|
||||
return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
|
||||
}
|
||||
|
||||
@ -1656,14 +1656,14 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
|
||||
LOperand* left = NULL;
|
||||
LOperand* right = NULL;
|
||||
if (instr->representation().IsSmiOrInteger32()) {
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
left = UseRegisterAtStart(instr->BetterLeftOperand());
|
||||
right = UseOrConstantAtStart(instr->BetterRightOperand());
|
||||
} else {
|
||||
ASSERT(instr->representation().IsDouble());
|
||||
ASSERT(instr->left()->representation().IsDouble());
|
||||
ASSERT(instr->right()->representation().IsDouble());
|
||||
DCHECK(instr->representation().IsDouble());
|
||||
DCHECK(instr->left()->representation().IsDouble());
|
||||
DCHECK(instr->right()->representation().IsDouble());
|
||||
left = UseRegisterAtStart(instr->left());
|
||||
right = UseRegisterAtStart(instr->right());
|
||||
}
|
||||
@ -1672,11 +1672,11 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
|
||||
ASSERT(instr->representation().IsDouble());
|
||||
DCHECK(instr->representation().IsDouble());
|
||||
// We call a C function for double power. It can't trigger a GC.
|
||||
// We need to use fixed result register for the call.
|
||||
Representation exponent_type = instr->right()->representation();
|
||||
ASSERT(instr->left()->representation().IsDouble());
|
||||
DCHECK(instr->left()->representation().IsDouble());
|
||||
LOperand* left = UseFixedDouble(instr->left(), d0);
|
||||
LOperand* right = exponent_type.IsDouble() ?
|
||||
UseFixedDouble(instr->right(), d1) :
|
||||
@ -1689,8 +1689,8 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
|
||||
ASSERT(instr->left()->representation().IsTagged());
|
||||
ASSERT(instr->right()->representation().IsTagged());
|
||||
DCHECK(instr->left()->representation().IsTagged());
|
||||
DCHECK(instr->right()->representation().IsTagged());
|
||||
LOperand* context = UseFixed(instr->context(), cp);
|
||||
LOperand* left = UseFixed(instr->left(), r1);
|
||||
LOperand* right = UseFixed(instr->right(), r0);
|
||||
@ -1703,15 +1703,15 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
|
||||
HCompareNumericAndBranch* instr) {
|
||||
Representation r = instr->representation();
|
||||
if (r.IsSmiOrInteger32()) {
|
||||
ASSERT(instr->left()->representation().Equals(r));
|
||||
ASSERT(instr->right()->representation().Equals(r));
|
||||
DCHECK(instr->left()->representation().Equals(r));
|
||||
DCHECK(instr->right()->representation().Equals(r));
|
||||
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
|
||||
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
|
||||
return new(zone()) LCompareNumericAndBranch(left, right);
|
||||
} else {
|
||||
ASSERT(r.IsDouble());
|
||||
ASSERT(instr->left()->representation().IsDouble());
|
||||
ASSERT(instr->right()->representation().IsDouble());
|
||||
DCHECK(r.IsDouble());
|
||||
DCHECK(instr->left()->representation().IsDouble());
|
||||
DCHECK(instr->right()->representation().IsDouble());
|
||||
LOperand* left = UseRegisterAtStart(instr->left());
|
||||
LOperand* right = UseRegisterAtStart(instr->right());
|
||||
return new(zone()) LCompareNumericAndBranch(left, right);
|
||||
@ -1743,7 +1743,7 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
LOperand* temp = TempRegister();
|
||||
return new(zone()) LIsObjectAndBranch(value, temp);
|
||||
@ -1751,7 +1751,7 @@ LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
LOperand* temp = TempRegister();
|
||||
return new(zone()) LIsStringAndBranch(value, temp);
|
||||
@ -1759,14 +1759,14 @@ LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
return new(zone()) LIsSmiAndBranch(Use(instr->value()));
|
||||
}
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
|
||||
HIsUndetectableAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
|
||||
}
|
||||
@ -1774,8 +1774,8 @@ LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
|
||||
|
||||
LInstruction* LChunkBuilder::DoStringCompareAndBranch(
|
||||
HStringCompareAndBranch* instr) {
|
||||
ASSERT(instr->left()->representation().IsTagged());
|
||||
ASSERT(instr->right()->representation().IsTagged());
|
||||
DCHECK(instr->left()->representation().IsTagged());
|
||||
DCHECK(instr->right()->representation().IsTagged());
|
||||
LOperand* context = UseFixed(instr->context(), cp);
|
||||
LOperand* left = UseFixed(instr->left(), r1);
|
||||
LOperand* right = UseFixed(instr->right(), r0);
|
||||
@ -1787,7 +1787,7 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
|
||||
|
||||
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
|
||||
HHasInstanceTypeAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
return new(zone()) LHasInstanceTypeAndBranch(value);
|
||||
}
|
||||
@ -1795,7 +1795,7 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
|
||||
|
||||
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
|
||||
HGetCachedArrayIndex* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
|
||||
return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
|
||||
@ -1804,7 +1804,7 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
|
||||
|
||||
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
|
||||
HHasCachedArrayIndexAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
return new(zone()) LHasCachedArrayIndexAndBranch(
|
||||
UseRegisterAtStart(instr->value()));
|
||||
}
|
||||
@ -1812,7 +1812,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
|
||||
|
||||
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
|
||||
HClassOfTestAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegister(instr->value());
|
||||
return new(zone()) LClassOfTestAndBranch(value, TempRegister());
|
||||
}
|
||||
@ -1915,7 +1915,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
|
||||
}
|
||||
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
|
||||
} else {
|
||||
ASSERT(to.IsInteger32());
|
||||
DCHECK(to.IsInteger32());
|
||||
if (val->type().IsSmi() || val->representation().IsSmi()) {
|
||||
LOperand* value = UseRegisterAtStart(val);
|
||||
return DefineAsRegister(new(zone()) LSmiUntag(value, false));
|
||||
@ -1943,7 +1943,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
|
||||
return AssignEnvironment(
|
||||
DefineAsRegister(new(zone()) LDoubleToSmi(value)));
|
||||
} else {
|
||||
ASSERT(to.IsInteger32());
|
||||
DCHECK(to.IsInteger32());
|
||||
LOperand* value = UseRegister(val);
|
||||
LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
|
||||
if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
|
||||
@ -1976,7 +1976,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
|
||||
}
|
||||
return result;
|
||||
} else {
|
||||
ASSERT(to.IsDouble());
|
||||
DCHECK(to.IsDouble());
|
||||
if (val->CheckFlag(HInstruction::kUint32)) {
|
||||
return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
|
||||
} else {
|
||||
@ -2039,7 +2039,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
|
||||
} else if (input_rep.IsInteger32()) {
|
||||
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
|
||||
} else {
|
||||
ASSERT(input_rep.IsSmiOrTagged());
|
||||
DCHECK(input_rep.IsSmiOrTagged());
|
||||
// Register allocator doesn't (yet) support allocation of double
|
||||
// temps. Reserve d1 explicitly.
|
||||
LClampTToUint8* result =
|
||||
@ -2051,7 +2051,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
|
||||
|
||||
LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
|
||||
HValue* value = instr->value();
|
||||
ASSERT(value->representation().IsDouble());
|
||||
DCHECK(value->representation().IsDouble());
|
||||
return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
|
||||
}
|
||||
|
||||
@ -2186,7 +2186,7 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
|
||||
ASSERT(instr->key()->representation().IsSmiOrInteger32());
|
||||
DCHECK(instr->key()->representation().IsSmiOrInteger32());
|
||||
ElementsKind elements_kind = instr->elements_kind();
|
||||
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
|
||||
LInstruction* result = NULL;
|
||||
@ -2196,12 +2196,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
|
||||
if (instr->representation().IsDouble()) {
|
||||
obj = UseRegister(instr->elements());
|
||||
} else {
|
||||
ASSERT(instr->representation().IsSmiOrTagged());
|
||||
DCHECK(instr->representation().IsSmiOrTagged());
|
||||
obj = UseRegisterAtStart(instr->elements());
|
||||
}
|
||||
result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
|
||||
} else {
|
||||
ASSERT(
|
||||
DCHECK(
|
||||
(instr->representation().IsInteger32() &&
|
||||
!IsDoubleOrFloatElementsKind(elements_kind)) ||
|
||||
(instr->representation().IsDouble() &&
|
||||
@ -2242,7 +2242,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
|
||||
|
||||
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
|
||||
if (!instr->is_typed_elements()) {
|
||||
ASSERT(instr->elements()->representation().IsTagged());
|
||||
DCHECK(instr->elements()->representation().IsTagged());
|
||||
bool needs_write_barrier = instr->NeedsWriteBarrier();
|
||||
LOperand* object = NULL;
|
||||
LOperand* key = NULL;
|
||||
@ -2253,7 +2253,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
|
||||
val = UseRegister(instr->value());
|
||||
key = UseRegisterOrConstantAtStart(instr->key());
|
||||
} else {
|
||||
ASSERT(instr->value()->representation().IsSmiOrTagged());
|
||||
DCHECK(instr->value()->representation().IsSmiOrTagged());
|
||||
if (needs_write_barrier) {
|
||||
object = UseTempRegister(instr->elements());
|
||||
val = UseTempRegister(instr->value());
|
||||
@ -2268,12 +2268,12 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
|
||||
return new(zone()) LStoreKeyed(object, key, val);
|
||||
}
|
||||
|
||||
ASSERT(
|
||||
DCHECK(
|
||||
(instr->value()->representation().IsInteger32() &&
|
||||
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
|
||||
(instr->value()->representation().IsDouble() &&
|
||||
IsDoubleOrFloatElementsKind(instr->elements_kind())));
|
||||
ASSERT((instr->is_fixed_typed_array() &&
|
||||
DCHECK((instr->is_fixed_typed_array() &&
|
||||
instr->elements()->representation().IsTagged()) ||
|
||||
(instr->is_external() &&
|
||||
instr->elements()->representation().IsExternal()));
|
||||
@ -2290,9 +2290,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
|
||||
LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister());
|
||||
LOperand* val = UseFixed(instr->value(), KeyedStoreIC::ValueRegister());
|
||||
|
||||
ASSERT(instr->object()->representation().IsTagged());
|
||||
ASSERT(instr->key()->representation().IsTagged());
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->object()->representation().IsTagged());
|
||||
DCHECK(instr->key()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
|
||||
return MarkAsCall(
|
||||
new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
|
||||
@ -2425,7 +2425,7 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
|
||||
ASSERT(argument_count_ == 0);
|
||||
DCHECK(argument_count_ == 0);
|
||||
allocator_->MarkAsOsrEntry();
|
||||
current_block_->last_environment()->set_ast_id(instr->ast_id());
|
||||
return AssignEnvironment(new(zone()) LOsrEntry);
|
||||
@ -2438,7 +2438,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
|
||||
int spill_index = chunk()->GetParameterStackSlot(instr->index());
|
||||
return DefineAsSpilled(result, spill_index);
|
||||
} else {
|
||||
ASSERT(info()->IsStub());
|
||||
DCHECK(info()->IsStub());
|
||||
CodeStubInterfaceDescriptor* descriptor =
|
||||
info()->code_stub()->GetInterfaceDescriptor();
|
||||
int index = static_cast<int>(instr->index());
|
||||
@ -2534,7 +2534,7 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
|
||||
LOperand* context = UseFixed(instr->context(), cp);
|
||||
return MarkAsCall(new(zone()) LStackCheck(context), instr);
|
||||
} else {
|
||||
ASSERT(instr->is_backwards_branch());
|
||||
DCHECK(instr->is_backwards_branch());
|
||||
LOperand* context = UseAny(instr->context());
|
||||
return AssignEnvironment(
|
||||
AssignPointerMap(new(zone()) LStackCheck(context)));
|
||||
@ -2570,7 +2570,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
|
||||
if (env->entry()->arguments_pushed()) {
|
||||
int argument_count = env->arguments_environment()->parameter_count();
|
||||
pop = new(zone()) LDrop(argument_count);
|
||||
ASSERT(instr->argument_delta() == -argument_count);
|
||||
DCHECK(instr->argument_delta() == -argument_count);
|
||||
}
|
||||
|
||||
HEnvironment* outer = current_block_->last_environment()->
|
||||
|
@ -174,7 +174,7 @@ class LCodeGen;
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
ASSERT(instr->Is##type()); \
DCHECK(instr->Is##type()); \
return reinterpret_cast<L##type*>(instr); \
}

@ -334,7 +334,7 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
virtual bool IsGap() const V8_OVERRIDE { return true; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
}

@ -1578,7 +1578,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
return parameter_count()->IsConstantOperand();
}
LConstantOperand* constant_parameter_count() {
ASSERT(has_constant_parameter_count());
DCHECK(has_constant_parameter_count());
return LConstantOperand::cast(parameter_count());
}
LOperand* parameter_count() { return inputs_[2]; }
@ -1879,7 +1879,7 @@ class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
Zone* zone)
: descriptor_(descriptor),
inputs_(descriptor->GetRegisterParameterCount() + 1, zone) {
ASSERT(descriptor->GetRegisterParameterCount() + 1 == operands.length());
DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length());
inputs_.AddAll(operands, zone);
}

File diff suppressed because it is too large
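For context, the following is a minimal sketch of the shape of the DCHECK macro family this rename targets. It is an assumption for illustration only — the real definitions live in V8's base/logging.h and differ in detail — but it shows the pattern visible throughout this diff: DCHECK/DCHECK_EQ are debug-only checks, and SLOW_DCHECK is additionally gated on ENABLE_SLOW_DCHECKS (see the LGapResolver::Verify hunk below).

// Hypothetical sketch, not the actual base/ code from this change.
// DCHECK fails fast in debug builds and compiles away in release builds,
// mirroring the ASSERT it replaces. V8_Fatal and the exact macro bodies
// here are illustrative assumptions.
#ifdef DEBUG
#define DCHECK(condition)                                             \
  do {                                                                \
    if (!(condition))                                                 \
      V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", #condition);  \
  } while (false)
#define DCHECK_EQ(expected, actual) DCHECK((expected) == (actual))
#else
#define DCHECK(condition) ((void) 0)
#define DCHECK_EQ(expected, actual) ((void) 0)
#endif

// SLOW_DCHECK is only active when slow checks are explicitly enabled.
#ifdef ENABLE_SLOW_DCHECKS
#define SLOW_DCHECK(condition) DCHECK(condition)
#else
#define SLOW_DCHECK(condition) ((void) 0)
#endif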
@ -355,14 +355,14 @@ class LCodeGen: public LCodeGenBase {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
DCHECK(codegen_->info()->is_calling());
DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
codegen_->masm_->PushSafepointRegisters();
}

~PushSafepointRegistersScope() {
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
codegen_->masm_->PopSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}
@ -29,7 +29,7 @@ LGapResolver::LGapResolver(LCodeGen* owner)


void LGapResolver::Resolve(LParallelMove* parallel_move) {
ASSERT(moves_.is_empty());
DCHECK(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);

@ -50,13 +50,13 @@ void LGapResolver::Resolve(LParallelMove* parallel_move) {
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
ASSERT(moves_[i].source()->IsConstantOperand());
DCHECK(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}

if (need_to_restore_root_) {
ASSERT(kSavedValueRegister.is(kRootRegister));
DCHECK(kSavedValueRegister.is(kRootRegister));
__ InitializeRootRegister();
need_to_restore_root_ = false;
}
@ -94,13 +94,13 @@ void LGapResolver::PerformMove(int index) {
// An additional complication is that moves to MemOperands with large
// offsets (more than 1K or 4K) require us to spill this spilled value to
// the stack, to free up the register.
ASSERT(!moves_[index].IsPending());
ASSERT(!moves_[index].IsRedundant());
DCHECK(!moves_[index].IsPending());
DCHECK(!moves_[index].IsRedundant());

// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);

@ -127,7 +127,7 @@ void LGapResolver::PerformMove(int index) {
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
ASSERT(other_move.IsPending());
DCHECK(other_move.IsPending());
BreakCycle(index);
return;
}
@ -138,12 +138,12 @@ void LGapResolver::PerformMove(int index) {


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
#ifdef ENABLE_SLOW_DCHECKS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
}
}
#endif
@ -154,8 +154,8 @@ void LGapResolver::BreakCycle(int index) {
// We save in a register the source of that move and we remember its
// destination. Then we mark this move as resolved so the cycle is
// broken and we can perform the other moves.
ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
ASSERT(!in_cycle_);
DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
DCHECK(!in_cycle_);
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
@ -178,8 +178,8 @@ void LGapResolver::BreakCycle(int index) {


void LGapResolver::RestoreValue() {
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
DCHECK(in_cycle_);
DCHECK(saved_destination_ != NULL);

if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
@ -210,7 +210,7 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsRegister()) {
__ mov(cgen_->ToRegister(destination), source_register);
} else {
ASSERT(destination->IsStackSlot());
DCHECK(destination->IsStackSlot());
__ str(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
@ -218,7 +218,7 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsRegister()) {
__ ldr(cgen_->ToRegister(destination), source_operand);
} else {
ASSERT(destination->IsStackSlot());
DCHECK(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (!destination_operand.OffsetIsUint12Encodable()) {
// ip is overwritten while saving the value to the destination.
@ -248,8 +248,8 @@ void LGapResolver::EmitMove(int index) {
double v = cgen_->ToDouble(constant_source);
__ Vmov(result, v, ip);
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
DCHECK(destination->IsStackSlot());
DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
need_to_restore_root_ = true;
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
@ -267,7 +267,7 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
} else {
ASSERT(destination->IsDoubleStackSlot());
DCHECK(destination->IsDoubleStackSlot());
__ vstr(source_register, cgen_->ToMemOperand(destination));
}

@ -276,7 +276,7 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);
} else {
ASSERT(destination->IsDoubleStackSlot());
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
// kScratchDoubleReg was used to break the cycle.
@ -36,21 +36,21 @@ void MacroAssembler::Jump(Register target, Condition cond) {

void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
DCHECK(RelocInfo::IsCodeTarget(rmode));
mov(pc, Operand(target, rmode), LeaveCC, cond);
}


void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
ASSERT(!RelocInfo::IsCodeTarget(rmode));
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
DCHECK(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
@ -68,7 +68,7 @@ void MacroAssembler::Call(Register target, Condition cond) {
Label start;
bind(&start);
blx(target, cond);
ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}


@ -138,7 +138,7 @@ void MacroAssembler::Call(Address target,
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);

ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(old_predictable_code_size);
}
@ -161,7 +161,7 @@ void MacroAssembler::Call(Handle<Code> code,
TargetAddressStorageMode mode) {
Label start;
bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
DCHECK(RelocInfo::IsCodeTarget(rmode));
if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
SetRecordedAstId(ast_id);
rmode = RelocInfo::CODE_TARGET_WITH_ID;
@ -222,7 +222,7 @@ void MacroAssembler::Move(Register dst, Handle<Object> value) {
if (value->IsSmi()) {
mov(dst, Operand(value));
} else {
ASSERT(value->IsHeapObject());
DCHECK(value->IsHeapObject());
if (isolate()->heap()->InNewSpace(*value)) {
Handle<Cell> cell = isolate()->factory()->NewCell(value);
mov(dst, Operand(cell));
@ -254,7 +254,7 @@ void MacroAssembler::Mls(Register dst, Register src1, Register src2,
CpuFeatureScope scope(this, MLS);
mls(dst, src1, src2, srcA, cond);
} else {
ASSERT(!srcA.is(ip));
DCHECK(!srcA.is(ip));
mul(ip, src1, src2, LeaveCC, cond);
sub(dst, srcA, ip, LeaveCC, cond);
}
@ -281,7 +281,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,

void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
DCHECK(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
@ -296,7 +296,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,

void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
DCHECK(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
@ -320,10 +320,10 @@ void MacroAssembler::Bfi(Register dst,
int lsb,
int width,
Condition cond) {
ASSERT(0 <= lsb && lsb < 32);
ASSERT(0 <= width && width < 32);
ASSERT(lsb + width < 32);
ASSERT(!scratch.is(dst));
DCHECK(0 <= lsb && lsb < 32);
DCHECK(0 <= width && width < 32);
DCHECK(lsb + width < 32);
DCHECK(!scratch.is(dst));
if (width == 0) return;
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
@ -339,7 +339,7 @@ void MacroAssembler::Bfi(Register dst,

void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
DCHECK(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, src, Operand(mask));
@ -353,13 +353,13 @@ void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
Condition cond) {
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
ASSERT(!dst.is(pc) && !src.rm().is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
DCHECK(!dst.is(pc) && !src.rm().is(pc));
DCHECK((satpos >= 0) && (satpos <= 31));

// These asserts are required to ensure compatibility with the ARMv7
// implementation.
ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
ASSERT(src.rs().is(no_reg));
DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
DCHECK(src.rs().is(no_reg));

Label done;
int satval = (1 << satpos) - 1;
@ -384,7 +384,7 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
void MacroAssembler::Load(Register dst,
const MemOperand& src,
Representation r) {
ASSERT(!r.IsDouble());
DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
ldrsb(dst, src);
} else if (r.IsUInteger8()) {
@ -402,7 +402,7 @@ void MacroAssembler::Load(Register dst,
void MacroAssembler::Store(Register src,
const MemOperand& dst,
Representation r) {
ASSERT(!r.IsDouble());
DCHECK(!r.IsDouble());
if (r.IsInteger8() || r.IsUInteger8()) {
strb(src, dst);
} else if (r.IsInteger16() || r.IsUInteger16()) {
@ -445,7 +445,7 @@ void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
Label* branch) {
ASSERT(cond == eq || cond == ne);
DCHECK(cond == eq || cond == ne);
and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
b(cond, branch);
@ -473,7 +473,7 @@ void MacroAssembler::RecordWriteField(

// Although the object register is tagged, the offset is relative to the start
// of the object, so so offset must be a multiple of kPointerSize.
ASSERT(IsAligned(offset, kPointerSize));
DCHECK(IsAligned(offset, kPointerSize));

add(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
@ -586,7 +586,7 @@ void MacroAssembler::RecordWrite(
RememberedSetAction remembered_set_action,
SmiCheck smi_check,
PointersToHereCheck pointers_to_here_check_for_value) {
ASSERT(!object.is(value));
DCHECK(!object.is(value));
if (emit_debug_code()) {
ldr(ip, MemOperand(address));
cmp(ip, value);
@ -673,7 +673,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
if (and_then == kFallThroughAtEnd) {
b(eq, &done);
} else {
ASSERT(and_then == kReturnAtEnd);
DCHECK(and_then == kReturnAtEnd);
Ret(eq);
}
push(lr);
@ -689,7 +689,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.


void MacroAssembler::PushFixedFrame(Register marker_reg) {
ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
cp.bit() |
(FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
@ -699,7 +699,7 @@ void MacroAssembler::PushFixedFrame(Register marker_reg) {


void MacroAssembler::PopFixedFrame(Register marker_reg) {
ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
cp.bit() |
(FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
@ -711,11 +711,11 @@ void MacroAssembler::PopFixedFrame(Register marker_reg) {
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of contiguous register values starting with r0:
ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
ASSERT(num_unsaved >= 0);
DCHECK(num_unsaved >= 0);
sub(sp, sp, Operand(num_unsaved * kPointerSize));
stm(db_w, sp, kSafepointSavedRegisters);
}
@ -741,7 +741,7 @@ void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
return reg_code;
}

@ -753,7 +753,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {

MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// Number of d-regs not known at snapshot time.
ASSERT(!serializer_enabled());
DCHECK(!serializer_enabled());
// General purpose registers are pushed last on the stack.
int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
@ -763,12 +763,12 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {

void MacroAssembler::Ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(src.rm().is(no_reg));
ASSERT(!dst1.is(lr)); // r14.
DCHECK(src.rm().is(no_reg));
DCHECK(!dst1.is(lr)); // r14.

// V8 does not use this addressing mode, so the fallback code
// below doesn't support it yet.
ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));

// Generate two ldr instructions if ldrd is not available.
if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
@ -787,7 +787,7 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
ldr(dst2, src2, cond);
}
} else { // PostIndex or NegPostIndex.
ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex));
if (dst1.is(src.rn())) {
ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
ldr(dst1, src, cond);
@ -804,12 +804,12 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,

void MacroAssembler::Strd(Register src1, Register src2,
const MemOperand& dst, Condition cond) {
ASSERT(dst.rm().is(no_reg));
ASSERT(!src1.is(lr)); // r14.
DCHECK(dst.rm().is(no_reg));
DCHECK(!src1.is(lr)); // r14.

// V8 does not use this addressing mode, so the fallback code
// below doesn't support it yet.
ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));

// Generate two str instructions if strd is not available.
if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
@ -823,7 +823,7 @@ void MacroAssembler::Strd(Register src1, Register src2,
str(src1, dst, cond);
str(src2, dst2, cond);
} else { // PostIndex or NegPostIndex.
ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
dst2.set_offset(dst2.offset() - 4);
str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
str(src2, dst2, cond);
@ -953,7 +953,7 @@ void MacroAssembler::LoadConstantPoolPointerRegister() {
if (FLAG_enable_ool_constant_pool) {
int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
pc_offset() - Instruction::kPCReadOffset;
ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
ldr(pp, MemOperand(pc, constant_pool_offset));
}
}
@ -1037,9 +1037,9 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type) {

void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Set up the frame structure on the stack.
ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
Push(lr, fp);
mov(fp, Operand(sp)); // Set up new frame pointer.
// Reserve room for saved entry sp and code object.
@ -1075,7 +1075,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
if (frame_alignment > 0) {
ASSERT(IsPowerOf2(frame_alignment));
DCHECK(IsPowerOf2(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
}

@ -1194,12 +1194,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
// passed in registers.
ASSERT(actual.is_immediate() || actual.reg().is(r0));
ASSERT(expected.is_immediate() || expected.reg().is(r2));
ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
DCHECK(actual.is_immediate() || actual.reg().is(r0));
DCHECK(expected.is_immediate() || expected.reg().is(r2));
DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));

if (expected.is_immediate()) {
ASSERT(actual.is_immediate());
DCHECK(actual.is_immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
@ -1256,7 +1256,7 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
DCHECK(flag == JUMP_FUNCTION || has_frame());

Label done;
bool definitely_mismatches = false;
@ -1269,7 +1269,7 @@ void MacroAssembler::InvokeCode(Register code,
Call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
DCHECK(flag == JUMP_FUNCTION);
Jump(code);
}

@ -1285,10 +1285,10 @@ void MacroAssembler::InvokeFunction(Register fun,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
DCHECK(flag == JUMP_FUNCTION || has_frame());

// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
DCHECK(fun.is(r1));

Register expected_reg = r2;
Register code_reg = r3;
@ -1313,10 +1313,10 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
DCHECK(flag == JUMP_FUNCTION || has_frame());

// Contract with called JS functions requires that function is passed in r1.
ASSERT(function.is(r1));
DCHECK(function.is(r1));

// Get the function and setup the context.
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@ -1362,7 +1362,7 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
void MacroAssembler::IsObjectJSStringType(Register object,
Register scratch,
Label* fail) {
ASSERT(kNotStringTag != 0);
DCHECK(kNotStringTag != 0);

ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
@ -1385,7 +1385,7 @@ void MacroAssembler::DebugBreak() {
mov(r0, Operand::Zero());
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(isolate(), 1);
ASSERT(AllowThisStubCall(&ces));
DCHECK(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}

@ -1533,9 +1533,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Label* miss) {
Label same_contexts;

ASSERT(!holder_reg.is(scratch));
ASSERT(!holder_reg.is(ip));
ASSERT(!scratch.is(ip));
DCHECK(!holder_reg.is(scratch));
DCHECK(!holder_reg.is(ip));
DCHECK(!scratch.is(ip));

// Load current lexical context from the stack frame.
ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -1683,7 +1683,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
and_(t2, t2, Operand(t1));

// Scale the index by multiplying by the element size.
ASSERT(SeededNumberDictionary::kEntrySize == 3);
DCHECK(SeededNumberDictionary::kEntrySize == 3);
add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3

// Check if the key is identical to the name.
@ -1719,7 +1719,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -1731,17 +1731,17 @@ void MacroAssembler::Allocate(int object_size,
return;
}

ASSERT(!result.is(scratch1));
ASSERT(!result.is(scratch2));
ASSERT(!scratch1.is(scratch2));
ASSERT(!scratch1.is(ip));
ASSERT(!scratch2.is(ip));
DCHECK(!result.is(scratch1));
DCHECK(!result.is(scratch2));
DCHECK(!scratch1.is(scratch2));
DCHECK(!scratch1.is(ip));
DCHECK(!scratch2.is(ip));

// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
ASSERT_EQ(0, object_size & kObjectAlignmentMask);
DCHECK_EQ(0, object_size & kObjectAlignmentMask);

// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDM.
@ -1756,8 +1756,8 @@ void MacroAssembler::Allocate(int object_size,
reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
reinterpret_cast<intptr_t>(allocation_limit.address());
ASSERT((limit - top) == kPointerSize);
ASSERT(result.code() < ip.code());
DCHECK((limit - top) == kPointerSize);
DCHECK(result.code() < ip.code());

// Set up allocation top address register.
Register topaddr = scratch1;
@ -1784,7 +1784,7 @@ void MacroAssembler::Allocate(int object_size,
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
@ -1801,7 +1801,7 @@ void MacroAssembler::Allocate(int object_size,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. We must preserve the ip register at this
// point, so we cannot just use add().
ASSERT(object_size > 0);
DCHECK(object_size > 0);
Register source = result;
Condition cond = al;
int shift = 0;
@ -1813,7 +1813,7 @@ void MacroAssembler::Allocate(int object_size,
object_size -= bits;
shift += 8;
Operand bits_operand(bits);
ASSERT(bits_operand.instructions_required(this) == 1);
DCHECK(bits_operand.instructions_required(this) == 1);
add(scratch2, source, bits_operand, SetCC, cond);
source = scratch2;
cond = cc;
@ -1850,13 +1850,13 @@ void MacroAssembler::Allocate(Register object_size,

// Assert that the register arguments are different and that none of
// them are ip. ip is used explicitly in the code generated below.
ASSERT(!result.is(scratch1));
ASSERT(!result.is(scratch2));
ASSERT(!scratch1.is(scratch2));
ASSERT(!object_size.is(ip));
ASSERT(!result.is(ip));
ASSERT(!scratch1.is(ip));
ASSERT(!scratch2.is(ip));
DCHECK(!result.is(scratch1));
DCHECK(!result.is(scratch2));
DCHECK(!scratch1.is(scratch2));
DCHECK(!object_size.is(ip));
DCHECK(!result.is(ip));
DCHECK(!scratch1.is(ip));
DCHECK(!scratch2.is(ip));

// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDM.
@ -1870,8 +1870,8 @@ void MacroAssembler::Allocate(Register object_size,
reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
reinterpret_cast<intptr_t>(allocation_limit.address());
ASSERT((limit - top) == kPointerSize);
ASSERT(result.code() < ip.code());
DCHECK((limit - top) == kPointerSize);
DCHECK(result.code() < ip.code());

// Set up allocation top address.
Register topaddr = scratch1;
@ -1898,8 +1898,8 @@ void MacroAssembler::Allocate(Register object_size,
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
@ -1966,7 +1966,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
add(scratch1, scratch1,
Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
@ -1997,8 +1997,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kCharSize == 1);
DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
DCHECK(kCharSize == 1);
add(scratch1, length,
Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
@ -2129,7 +2129,7 @@ void MacroAssembler::CompareInstanceType(Register map,

void MacroAssembler::CompareRoot(Register obj,
Heap::RootListIndex index) {
ASSERT(!obj.is(ip));
DCHECK(!obj.is(ip));
LoadRoot(ip, index);
cmp(obj, ip);
}
@ -2343,7 +2343,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub,
TypeFeedbackId ast_id,
Condition cond) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}

@ -2374,7 +2374,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference::handle_scope_level_address(isolate()),
next_address);

ASSERT(function_address.is(r1) || function_address.is(r2));
DCHECK(function_address.is(r1) || function_address.is(r2));

Label profiler_disabled;
Label end_profiler_check;
@ -2496,7 +2496,7 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it does not
// conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
}
@ -2516,7 +2516,7 @@ void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {

void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
LowDwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
DCHECK(!double_input.is(double_scratch));
vcvt_s32_f64(double_scratch.low(), double_input);
vcvt_f64_s32(double_scratch, double_scratch.low());
VFPCompareAndSetFlags(double_input, double_scratch);
@ -2526,7 +2526,7 @@ void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
LowDwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
DCHECK(!double_input.is(double_scratch));
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
vcvt_f64_s32(double_scratch, double_scratch.low());
@ -2540,8 +2540,8 @@ void MacroAssembler::TryInt32Floor(Register result,
LowDwVfpRegister double_scratch,
Label* done,
Label* exact) {
ASSERT(!result.is(input_high));
ASSERT(!double_input.is(double_scratch));
DCHECK(!result.is(input_high));
DCHECK(!double_input.is(double_scratch));
Label negative, exception;

VmovHigh(input_high, double_input);
@ -2619,7 +2619,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result,
Register object) {
Label done;
LowDwVfpRegister double_scratch = kScratchDoubleReg;
ASSERT(!result.is(object));
DCHECK(!result.is(object));

vldr(double_scratch,
MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
@ -2646,7 +2646,7 @@ void MacroAssembler::TruncateNumberToI(Register object,
Register scratch1,
Label* not_number) {
Label done;
ASSERT(!result.is(object));
DCHECK(!result.is(object));

UntagAndJumpIfSmi(result, object, &done);
JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
@ -2730,7 +2730,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
#if defined(__thumb__)
// Thumb mode builtin.
ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
mov(r1, Operand(builtin));
CEntryStub stub(isolate(), 1);
@ -2742,7 +2742,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
|
||||
InvokeFlag flag,
|
||||
const CallWrapper& call_wrapper) {
|
||||
// You can't call a builtin without a valid frame.
|
||||
ASSERT(flag == JUMP_FUNCTION || has_frame());
|
||||
DCHECK(flag == JUMP_FUNCTION || has_frame());
|
||||
|
||||
GetBuiltinEntry(r2, id);
|
||||
if (flag == CALL_FUNCTION) {
|
||||
@ -2750,7 +2750,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
|
||||
Call(r2);
|
||||
call_wrapper.AfterCall();
|
||||
} else {
|
||||
ASSERT(flag == JUMP_FUNCTION);
|
||||
DCHECK(flag == JUMP_FUNCTION);
|
||||
Jump(r2);
|
||||
}
|
||||
}
|
||||
@ -2769,7 +2769,7 @@ void MacroAssembler::GetBuiltinFunction(Register target,
|
||||
|
||||
|
||||
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
|
||||
ASSERT(!target.is(r1));
|
||||
DCHECK(!target.is(r1));
|
||||
GetBuiltinFunction(r1, id);
|
||||
// Load the code entry point from the builtins object.
|
||||
ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
|
||||
@ -2788,7 +2788,7 @@ void MacroAssembler::SetCounter(StatsCounter* counter, int value,
|
||||
|
||||
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
|
||||
Register scratch1, Register scratch2) {
|
||||
ASSERT(value > 0);
|
||||
DCHECK(value > 0);
|
||||
if (FLAG_native_code_counters && counter->Enabled()) {
|
||||
mov(scratch2, Operand(ExternalReference(counter)));
|
||||
ldr(scratch1, MemOperand(scratch2));
|
||||
@ -2800,7 +2800,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
|
||||
|
||||
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
|
||||
Register scratch1, Register scratch2) {
|
||||
ASSERT(value > 0);
|
||||
DCHECK(value > 0);
|
||||
if (FLAG_native_code_counters && counter->Enabled()) {
|
||||
mov(scratch2, Operand(ExternalReference(counter)));
|
||||
ldr(scratch1, MemOperand(scratch2));
|
||||
@ -2818,7 +2818,7 @@ void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
|
||||
|
||||
void MacroAssembler::AssertFastElements(Register elements) {
|
||||
if (emit_debug_code()) {
|
||||
ASSERT(!elements.is(ip));
|
||||
DCHECK(!elements.is(ip));
|
||||
Label ok;
|
||||
push(elements);
|
||||
ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
|
||||
@ -2882,7 +2882,7 @@ void MacroAssembler::Abort(BailoutReason reason) {
|
||||
// of the Abort macro constant.
|
||||
static const int kExpectedAbortInstructions = 7;
|
||||
int abort_instructions = InstructionsGeneratedSince(&abort_start);
|
||||
ASSERT(abort_instructions <= kExpectedAbortInstructions);
|
||||
DCHECK(abort_instructions <= kExpectedAbortInstructions);
|
||||
while (abort_instructions++ < kExpectedAbortInstructions) {
|
||||
nop();
|
||||
}
|
||||
@ -3489,7 +3489,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
|
||||
// and the original value of sp.
|
||||
mov(scratch, sp);
|
||||
sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
|
||||
ASSERT(IsPowerOf2(frame_alignment));
|
||||
DCHECK(IsPowerOf2(frame_alignment));
|
||||
and_(sp, sp, Operand(-frame_alignment));
|
||||
str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
|
||||
} else {
|
||||
@ -3505,7 +3505,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
|
||||
|
||||
|
||||
void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
|
||||
ASSERT(src.is(d0));
|
||||
DCHECK(src.is(d0));
|
||||
if (!use_eabi_hardfloat()) {
|
||||
vmov(r0, r1, src);
|
||||
}
|
||||
@ -3520,8 +3520,8 @@ void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
|
||||
|
||||
void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
|
||||
DwVfpRegister src2) {
|
||||
ASSERT(src1.is(d0));
|
||||
ASSERT(src2.is(d1));
|
||||
DCHECK(src1.is(d0));
|
||||
DCHECK(src2.is(d1));
|
||||
if (!use_eabi_hardfloat()) {
|
||||
vmov(r0, r1, src1);
|
||||
vmov(r2, r3, src2);
|
||||
@ -3559,7 +3559,7 @@ void MacroAssembler::CallCFunction(Register function,
|
||||
void MacroAssembler::CallCFunctionHelper(Register function,
|
||||
int num_reg_arguments,
|
||||
int num_double_arguments) {
|
||||
ASSERT(has_frame());
|
||||
DCHECK(has_frame());
|
||||
// Make sure that the stack is aligned before calling a C function unless
|
||||
// running in the simulator. The simulator has its own alignment check which
|
||||
// provides more information.
|
||||
@ -3568,7 +3568,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
|
||||
int frame_alignment = base::OS::ActivationFrameAlignment();
|
||||
int frame_alignment_mask = frame_alignment - 1;
|
||||
if (frame_alignment > kPointerSize) {
|
||||
ASSERT(IsPowerOf2(frame_alignment));
|
||||
DCHECK(IsPowerOf2(frame_alignment));
|
||||
Label alignment_as_expected;
|
||||
tst(sp, Operand(frame_alignment_mask));
|
||||
b(eq, &alignment_as_expected);
|
||||
@ -3693,7 +3693,7 @@ void MacroAssembler::JumpIfBlack(Register object,
|
||||
Register scratch1,
|
||||
Label* on_black) {
|
||||
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
|
||||
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
|
||||
DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
|
||||
}
|
||||
|
||||
|
||||
@ -3703,7 +3703,7 @@ void MacroAssembler::HasColor(Register object,
|
||||
Label* has_color,
|
||||
int first_bit,
|
||||
int second_bit) {
|
||||
ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
|
||||
DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
|
||||
|
||||
GetMarkBits(object, bitmap_scratch, mask_scratch);
|
||||
|
||||
@ -3736,8 +3736,8 @@ void MacroAssembler::JumpIfDataObject(Register value,
|
||||
ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
|
||||
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
|
||||
b(eq, &is_data_object);
|
||||
ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
|
||||
ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
|
||||
DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
|
||||
DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
|
||||
// If it's a string and it's not a cons string then it's an object containing
|
||||
// no GC pointers.
|
||||
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
|
||||
@ -3750,7 +3750,7 @@ void MacroAssembler::JumpIfDataObject(Register value,
|
||||
void MacroAssembler::GetMarkBits(Register addr_reg,
|
||||
Register bitmap_reg,
|
||||
Register mask_reg) {
|
||||
ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
|
||||
DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
|
||||
and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
|
||||
Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
|
||||
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
|
||||
@ -3767,14 +3767,14 @@ void MacroAssembler::EnsureNotWhite(
|
||||
Register mask_scratch,
|
||||
Register load_scratch,
|
||||
Label* value_is_white_and_not_data) {
|
||||
ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
|
||||
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
|
||||
GetMarkBits(value, bitmap_scratch, mask_scratch);
|
||||
|
||||
// If the value is black or grey we don't need to do anything.
|
||||
ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
|
||||
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
|
||||
ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
|
||||
ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
|
||||
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
|
||||
DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
|
||||
DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
|
||||
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
|
||||
|
||||
Label done;
|
||||
|
||||
@ -3807,8 +3807,8 @@ void MacroAssembler::EnsureNotWhite(
|
||||
b(eq, &is_data_object);
|
||||
|
||||
// Check for strings.
|
||||
ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
|
||||
ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
|
||||
DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
|
||||
DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
|
||||
// If it's a string and it's not a cons string then it's an object containing
|
||||
// no GC pointers.
|
||||
Register instance_type = load_scratch;
|
||||
@ -3820,8 +3820,8 @@ void MacroAssembler::EnsureNotWhite(
|
||||
// Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
|
||||
// External strings are the only ones with the kExternalStringTag bit
|
||||
// set.
|
||||
ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
|
||||
ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
|
||||
DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
|
||||
DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
|
||||
tst(instance_type, Operand(kExternalStringTag));
|
||||
mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
|
||||
b(ne, &is_data_object);
|
||||
@ -3830,8 +3830,8 @@ void MacroAssembler::EnsureNotWhite(
|
||||
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
|
||||
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
|
||||
// getting the length multiplied by 2.
|
||||
ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
|
||||
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
|
||||
DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
|
||||
DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
|
||||
ldr(ip, FieldMemOperand(value, String::kLengthOffset));
|
||||
tst(instance_type, Operand(kStringEncodingMask));
|
||||
mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
|
||||
@ -3994,7 +3994,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
|
||||
Register scratch0,
|
||||
Register scratch1,
|
||||
Label* found) {
|
||||
ASSERT(!scratch1.is(scratch0));
|
||||
DCHECK(!scratch1.is(scratch0));
|
||||
Factory* factory = isolate()->factory();
|
||||
Register current = scratch0;
|
||||
Label loop_again;
|
||||
@ -4054,7 +4054,7 @@ CodePatcher::CodePatcher(byte* address,
|
||||
// Create a new macro assembler pointing to the address of the code to patch.
|
||||
// The size is adjusted with kGap on order for the assembler to generate size
|
||||
// bytes of instructions without failing with buffer size constraints.
|
||||
ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
|
||||
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
|
||||
}
|
||||
|
||||
|
||||
@ -4065,8 +4065,8 @@ CodePatcher::~CodePatcher() {
|
||||
}
|
||||
|
||||
// Check that the code was patched as expected.
|
||||
ASSERT(masm_.pc_ == address_ + size_);
|
||||
ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
|
||||
DCHECK(masm_.pc_ == address_ + size_);
|
||||
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
|
||||
}
|
||||
|
||||
|
||||
@ -4090,9 +4090,9 @@ void CodePatcher::EmitCondition(Condition cond) {
|
||||
void MacroAssembler::TruncatingDiv(Register result,
|
||||
Register dividend,
|
||||
int32_t divisor) {
|
||||
ASSERT(!dividend.is(result));
|
||||
ASSERT(!dividend.is(ip));
|
||||
ASSERT(!result.is(ip));
|
||||
DCHECK(!dividend.is(result));
|
||||
DCHECK(!dividend.is(ip));
|
||||
DCHECK(!result.is(ip));
|
||||
MultiplierAndShift ms(divisor);
|
||||
mov(ip, Operand(ms.multiplier()));
|
||||
smull(ip, result, dividend, ip);
|
||||
|
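
A note for readers tracing the substitution through these hunks: a DCHECK-style macro family is a debug-only runtime check that compiles away entirely in release builds. The sketch below is illustrative only, assuming a V8_Fatal-style reporting hook; it is not the exact definition in the tree at this revision.

// Minimal sketch of a debug-only check macro family (assumption, not the
// verbatim definitions from this revision's headers).
#ifdef DEBUG
#define DCHECK(condition)                                            \
  do {                                                               \
    if (!(condition)) {                                              \
      V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", #condition); \
    }                                                                \
  } while (false)
#define DCHECK_EQ(expected, actual) DCHECK((expected) == (actual))
#else
// Release builds: the checks vanish and cost nothing.
#define DCHECK(condition) ((void) 0)
#define DCHECK_EQ(expected, actual) ((void) 0)
#endif
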
@ -314,7 +314,7 @@ class MacroAssembler: public Assembler {

// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
DCHECK(!src1.is(src2));
if (src1.code() > src2.code()) {
stm(db_w, sp, src1.bit() | src2.bit(), cond);
} else {
@ -325,9 +325,9 @@ class MacroAssembler: public Assembler {

// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
DCHECK(!src1.is(src2));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@ -347,12 +347,12 @@ class MacroAssembler: public Assembler {
Register src3,
Register src4,
Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
ASSERT(!src1.is(src4));
ASSERT(!src2.is(src4));
ASSERT(!src3.is(src4));
DCHECK(!src1.is(src2));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src3));
DCHECK(!src1.is(src4));
DCHECK(!src2.is(src4));
DCHECK(!src3.is(src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@ -376,7 +376,7 @@ class MacroAssembler: public Assembler {

// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
DCHECK(!src1.is(src2));
if (src1.code() > src2.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
} else {
@ -387,9 +387,9 @@ class MacroAssembler: public Assembler {

// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
DCHECK(!src1.is(src2));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@ -409,12 +409,12 @@ class MacroAssembler: public Assembler {
Register src3,
Register src4,
Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
ASSERT(!src1.is(src4));
ASSERT(!src2.is(src4));
ASSERT(!src3.is(src4));
DCHECK(!src1.is(src2));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src3));
DCHECK(!src1.is(src4));
DCHECK(!src2.is(src4));
DCHECK(!src3.is(src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@ -687,7 +687,7 @@ class MacroAssembler: public Assembler {
// These instructions are generated to mark special location in the code,
// like some special IC code.
static inline bool IsMarkedCode(Instr instr, int type) {
ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
return IsNop(instr, type);
}

@ -707,7 +707,7 @@ class MacroAssembler: public Assembler {
(FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
? src_reg
: -1;
ASSERT((type == -1) ||
DCHECK((type == -1) ||
((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
return type;
}
@ -947,7 +947,7 @@ class MacroAssembler: public Assembler {
ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
tst(type, Operand(kIsNotStringMask), cond);
ASSERT_EQ(0, kStringTag);
DCHECK_EQ(0, kStringTag);
return eq;
}

@ -1144,7 +1144,7 @@ class MacroAssembler: public Assembler {
void GetBuiltinFunction(Register target, Builtins::JavaScript id);

Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());
DCHECK(!code_object_.is_null());
return code_object_;
}

@ -1547,7 +1547,7 @@ class FrameAndConstantPoolScope {
old_constant_pool_available_(masm->is_constant_pool_available()) {
// We only want to enable constant pool access for non-manual frame scopes
// to ensure the constant pool pointer is valid throughout the scope.
ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
masm->set_has_frame(true);
masm->set_constant_pool_available(true);
masm->EnterFrame(type, !old_constant_pool_available_);
@ -1565,7 +1565,7 @@ class FrameAndConstantPoolScope {
// scope, the MacroAssembler is still marked as being in a frame scope, and
// the code will be generated again when it goes out of scope.
void GenerateLeaveFrame() {
ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
masm_->LeaveFrame(type_);
}

@ -110,7 +110,7 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
success_label_(),
backtrack_label_(),
exit_label_() {
ASSERT_EQ(0, registers_to_save % 2);
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
__ bind(&start_label_); // And then continue from here.
}
@ -143,8 +143,8 @@ void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) {


void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) {
ASSERT(reg >= 0);
ASSERT(reg < num_registers_);
DCHECK(reg >= 0);
DCHECK(reg < num_registers_);
if (by != 0) {
__ ldr(r0, register_location(reg));
__ add(r0, r0, Operand(by));
@ -287,7 +287,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Compute new value of character position after the matched part.
__ sub(current_input_offset(), r2, end_of_input_address());
} else {
ASSERT(mode_ == UC16);
DCHECK(mode_ == UC16);
int argument_count = 4;
__ PrepareCallCFunction(argument_count, r2);

@ -358,7 +358,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
__ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
__ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
} else {
ASSERT(mode_ == UC16);
DCHECK(mode_ == UC16);
__ ldrh(r3, MemOperand(r0, char_size(), PostIndex));
__ ldrh(r4, MemOperand(r2, char_size(), PostIndex));
}
@ -411,7 +411,7 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
ASSERT(minus < String::kMaxUtf16CodeUnit);
DCHECK(minus < String::kMaxUtf16CodeUnit);
__ sub(r0, current_character(), Operand(minus));
__ and_(r0, r0, Operand(mask));
__ cmp(r0, Operand(c));
@ -710,7 +710,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ add(r1, r1, Operand(r2));
// r1 is length of string in characters.

ASSERT_EQ(0, num_saved_registers_ % 2);
DCHECK_EQ(0, num_saved_registers_ % 2);
// Always an even number of capture registers. This allows us to
// unroll the loop once to add an operation between a load of a register
// and the following use of that register.
@ -895,8 +895,8 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
if (check_bounds) {
CheckPosition(cp_offset + characters - 1, on_end_of_input);
}
@ -961,7 +961,7 @@ void RegExpMacroAssemblerARM::SetCurrentPositionFromEnd(int by) {


void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
__ mov(r0, Operand(to));
__ str(r0, register_location(register_index));
}
@ -985,7 +985,7 @@ void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(int reg,


void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
ASSERT(reg_from <= reg_to);
DCHECK(reg_from <= reg_to);
__ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ str(r0, register_location(reg));
@ -1012,7 +1012,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {

// We need to make room for the return address on the stack.
int stack_alignment = base::OS::ActivationFrameAlignment();
ASSERT(IsAligned(stack_alignment, kPointerSize));
DCHECK(IsAligned(stack_alignment, kPointerSize));
__ sub(sp, sp, Operand(stack_alignment));

// r0 will point to the return address, placed by DirectCEntry.
@ -1027,7 +1027,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
// Drop the return address from the stack.
__ add(sp, sp, Operand(stack_alignment));

ASSERT(stack_alignment != 0);
DCHECK(stack_alignment != 0);
__ ldr(sp, MemOperand(sp, 0));

__ mov(code_pointer(), Operand(masm_->CodeObject()));
@ -1069,8 +1069,8 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
// Current string.
bool is_ascii = subject->IsOneByteRepresentationUnderneath();

ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
re_code->instruction_start() + re_code->instruction_size());

Object* result = isolate->stack_guard()->HandleInterrupts();
@ -1109,7 +1109,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
// be a sequential or external string with the same content.
// Update the start and end pointers in the stack frame to the current
// location (whether it has actually moved or not).
ASSERT(StringShape(*subject_tmp).IsSequential() ||
DCHECK(StringShape(*subject_tmp).IsSequential() ||
StringShape(*subject_tmp).IsExternal());

// The original start address of the characters to match.
@ -1141,7 +1141,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,


MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
ASSERT(register_index < (1<<30));
DCHECK(register_index < (1<<30));
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
@ -1194,14 +1194,14 @@ void RegExpMacroAssemblerARM::SafeCallTarget(Label* name) {


void RegExpMacroAssemblerARM::Push(Register source) {
ASSERT(!source.is(backtrack_stackpointer()));
DCHECK(!source.is(backtrack_stackpointer()));
__ str(source,
MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex));
}


void RegExpMacroAssemblerARM::Pop(Register target) {
ASSERT(!target.is(backtrack_stackpointer()));
DCHECK(!target.is(backtrack_stackpointer()));
__ ldr(target,
MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex));
}
@ -1246,7 +1246,7 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
// If unaligned load/stores are not supported then this function must only
// be used to load a single character at a time.
if (!CanReadUnaligned()) {
ASSERT(characters == 1);
DCHECK(characters == 1);
}

if (mode_ == ASCII) {
@ -1255,15 +1255,15 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
} else if (characters == 2) {
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
} else {
ASSERT(characters == 1);
DCHECK(characters == 1);
__ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
}
} else {
ASSERT(mode_ == UC16);
DCHECK(mode_ == UC16);
if (characters == 2) {
__ ldr(current_character(), MemOperand(end_of_input_address(), offset));
} else {
ASSERT(characters == 1);
DCHECK(characters == 1);
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
}
}
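
The simulator hunks below convert several two-argument equality checks. A brief usage sketch, assuming (as is typical for _EQ-style macros) that the two-argument form can report both operand values on failure, which a plain boolean check cannot:

// Usage sketch; the exact failure-message behavior is an assumption.
DCHECK_EQ(0, start & CachePage::kPageMask);   // on failure, can show expected and actual
DCHECK((start & CachePage::kPageMask) == 0);  // on failure, shows only the expression text
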
@ -87,7 +87,7 @@ void ArmDebugger::Stop(Instruction* instr) {
char** msg_address =
reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
char* msg = *msg_address;
ASSERT(msg != NULL);
DCHECK(msg != NULL);

// Update this stop description.
if (isWatchedStop(code) && !watched_stops_[code].desc) {
@ -608,8 +608,8 @@ void ArmDebugger::Debug() {


static bool ICacheMatch(void* one, void* two) {
ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
return one == two;
}

@ -646,7 +646,7 @@ void Simulator::FlushICache(v8::internal::HashMap* i_cache,
FlushOnePage(i_cache, start, bytes_to_flush);
start += bytes_to_flush;
size -= bytes_to_flush;
ASSERT_EQ(0, start & CachePage::kPageMask);
DCHECK_EQ(0, start & CachePage::kPageMask);
offset = 0;
}
if (size != 0) {
@ -671,10 +671,10 @@ CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
intptr_t start,
int size) {
ASSERT(size <= CachePage::kPageSize);
ASSERT(AllOnOnePage(start, size - 1));
ASSERT((start & CachePage::kLineMask) == 0);
ASSERT((size & CachePage::kLineMask) == 0);
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
DCHECK((size & CachePage::kLineMask) == 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(i_cache, page);
@ -814,7 +814,7 @@ class Redirection {
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
ASSERT_EQ(current->type(), type);
DCHECK_EQ(current->type(), type);
return current;
}
}
@ -853,7 +853,7 @@ void* Simulator::RedirectExternalReference(void* external_function,
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
ASSERT(isolate_data != NULL);
DCHECK(isolate_data != NULL);

Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
@ -868,7 +868,7 @@ Simulator* Simulator::current(Isolate* isolate) {
// Sets the register in the architecture state. It will also deal with updating
// Simulator internal state for special registers such as PC.
void Simulator::set_register(int reg, int32_t value) {
ASSERT((reg >= 0) && (reg < num_registers));
DCHECK((reg >= 0) && (reg < num_registers));
if (reg == pc) {
pc_modified_ = true;
}
@ -879,7 +879,7 @@ void Simulator::set_register(int reg, int32_t value) {
// Get the register from the architecture state. This function does handle
// the special case of accessing the PC register.
int32_t Simulator::get_register(int reg) const {
ASSERT((reg >= 0) && (reg < num_registers));
DCHECK((reg >= 0) && (reg < num_registers));
// Stupid code added to avoid bug in GCC.
// See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
if (reg >= num_registers) return 0;
@ -889,7 +889,7 @@ int32_t Simulator::get_register(int reg) const {


double Simulator::get_double_from_register_pair(int reg) {
ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
DCHECK((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));

double dm_val = 0.0;
// Read the bits from the unsigned integer register_[] array
@ -902,62 +902,62 @@ double Simulator::get_double_from_register_pair(int reg) {


void Simulator::set_register_pair_from_double(int reg, double* value) {
ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
DCHECK((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
memcpy(registers_ + reg, value, sizeof(*value));
}


void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
DCHECK((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
registers_[dreg + 1] = dbl[1];
}


void Simulator::get_d_register(int dreg, uint64_t* value) {
ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value));
}


void Simulator::set_d_register(int dreg, const uint64_t* value) {
ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value));
}


void Simulator::get_d_register(int dreg, uint32_t* value) {
ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value) * 2);
}


void Simulator::set_d_register(int dreg, const uint32_t* value) {
ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
}


void Simulator::get_q_register(int qreg, uint64_t* value) {
ASSERT((qreg >= 0) && (qreg < num_q_registers));
DCHECK((qreg >= 0) && (qreg < num_q_registers));
memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2);
}


void Simulator::set_q_register(int qreg, const uint64_t* value) {
ASSERT((qreg >= 0) && (qreg < num_q_registers));
DCHECK((qreg >= 0) && (qreg < num_q_registers));
memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2);
}


void Simulator::get_q_register(int qreg, uint32_t* value) {
ASSERT((qreg >= 0) && (qreg < num_q_registers));
DCHECK((qreg >= 0) && (qreg < num_q_registers));
memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4);
}


void Simulator::set_q_register(int qreg, const uint32_t* value) {
ASSERT((qreg >= 0) && (qreg < num_q_registers));
DCHECK((qreg >= 0) && (qreg < num_q_registers));
memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4);
}

@ -982,22 +982,22 @@ int32_t Simulator::get_pc() const {

// Getting from and setting into VFP registers.
void Simulator::set_s_register(int sreg, unsigned int value) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
DCHECK((sreg >= 0) && (sreg < num_s_registers));
vfp_registers_[sreg] = value;
}


unsigned int Simulator::get_s_register(int sreg) const {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
DCHECK((sreg >= 0) && (sreg < num_s_registers));
return vfp_registers_[sreg];
}


template<class InputType, int register_size>
void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
DCHECK(reg_index >= 0);
if (register_size == 1) DCHECK(reg_index < num_s_registers);
if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters());

char buffer[register_size * sizeof(vfp_registers_[0])];
memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
@ -1008,9 +1008,9 @@ void Simulator::SetVFPRegister(int reg_index, const InputType& value) {

template<class ReturnType, int register_size>
ReturnType Simulator::GetFromVFPRegister(int reg_index) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
DCHECK(reg_index >= 0);
if (register_size == 1) DCHECK(reg_index < num_s_registers);
if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters());

ReturnType value = 0;
char buffer[register_size * sizeof(vfp_registers_[0])];
@ -1430,7 +1430,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
*carry_out = (result & 1) == 1;
result >>= 1;
} else {
ASSERT(shift_amount >= 32);
DCHECK(shift_amount >= 32);
if (result < 0) {
*carry_out = true;
result = 0xffffffff;
@ -1453,7 +1453,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
*carry_out = (result & 1) == 1;
result = 0;
} else {
ASSERT(shift_amount > 32);
DCHECK(shift_amount > 32);
*carry_out = false;
result = 0;
}
@ -1575,7 +1575,7 @@ void Simulator::HandleRList(Instruction* instr, bool load) {

intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
// Catch null pointers a little earlier.
ASSERT(start_address > 8191 || start_address < 0);
DCHECK(start_address > 8191 || start_address < 0);
int reg = 0;
while (rlist != 0) {
if ((rlist & 1) != 0) {
@ -1589,7 +1589,7 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
reg++;
rlist >>= 1;
}
ASSERT(end_address == ((intptr_t)address) - 4);
DCHECK(end_address == ((intptr_t)address) - 4);
if (instr->HasW()) {
set_register(instr->RnValue(), rn_val);
}
@ -1648,7 +1648,7 @@ void Simulator::HandleVList(Instruction* instr) {
address += 2;
}
}
ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
DCHECK(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
if (instr->HasW()) {
set_register(instr->RnValue(), rn_val);
}
@ -1853,7 +1853,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@ -1929,13 +1929,13 @@ bool Simulator::isStopInstruction(Instruction* instr) {


bool Simulator::isWatchedStop(uint32_t code) {
ASSERT(code <= kMaxStopCode);
DCHECK(code <= kMaxStopCode);
return code < kNumOfWatchedStops;
}


bool Simulator::isEnabledStop(uint32_t code) {
ASSERT(code <= kMaxStopCode);
DCHECK(code <= kMaxStopCode);
// Unwatched stops are always enabled.
return !isWatchedStop(code) ||
!(watched_stops_[code].count & kStopDisabledBit);
@ -1943,7 +1943,7 @@ bool Simulator::isEnabledStop(uint32_t code) {


void Simulator::EnableStop(uint32_t code) {
ASSERT(isWatchedStop(code));
DCHECK(isWatchedStop(code));
if (!isEnabledStop(code)) {
watched_stops_[code].count &= ~kStopDisabledBit;
}
@ -1951,7 +1951,7 @@ void Simulator::EnableStop(uint32_t code) {


void Simulator::DisableStop(uint32_t code) {
ASSERT(isWatchedStop(code));
DCHECK(isWatchedStop(code));
if (isEnabledStop(code)) {
watched_stops_[code].count |= kStopDisabledBit;
}
@ -1959,8 +1959,8 @@ void Simulator::DisableStop(uint32_t code) {


void Simulator::IncreaseStopCounter(uint32_t code) {
ASSERT(code <= kMaxStopCode);
ASSERT(isWatchedStop(code));
DCHECK(code <= kMaxStopCode);
DCHECK(isWatchedStop(code));
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
PrintF("Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n", code);
@ -1974,7 +1974,7 @@ void Simulator::IncreaseStopCounter(uint32_t code) {

// Print a stop status.
void Simulator::PrintStopInfo(uint32_t code) {
ASSERT(code <= kMaxStopCode);
DCHECK(code <= kMaxStopCode);
if (!isWatchedStop(code)) {
PrintF("Stop not watched.");
} else {
@ -2092,7 +2092,7 @@ void Simulator::DecodeType01(Instruction* instr) {
switch (instr->PUField()) {
case da_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
addr = rn_val;
rn_val -= rm_val;
set_register(rn, rn_val);
@ -2100,7 +2100,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case ia_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
addr = rn_val;
rn_val += rm_val;
set_register(rn, rn_val);
@ -2135,7 +2135,7 @@ void Simulator::DecodeType01(Instruction* instr) {
switch (instr->PUField()) {
case da_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
addr = rn_val;
rn_val -= imm_val;
set_register(rn, rn_val);
@ -2143,7 +2143,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case ia_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
addr = rn_val;
rn_val += imm_val;
set_register(rn, rn_val);
@ -2175,7 +2175,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
}
if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
ASSERT((rd % 2) == 0);
DCHECK((rd % 2) == 0);
if (instr->HasH()) {
// The strd instruction.
int32_t value1 = get_register(rd);
@ -2206,8 +2206,8 @@ void Simulator::DecodeType01(Instruction* instr) {
}
} else {
// signed byte loads
ASSERT(instr->HasSign());
ASSERT(instr->HasL());
DCHECK(instr->HasSign());
DCHECK(instr->HasL());
int8_t val = ReadB(addr);
set_register(rd, val);
}
@ -2271,7 +2271,7 @@ void Simulator::DecodeType01(Instruction* instr) {
if (type == 0) {
shifter_operand = GetShiftRm(instr, &shifter_carry_out);
} else {
ASSERT(instr->TypeValue() == 1);
DCHECK(instr->TypeValue() == 1);
shifter_operand = GetImm(instr, &shifter_carry_out);
}
int32_t alu_out;
@ -2494,7 +2494,7 @@ void Simulator::DecodeType2(Instruction* instr) {
switch (instr->PUField()) {
case da_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
addr = rn_val;
rn_val -= im_val;
set_register(rn, rn_val);
@ -2502,7 +2502,7 @@ void Simulator::DecodeType2(Instruction* instr) {
}
case ia_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
addr = rn_val;
rn_val += im_val;
set_register(rn, rn_val);
@ -2558,7 +2558,7 @@ void Simulator::DecodeType3(Instruction* instr) {
int32_t addr = 0;
switch (instr->PUField()) {
case da_x: {
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
UNIMPLEMENTED();
break;
@ -2720,7 +2720,7 @@ void Simulator::DecodeType3(Instruction* instr) {
int rs = instr->RsValue();
int32_t rs_val = get_register(rs);
int32_t ret_val = 0;
ASSERT(rs_val != 0);
DCHECK(rs_val != 0);
// udiv
if (instr->Bit(21) == 0x1) {
ret_val = static_cast<int32_t>(static_cast<uint32_t>(rm_val) /
@ -2821,7 +2821,7 @@ void Simulator::DecodeType3(Instruction* instr) {


void Simulator::DecodeType4(Instruction* instr) {
ASSERT(instr->Bit(22) == 0); // only allowed to be set in privileged mode
DCHECK(instr->Bit(22) == 0); // only allowed to be set in privileged mode
if (instr->HasL()) {
// Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
HandleRList(instr, true);
@ -2875,8 +2875,8 @@ void Simulator::DecodeType7(Instruction* instr) {
// vmrs
// Dd = vsqrt(Dm)
void Simulator::DecodeTypeVFP(Instruction* instr) {
ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
DCHECK(instr->Bits(11, 9) == 0x5);

// Obtain double precision register codes.
int vm = instr->VFPMRegValue(kDoublePrecision);
@ -3091,7 +3091,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {

void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
Instruction* instr) {
ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
DCHECK((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0));

int t = instr->RtValue();
@ -3109,8 +3109,8 @@ void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(


void Simulator::DecodeVCMP(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
DCHECK(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
(instr->Opc3Value() & 0x1));
// Comparison.

@ -3147,8 +3147,8 @@ void Simulator::DecodeVCMP(Instruction* instr) {


void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
DCHECK((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));

VFPRegPrecision dst_precision = kDoublePrecision;
VFPRegPrecision src_precision = kSinglePrecision;
@ -3172,7 +3172,7 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
bool get_inv_op_vfp_flag(VFPRoundingMode mode,
double val,
bool unsigned_) {
ASSERT((mode == RN) || (mode == RM) || (mode == RZ));
DCHECK((mode == RN) || (mode == RM) || (mode == RZ));
double max_uint = static_cast<double>(0xffffffffu);
double max_int = static_cast<double>(kMaxInt);
double min_int = static_cast<double>(kMinInt);
@ -3225,9 +3225,9 @@ int VFPConversionSaturate(double val, bool unsigned_res) {


void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
(instr->Bits(27, 23) == 0x1D));
ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
DCHECK(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
(((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));

// Conversion between floating-point and integer.
@ -3251,7 +3251,7 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
// mode or the default Round to Zero mode.
VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
: RZ;
ASSERT((mode == RM) || (mode == RZ) || (mode == RN));
DCHECK((mode == RM) || (mode == RZ) || (mode == RN));

bool unsigned_integer = (instr->Bit(16) == 0);
bool double_precision = (src_precision == kDoublePrecision);
@ -3335,7 +3335,7 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
// Ddst = MEM(Rbase + 4*offset).
// MEM(Rbase + 4*offset) = Dsrc.
void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
ASSERT((instr->TypeValue() == 6));
DCHECK((instr->TypeValue() == 6));

if (instr->CoprocessorValue() == 0xA) {
switch (instr->OpcodeValue()) {
@ -3756,7 +3756,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
// Set up arguments

// First four arguments passed in registers.
ASSERT(argument_count >= 4);
DCHECK(argument_count >= 4);
set_register(r0, va_arg(parameters, int32_t));
set_register(r1, va_arg(parameters, int32_t));
set_register(r2, va_arg(parameters, int32_t));
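
Note that the hunks below rename only the runtime checks; STATIC_ASSERT lines are left untouched, since compile-time assertions are a separate facility from the debug-only DCHECK family. A minimal contrast, using two checks that appear verbatim in this file:

STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);  // fails the build if false
DCHECK(value_off_addr > key_off_addr);  // checked at runtime, in debug builds only
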
@ -36,12 +36,12 @@ static void ProbeTable(Isolate* isolate,
uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());

// Check the relative positions of the address fields.
ASSERT(value_off_addr > key_off_addr);
ASSERT((value_off_addr - key_off_addr) % 4 == 0);
ASSERT((value_off_addr - key_off_addr) < (256 * 4));
ASSERT(map_off_addr > key_off_addr);
ASSERT((map_off_addr - key_off_addr) % 4 == 0);
ASSERT((map_off_addr - key_off_addr) < (256 * 4));
DCHECK(value_off_addr > key_off_addr);
DCHECK((value_off_addr - key_off_addr) % 4 == 0);
DCHECK((value_off_addr - key_off_addr) < (256 * 4));
DCHECK(map_off_addr > key_off_addr);
DCHECK((map_off_addr - key_off_addr) % 4 == 0);
DCHECK((map_off_addr - key_off_addr) < (256 * 4));

Label miss;
Register base_addr = scratch;
@ -77,7 +77,7 @@ static void ProbeTable(Isolate* isolate,
// It's a nice optimization if this constant is encodable in the bic insn.

uint32_t mask = Code::kFlagsNotUsedInLookup;
ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
DCHECK(__ ImmediateFitsAddrMode1Instruction(mask));
__ bic(flags_reg, flags_reg, Operand(mask));
__ cmp(flags_reg, Operand(flags));
__ b(ne, &miss);
@ -101,8 +101,8 @@ static void ProbeTable(Isolate* isolate,
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
ASSERT(name->IsUniqueName());
ASSERT(!receiver.is(scratch0));
DCHECK(name->IsUniqueName());
DCHECK(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@ -163,27 +163,27 @@ void StubCache::GenerateProbe(MacroAssembler* masm,

// Make sure that code is valid. The multiplying code relies on the
// entry size being 12.
ASSERT(sizeof(Entry) == 12);
DCHECK(sizeof(Entry) == 12);

// Make sure the flags does not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);

// Make sure that there are no register conflicts.
ASSERT(!scratch.is(receiver));
ASSERT(!scratch.is(name));
ASSERT(!extra.is(receiver));
ASSERT(!extra.is(name));
ASSERT(!extra.is(scratch));
ASSERT(!extra2.is(receiver));
ASSERT(!extra2.is(name));
ASSERT(!extra2.is(scratch));
ASSERT(!extra2.is(extra));
DCHECK(!scratch.is(receiver));
DCHECK(!scratch.is(name));
DCHECK(!extra.is(receiver));
DCHECK(!extra.is(name));
DCHECK(!extra.is(scratch));
DCHECK(!extra2.is(receiver));
DCHECK(!extra2.is(name));
DCHECK(!extra2.is(scratch));
DCHECK(!extra2.is(extra));

// Check scratch, extra and extra2 registers are valid.
ASSERT(!scratch.is(no_reg));
ASSERT(!extra.is(no_reg));
ASSERT(!extra2.is(no_reg));
ASSERT(!extra3.is(no_reg));
DCHECK(!scratch.is(no_reg));
DCHECK(!extra.is(no_reg));
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));

Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
@ -284,7 +284,7 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
DCHECK(cell->value()->IsTheHole());
__ mov(scratch, Operand(cell));
__ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@ -303,7 +303,7 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
__ mov(scratch, Operand(interceptor));
__ push(scratch);
@ -326,16 +326,16 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
MacroAssembler* masm, const CallOptimization& optimization,
Handle<Map> receiver_map, Register receiver, Register scratch_in,
bool is_store, int argc, Register* values) {
ASSERT(!receiver.is(scratch_in));
DCHECK(!receiver.is(scratch_in));
__ push(receiver);
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
Register arg = values[argc - 1 - i];
ASSERT(!receiver.is(arg));
ASSERT(!scratch_in.is(arg));
DCHECK(!receiver.is(arg));
DCHECK(!scratch_in.is(arg));
__ push(arg);
}
ASSERT(optimization.is_simple_api_call());
DCHECK(optimization.is_simple_api_call());

// Abi for CallApiFunctionStub.
Register callee = r0;
@ -426,7 +426,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
ASSERT(!representation.IsNone());
DCHECK(!representation.IsNone());

if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
@ -475,7 +475,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
}

// Stub never generated for objects that require access checks.
ASSERT(!transition->is_access_check_needed());
DCHECK(!transition->is_access_check_needed());

// Perform map transition for the receiver if necessary.
if (details.type() == FIELD &&
@ -507,7 +507,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
OMIT_SMI_CHECK);

if (details.type() == CONSTANT) {
ASSERT(value_reg.is(r0));
DCHECK(value_reg.is(r0));
__ Ret();
return;
}
@ -575,7 +575,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
}

// Return the value (register r0).
ASSERT(value_reg.is(r0));
DCHECK(value_reg.is(r0));
__ bind(&exit);
__ Ret();
}
@ -593,13 +593,13 @@ void NamedStoreHandlerCompiler::GenerateStoreField(
Label exit;

// Stub never generated for objects that require access checks.
ASSERT(!object->IsAccessCheckNeeded());
ASSERT(!object->IsJSGlobalProxy());
DCHECK(!object->IsAccessCheckNeeded());
DCHECK(!object->IsJSGlobalProxy());

FieldIndex index = lookup->GetFieldIndex();

Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
DCHECK(!representation.IsNone());
if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
@ -646,7 +646,7 @@ void NamedStoreHandlerCompiler::GenerateStoreField(
__ bind(&do_store);
__ vstr(d0, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
// Return the value (register r0).
ASSERT(value_reg.is(r0));
DCHECK(value_reg.is(r0));
__ Ret();
return;
}
@ -700,7 +700,7 @@ void NamedStoreHandlerCompiler::GenerateStoreField(
}

// Return the value (register r0).
ASSERT(value_reg.is(r0));
DCHECK(value_reg.is(r0));
__ bind(&exit);
__ Ret();
}
@ -713,8 +713,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));

// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
&& !scratch2.is(scratch1));

// Keep track of the current object in register reg.
@ -735,7 +735,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(

// Only global objects and objects that do not require access
// checks are allowed in stubs.
ASSERT(current_map->IsJSGlobalProxyMap() ||
DCHECK(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());

prototype = handle(JSObject::cast(current_map->prototype()));
@ -743,10 +743,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
!current_map->IsJSGlobalObjectMap() &&
!current_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
DCHECK(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current.is_null() ||
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);

@ -805,7 +805,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
}

// Perform security check for access to the global object.
ASSERT(current_map->IsJSGlobalProxyMap() ||
DCHECK(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
@ -846,10 +846,10 @@ Register NamedLoadHandlerCompiler::CallbackFrontend(Register object_reg,
Register reg = FrontendHeader(object_reg, name, &miss);

if (!holder()->HasFastProperties()) {
ASSERT(!holder()->IsGlobalObject());
ASSERT(!reg.is(scratch2()));
ASSERT(!reg.is(scratch3()));
ASSERT(!reg.is(scratch4()));
DCHECK(!holder()->IsGlobalObject());
DCHECK(!reg.is(scratch2()));
DCHECK(!reg.is(scratch3()));
DCHECK(!reg.is(scratch4()));

// Load the properties dictionary.
Register dictionary = scratch4();
@ -908,9 +908,9 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
ASSERT(!scratch2().is(reg));
ASSERT(!scratch3().is(reg));
ASSERT(!scratch4().is(reg));
DCHECK(!scratch2().is(reg));
DCHECK(!scratch3().is(reg));
DCHECK(!scratch4().is(reg));
__ push(receiver());
if (heap()->InNewSpace(callback->data())) {
__ Move(scratch3(), callback);
@ -946,8 +946,8 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
LookupResult* lookup,
Handle<Name> name) {
ASSERT(holder()->HasNamedInterceptor());
ASSERT(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());

// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
@ -971,7 +971,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
// Compile the interceptor call, followed by inline code to load the
|
||||
// property from further up the prototype chain if the call fails.
|
||||
// Check that the maps haven't changed.
|
||||
ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
|
||||
DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
|
||||
|
||||
// Preserve the receiver register explicitly whenever it is different from
|
||||
// the holder and it is needed should the interceptor return without any
|
||||
@ -1128,7 +1128,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
|
||||
// receiver, name, scratch1, scratch2, scratch3.
|
||||
Register receiver = StoreIC::ReceiverRegister();
|
||||
Register name = StoreIC::NameRegister();
|
||||
ASSERT(r3.is(KeyedStoreIC::MapRegister()));
|
||||
DCHECK(r3.is(KeyedStoreIC::MapRegister()));
|
||||
static Register registers[] = { receiver, name, r3, r4, r5 };
|
||||
return registers;
|
||||
}
|
||||
@ -1234,7 +1234,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
|
||||
|
||||
// Polymorphic keyed stores may use the map register
|
||||
Register map_reg = scratch1();
|
||||
ASSERT(kind() != Code::KEYED_STORE_IC ||
|
||||
DCHECK(kind() != Code::KEYED_STORE_IC ||
|
||||
map_reg.is(KeyedStoreIC::MapRegister()));
|
||||
|
||||
int receiver_count = types->length();
|
||||
@ -1248,13 +1248,13 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
|
||||
__ mov(ip, Operand(map));
|
||||
__ cmp(map_reg, ip);
|
||||
if (type->Is(HeapType::Number())) {
|
||||
ASSERT(!number_case.is_unused());
|
||||
DCHECK(!number_case.is_unused());
|
||||
__ bind(&number_case);
|
||||
}
|
||||
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
|
||||
}
|
||||
}
|
||||
ASSERT(number_of_handled_maps != 0);
|
||||
DCHECK(number_of_handled_maps != 0);
|
||||
|
||||
__ bind(&miss);
|
||||
TailCallBuiltin(masm(), MissBuiltin(kind()));
|
||||
@ -1307,8 +1307,8 @@ void ElementHandlerCompiler::GenerateLoadDictionaryElement(
|
||||
|
||||
Register key = LoadIC::NameRegister();
|
||||
Register receiver = LoadIC::ReceiverRegister();
|
||||
ASSERT(receiver.is(r1));
|
||||
ASSERT(key.is(r2));
|
||||
DCHECK(receiver.is(r1));
|
||||
DCHECK(key.is(r2));
|
||||
|
||||
__ UntagAndJumpIfNotSmi(r6, key, &miss);
|
||||
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
||||
|
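Every hunk above swaps an `ASSERT` for a `DCHECK` with identical arguments, so the change to the generated stubs is purely mechanical. For readers unfamiliar with the idiom, here is a minimal, self-contained sketch of a DCHECK-style macro, assuming a `DEBUG` build flag; V8's actual macro lives in its base library and reports through a fatal error handler, so treat this only as an illustration of the pattern:

```cpp
// Minimal sketch of a debug-only check macro (assumption: a DEBUG flag).
// This stand-in just prints and aborts; the real implementation differs.
#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define DCHECK(condition)                                      \
  do {                                                         \
    if (!(condition)) {                                        \
      std::fprintf(stderr, "Debug check failed: %s (%s:%d)\n", \
                   #condition, __FILE__, __LINE__);            \
      std::abort();                                            \
    }                                                          \
  } while (false)
#else
#define DCHECK(condition) ((void)0)
#endif

int main() {
  int argc = 3;
  DCHECK(argc > 0);  // Compiled to a real check only with -DDEBUG.
  return 0;
}
```

Because the release definition discards the expression entirely, the stub compilers can afford a register-aliasing check on every pushed argument without any release-build cost.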
@@ -25,7 +25,7 @@ void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
 void RelocInfo::set_target_address(Address target,
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
       IsCodeTarget(rmode_)) {
@@ -37,54 +37,54 @@ void RelocInfo::set_target_address(Address target,


 inline unsigned CPURegister::code() const {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   return reg_code;
 }


 inline CPURegister::RegisterType CPURegister::type() const {
-  ASSERT(IsValidOrNone());
+  DCHECK(IsValidOrNone());
   return reg_type;
 }


 inline RegList CPURegister::Bit() const {
-  ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
+  DCHECK(reg_code < (sizeof(RegList) * kBitsPerByte));
   return IsValid() ? 1UL << reg_code : 0;
 }


 inline unsigned CPURegister::SizeInBits() const {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   return reg_size;
 }


 inline int CPURegister::SizeInBytes() const {
-  ASSERT(IsValid());
-  ASSERT(SizeInBits() % 8 == 0);
+  DCHECK(IsValid());
+  DCHECK(SizeInBits() % 8 == 0);
   return reg_size / 8;
 }


 inline bool CPURegister::Is32Bits() const {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   return reg_size == 32;
 }


 inline bool CPURegister::Is64Bits() const {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   return reg_size == 64;
 }


 inline bool CPURegister::IsValid() const {
   if (IsValidRegister() || IsValidFPRegister()) {
-    ASSERT(!IsNone());
+    DCHECK(!IsNone());
     return true;
   } else {
-    ASSERT(IsNone());
+    DCHECK(IsNone());
     return false;
   }
 }
@@ -106,21 +106,21 @@ inline bool CPURegister::IsValidFPRegister() const {

 inline bool CPURegister::IsNone() const {
   // kNoRegister types should always have size 0 and code 0.
-  ASSERT((reg_type != kNoRegister) || (reg_code == 0));
-  ASSERT((reg_type != kNoRegister) || (reg_size == 0));
+  DCHECK((reg_type != kNoRegister) || (reg_code == 0));
+  DCHECK((reg_type != kNoRegister) || (reg_size == 0));

   return reg_type == kNoRegister;
 }


 inline bool CPURegister::Is(const CPURegister& other) const {
-  ASSERT(IsValidOrNone() && other.IsValidOrNone());
+  DCHECK(IsValidOrNone() && other.IsValidOrNone());
   return Aliases(other) && (reg_size == other.reg_size);
 }


 inline bool CPURegister::Aliases(const CPURegister& other) const {
-  ASSERT(IsValidOrNone() && other.IsValidOrNone());
+  DCHECK(IsValidOrNone() && other.IsValidOrNone());
   return (reg_code == other.reg_code) && (reg_type == other.reg_type);
 }

@@ -146,27 +146,27 @@ inline bool CPURegister::IsValidOrNone() const {


 inline bool CPURegister::IsZero() const {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   return IsRegister() && (reg_code == kZeroRegCode);
 }


 inline bool CPURegister::IsSP() const {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   return IsRegister() && (reg_code == kSPRegInternalCode);
 }


 inline void CPURegList::Combine(const CPURegList& other) {
-  ASSERT(IsValid());
-  ASSERT(other.type() == type_);
-  ASSERT(other.RegisterSizeInBits() == size_);
+  DCHECK(IsValid());
+  DCHECK(other.type() == type_);
+  DCHECK(other.RegisterSizeInBits() == size_);
   list_ |= other.list();
 }


 inline void CPURegList::Remove(const CPURegList& other) {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   if (other.type() == type_) {
     list_ &= ~other.list();
   }
@@ -174,8 +174,8 @@ inline void CPURegList::Remove(const CPURegList& other) {


 inline void CPURegList::Combine(const CPURegister& other) {
-  ASSERT(other.type() == type_);
-  ASSERT(other.SizeInBits() == size_);
+  DCHECK(other.type() == type_);
+  DCHECK(other.SizeInBits() == size_);
   Combine(other.code());
 }

@@ -192,15 +192,15 @@ inline void CPURegList::Remove(const CPURegister& other1,


 inline void CPURegList::Combine(int code) {
-  ASSERT(IsValid());
-  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+  DCHECK(IsValid());
+  DCHECK(CPURegister::Create(code, size_, type_).IsValid());
   list_ |= (1UL << code);
 }


 inline void CPURegList::Remove(int code) {
-  ASSERT(IsValid());
-  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+  DCHECK(IsValid());
+  DCHECK(CPURegister::Create(code, size_, type_).IsValid());
   list_ &= ~(1UL << code);
 }

@@ -209,7 +209,7 @@ inline Register Register::XRegFromCode(unsigned code) {
   if (code == kSPRegInternalCode) {
     return csp;
   } else {
-    ASSERT(code < kNumberOfRegisters);
+    DCHECK(code < kNumberOfRegisters);
     return Register::Create(code, kXRegSizeInBits);
   }
 }
@@ -219,44 +219,44 @@ inline Register Register::WRegFromCode(unsigned code) {
   if (code == kSPRegInternalCode) {
     return wcsp;
   } else {
-    ASSERT(code < kNumberOfRegisters);
+    DCHECK(code < kNumberOfRegisters);
     return Register::Create(code, kWRegSizeInBits);
   }
 }


 inline FPRegister FPRegister::SRegFromCode(unsigned code) {
-  ASSERT(code < kNumberOfFPRegisters);
+  DCHECK(code < kNumberOfFPRegisters);
   return FPRegister::Create(code, kSRegSizeInBits);
 }


 inline FPRegister FPRegister::DRegFromCode(unsigned code) {
-  ASSERT(code < kNumberOfFPRegisters);
+  DCHECK(code < kNumberOfFPRegisters);
   return FPRegister::Create(code, kDRegSizeInBits);
 }


 inline Register CPURegister::W() const {
-  ASSERT(IsValidRegister());
+  DCHECK(IsValidRegister());
   return Register::WRegFromCode(reg_code);
 }


 inline Register CPURegister::X() const {
-  ASSERT(IsValidRegister());
+  DCHECK(IsValidRegister());
   return Register::XRegFromCode(reg_code);
 }


 inline FPRegister CPURegister::S() const {
-  ASSERT(IsValidFPRegister());
+  DCHECK(IsValidFPRegister());
   return FPRegister::SRegFromCode(reg_code);
 }


 inline FPRegister CPURegister::D() const {
-  ASSERT(IsValidFPRegister());
+  DCHECK(IsValidFPRegister());
   return FPRegister::DRegFromCode(reg_code);
 }

@@ -341,9 +341,9 @@ Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
       shift_(shift),
       extend_(NO_EXTEND),
       shift_amount_(shift_amount) {
-  ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
-  ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
-  ASSERT(!reg.IsSP());
+  DCHECK(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
+  DCHECK(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
+  DCHECK(!reg.IsSP());
 }


@@ -353,12 +353,12 @@ Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
       shift_(NO_SHIFT),
       extend_(extend),
       shift_amount_(shift_amount) {
-  ASSERT(reg.IsValid());
-  ASSERT(shift_amount <= 4);
-  ASSERT(!reg.IsSP());
+  DCHECK(reg.IsValid());
+  DCHECK(shift_amount <= 4);
+  DCHECK(!reg.IsSP());

   // Extend modes SXTX and UXTX require a 64-bit register.
-  ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
+  DCHECK(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
 }


@@ -387,44 +387,44 @@ bool Operand::IsZero() const {


 Operand Operand::ToExtendedRegister() const {
-  ASSERT(IsShiftedRegister());
-  ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
+  DCHECK(IsShiftedRegister());
+  DCHECK((shift_ == LSL) && (shift_amount_ <= 4));
   return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
 }


 Immediate Operand::immediate() const {
-  ASSERT(IsImmediate());
+  DCHECK(IsImmediate());
   return immediate_;
 }


 int64_t Operand::ImmediateValue() const {
-  ASSERT(IsImmediate());
+  DCHECK(IsImmediate());
   return immediate_.value();
 }


 Register Operand::reg() const {
-  ASSERT(IsShiftedRegister() || IsExtendedRegister());
+  DCHECK(IsShiftedRegister() || IsExtendedRegister());
   return reg_;
 }


 Shift Operand::shift() const {
-  ASSERT(IsShiftedRegister());
+  DCHECK(IsShiftedRegister());
   return shift_;
 }


 Extend Operand::extend() const {
-  ASSERT(IsExtendedRegister());
+  DCHECK(IsExtendedRegister());
   return extend_;
 }


 unsigned Operand::shift_amount() const {
-  ASSERT(IsShiftedRegister() || IsExtendedRegister());
+  DCHECK(IsShiftedRegister() || IsExtendedRegister());
   return shift_amount_;
 }

@@ -432,7 +432,7 @@ unsigned Operand::shift_amount() const {
 Operand Operand::UntagSmi(Register smi) {
   STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
                                                          kSmiValueSize));
-  ASSERT(smi.Is64Bits());
+  DCHECK(smi.Is64Bits());
   return Operand(smi, ASR, kSmiShift);
 }

@@ -440,8 +440,8 @@ Operand Operand::UntagSmi(Register smi) {
 Operand Operand::UntagSmiAndScale(Register smi, int scale) {
   STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
                                                          kSmiValueSize));
-  ASSERT(smi.Is64Bits());
-  ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
+  DCHECK(smi.Is64Bits());
+  DCHECK((scale >= 0) && (scale <= (64 - kSmiValueSize)));
   if (scale > kSmiShift) {
     return Operand(smi, LSL, scale - kSmiShift);
   } else if (scale < kSmiShift) {
@@ -460,7 +460,7 @@ MemOperand::MemOperand()
 MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
   : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
     shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
-  ASSERT(base.Is64Bits() && !base.IsZero());
+  DCHECK(base.Is64Bits() && !base.IsZero());
 }


@@ -470,12 +470,12 @@ MemOperand::MemOperand(Register base,
                        unsigned shift_amount)
   : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
     shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
-  ASSERT(base.Is64Bits() && !base.IsZero());
-  ASSERT(!regoffset.IsSP());
-  ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
+  DCHECK(base.Is64Bits() && !base.IsZero());
+  DCHECK(!regoffset.IsSP());
+  DCHECK((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

   // SXTX extend mode requires a 64-bit offset register.
-  ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+  DCHECK(regoffset.Is64Bits() || (extend != SXTX));
 }


@@ -485,22 +485,22 @@ MemOperand::MemOperand(Register base,
                        unsigned shift_amount)
   : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
     shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
-  ASSERT(base.Is64Bits() && !base.IsZero());
-  ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
-  ASSERT(shift == LSL);
+  DCHECK(base.Is64Bits() && !base.IsZero());
+  DCHECK(regoffset.Is64Bits() && !regoffset.IsSP());
+  DCHECK(shift == LSL);
 }


 MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
   : base_(base), addrmode_(addrmode) {
-  ASSERT(base.Is64Bits() && !base.IsZero());
+  DCHECK(base.Is64Bits() && !base.IsZero());

   if (offset.IsImmediate()) {
     offset_ = offset.ImmediateValue();

     regoffset_ = NoReg;
   } else if (offset.IsShiftedRegister()) {
-    ASSERT(addrmode == Offset);
+    DCHECK(addrmode == Offset);

     regoffset_ = offset.reg();
     shift_ = offset.shift();
@@ -510,11 +510,11 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
     offset_ = 0;

     // These assertions match those in the shifted-register constructor.
-    ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
-    ASSERT(shift_ == LSL);
+    DCHECK(regoffset_.Is64Bits() && !regoffset_.IsSP());
+    DCHECK(shift_ == LSL);
   } else {
-    ASSERT(offset.IsExtendedRegister());
-    ASSERT(addrmode == Offset);
+    DCHECK(offset.IsExtendedRegister());
+    DCHECK(addrmode == Offset);

     regoffset_ = offset.reg();
     extend_ = offset.extend();
@@ -524,9 +524,9 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
     offset_ = 0;

     // These assertions match those in the extended-register constructor.
-    ASSERT(!regoffset_.IsSP());
-    ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
-    ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
+    DCHECK(!regoffset_.IsSP());
+    DCHECK((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
+    DCHECK((regoffset_.Is64Bits() || (extend_ != SXTX)));
   }
 }

@@ -553,7 +553,7 @@ Operand MemOperand::OffsetAsOperand() const {
   if (IsImmediateOffset()) {
     return offset();
   } else {
-    ASSERT(IsRegisterOffset());
+    DCHECK(IsRegisterOffset());
     if (extend() == NO_EXTEND) {
       return Operand(regoffset(), shift(), shift_amount());
     } else {
@@ -575,7 +575,7 @@ void Assembler::Unreachable() {

 Address Assembler::target_pointer_address_at(Address pc) {
   Instruction* instr = reinterpret_cast<Instruction*>(pc);
-  ASSERT(instr->IsLdrLiteralX());
+  DCHECK(instr->IsLdrLiteralX());
   return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
 }

@@ -602,7 +602,7 @@ Address Assembler::target_address_from_return_address(Address pc) {
   Address candidate = pc - 2 * kInstructionSize;
   Instruction* instr = reinterpret_cast<Instruction*>(candidate);
   USE(instr);
-  ASSERT(instr->IsLdrLiteralX());
+  DCHECK(instr->IsLdrLiteralX());
   return candidate;
 }

@@ -630,14 +630,14 @@ Address Assembler::return_address_from_call_start(Address pc) {
   Instruction* instr = reinterpret_cast<Instruction*>(pc);
   if (instr->IsMovz()) {
     // Verify the instruction sequence.
-    ASSERT(instr->following(1)->IsMovk());
-    ASSERT(instr->following(2)->IsMovk());
-    ASSERT(instr->following(3)->IsBranchAndLinkToRegister());
+    DCHECK(instr->following(1)->IsMovk());
+    DCHECK(instr->following(2)->IsMovk());
+    DCHECK(instr->following(3)->IsBranchAndLinkToRegister());
     return pc + Assembler::kCallSizeWithoutRelocation;
   } else {
     // Verify the instruction sequence.
-    ASSERT(instr->IsLdrLiteralX());
-    ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
+    DCHECK(instr->IsLdrLiteralX());
+    DCHECK(instr->following(1)->IsBranchAndLinkToRegister());
     return pc + Assembler::kCallSizeWithRelocation;
   }
 }
@@ -680,13 +680,13 @@ int RelocInfo::target_address_size() {


 Address RelocInfo::target_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   return Assembler::target_address_at(pc_, host_);
 }


 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
          || rmode_ == EMBEDDED_OBJECT
          || rmode_ == EXTERNAL_REFERENCE);
   return Assembler::target_pointer_address_at(pc_);
@@ -694,19 +694,19 @@ Address RelocInfo::target_address_address() {


 Address RelocInfo::constant_pool_entry_address() {
-  ASSERT(IsInConstantPool());
+  DCHECK(IsInConstantPool());
   return Assembler::target_pointer_address_at(pc_);
 }


 Object* RelocInfo::target_object() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
 }


 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Handle<Object>(reinterpret_cast<Object**>(
       Assembler::target_address_at(pc_, host_)));
 }
@@ -715,7 +715,7 @@ Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
 void RelocInfo::set_target_object(Object* target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Assembler::set_target_address_at(pc_, host_,
                                    reinterpret_cast<Address>(target),
                                    icache_flush_mode);
@@ -729,13 +729,13 @@ void RelocInfo::set_target_object(Object* target,


 Address RelocInfo::target_reference() {
-  ASSERT(rmode_ == EXTERNAL_REFERENCE);
+  DCHECK(rmode_ == EXTERNAL_REFERENCE);
   return Assembler::target_address_at(pc_, host_);
 }


 Address RelocInfo::target_runtime_entry(Assembler* origin) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   return target_address();
 }

@@ -743,7 +743,7 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
 void RelocInfo::set_target_runtime_entry(Address target,
                                          WriteBarrierMode write_barrier_mode,
                                          ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   if (target_address() != target) {
     set_target_address(target, write_barrier_mode, icache_flush_mode);
   }
@@ -758,7 +758,7 @@ Handle<Cell> RelocInfo::target_cell_handle() {


 Cell* RelocInfo::target_cell() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   return Cell::FromValueAddress(Memory::Address_at(pc_));
 }

@@ -781,7 +781,7 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {


 Code* RelocInfo::code_age_stub() {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   // Read the stub entry point from the code age sequence.
   Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
   return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
@@ -790,8 +790,8 @@ Code* RelocInfo::code_age_stub() {

 void RelocInfo::set_code_age_stub(Code* stub,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  ASSERT(!Code::IsYoungSequence(stub->GetIsolate(), pc_));
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(!Code::IsYoungSequence(stub->GetIsolate(), pc_));
   // Overwrite the stub entry point in the code age sequence. This is loaded as
   // a literal so there is no need to call FlushICache here.
   Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
@@ -800,7 +800,7 @@ void RelocInfo::set_code_age_stub(Code* stub,


 Address RelocInfo::call_address() {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   // For the above sequences the Relocinfo points to the load literal loading
   // the call address.
@@ -809,7 +809,7 @@ Address RelocInfo::call_address() {


 void RelocInfo::set_call_address(Address target) {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Assembler::set_target_address_at(pc_, host_, target);
   if (host() != NULL) {
@@ -821,7 +821,7 @@ void RelocInfo::set_call_address(Address target) {


 void RelocInfo::WipeOut() {
-  ASSERT(IsEmbeddedObject(rmode_) ||
+  DCHECK(IsEmbeddedObject(rmode_) ||
          IsCodeTarget(rmode_) ||
          IsRuntimeEntry(rmode_) ||
          IsExternalReference(rmode_));
@@ -893,11 +893,11 @@ void RelocInfo::Visit(Heap* heap) {


 LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
-  ASSERT(rt.IsValid());
+  DCHECK(rt.IsValid());
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? LDR_x : LDR_w;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? LDR_d : LDR_s;
   }
 }
@@ -905,23 +905,23 @@ LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {

 LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
                                          const CPURegister& rt2) {
-  ASSERT(AreSameSizeAndType(rt, rt2));
+  DCHECK(AreSameSizeAndType(rt, rt2));
   USE(rt2);
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? LDP_x : LDP_w;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? LDP_d : LDP_s;
   }
 }


 LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
-  ASSERT(rt.IsValid());
+  DCHECK(rt.IsValid());
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? STR_x : STR_w;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? STR_d : STR_s;
   }
 }
@@ -929,12 +929,12 @@ LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {

 LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                           const CPURegister& rt2) {
-  ASSERT(AreSameSizeAndType(rt, rt2));
+  DCHECK(AreSameSizeAndType(rt, rt2));
   USE(rt2);
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? STP_x : STP_w;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? STP_d : STP_s;
   }
 }
@@ -942,12 +942,12 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,

 LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
     const CPURegister& rt, const CPURegister& rt2) {
-  ASSERT(AreSameSizeAndType(rt, rt2));
+  DCHECK(AreSameSizeAndType(rt, rt2));
   USE(rt2);
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? LDNP_x : LDNP_w;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? LDNP_d : LDNP_s;
   }
 }
@@ -955,12 +955,12 @@ LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(

 LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
     const CPURegister& rt, const CPURegister& rt2) {
-  ASSERT(AreSameSizeAndType(rt, rt2));
+  DCHECK(AreSameSizeAndType(rt, rt2));
   USE(rt2);
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? STNP_x : STNP_w;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? STNP_d : STNP_s;
   }
 }
@@ -970,16 +970,16 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
   }
 }


 int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
-  ASSERT(kStartOfLabelLinkChain == 0);
+  DCHECK(kStartOfLabelLinkChain == 0);
   int offset = LinkAndGetByteOffsetTo(label);
-  ASSERT(IsAligned(offset, kInstructionSize));
+  DCHECK(IsAligned(offset, kInstructionSize));
   return offset >> kInstructionSizeLog2;
 }

@@ -1034,7 +1034,7 @@ Instr Assembler::ImmTestBranch(int imm14) {


 Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
-  ASSERT(is_uint6(bit_pos));
+  DCHECK(is_uint6(bit_pos));
   // Subtract five from the shift offset, as we need bit 5 from bit_pos.
   unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
   unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
@@ -1050,7 +1050,7 @@ Instr Assembler::SF(Register rd) {


 Instr Assembler::ImmAddSub(int64_t imm) {
-  ASSERT(IsImmAddSub(imm));
+  DCHECK(IsImmAddSub(imm));
   if (is_uint12(imm)) {  // No shift required.
     return imm << ImmAddSub_offset;
   } else {
@@ -1060,7 +1060,7 @@ Instr Assembler::ImmAddSub(int64_t imm) {


 Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
-  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
+  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
          ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
   USE(reg_size);
   return imms << ImmS_offset;
@@ -1068,26 +1068,26 @@ Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {


 Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
-  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
+  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
          ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
   USE(reg_size);
-  ASSERT(is_uint6(immr));
+  DCHECK(is_uint6(immr));
   return immr << ImmR_offset;
 }


 Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
-  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
-  ASSERT(is_uint6(imms));
-  ASSERT((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
+  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+  DCHECK(is_uint6(imms));
+  DCHECK((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
   USE(reg_size);
   return imms << ImmSetBits_offset;
 }


 Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
-  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
-  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
+  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
          ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
   USE(reg_size);
   return immr << ImmRotate_offset;
@@ -1101,21 +1101,21 @@ Instr Assembler::ImmLLiteral(int imm19) {


 Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
-  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
-  ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0));
+  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+  DCHECK((reg_size == kXRegSizeInBits) || (bitn == 0));
   USE(reg_size);
   return bitn << BitN_offset;
 }


 Instr Assembler::ShiftDP(Shift shift) {
-  ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+  DCHECK(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
   return shift << ShiftDP_offset;
 }


 Instr Assembler::ImmDPShift(unsigned amount) {
-  ASSERT(is_uint6(amount));
+  DCHECK(is_uint6(amount));
   return amount << ImmDPShift_offset;
 }

@@ -1126,13 +1126,13 @@ Instr Assembler::ExtendMode(Extend extend) {


 Instr Assembler::ImmExtendShift(unsigned left_shift) {
-  ASSERT(left_shift <= 4);
+  DCHECK(left_shift <= 4);
   return left_shift << ImmExtendShift_offset;
 }


 Instr Assembler::ImmCondCmp(unsigned imm) {
-  ASSERT(is_uint5(imm));
+  DCHECK(is_uint5(imm));
   return imm << ImmCondCmp_offset;
 }

@@ -1143,75 +1143,75 @@ Instr Assembler::Nzcv(StatusFlags nzcv) {


 Instr Assembler::ImmLSUnsigned(int imm12) {
-  ASSERT(is_uint12(imm12));
+  DCHECK(is_uint12(imm12));
   return imm12 << ImmLSUnsigned_offset;
 }


 Instr Assembler::ImmLS(int imm9) {
-  ASSERT(is_int9(imm9));
+  DCHECK(is_int9(imm9));
   return truncate_to_int9(imm9) << ImmLS_offset;
 }


 Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
-  ASSERT(((imm7 >> size) << size) == imm7);
+  DCHECK(((imm7 >> size) << size) == imm7);
   int scaled_imm7 = imm7 >> size;
-  ASSERT(is_int7(scaled_imm7));
+  DCHECK(is_int7(scaled_imm7));
   return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
 }


 Instr Assembler::ImmShiftLS(unsigned shift_amount) {
-  ASSERT(is_uint1(shift_amount));
+  DCHECK(is_uint1(shift_amount));
   return shift_amount << ImmShiftLS_offset;
 }


 Instr Assembler::ImmException(int imm16) {
-  ASSERT(is_uint16(imm16));
+  DCHECK(is_uint16(imm16));
   return imm16 << ImmException_offset;
 }


 Instr Assembler::ImmSystemRegister(int imm15) {
-  ASSERT(is_uint15(imm15));
+  DCHECK(is_uint15(imm15));
   return imm15 << ImmSystemRegister_offset;
 }


 Instr Assembler::ImmHint(int imm7) {
-  ASSERT(is_uint7(imm7));
+  DCHECK(is_uint7(imm7));
   return imm7 << ImmHint_offset;
 }


 Instr Assembler::ImmBarrierDomain(int imm2) {
-  ASSERT(is_uint2(imm2));
+  DCHECK(is_uint2(imm2));
   return imm2 << ImmBarrierDomain_offset;
 }


 Instr Assembler::ImmBarrierType(int imm2) {
-  ASSERT(is_uint2(imm2));
+  DCHECK(is_uint2(imm2));
   return imm2 << ImmBarrierType_offset;
 }


 LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
-  ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
+  DCHECK((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
   return static_cast<LSDataSize>(op >> SizeLS_offset);
 }


 Instr Assembler::ImmMoveWide(uint64_t imm) {
-  ASSERT(is_uint16(imm));
+  DCHECK(is_uint16(imm));
   return imm << ImmMoveWide_offset;
 }


 Instr Assembler::ShiftMoveWide(int64_t shift) {
-  ASSERT(is_uint2(shift));
+  DCHECK(is_uint2(shift));
   return shift << ShiftMoveWide_offset;
 }

@@ -1222,7 +1222,7 @@ Instr Assembler::FPType(FPRegister fd) {


 Instr Assembler::FPScale(unsigned scale) {
-  ASSERT(is_uint6(scale));
+  DCHECK(is_uint6(scale));
   return scale << FPScale_offset;
 }

@@ -1233,7 +1233,7 @@ const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {


 inline void Assembler::CheckBufferSpace() {
-  ASSERT(pc_ < (buffer_ + buffer_size_));
+  DCHECK(pc_ < (buffer_ + buffer_size_));
   if (buffer_space() < kGap) {
     GrowBuffer();
   }
@@ -1252,7 +1252,7 @@ inline void Assembler::CheckBuffer() {


 TypeFeedbackId Assembler::RecordedAstId() {
-  ASSERT(!recorded_ast_id_.IsNone());
+  DCHECK(!recorded_ast_id_.IsNone());
   return recorded_ast_id_;
 }

[File diff suppressed because it is too large]
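The header changes that follow guard the `CPURegList` bit-set operations. As a self-contained sketch of that bookkeeping, with illustrative names (an assumption on my part; V8's real class additionally tracks register size and type):

```cpp
// Standalone model of the RegList bit set used by CPURegList below:
// Combine() ORs a register's bit in, Remove() masks it out, and the
// debug checks reject out-of-range register codes.
#include <cassert>
#include <cstdint>

using RegList = uint64_t;

void Combine(RegList* list, unsigned code) {
  assert(code < 64);  // Stands in for the DCHECK(IsValid()) guards.
  *list |= (1ULL << code);
}

void Remove(RegList* list, unsigned code) {
  assert(code < 64);
  *list &= ~(1ULL << code);
}

int main() {
  RegList saved = 0;
  Combine(&saved, 0);   // x0
  Combine(&saved, 30);  // lr
  Remove(&saved, 0);
  assert(saved == (1ULL << 30));
  return 0;
}
```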
@ -106,18 +106,18 @@ struct Register : public CPURegister {
|
||||
reg_code = r.reg_code;
|
||||
reg_size = r.reg_size;
|
||||
reg_type = r.reg_type;
|
||||
ASSERT(IsValidOrNone());
|
||||
DCHECK(IsValidOrNone());
|
||||
}
|
||||
|
||||
Register(const Register& r) { // NOLINT(runtime/explicit)
|
||||
reg_code = r.reg_code;
|
||||
reg_size = r.reg_size;
|
||||
reg_type = r.reg_type;
|
||||
ASSERT(IsValidOrNone());
|
||||
DCHECK(IsValidOrNone());
|
||||
}
|
||||
|
||||
bool IsValid() const {
|
||||
ASSERT(IsRegister() || IsNone());
|
||||
DCHECK(IsRegister() || IsNone());
|
||||
return IsValidRegister();
|
||||
}
|
||||
|
||||
@ -169,7 +169,7 @@ struct Register : public CPURegister {
|
||||
}
|
||||
|
||||
static Register FromAllocationIndex(unsigned index) {
|
||||
ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
|
||||
DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
|
||||
// cp is the last allocatable register.
|
||||
if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
|
||||
return from_code(kAllocatableContext);
|
||||
@ -182,8 +182,8 @@ struct Register : public CPURegister {
|
||||
}
|
||||
|
||||
static const char* AllocationIndexToString(int index) {
|
||||
ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
|
||||
ASSERT((kAllocatableLowRangeBegin == 0) &&
|
||||
DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
|
||||
DCHECK((kAllocatableLowRangeBegin == 0) &&
|
||||
(kAllocatableLowRangeEnd == 15) &&
|
||||
(kAllocatableHighRangeBegin == 18) &&
|
||||
(kAllocatableHighRangeEnd == 24) &&
|
||||
@ -199,7 +199,7 @@ struct Register : public CPURegister {
|
||||
}
|
||||
|
||||
static int ToAllocationIndex(Register reg) {
|
||||
ASSERT(reg.IsAllocatable());
|
||||
DCHECK(reg.IsAllocatable());
|
||||
unsigned code = reg.code();
|
||||
if (code == kAllocatableContext) {
|
||||
return NumAllocatableRegisters() - 1;
|
||||
@ -235,18 +235,18 @@ struct FPRegister : public CPURegister {
|
||||
reg_code = r.reg_code;
|
||||
reg_size = r.reg_size;
|
||||
reg_type = r.reg_type;
|
||||
ASSERT(IsValidOrNone());
|
||||
DCHECK(IsValidOrNone());
|
||||
}
|
||||
|
||||
FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
|
||||
reg_code = r.reg_code;
|
||||
reg_size = r.reg_size;
|
||||
reg_type = r.reg_type;
|
||||
ASSERT(IsValidOrNone());
|
||||
DCHECK(IsValidOrNone());
|
||||
}
|
||||
|
||||
bool IsValid() const {
|
||||
ASSERT(IsFPRegister() || IsNone());
|
||||
DCHECK(IsFPRegister() || IsNone());
|
||||
return IsValidFPRegister();
|
||||
}
|
||||
|
||||
@ -282,7 +282,7 @@ struct FPRegister : public CPURegister {
|
||||
}
|
||||
|
||||
static FPRegister FromAllocationIndex(unsigned int index) {
|
||||
ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
|
||||
DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
|
||||
|
||||
return (index <= kAllocatableLowRangeEnd)
|
||||
? from_code(index)
|
||||
@ -290,8 +290,8 @@ struct FPRegister : public CPURegister {
|
||||
}
|
||||
|
||||
static const char* AllocationIndexToString(int index) {
|
||||
ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
|
||||
ASSERT((kAllocatableLowRangeBegin == 0) &&
|
||||
DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
|
||||
DCHECK((kAllocatableLowRangeBegin == 0) &&
|
||||
(kAllocatableLowRangeEnd == 14) &&
|
||||
(kAllocatableHighRangeBegin == 16) &&
|
||||
(kAllocatableHighRangeEnd == 28));
|
||||
@ -305,7 +305,7 @@ struct FPRegister : public CPURegister {
|
||||
}
|
||||
|
||||
static int ToAllocationIndex(FPRegister reg) {
|
||||
ASSERT(reg.IsAllocatable());
|
||||
DCHECK(reg.IsAllocatable());
|
||||
unsigned code = reg.code();
|
||||
|
||||
return (code <= kAllocatableLowRangeEnd)
|
||||
@ -451,40 +451,40 @@ class CPURegList {
|
||||
CPURegister reg4 = NoCPUReg)
|
||||
: list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
|
||||
size_(reg1.SizeInBits()), type_(reg1.type()) {
|
||||
ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
|
||||
ASSERT(IsValid());
|
||||
DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
|
||||
DCHECK(IsValid());
|
||||
}
|
||||
|
||||
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
|
||||
: list_(list), size_(size), type_(type) {
|
||||
ASSERT(IsValid());
|
||||
DCHECK(IsValid());
|
||||
}
|
||||
|
||||
CPURegList(CPURegister::RegisterType type, unsigned size,
|
||||
unsigned first_reg, unsigned last_reg)
|
||||
: size_(size), type_(type) {
|
||||
ASSERT(((type == CPURegister::kRegister) &&
|
||||
DCHECK(((type == CPURegister::kRegister) &&
|
||||
(last_reg < kNumberOfRegisters)) ||
|
||||
((type == CPURegister::kFPRegister) &&
|
||||
(last_reg < kNumberOfFPRegisters)));
|
||||
ASSERT(last_reg >= first_reg);
|
||||
DCHECK(last_reg >= first_reg);
|
||||
list_ = (1UL << (last_reg + 1)) - 1;
|
||||
list_ &= ~((1UL << first_reg) - 1);
|
||||
ASSERT(IsValid());
|
||||
DCHECK(IsValid());
|
||||
}
|
||||
|
||||
CPURegister::RegisterType type() const {
|
||||
ASSERT(IsValid());
|
||||
DCHECK(IsValid());
|
||||
return type_;
|
||||
}
|
||||
|
||||
RegList list() const {
|
||||
ASSERT(IsValid());
|
||||
DCHECK(IsValid());
|
||||
return list_;
|
||||
}
|
||||
|
||||
inline void set_list(RegList new_list) {
|
||||
ASSERT(IsValid());
|
||||
DCHECK(IsValid());
|
||||
list_ = new_list;
|
||||
}
|
||||
|
||||
@ -529,7 +529,7 @@ class CPURegList {
|
||||
static CPURegList GetSafepointSavedRegisters();
|
||||
|
||||
bool IsEmpty() const {
|
||||
ASSERT(IsValid());
|
||||
DCHECK(IsValid());
|
||||
return list_ == 0;
|
||||
}
|
||||
|
||||
@ -537,7 +537,7 @@ class CPURegList {
|
||||
const CPURegister& other2 = NoCPUReg,
|
||||
const CPURegister& other3 = NoCPUReg,
|
||||
const CPURegister& other4 = NoCPUReg) const {
|
||||
ASSERT(IsValid());
|
||||
DCHECK(IsValid());
|
||||
RegList list = 0;
|
||||
if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
|
||||
if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
|
||||
@ -547,23 +547,23 @@ class CPURegList {
|
||||
}
|
||||
|
||||
int Count() const {
|
||||
ASSERT(IsValid());
|
||||
DCHECK(IsValid());
|
||||
return CountSetBits(list_, kRegListSizeInBits);
|
||||
}
|
||||
|
||||
unsigned RegisterSizeInBits() const {
|
||||
ASSERT(IsValid());
|
||||
DCHECK(IsValid());
|
||||
return size_;
|
||||
}
|
||||
|
||||
unsigned RegisterSizeInBytes() const {
|
||||
int size_in_bits = RegisterSizeInBits();
|
||||
ASSERT((size_in_bits % kBitsPerByte) == 0);
|
||||
DCHECK((size_in_bits % kBitsPerByte) == 0);
|
||||
return size_in_bits / kBitsPerByte;
|
||||
}
|
||||
|
||||
unsigned TotalSizeInBytes() const {
|
||||
ASSERT(IsValid());
|
||||
DCHECK(IsValid());
|
||||
return RegisterSizeInBytes() * Count();
|
||||
}
|
||||
|
||||
@ -829,7 +829,7 @@ class Assembler : public AssemblerBase {
|
||||
// Start generating code from the beginning of the buffer, discarding any code
|
||||
// and data that has already been emitted into the buffer.
|
||||
//
|
||||
// In order to avoid any accidental transfer of state, Reset ASSERTs that the
|
||||
// In order to avoid any accidental transfer of state, Reset DCHECKs that the
|
||||
// constant pool is not blocked.
|
||||
void Reset();
|
||||
|
||||
@ -913,15 +913,15 @@ class Assembler : public AssemblerBase {
|
||||
|
||||
// Size of the generated code in bytes
|
||||
uint64_t SizeOfGeneratedCode() const {
|
||||
ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
|
||||
DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
|
||||
return pc_ - buffer_;
|
||||
}
|
||||
|
||||
// Return the code size generated from label to the current position.
|
||||
uint64_t SizeOfCodeGeneratedSince(const Label* label) {
|
||||
ASSERT(label->is_bound());
|
||||
ASSERT(pc_offset() >= label->pos());
|
||||
ASSERT(pc_offset() < buffer_size_);
|
||||
DCHECK(label->is_bound());
|
||||
DCHECK(pc_offset() >= label->pos());
|
||||
DCHECK(pc_offset() < buffer_size_);
|
||||
return pc_offset() - label->pos();
|
||||
}
|
||||
|
||||
@ -931,8 +931,8 @@ class Assembler : public AssemblerBase {
|
||||
// TODO(jbramley): Work out what sign to use for these things and if possible,
|
||||
// change things to be consistent.
|
||||
void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
|
||||
ASSERT(size >= 0);
|
||||
ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
|
||||
DCHECK(size >= 0);
|
||||
DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
|
||||
}
|
||||
|
||||
// Return the number of instructions generated from label to the
|
||||
@ -1214,8 +1214,8 @@ class Assembler : public AssemblerBase {
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
ASSERT(width >= 1);
|
||||
ASSERT(lsb + width <= rn.SizeInBits());
|
||||
DCHECK(width >= 1);
|
||||
DCHECK(lsb + width <= rn.SizeInBits());
|
||||
bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
|
||||
}
|
||||
|
||||
@ -1224,15 +1224,15 @@ class Assembler : public AssemblerBase {
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
ASSERT(width >= 1);
|
||||
ASSERT(lsb + width <= rn.SizeInBits());
|
||||
DCHECK(width >= 1);
|
||||
DCHECK(lsb + width <= rn.SizeInBits());
|
||||
bfm(rd, rn, lsb, lsb + width - 1);
|
||||
}
|
||||
|
||||
// Sbfm aliases.
|
||||
// Arithmetic shift right.
|
||||
void asr(const Register& rd, const Register& rn, unsigned shift) {
|
||||
ASSERT(shift < rd.SizeInBits());
|
||||
DCHECK(shift < rd.SizeInBits());
|
||||
sbfm(rd, rn, shift, rd.SizeInBits() - 1);
|
||||
}
|
||||
|
||||
@ -1241,8 +1241,8 @@ class Assembler : public AssemblerBase {
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
ASSERT(width >= 1);
|
||||
ASSERT(lsb + width <= rn.SizeInBits());
|
||||
DCHECK(width >= 1);
|
||||
DCHECK(lsb + width <= rn.SizeInBits());
|
||||
sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
|
||||
}
|
||||
|
||||
@ -1251,8 +1251,8 @@ class Assembler : public AssemblerBase {
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
ASSERT(width >= 1);
|
||||
ASSERT(lsb + width <= rn.SizeInBits());
|
||||
DCHECK(width >= 1);
|
||||
DCHECK(lsb + width <= rn.SizeInBits());
|
||||
sbfm(rd, rn, lsb, lsb + width - 1);
|
||||
}
|
||||
|
||||
@ -1275,13 +1275,13 @@ class Assembler : public AssemblerBase {
|
||||
// Logical shift left.
|
||||
void lsl(const Register& rd, const Register& rn, unsigned shift) {
|
||||
unsigned reg_size = rd.SizeInBits();
|
||||
ASSERT(shift < reg_size);
|
||||
DCHECK(shift < reg_size);
|
||||
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
|
||||
}
|
||||
|
||||
// Logical shift right.
|
||||
void lsr(const Register& rd, const Register& rn, unsigned shift) {
|
||||
ASSERT(shift < rd.SizeInBits());
|
||||
DCHECK(shift < rd.SizeInBits());
|
||||
ubfm(rd, rn, shift, rd.SizeInBits() - 1);
|
||||
}
|
||||
|
||||
@ -1290,8 +1290,8 @@ class Assembler : public AssemblerBase {
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
ASSERT(width >= 1);
|
||||
ASSERT(lsb + width <= rn.SizeInBits());
|
||||
DCHECK(width >= 1);
|
||||
DCHECK(lsb + width <= rn.SizeInBits());
|
||||
ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
|
||||
}
|
||||
|
||||
@ -1300,8 +1300,8 @@ class Assembler : public AssemblerBase {
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
ASSERT(width >= 1);
|
||||
ASSERT(lsb + width <= rn.SizeInBits());
|
||||
DCHECK(width >= 1);
|
||||
DCHECK(lsb + width <= rn.SizeInBits());
|
||||
ubfm(rd, rn, lsb, lsb + width - 1);
|
||||
}
|
||||
|
||||
@ -1571,7 +1571,7 @@ class Assembler : public AssemblerBase {
|
||||
};
|
||||
|
||||
void nop(NopMarkerTypes n) {
|
||||
ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
|
||||
DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
|
||||
mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
|
||||
}
|
||||
|
||||
@ -1732,7 +1732,7 @@ class Assembler : public AssemblerBase {
|
||||
// subsequent instructions.
|
||||
void EmitStringData(const char * string) {
|
||||
size_t len = strlen(string) + 1;
|
||||
ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
|
||||
DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
|
||||
EmitData(string, len);
|
||||
// Pad with NULL characters until pc_ is aligned.
|
||||
const char pad[] = {'\0', '\0', '\0', '\0'};
|
||||
@ -1766,44 +1766,44 @@ class Assembler : public AssemblerBase {
|
||||
|
||||
// Register encoding.
|
||||
static Instr Rd(CPURegister rd) {
|
||||
ASSERT(rd.code() != kSPRegInternalCode);
|
||||
DCHECK(rd.code() != kSPRegInternalCode);
|
||||
return rd.code() << Rd_offset;
|
||||
}
|
||||
|
||||
static Instr Rn(CPURegister rn) {
|
||||
ASSERT(rn.code() != kSPRegInternalCode);
|
||||
DCHECK(rn.code() != kSPRegInternalCode);
|
||||
return rn.code() << Rn_offset;
|
||||
}
|
||||
|
||||
static Instr Rm(CPURegister rm) {
|
||||
ASSERT(rm.code() != kSPRegInternalCode);
|
||||
DCHECK(rm.code() != kSPRegInternalCode);
|
||||
return rm.code() << Rm_offset;
|
||||
}
|
||||
|
||||
static Instr Ra(CPURegister ra) {
|
||||
ASSERT(ra.code() != kSPRegInternalCode);
|
||||
DCHECK(ra.code() != kSPRegInternalCode);
|
||||
return ra.code() << Ra_offset;
|
||||
}
|
||||
|
||||
static Instr Rt(CPURegister rt) {
|
||||
ASSERT(rt.code() != kSPRegInternalCode);
|
||||
DCHECK(rt.code() != kSPRegInternalCode);
|
||||
return rt.code() << Rt_offset;
|
||||
}
|
||||
|
||||
static Instr Rt2(CPURegister rt2) {
|
||||
ASSERT(rt2.code() != kSPRegInternalCode);
|
||||
DCHECK(rt2.code() != kSPRegInternalCode);
|
||||
return rt2.code() << Rt2_offset;
|
||||
}
|
||||
|
||||
// These encoding functions allow the stack pointer to be encoded, and
|
||||
// disallow the zero register.
|
||||
static Instr RdSP(Register rd) {
|
||||
ASSERT(!rd.IsZero());
|
||||
DCHECK(!rd.IsZero());
|
||||
return (rd.code() & kRegCodeMask) << Rd_offset;
|
||||
}
|
||||
|
||||
static Instr RnSP(Register rn) {
|
||||
ASSERT(!rn.IsZero());
|
||||
DCHECK(!rn.IsZero());
|
||||
return (rn.code() & kRegCodeMask) << Rn_offset;
|
||||
}
|
||||
|
||||
@ -2087,7 +2087,7 @@ class Assembler : public AssemblerBase {
|
||||
void Emit(Instr instruction) {
|
||||
STATIC_ASSERT(sizeof(*pc_) == 1);
|
||||
STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
|
||||
ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
|
||||
DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
|
||||
|
||||
memcpy(pc_, &instruction, sizeof(instruction));
|
||||
pc_ += sizeof(instruction);
|
||||
@ -2096,8 +2096,8 @@ class Assembler : public AssemblerBase {
|
||||
|
||||
// Emit data inline in the instruction stream.
|
||||
void EmitData(void const * data, unsigned size) {
|
||||
ASSERT(sizeof(*pc_) == 1);
|
||||
ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
|
||||
DCHECK(sizeof(*pc_) == 1);
|
||||
DCHECK((pc_ + size) <= (buffer_ + buffer_size_));
|
||||
|
||||
// TODO(all): Somehow register we have some data here. Then we can
|
||||
// disassemble it correctly.
|
||||
@ -2174,7 +2174,7 @@ class Assembler : public AssemblerBase {
|
||||
// Record the AST id of the CallIC being compiled, so that it can be placed
|
||||
// in the relocation information.
|
||||
void SetRecordedAstId(TypeFeedbackId ast_id) {
|
||||
ASSERT(recorded_ast_id_.IsNone());
|
||||
DCHECK(recorded_ast_id_.IsNone());
|
||||
recorded_ast_id_ = ast_id;
|
||||
}
|
||||
|
||||
@ -2222,7 +2222,7 @@ class Assembler : public AssemblerBase {
|
||||
static const int kVeneerDistanceCheckMargin =
|
||||
kVeneerNoProtectionFactor * kVeneerDistanceMargin;
|
||||
int unresolved_branches_first_limit() const {
|
||||
ASSERT(!unresolved_branches_.empty());
|
||||
DCHECK(!unresolved_branches_.empty());
|
||||
return unresolved_branches_.begin()->first;
|
||||
}
|
||||
// This is similar to next_constant_pool_check_ and helps reduce the overhead
|
||||
@ -2275,12 +2275,12 @@ class PatchingAssembler : public Assembler {
|
||||
|
||||
~PatchingAssembler() {
|
||||
// Const pool should still be blocked.
|
||||
ASSERT(is_const_pool_blocked());
|
||||
DCHECK(is_const_pool_blocked());
|
||||
EndBlockPools();
|
||||
// Verify we have generated the number of instruction we expected.
|
||||
ASSERT((pc_offset() + kGap) == buffer_size_);
|
||||
DCHECK((pc_offset() + kGap) == buffer_size_);
|
||||
// Verify no relocation information has been emitted.
|
||||
ASSERT(IsConstPoolEmpty());
|
||||
DCHECK(IsConstPoolEmpty());
|
||||
// Flush the Instruction cache.
|
||||
size_t length = buffer_size_ - kGap;
|
||||
CpuFeatures::FlushICache(buffer_, length);
|
||||
|
@ -66,7 +66,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
num_extra_args = 1;
__ Push(x1);
} else {
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}

// JumpToExternalReference expects x0 to contain the number of arguments
@ -315,7 +315,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,

ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
// Should never create mementos for api functions.
ASSERT(!is_api_function || !create_memento);
DCHECK(!is_api_function || !create_memento);

Isolate* isolate = masm->isolate();

@ -465,11 +465,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ FillFields(first_prop, prop_fields, filler);
__ Add(first_prop, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
__ LoadRoot(x14, Heap::kAllocationMementoMapRootIndex);
ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
__ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
// Load the AllocationSite
__ Peek(x14, 2 * kXRegSize);
ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
first_prop = NoReg;
} else {
@ -400,7 +400,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
ASSERT((param_count == 0) ||
DCHECK((param_count == 0) ||
x0.Is(descriptor->GetEnvironmentParameterRegister(param_count - 1)));

// Push arguments
@ -422,10 +422,10 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Label done;
Register input = source();
Register result = destination();
ASSERT(is_truncating());
DCHECK(is_truncating());

ASSERT(result.Is64Bits());
ASSERT(jssp.Is(masm->StackPointer()));
DCHECK(result.Is64Bits());
DCHECK(jssp.Is(masm->StackPointer()));

int double_offset = offset();

@ -505,7 +505,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
FPRegister double_scratch,
Label* slow,
Condition cond) {
ASSERT(!AreAliased(left, right, scratch));
DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number;
Register result = x0;

@ -560,7 +560,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// it is handled in the parser (see Parser::ParseBinaryExpression). We are
// only concerned with cases ge, le and eq here.
if ((cond != lt) && (cond != gt)) {
ASSERT((cond == ge) || (cond == le) || (cond == eq));
DCHECK((cond == ge) || (cond == le) || (cond == eq));
__ Bind(&heap_number);
// Left and right are identical pointers to a heap number object. Return
// non-equal if the heap number is a NaN, and equal otherwise. Comparing
@ -593,7 +593,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register left_type,
Register right_type,
Register scratch) {
ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
DCHECK(!AreAliased(left, right, left_type, right_type, scratch));

if (masm->emit_debug_code()) {
// We assume that the arguments are not identical.
@ -611,7 +611,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
__ B(lt, &right_non_object);

// Return non-zero - x0 already contains a non-zero pointer.
ASSERT(left.is(x0) || right.is(x0));
DCHECK(left.is(x0) || right.is(x0));
Label return_not_equal;
__ Bind(&return_not_equal);
__ Ret();
@ -649,9 +649,9 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register scratch,
Label* slow,
bool strict) {
ASSERT(!AreAliased(left, right, scratch));
ASSERT(!AreAliased(left_d, right_d));
ASSERT((left.is(x0) && right.is(x1)) ||
DCHECK(!AreAliased(left, right, scratch));
DCHECK(!AreAliased(left_d, right_d));
DCHECK((left.is(x0) && right.is(x1)) ||
(right.is(x0) && left.is(x1)));
Register result = x0;

@ -724,7 +724,7 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
Register right_type,
Label* possible_strings,
Label* not_both_strings) {
ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
Register result = x0;

Label object_test;
@ -844,7 +844,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Left and/or right is a NaN. Load the result register with whatever makes
// the comparison fail, since comparisons with NaN always fail (except ne,
// which is filtered out at a higher level.)
ASSERT(cond != ne);
DCHECK(cond != ne);
if ((cond == lt) || (cond == le)) {
__ Mov(result, GREATER);
} else {
@ -935,7 +935,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
if ((cond == lt) || (cond == le)) {
ncr = GREATER;
} else {
ASSERT((cond == gt) || (cond == ge)); // remaining cases
DCHECK((cond == gt) || (cond == ge)); // remaining cases
ncr = LESS;
}
__ Mov(x10, Smi::FromInt(ncr));
@ -1243,7 +1243,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Bind(&done);
__ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
result_double);
ASSERT(result_tagged.is(x0));
DCHECK(result_tagged.is(x0));
__ IncrementCounter(
isolate()->counters()->math_pow(), 1, scratch0, scratch1);
__ Ret();
@ -1347,7 +1347,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
//
// The arguments are in reverse order, so that arg[argc-2] is actually the
// first argument to the target function and arg[0] is the last.
ASSERT(jssp.Is(__ StackPointer()));
DCHECK(jssp.Is(__ StackPointer()));
const Register& argc_input = x0;
const Register& target_input = x1;

@ -1374,7 +1374,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// registers.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_, x10, 3);
ASSERT(csp.Is(__ StackPointer()));
DCHECK(csp.Is(__ StackPointer()));

// Poke callee-saved registers into reserved space.
__ Poke(argv, 1 * kPointerSize);
@ -1424,7 +1424,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// untouched, and the stub either throws an exception by jumping to one of
// the exception_returned label.

ASSERT(csp.Is(__ StackPointer()));
DCHECK(csp.Is(__ StackPointer()));

// Prepare AAPCS64 arguments to pass to the builtin.
__ Mov(x0, argc);
@ -1471,7 +1471,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Peek(target, 3 * kPointerSize);

__ LeaveExitFrame(save_doubles_, x10, true);
ASSERT(jssp.Is(__ StackPointer()));
DCHECK(jssp.Is(__ StackPointer()));
// Pop or drop the remaining stack slots and return from the stub.
// jssp[24]: Arguments array (of size argc), including receiver.
// jssp[16]: Preserved x23 (used for target).
@ -1543,7 +1543,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Output:
// x0: result.
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ASSERT(jssp.Is(__ StackPointer()));
DCHECK(jssp.Is(__ StackPointer()));
Register code_entry = x0;

// Enable instruction instrumentation. This only works on the simulator, and
@ -1597,7 +1597,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ B(&done);
__ Bind(&non_outermost_js);
// We spare one instruction by pushing xzr since the marker is 0.
ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
__ Push(xzr);
__ Bind(&done);

@ -1699,7 +1699,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Reset the stack to the callee saved registers.
__ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
// Restore the callee-saved registers and return.
ASSERT(jssp.Is(__ StackPointer()));
DCHECK(jssp.Is(__ StackPointer()));
__ Mov(csp, jssp);
__ SetStackPointer(csp);
__ PopCalleeSavedRegisters();
@ -1832,7 +1832,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Mov(result, res_true);
__ Bind(&return_result);
if (HasCallSiteInlineCheck()) {
ASSERT(ReturnTrueFalseObject());
DCHECK(ReturnTrueFalseObject());
__ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
__ GetRelocatedValueLocation(map_check_site, scratch2);
__ Str(result, MemOperand(scratch2));
@ -2468,7 +2468,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Cbz(x10, &runtime);

// Check that the first argument is a JSRegExp object.
ASSERT(jssp.Is(__ StackPointer()));
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(jsregexp_object, kJSRegExpOffset);
__ JumpIfSmi(jsregexp_object, &runtime);
__ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
@ -2505,7 +2505,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Initialize offset for possibly sliced string.
__ Mov(sliced_string_offset, 0);

ASSERT(jssp.Is(__ StackPointer()));
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(subject, kSubjectOffset);
__ JumpIfSmi(subject, &runtime);

@ -2588,7 +2588,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {

// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
ASSERT(jssp.Is(__ StackPointer()));
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(x10, kPreviousIndexOffset);
__ JumpIfNotSmi(x10, &runtime);
__ Cmp(jsstring_length, x10);
@ -2606,7 +2606,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Find the code object based on the assumptions above.
// kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
// of kPointerSize to reach the latter.
ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
DCHECK_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
// We will need the encoding later: ASCII = 0x04
@ -2630,7 +2630,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {

// Isolates: note we add an additional parameter here (isolate pointer).
__ EnterExitFrame(false, x10, 1);
ASSERT(csp.Is(__ StackPointer()));
DCHECK(csp.Is(__ StackPointer()));

// We have 9 arguments to pass to the regexp code, therefore we have to pass
// one on the stack and the rest as registers.
@ -2734,7 +2734,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Add(number_of_capture_registers, x10, 2);

// Check that the fourth object is a JSArray object.
ASSERT(jssp.Is(__ StackPointer()));
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(x10, kLastMatchInfoOffset);
__ JumpIfSmi(x10, &runtime);
__ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
@ -2916,7 +2916,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm,
Register scratch1,
Register scratch2) {
ASM_LOCATION("GenerateRecordCallTarget");
ASSERT(!AreAliased(scratch1, scratch2,
DCHECK(!AreAliased(scratch1, scratch2,
argc, function, feedback_vector, index));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
@ -2926,9 +2926,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm,
// index : slot in feedback vector (smi)
Label initialize, done, miss, megamorphic, not_array_function;

ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());

// Load the cache state.
@ -2993,7 +2993,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm,

// CreateAllocationSiteStub expect the feedback vector in x2 and the slot
// index in x3.
ASSERT(feedback_vector.Is(x2) && index.Is(x3));
DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(&create_stub);

__ Pop(index, feedback_vector, function, argc);
@ -3419,7 +3419,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
@ -3486,7 +3486,7 @@ void StringCharFromCodeGenerator::GenerateSlow(

void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
// Inputs are in x0 (lhs) and x1 (rhs).
ASSERT(state_ == CompareIC::SMI);
DCHECK(state_ == CompareIC::SMI);
ASM_LOCATION("ICCompareStub[Smis]");
Label miss;
// Bail out (to 'miss') unless both x0 and x1 are smis.
@ -3508,7 +3508,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {


void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::NUMBER);
DCHECK(state_ == CompareIC::NUMBER);
ASM_LOCATION("ICCompareStub[HeapNumbers]");

Label unordered, maybe_undefined1, maybe_undefined2;
@ -3576,7 +3576,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {


void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
ASM_LOCATION("ICCompareStub[InternalizedStrings]");
Label miss;

@ -3614,9 +3614,9 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {


void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::UNIQUE_NAME);
DCHECK(state_ == CompareIC::UNIQUE_NAME);
ASM_LOCATION("ICCompareStub[UniqueNames]");
ASSERT(GetCondition() == eq);
DCHECK(GetCondition() == eq);
Label miss;

Register result = x0;
@ -3653,7 +3653,7 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {


void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::STRING);
DCHECK(state_ == CompareIC::STRING);
ASM_LOCATION("ICCompareStub[Strings]");

Label miss;
@ -3694,7 +3694,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// because we already know they are not identical. We know they are both
// strings.
if (equality) {
ASSERT(GetCondition() == eq);
DCHECK(GetCondition() == eq);
STATIC_ASSERT(kInternalizedTag == 0);
Label not_internalized_strings;
__ Orr(x12, lhs_type, rhs_type);
@ -3734,7 +3734,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::OBJECT);
DCHECK(state_ == CompareIC::OBJECT);
ASM_LOCATION("ICCompareStub[Objects]");

Label miss;
@ -3748,7 +3748,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
__ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);

ASSERT(GetCondition() == eq);
DCHECK(GetCondition() == eq);
__ Sub(result, rhs, lhs);
__ Ret();

@ -3824,7 +3824,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character) {
ASSERT(!AreAliased(hash, character));
DCHECK(!AreAliased(hash, character));

// hash = character + (character << 10);
__ LoadRoot(hash, Heap::kHashSeedRootIndex);
@ -3844,7 +3844,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character) {
ASSERT(!AreAliased(hash, character));
DCHECK(!AreAliased(hash, character));

// hash += character;
__ Add(hash, hash, character);
@ -3865,7 +3865,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// Compute hashes modulo 2^32 using a 32-bit W register.
Register hash_w = hash.W();
Register scratch_w = scratch.W();
ASSERT(!AreAliased(hash_w, scratch_w));
DCHECK(!AreAliased(hash_w, scratch_w));

// hash += hash << 3;
__ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
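
For reference, a minimal scalar sketch of the hash these three StringHelper stubs compute together (a Jenkins-style one-at-a-time hash, evaluated modulo 2^32 exactly like the 32-bit W registers above). The helper name and the seed handling are illustrative assumptions, not part of this patch:

#include <cstdint>

// Illustrative only: assumed scalar equivalent of GenerateHashInit /
// GenerateHashAddCharacter / GenerateHashGetHash.
uint32_t StringHash(const uint16_t* chars, int length, uint32_t seed) {
  uint32_t hash = seed;
  for (int i = 0; i < length; i++) {
    hash += chars[i];    // hash += character;
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;     // finalization, as in GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash;
}
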
@ -4139,7 +4139,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3) {
ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
Register result = x0;
Register left_length = scratch1;
Register right_length = scratch2;
@ -4182,7 +4182,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register scratch2,
Register scratch3,
Register scratch4) {
ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
Label result_not_equal, compare_lengths;

// Find minimum length and length difference.
@ -4203,7 +4203,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare lengths - strings up to min-length are equal.
__ Bind(&compare_lengths);

ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));

// Use length_delta as result if it's zero.
Register result = x0;
@ -4228,7 +4228,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
Register scratch1,
Register scratch2,
Label* chars_not_equal) {
ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
DCHECK(!AreAliased(left, right, length, scratch1, scratch2));

// Change index to run from -length to -1 by adding length to string
// start. This means that loop ends when index reaches zero, which
@ -4368,8 +4368,8 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
Register address =
x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
ASSERT(!address.Is(regs_.object()));
ASSERT(!address.Is(x0));
DCHECK(!address.Is(regs_.object()));
DCHECK(!address.Is(x0));
__ Mov(address, regs_.address());
__ Mov(x0, regs_.object());
__ Mov(x1, address);
@ -4609,7 +4609,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
__ Bind(&entry_hook_call_start);
__ Push(lr);
__ CallStub(&stub);
ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
GetProfileEntryHookCallSize(masm));

__ Pop(lr);
@ -4624,7 +4624,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// from anywhere.
// TODO(jbramley): What about FP registers?
__ PushCPURegList(kCallerSaved);
ASSERT(kCallerSaved.IncludesAliasOf(lr));
DCHECK(kCallerSaved.IncludesAliasOf(lr));
const int kNumSavedRegs = kCallerSaved.Count();

// Compute the function's address as the first argument.
@ -4685,7 +4685,7 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
// Make sure the caller configured the stack pointer (see comment in
// DirectCEntryStub::Generate).
ASSERT(csp.Is(__ StackPointer()));
DCHECK(csp.Is(__ StackPointer()));

intptr_t code =
reinterpret_cast<intptr_t>(GetCode().location());
@ -4710,7 +4710,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
Register name,
Register scratch1,
Register scratch2) {
ASSERT(!AreAliased(elements, name, scratch1, scratch2));
DCHECK(!AreAliased(elements, name, scratch1, scratch2));

// Assert that name contains a string.
__ AssertName(name);
@ -4727,7 +4727,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
ASSERT(NameDictionary::GetProbeOffset(i) <
DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ Add(scratch2, scratch2, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
@ -4735,7 +4735,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
__ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));

// Scale the index by multiplying by the element size.
ASSERT(NameDictionary::kEntrySize == 3);
DCHECK(NameDictionary::kEntrySize == 3);
__ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));

// Check if the key is identical to the name.
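
A hedged scalar sketch of the probe-and-scale scheme the two hunks above unroll: probing over a power-of-two capacity with a growing offset, where each dictionary entry occupies three slots, so the slot index is index * 3, computed as index + (index << 1) just like the Add with LSL #1. The function and parameter names here are hypothetical:

#include <cstdint>

// Illustrative only: slot of probe i for a given hash, assuming the
// probe offset is derived from (i + i * i) as the comments above say
// and capacity_mask selects a power-of-two table size.
uint32_t EntrySlot(uint32_t hash, uint32_t i, uint32_t capacity_mask) {
  uint32_t index = (hash + i + i * i) & capacity_mask;
  return index + (index << 1);  // index * kEntrySize, with kEntrySize == 3
}
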
@ -4758,7 +4758,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
__ PushCPURegList(spill_list);

if (name.is(x0)) {
ASSERT(!elements.is(x1));
DCHECK(!elements.is(x1));
__ Mov(x1, name);
__ Mov(x0, elements);
} else {
@ -4787,8 +4787,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register properties,
Handle<Name> name,
Register scratch0) {
ASSERT(!AreAliased(receiver, properties, scratch0));
ASSERT(name->IsUniqueName());
DCHECK(!AreAliased(receiver, properties, scratch0));
DCHECK(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@ -4804,7 +4804,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));

// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
DCHECK(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.

Register entity_name = scratch0;
@ -4885,7 +4885,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
ASSERT(NameDictionary::GetProbeOffset(i) <
DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ Add(index, hash,
NameDictionary::GetProbeOffset(i) << Name::kHashShift);
@ -4895,7 +4895,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ And(index, mask, Operand(index, LSR, Name::kHashShift));

// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
DCHECK(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.

__ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
@ -5331,7 +5331,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);

ASSERT(!AreAliased(x0, api_function_address));
DCHECK(!AreAliased(x0, api_function_address));
// x0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ Add(x0, masm->StackPointer(), 1 * kPointerSize);
@ -120,17 +120,17 @@ class RecordWriteStub: public PlatformCodeStub {
Instruction* instr2 = instr1->following();

if (instr1->IsUncondBranchImm()) {
ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
return INCREMENTAL;
}

ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));

if (instr2->IsUncondBranchImm()) {
return INCREMENTAL_COMPACTION;
}

ASSERT(instr2->IsPCRelAddressing());
DCHECK(instr2->IsPCRelAddressing());

return STORE_BUFFER_ONLY;
}
@ -149,31 +149,31 @@ class RecordWriteStub: public PlatformCodeStub {
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
// Instructions must be either 'adr' or 'b'.
ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
// Retrieve the offsets to the labels.
int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();

switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
break;
}
ASSERT(GetMode(stub) == mode);
DCHECK(GetMode(stub) == mode);
}

private:
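
A hedged summary of the encoding that GetMode and Patch agree on in the hunks above: which of the stub's first two instructions is an unconditional branch (b) rather than a pc-relative address (adr xzr, ...) identifies the stub's current mode. A scalar sketch with hypothetical names:

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

// Illustrative only: decode the mode from the two leading instructions.
Mode DecodeMode(bool instr1_is_branch, bool instr2_is_branch) {
  if (instr1_is_branch) return INCREMENTAL;             // b ...; adr xzr, ...
  if (instr2_is_branch) return INCREMENTAL_COMPACTION;  // adr xzr, ...; b ...
  return STORE_BUFFER_ONLY;                             // adr xzr, ...; adr xzr, ...
}
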
@ -189,7 +189,7 @@ class RecordWriteStub: public PlatformCodeStub {
scratch0_(scratch),
saved_regs_(kCallerSaved),
saved_fp_regs_(kCallerSavedFP) {
ASSERT(!AreAliased(scratch, object, address));
DCHECK(!AreAliased(scratch, object, address));

// The SaveCallerSaveRegisters method needs to save caller-saved
// registers, but we don't bother saving MacroAssembler scratch registers.
@ -313,9 +313,9 @@ class RecordWriteStub: public PlatformCodeStub {
Register address,
RememberedSetAction action,
SaveFPRegsMode fp_mode) {
ASSERT(object.Is64Bits());
ASSERT(value.Is64Bits());
ASSERT(address.Is64Bits());
DCHECK(object.Is64Bits());
DCHECK(value.Is64Bits());
DCHECK(address.Is64Bits());
return ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
@ -62,7 +62,7 @@ UnaryMathFunction CreateExpFunction() {

CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
DCHECK(!RelocInfo::RequiresRelocation(desc));

CpuFeatures::FlushICache(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
@ -86,14 +86,14 @@ UnaryMathFunction CreateSqrtFunction() {

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
ASSERT(!masm->has_frame());
DCHECK(!masm->has_frame());
masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
ASSERT(masm->has_frame());
DCHECK(masm->has_frame());
masm->set_has_frame(false);
}

@ -111,10 +111,10 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
Label* allocation_memento_found) {
ASM_LOCATION(
"ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
ASSERT(!AreAliased(receiver, key, value, target_map));
DCHECK(!AreAliased(receiver, key, value, target_map));

if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
DCHECK(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
allocation_memento_found);
}
@ -150,7 +150,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Register scratch = x6;

// Verify input registers don't conflict with locals.
ASSERT(!AreAliased(receiver, key, value, target_map,
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, length, array_size, array));

if (mode == TRACK_ALLOCATION_SITE) {
@ -253,7 +253,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Register length = x5;

// Verify input registers don't conflict with locals.
ASSERT(!AreAliased(receiver, key, value, target_map,
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, array_size, array, length));

if (mode == TRACK_ALLOCATION_SITE) {
@ -356,7 +356,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(


CodeAgingHelper::CodeAgingHelper() {
ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
@ -368,7 +368,7 @@ CodeAgingHelper::CodeAgingHelper() {

#ifdef DEBUG
const int length = kCodeAgeStubEntryOffset / kInstructionSize;
ASSERT(old_sequence_.length() >= kCodeAgeStubEntryOffset);
DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset);
PatchingAssembler patcher_old(old_sequence_.start(), length);
MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
#endif
@ -420,7 +420,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register index,
Register result,
Label* call_runtime) {
ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
// Fetch the instance type of the receiver into result register.
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@ -516,10 +516,10 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
// instead of fmul and fsub. Doing this changes the result, but since this is
// an estimation anyway, does it matter?

ASSERT(!AreAliased(input, result,
DCHECK(!AreAliased(input, result,
double_temp1, double_temp2,
temp1, temp2, temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);

Label done;
DoubleRegister double_temp3 = result;
@ -539,7 +539,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Label result_is_finite_non_zero;
// Assert that we can load offset 0 (the small input threshold) and offset 1
// (the large input threshold) with a single ldp.
ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
DCHECK(kDRegSize == (ExpConstant(constants, 1).offset() -
ExpConstant(constants, 0).offset()));
__ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));

@ -569,7 +569,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ Bind(&result_is_finite_non_zero);

// Assert that we can load offset 3 and offset 4 with a single ldp.
ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
DCHECK(kDRegSize == (ExpConstant(constants, 4).offset() -
ExpConstant(constants, 3).offset()));
__ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
__ Fmadd(double_temp1, double_temp1, input, double_temp3);
@ -577,7 +577,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ Fsub(double_temp1, double_temp1, double_temp3);

// Assert that we can load offset 5 and offset 6 with a single ldp.
ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
DCHECK(kDRegSize == (ExpConstant(constants, 6).offset() -
ExpConstant(constants, 5).offset()));
__ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
// TODO(jbramley): Consider using Fnmsub here.
@ -262,7 +262,7 @@ enum Condition {
inline Condition NegateCondition(Condition cond) {
// Conditions al and nv behave identically, as "always true". They can't be
// inverted, because there is no never condition.
ASSERT((cond != al) && (cond != nv));
DCHECK((cond != al) && (cond != nv));
return static_cast<Condition>(cond ^ 1);
}

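The cond ^ 1 trick above works because AArch64 condition codes are laid out in inverse pairs (eq = 0b0000 and ne = 0b0001, ge = 0b1010 and lt = 0b1011, and so on), so flipping the low bit negates the condition; al and nv both mean "always", which is why the DCHECK excludes them. A hedged illustration using a hypothetical mirror of the encoding:

// Illustrative only: with the standard AArch64 encoding,
// NegateCondition(eq) == ne and NegateCondition(ge) == lt.
enum Cond { kEq = 0, kNe = 1, kGe = 10, kLt = 11 };
static_assert((kEq ^ 1) == kNe, "even/odd pairs are inverses");
static_assert((kGe ^ 1) == kLt, "flipping bit 0 negates the condition");
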
@ -400,7 +400,7 @@ enum SystemRegister {
//
// The enumerations can be used like this:
//
// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
// DCHECK(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
// switch(instr->Mask(PCRelAddressingMask)) {
// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
@ -59,8 +59,8 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
uintptr_t dsize = sizes.dcache_line_size();
uintptr_t isize = sizes.icache_line_size();
// Cache line sizes are always a power of 2.
ASSERT(CountSetBits(dsize, 64) == 1);
ASSERT(CountSetBits(isize, 64) == 1);
DCHECK(CountSetBits(dsize, 64) == 1);
DCHECK(CountSetBits(isize, 64) == 1);
uintptr_t dstart = start & ~(dsize - 1);
uintptr_t istart = start & ~(isize - 1);
uintptr_t end = start + length;
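
A hedged sketch of the alignment arithmetic above: because each cache line size has exactly one set bit (the power-of-two property the two DCHECKs verify), masking with ~(size - 1) rounds an address down to a line boundary:

#include <cstdint>

// Illustrative only: AlignDown(0x1234, 64) == 0x1200.
uint64_t AlignDown(uint64_t address, uint64_t line_size) {
  return address & ~(line_size - 1);  // valid only for power-of-two sizes
}
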
@ -67,13 +67,13 @@ void BreakLocationIterator::ClearDebugBreakAtReturn() {


bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}


bool BreakLocationIterator::IsDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
DCHECK(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
return rinfo()->IsPatchedDebugBreakSlotSequence();
}
@ -118,7 +118,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {


void BreakLocationIterator::ClearDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
DCHECK(IsDebugBreakSlot());
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kDebugBreakSlotInstructions);
}
@ -150,12 +150,12 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// collector doesn't try to interpret them as pointers.
//
// TODO(jbramley): Why can't this handle callee-saved registers?
ASSERT((~kCallerSaved.list() & object_regs) == 0);
ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
ASSERT((object_regs & non_object_regs) == 0);
ASSERT((scratch.Bit() & object_regs) == 0);
ASSERT((scratch.Bit() & non_object_regs) == 0);
ASSERT((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
DCHECK((~kCallerSaved.list() & object_regs) == 0);
DCHECK((~kCallerSaved.list() & non_object_regs) == 0);
DCHECK((object_regs & non_object_regs) == 0);
DCHECK((scratch.Bit() & object_regs) == 0);
DCHECK((scratch.Bit() & non_object_regs) == 0);
DCHECK((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
STATIC_ASSERT(kSmiValueSize == 32);

CPURegList non_object_list =
@ -96,17 +96,17 @@ void Decoder<V>::Decode(Instruction *instr) {

template<typename V>
void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x0);
DCHECK(instr->Bits(27, 24) == 0x0);
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
// decode.
ASSERT(instr->Bit(28) == 0x1);
DCHECK(instr->Bit(28) == 0x1);
V::VisitPCRelAddressing(instr);
}


template<typename V>
void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0x4) ||
DCHECK((instr->Bits(27, 24) == 0x4) ||
(instr->Bits(27, 24) == 0x5) ||
(instr->Bits(27, 24) == 0x6) ||
(instr->Bits(27, 24) == 0x7) );
@ -208,7 +208,7 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {

template<typename V>
void Decoder<V>::DecodeLoadStore(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0x8) ||
DCHECK((instr->Bits(27, 24) == 0x8) ||
(instr->Bits(27, 24) == 0x9) ||
(instr->Bits(27, 24) == 0xC) ||
(instr->Bits(27, 24) == 0xD) );
@ -328,7 +328,7 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {

template<typename V>
void Decoder<V>::DecodeLogical(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x2);
DCHECK(instr->Bits(27, 24) == 0x2);

if (instr->Mask(0x80400000) == 0x00400000) {
V::VisitUnallocated(instr);
@ -348,7 +348,7 @@ void Decoder<V>::DecodeLogical(Instruction* instr) {

template<typename V>
void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x3);
DCHECK(instr->Bits(27, 24) == 0x3);

if ((instr->Mask(0x80400000) == 0x80000000) ||
(instr->Mask(0x80400000) == 0x00400000) ||
@ -374,7 +374,7 @@ void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {

template<typename V>
void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x1);
DCHECK(instr->Bits(27, 24) == 0x1);
if (instr->Bit(23) == 1) {
V::VisitUnallocated(instr);
} else {
@ -385,7 +385,7 @@ void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {

template<typename V>
void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0xA) ||
DCHECK((instr->Bits(27, 24) == 0xA) ||
(instr->Bits(27, 24) == 0xB) );

if (instr->Bit(24) == 0) {
@ -501,7 +501,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {

template<typename V>
void Decoder<V>::DecodeFP(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0xE) ||
DCHECK((instr->Bits(27, 24) == 0xE) ||
(instr->Bits(27, 24) == 0xF) );

if (instr->Bit(28) == 0) {
@ -614,7 +614,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
}
} else {
// Bit 30 == 1 has been handled earlier.
ASSERT(instr->Bit(30) == 0);
DCHECK(instr->Bit(30) == 0);
if (instr->Mask(0xA0800000) != 0) {
V::VisitUnallocated(instr);
} else {
@ -630,7 +630,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
// TODO(all): Implement Advanced SIMD load/store instruction decode.
ASSERT(instr->Bits(29, 25) == 0x6);
DCHECK(instr->Bits(29, 25) == 0x6);
V::VisitUnimplemented(instr);
}

@ -638,7 +638,7 @@ void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
// TODO(all): Implement Advanced SIMD data processing instruction decode.
ASSERT(instr->Bits(27, 25) == 0x7);
DCHECK(instr->Bits(27, 25) == 0x7);
V::VisitUnimplemented(instr);
}

@ -39,7 +39,7 @@ void DispatchingDecoderVisitor::InsertVisitorBefore(
}
// We reached the end of the list. The last element must be
// registered_visitor.
ASSERT(*it == registered_visitor);
DCHECK(*it == registered_visitor);
visitors_.insert(it, new_visitor);
}

@ -57,7 +57,7 @@ void DispatchingDecoderVisitor::InsertVisitorAfter(
}
// We reached the end of the list. The last element must be
// registered_visitor.
ASSERT(*it == registered_visitor);
DCHECK(*it == registered_visitor);
visitors_.push_back(new_visitor);
}

@ -70,7 +70,7 @@ void DispatchingDecoderVisitor::RemoveVisitor(DecoderVisitor* visitor) {
#define DEFINE_VISITOR_CALLERS(A) \
void DispatchingDecoderVisitor::Visit##A(Instruction* instr) { \
if (!(instr->Mask(A##FMask) == A##Fixed)) { \
ASSERT(instr->Mask(A##FMask) == A##Fixed); \
DCHECK(instr->Mask(A##FMask) == A##Fixed); \
} \
std::list<DecoderVisitor*>::iterator it; \
for (it = visitors_.begin(); it != visitors_.end(); it++) { \
@ -15,7 +15,7 @@ namespace internal {

void DelayedMasm::EndDelayedUse() {
EmitPending();
ASSERT(!scratch_register_acquired_);
DCHECK(!scratch_register_acquired_);
ResetSavedValue();
}

@ -24,7 +24,7 @@ void DelayedMasm::Mov(const Register& rd,
const Operand& operand,
DiscardMoveMode discard_mode) {
EmitPending();
ASSERT(!IsScratchRegister(rd) || scratch_register_acquired_);
DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
__ Mov(rd, operand, discard_mode);
}

@ -43,7 +43,7 @@ void DelayedMasm::Fmov(FPRegister fd, double imm) {

void DelayedMasm::LoadObject(Register result, Handle<Object> object) {
EmitPending();
ASSERT(!IsScratchRegister(result) || scratch_register_acquired_);
DCHECK(!IsScratchRegister(result) || scratch_register_acquired_);
__ LoadObject(result, object);
}

@ -16,12 +16,12 @@ namespace internal {


void DelayedMasm::StackSlotMove(LOperand* src, LOperand* dst) {
ASSERT(src->IsStackSlot());
ASSERT(dst->IsStackSlot());
DCHECK(src->IsStackSlot());
DCHECK(dst->IsStackSlot());
MemOperand src_operand = cgen_->ToMemOperand(src);
MemOperand dst_operand = cgen_->ToMemOperand(dst);
if (pending_ == kStackSlotMove) {
ASSERT(pending_pc_ == masm_->pc_offset());
DCHECK(pending_pc_ == masm_->pc_offset());
UseScratchRegisterScope scope(masm_);
DoubleRegister temp1 = scope.AcquireD();
DoubleRegister temp2 = scope.AcquireD();
@ -66,7 +66,7 @@ void DelayedMasm::StackSlotMove(LOperand* src, LOperand* dst) {


void DelayedMasm::StoreConstant(uint64_t value, const MemOperand& operand) {
ASSERT(!scratch_register_acquired_);
DCHECK(!scratch_register_acquired_);
if ((pending_ == kStoreConstant) && (value == pending_value_)) {
MemOperand::PairResult result =
MemOperand::AreConsistentForPair(pending_address_dst_, operand);
@ -75,7 +75,7 @@ void DelayedMasm::StoreConstant(uint64_t value, const MemOperand& operand) {
(result == MemOperand::kPairAB) ?
pending_address_dst_ :
operand;
ASSERT(pending_pc_ == masm_->pc_offset());
DCHECK(pending_pc_ == masm_->pc_offset());
if (pending_value_ == 0) {
__ Stp(xzr, xzr, dst);
} else {
@ -104,18 +104,18 @@ void DelayedMasm::Load(const CPURegister& rd, const MemOperand& operand) {
case MemOperand::kNotPair:
break;
case MemOperand::kPairAB:
ASSERT(pending_pc_ == masm_->pc_offset());
ASSERT(!IsScratchRegister(pending_register_) ||
DCHECK(pending_pc_ == masm_->pc_offset());
DCHECK(!IsScratchRegister(pending_register_) ||
scratch_register_acquired_);
ASSERT(!IsScratchRegister(rd) || scratch_register_acquired_);
DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
__ Ldp(pending_register_, rd, pending_address_src_);
ResetPending();
return;
case MemOperand::kPairBA:
ASSERT(pending_pc_ == masm_->pc_offset());
ASSERT(!IsScratchRegister(pending_register_) ||
DCHECK(pending_pc_ == masm_->pc_offset());
DCHECK(!IsScratchRegister(pending_register_) ||
scratch_register_acquired_);
ASSERT(!IsScratchRegister(rd) || scratch_register_acquired_);
DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
__ Ldp(rd, pending_register_, operand);
ResetPending();
return;
@ -139,12 +139,12 @@ void DelayedMasm::Store(const CPURegister& rd, const MemOperand& operand) {
case MemOperand::kNotPair:
break;
case MemOperand::kPairAB:
ASSERT(pending_pc_ == masm_->pc_offset());
DCHECK(pending_pc_ == masm_->pc_offset());
__ Stp(pending_register_, rd, pending_address_dst_);
ResetPending();
return;
case MemOperand::kPairBA:
ASSERT(pending_pc_ == masm_->pc_offset());
DCHECK(pending_pc_ == masm_->pc_offset());
__ Stp(rd, pending_register_, operand);
ResetPending();
return;
@ -162,7 +162,7 @@ void DelayedMasm::Store(const CPURegister& rd, const MemOperand& operand) {


void DelayedMasm::EmitPending() {
ASSERT((pending_ == kNone) || (pending_pc_ == masm_->pc_offset()));
DCHECK((pending_ == kNone) || (pending_pc_ == masm_->pc_offset()));
switch (pending_) {
case kNone:
return;
@ -175,7 +175,7 @@ void DelayedMasm::EmitPending() {
}
break;
case kLoad:
ASSERT(!IsScratchRegister(pending_register_) ||
DCHECK(!IsScratchRegister(pending_register_) ||
scratch_register_acquired_);
__ Ldr(pending_register_, pending_address_src_);
break;
@ -33,9 +33,9 @@ class DelayedMasm BASE_EMBEDDED {
#endif
}
~DelayedMasm() {
ASSERT(!scratch_register_acquired_);
ASSERT(!scratch_register_used_);
ASSERT(!pending());
DCHECK(!scratch_register_acquired_);
DCHECK(!scratch_register_used_);
DCHECK(!pending());
}
inline void EndDelayedUse();

@ -53,13 +53,13 @@ class DelayedMasm BASE_EMBEDDED {
EmitPending();
ResetSavedValue();
#ifdef DEBUG
ASSERT(!scratch_register_acquired_);
DCHECK(!scratch_register_acquired_);
scratch_register_acquired_ = true;
#endif
}
void ReleaseScratchRegister() {
#ifdef DEBUG
ASSERT(scratch_register_acquired_);
DCHECK(scratch_register_acquired_);
scratch_register_acquired_ = false;
#endif
}
@ -100,7 +100,7 @@ class DelayedMasm BASE_EMBEDDED {
private:
// Set the saved value and load the ScratchRegister with it.
void SetSavedValue(uint64_t saved_value) {
ASSERT(saved_value != 0);
DCHECK(saved_value != 0);
if (saved_value_ != saved_value) {
masm_->Mov(ScratchRegister(), saved_value);
saved_value_ = saved_value;
@ -49,9 +49,9 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));

ASSERT((prev_call_address == NULL) ||
DCHECK((prev_call_address == NULL) ||
(call_address >= prev_call_address + patch_size()));
ASSERT(call_address + patch_size() <= code->instruction_end());
DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
@ -250,7 +250,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ B(lt, &outer_push_loop);

__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
!saved_fp_registers.IncludesAliasOf(fp_zero) &&
!saved_fp_registers.IncludesAliasOf(fp_scratch));
int src_offset = FrameDescription::double_registers_offset();
@ -277,7 +277,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Note that lr is not in the list of saved_registers and will be restored
// later. We can use it to hold the address of last output frame while
// reloading the other registers.
ASSERT(!saved_registers.IncludesAliasOf(lr));
DCHECK(!saved_registers.IncludesAliasOf(lr));
Register last_output_frame = lr;
__ Mov(last_output_frame, current_frame);

@ -320,14 +320,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// The number of entry will never exceed kMaxNumberOfEntries.
// As long as kMaxNumberOfEntries is a valid 16 bits immediate you can use
// a movz instruction to load the entry id.
ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
DCHECK(is_uint16(Deoptimizer::kMaxNumberOfEntries));

for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ movz(entry_id, i);
__ b(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
}
__ Bind(&done);
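
Each table entry generated above is a fixed-size pair of instructions (movz to load the entry id, then b to the common tail), which is what the DCHECK against table_entry_size_ verifies; fixed-size entries let the deoptimizer find an entry by plain arithmetic instead of a lookup. A hedged sketch with hypothetical names:

#include <cstdint>

// Illustrative only: entry i starts at a fixed offset from the table base.
uint8_t* TableEntryAddress(uint8_t* table_start, int i, int entry_size) {
  return table_start + i * entry_size;
}
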
@ -258,7 +258,7 @@ void Disassembler::VisitLogicalImmediate(Instruction* instr) {


bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
ASSERT((reg_size == kXRegSizeInBits) ||
DCHECK((reg_size == kXRegSizeInBits) ||
((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));

// Test for movz: 16-bits set at positions 0, 16, 32 or 48.
@ -1176,7 +1176,7 @@ void Disassembler::VisitSystem(Instruction* instr) {
}
}
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
ASSERT(instr->Mask(SystemHintMask) == HINT);
DCHECK(instr->Mask(SystemHintMask) == HINT);
switch (instr->ImmHint()) {
case NOP: {
mnemonic = "nop";
@ -1246,7 +1246,7 @@ void Disassembler::Format(Instruction* instr, const char* mnemonic,
const char* format) {
// TODO(mcapewel) don't think I can use the instr address here - there needs
// to be a base address too
ASSERT(mnemonic != NULL);
DCHECK(mnemonic != NULL);
ResetOutput();
Substitute(instr, mnemonic);
if (format != NULL) {
@ -1364,7 +1364,7 @@ int Disassembler::SubstituteRegisterField(Instruction* instr,

int Disassembler::SubstituteImmediateField(Instruction* instr,
const char* format) {
ASSERT(format[0] == 'I');
DCHECK(format[0] == 'I');

switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
@ -1372,7 +1372,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
AppendToOutput("#0x%" PRIx64, imm);
} else {
ASSERT(format[5] == 'L');
DCHECK(format[5] == 'L');
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
@ -1417,7 +1417,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
return 6;
}
case 'A': { // IAddSub.
ASSERT(instr->ShiftAddSub() <= 1);
DCHECK(instr->ShiftAddSub() <= 1);
int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
return 7;
@ -1474,7 +1474,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,

int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
const char* format) {
ASSERT((format[0] == 'I') && (format[1] == 'B'));
DCHECK((format[0] == 'I') && (format[1] == 'B'));
unsigned r = instr->ImmR();
unsigned s = instr->ImmS();

@ -1488,13 +1488,13 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
AppendToOutput("#%d", s + 1);
return 5;
} else {
ASSERT(format[3] == '-');
DCHECK(format[3] == '-');
AppendToOutput("#%d", s - r + 1);
return 7;
}
}
case 'Z': { // IBZ-r.
ASSERT((format[3] == '-') && (format[4] == 'r'));
DCHECK((format[3] == '-') && (format[4] == 'r'));
unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
: kWRegSizeInBits;
AppendToOutput("#%d", reg_size - r);
@ -1510,7 +1510,7 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,

int Disassembler::SubstituteLiteralField(Instruction* instr,
const char* format) {
ASSERT(strncmp(format, "LValue", 6) == 0);
DCHECK(strncmp(format, "LValue", 6) == 0);
USE(format);

switch (instr->Mask(LoadLiteralMask)) {
@ -1526,12 +1526,12 @@ int Disassembler::SubstituteLiteralField(Instruction* instr,


int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
ASSERT(format[0] == 'H');
ASSERT(instr->ShiftDP() <= 0x3);
DCHECK(format[0] == 'H');
DCHECK(instr->ShiftDP() <= 0x3);

switch (format[1]) {
case 'D': { // HDP.
ASSERT(instr->ShiftDP() != ROR);
DCHECK(instr->ShiftDP() != ROR);
} // Fall through.
case 'L': { // HLo.
if (instr->ImmDPShift() != 0) {
@ -1550,7 +1550,7 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {

int Disassembler::SubstituteConditionField(Instruction* instr,
const char* format) {
ASSERT(format[0] == 'C');
DCHECK(format[0] == 'C');
const char* condition_code[] = { "eq", "ne", "hs", "lo",
"mi", "pl", "vs", "vc",
"hi", "ls", "ge", "lt",
@ -1572,12 +1572,12 @@ int Disassembler::SubstituteConditionField(Instruction* instr,
int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
const char* format) {
USE(format);
ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
DCHECK(strncmp(format, "AddrPCRel", 9) == 0);

int offset = instr->ImmPCRel();

// Only ADR (AddrPCRelByte) is supported.
ASSERT(strcmp(format, "AddrPCRelByte") == 0);
DCHECK(strcmp(format, "AddrPCRelByte") == 0);

char sign = '+';
if (offset < 0) {
@ -1592,7 +1592,7 @@ int Disassembler::SubstitutePCRelAddressField(Instruction* instr,

int Disassembler::SubstituteBranchTargetField(Instruction* instr,
const char* format) {
ASSERT(strncmp(format, "BImm", 4) == 0);
DCHECK(strncmp(format, "BImm", 4) == 0);

int64_t offset = 0;
switch (format[5]) {
@ -1619,8 +1619,8 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr,

int Disassembler::SubstituteExtendField(Instruction* instr,
const char* format) {
ASSERT(strncmp(format, "Ext", 3) == 0);
ASSERT(instr->ExtendMode() <= 7);
DCHECK(strncmp(format, "Ext", 3) == 0);
DCHECK(instr->ExtendMode() <= 7);
USE(format);

const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
@ -1646,7 +1646,7 @@ int Disassembler::SubstituteExtendField(Instruction* instr,

int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
const char* format) {
ASSERT(strncmp(format, "Offsetreg", 9) == 0);
DCHECK(strncmp(format, "Offsetreg", 9) == 0);
const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
"undefined", "undefined", "sxtw", "sxtx" };
USE(format);
@ -1675,7 +1675,7 @@ int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,

int Disassembler::SubstitutePrefetchField(Instruction* instr,
const char* format) {
ASSERT(format[0] == 'P');
DCHECK(format[0] == 'P');
USE(format);

int prefetch_mode = instr->PrefetchMode();
@ -1690,7 +1690,7 @@ int Disassembler::SubstitutePrefetchField(Instruction* instr,

int Disassembler::SubstituteBarrierField(Instruction* instr,
const char* format) {
ASSERT(format[0] == 'M');
DCHECK(format[0] == 'M');
USE(format);

static const char* options[4][4] = {
@ -34,18 +34,18 @@ class JumpPatchSite BASE_EMBEDDED {

~JumpPatchSite() {
if (patch_site_.is_bound()) {
ASSERT(info_emitted_);
DCHECK(info_emitted_);
} else {
ASSERT(reg_.IsNone());
DCHECK(reg_.IsNone());
}
}

void EmitJumpIfNotSmi(Register reg, Label* target) {
// This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
InstructionAccurateScope scope(masm_, 1);
ASSERT(!info_emitted_);
ASSERT(reg.Is64Bits());
ASSERT(!reg.Is(csp));
DCHECK(!info_emitted_);
DCHECK(reg.Is64Bits());
DCHECK(!reg.Is(csp));
reg_ = reg;
__ bind(&patch_site_);
__ tbz(xzr, 0, target); // Always taken before patched.
@ -54,9 +54,9 @@ class JumpPatchSite BASE_EMBEDDED {
void EmitJumpIfSmi(Register reg, Label* target) {
// This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
InstructionAccurateScope scope(masm_, 1);
ASSERT(!info_emitted_);
ASSERT(reg.Is64Bits());
ASSERT(!reg.Is(csp));
DCHECK(!info_emitted_);
DCHECK(reg.Is64Bits());
DCHECK(!reg.Is(csp));
reg_ = reg;
__ bind(&patch_site_);
__ tbnz(xzr, 0, target); // Never taken before patched.
@ -154,12 +154,12 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
DCHECK(!info->function()->is_generator() || locals_count == 0);

if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
ASSERT(jssp.Is(__ StackPointer()));
DCHECK(jssp.Is(__ StackPointer()));
__ Sub(x10, jssp, locals_count * kPointerSize);
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
@ -291,9 +291,9 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Declarations");
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
DCHECK(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
@ -303,7 +303,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
ASSERT(jssp.Is(__ StackPointer()));
DCHECK(jssp.Is(__ StackPointer()));
__ CompareRoot(jssp, Heap::kStackLimitRootIndex);
__ B(hs, &ok);
PredictableCodeSizeScope predictable(masm_,
@ -313,9 +313,9 @@ void FullCodeGenerator::Generate() {
}

{ Comment cmnt(masm_, "[ Body");
ASSERT(loop_depth() == 0);
DCHECK(loop_depth() == 0);
VisitStatements(function()->body());
ASSERT(loop_depth() == 0);
DCHECK(loop_depth() == 0);
}

// Always emit a 'return undefined' in case control fell off the end of
@ -359,13 +359,13 @@ void FullCodeGenerator::EmitProfilingCounterReset() {

void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label* back_edge_target) {
ASSERT(jssp.Is(__ StackPointer()));
DCHECK(jssp.Is(__ StackPointer()));
Comment cmnt(masm_, "[ Back edge bookkeeping");
// Block literal pools whilst emitting back edge code.
Assembler::BlockPoolsScope block_const_pool(masm_);
Label ok;

ASSERT(back_edge_target->is_bound());
DCHECK(back_edge_target->is_bound());
// We want to do a round rather than a floor of distance/kCodeSizeMultiplier
// to reduce the absolute error due to the integer division. To do that,
// we add kCodeSizeMultiplier/2 to the distance (equivalent to adding 0.5 to
@ -407,7 +407,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Runtime::TraceExit returns its parameter in x0.
__ Push(result_register());
__ CallRuntime(Runtime::kTraceExit, 1);
ASSERT(x0.Is(result_register()));
DCHECK(x0.Is(result_register()));
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@ -441,7 +441,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// of the generated code must be consistent.
const Register& current_sp = __ StackPointer();
// Nothing ensures 16 bytes alignment here.
ASSERT(!current_sp.Is(csp));
DCHECK(!current_sp.Is(csp));
__ mov(current_sp, fp);
int no_frame_start = masm_->pc_offset();
__ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
@ -460,25 +460,25 @@ void FullCodeGenerator::EmitReturnSequence() {


void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
}


void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
}


void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
__ Push(result_register());
}


void FullCodeGenerator::TestContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// For simplicity we always test the accumulator register.
codegen()->GetVar(result_register(), var);
codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
@ -542,7 +542,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ B(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@ -569,7 +569,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {

void FullCodeGenerator::EffectContext::DropAndPlug(int count,
Register reg) const {
ASSERT(count > 0);
DCHECK(count > 0);
__ Drop(count);
}

@ -577,7 +577,7 @@ void FullCodeGenerator::EffectContext::DropAndPlug(int count,
void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
int count,
Register reg) const {
ASSERT(count > 0);
DCHECK(count > 0);
__ Drop(count);
__ Move(result_register(), reg);
}
@ -585,7 +585,7 @@ void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(

void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
ASSERT(count > 0);
DCHECK(count > 0);
if (count > 1) __ Drop(count - 1);
__ Poke(reg, 0);
}
@ -593,7 +593,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count,

void FullCodeGenerator::TestContext::DropAndPlug(int count,
Register reg) const {
ASSERT(count > 0);
DCHECK(count > 0);
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Mov(result_register(), reg);
@ -604,7 +604,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,

void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
ASSERT(materialize_true == materialize_false);
DCHECK(materialize_true == materialize_false);
__ Bind(materialize_true);
}

@ -638,8 +638,8 @@ void FullCodeGenerator::StackValueContext::Plug(

void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
Label* materialize_false) const {
ASSERT(materialize_true == true_label_);
ASSERT(materialize_false == false_label_);
DCHECK(materialize_true == true_label_);
DCHECK(materialize_false == false_label_);
}


@ -700,7 +700,7 @@ void FullCodeGenerator::Split(Condition cond,
if (if_false == fall_through) {
__ B(cond, if_true);
} else if (if_true == fall_through) {
ASSERT(if_false != fall_through);
DCHECK(if_false != fall_through);
__ B(NegateCondition(cond), if_false);
} else {
__ B(cond, if_true);
@ -723,7 +723,7 @@ MemOperand FullCodeGenerator::StackOperand(Variable* var) {


MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
ASSERT(var->IsContextSlot() || var->IsStackAllocated());
DCHECK(var->IsContextSlot() || var->IsStackAllocated());
if (var->IsContextSlot()) {
int context_chain_length = scope()->ContextChainLength(var->scope());
__ LoadContext(scratch, context_chain_length);
@ -745,8 +745,8 @@ void FullCodeGenerator::SetVar(Variable* var,
Register src,
Register scratch0,
Register scratch1) {
ASSERT(var->IsContextSlot() || var->IsStackAllocated());
ASSERT(!AreAliased(src, scratch0, scratch1));
DCHECK(var->IsContextSlot() || var->IsStackAllocated());
DCHECK(!AreAliased(src, scratch0, scratch1));
MemOperand location = VarOperand(var, scratch0);
__ Str(src, location);

@ -789,7 +789,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
@ -844,7 +844,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
__ Mov(x2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
ASSERT(IsDeclaredVariableMode(mode));
DCHECK(IsDeclaredVariableMode(mode));
PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY
: NONE;
__ Mov(x1, Smi::FromInt(attr));
@ -924,8 +924,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(

void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
ASSERT(variable->location() == Variable::CONTEXT);
ASSERT(variable->interface()->IsFrozen());
DCHECK(variable->location() == Variable::CONTEXT);
DCHECK(variable->interface()->IsFrozen());

Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
@ -1186,7 +1186,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
// TODO(all): similar check was done already. Can we avoid it here?
__ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
ASSERT(Smi::FromInt(0) == 0);
DCHECK(Smi::FromInt(0) == 0);
__ CzeroX(x1, le); // Zero indicates proxy.
__ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
// Smi and array, fixed array length (as smi) and initial index.
@ -1399,7 +1399,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,

MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Label* slow) {
ASSERT(var->IsContextSlot());
DCHECK(var->IsContextSlot());
Register context = cp;
Register next = x10;
Register temp = x11;
@ -1492,7 +1492,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// always looked up dynamically, i.e. in that case
// var->location() == LOOKUP.
// always holds.
ASSERT(var->scope() != NULL);
DCHECK(var->scope() != NULL);

// Check if the binding really needs an initialization check. The check
// can be skipped in the following situation: we have a LET or CONST
@ -1515,8 +1515,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
skip_init_check = false;
} else {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
DCHECK(proxy->position() != RelocInfo::kNoPosition);
skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@ -1535,7 +1535,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ Bind(&done);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
ASSERT(var->mode() == CONST_LEGACY);
DCHECK(var->mode() == CONST_LEGACY);
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
__ Bind(&done);
}
@ -1676,13 +1676,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
ASSERT(StoreIC::ValueRegister().is(x0));
DCHECK(StoreIC::ValueRegister().is(x0));
__ Mov(StoreIC::NameRegister(), Operand(key->value()));
__ Peek(StoreIC::ReceiverRegister(), 0);
CallStoreIC(key->LiteralFeedbackId());
@ -1742,7 +1742,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}

if (expr->has_function()) {
ASSERT(result_saved);
DCHECK(result_saved);
__ Peek(x0, 0);
__ Push(x0);
__ CallRuntime(Runtime::kToFastProperties, 1);
@ -1766,7 +1766,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
ASSERT_EQ(2, constant_elements->length());
DCHECK_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
@ -1838,7 +1838,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {


void FullCodeGenerator::VisitAssignment(Assignment* expr) {
ASSERT(expr->target()->IsValidReferenceExpression());
DCHECK(expr->target()->IsValidReferenceExpression());

Comment cmnt(masm_, "[ Assignment");

@ -2091,7 +2091,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,


void FullCodeGenerator::EmitAssignment(Expression* expr) {
ASSERT(expr->IsValidReferenceExpression());
DCHECK(expr->IsValidReferenceExpression());

// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@ -2164,13 +2164,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,

} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ Mov(x1, Operand(var->name()));
__ Push(x0, cp, x1);
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
ASSERT(var->IsStackLocal() || var->IsContextSlot());
DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, x1);
__ Ldr(x10, location);
@ -2181,8 +2181,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,

} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
ASSERT(!var->IsLookupSlot());
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
MemOperand location = VarOperand(var, x1);
__ Ldr(x10, location);
@ -2208,7 +2208,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, x1);
if (FLAG_debug_code && op == Token::INIT_LET) {
__ Ldr(x10, location);
@ -2226,8 +2226,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment");
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
ASSERT(prop->key()->IsLiteral());
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());

// Record source code position before IC call.
SetSourcePosition(expr->position());
@ -2248,7 +2248,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
SetSourcePosition(expr->position());
// TODO(all): Could we pass this in registers rather than on the stack?
__ Pop(KeyedStoreIC::NameRegister(), KeyedStoreIC::ReceiverRegister());
ASSERT(KeyedStoreIC::ValueRegister().is(x0));
DCHECK(KeyedStoreIC::ValueRegister().is(x0));

Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
@ -2309,7 +2309,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ Push(isolate()->factory()->undefined_value());
} else {
// Load the function from the receiver.
ASSERT(callee->IsProperty());
DCHECK(callee->IsProperty());
__ Peek(LoadIC::ReceiverRegister(), 0);
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@ -2331,7 +2331,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* callee = expr->expression();

// Load the function from the receiver.
ASSERT(callee->IsProperty());
DCHECK(callee->IsProperty());
__ Peek(LoadIC::ReceiverRegister(), 0);
__ Move(LoadIC::NameRegister(), x0);
EmitKeyedPropertyLoad(callee->AsProperty());
@ -2504,7 +2504,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}

} else {
ASSERT(call_type == Call::OTHER_CALL);
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
@ -2517,7 +2517,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {

#ifdef DEBUG
// RecordJSReturnSite should have been called.
ASSERT(expr->return_is_recorded_);
DCHECK(expr->return_is_recorded_);
#endif
}

@ -2551,7 +2551,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
if (FLAG_pretenuring_call_new) {
EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
ASSERT(expr->AllocationSiteFeedbackSlot() ==
DCHECK(expr->AllocationSiteFeedbackSlot() ==
expr->CallNewFeedbackSlot() + 1);
}

@ -2567,7 +2567,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {

void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2587,7 +2587,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {

void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2609,7 +2609,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {

void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2639,7 +2639,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {

void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2662,7 +2662,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject");
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2687,7 +2687,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));

Label materialize_true, materialize_false, skip_lookup;
@ -2788,7 +2788,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(

void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2810,7 +2810,7 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {

void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2837,7 +2837,7 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {

void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2859,7 +2859,7 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {

void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -2881,7 +2881,7 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {


void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 0);
DCHECK(expr->arguments()->length() == 0);

Label materialize_true, materialize_false;
Label* if_true = NULL;
@ -2913,7 +2913,7 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {

void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
DCHECK(args->length() == 2);

// Load the two objects into registers and perform the comparison.
VisitForStackValue(args->at(0));
@ -2937,7 +2937,7 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {

void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

// ArgumentsAccessStub expects the key in x1.
VisitForAccumulatorValue(args->at(0));
@ -2950,7 +2950,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {


void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 0);
DCHECK(expr->arguments()->length() == 0);
Label exit;
// Get the number of formal parameters.
__ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
@ -2973,7 +2973,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitClassOf");
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);
Label done, null, function, non_function_constructor;

VisitForAccumulatorValue(args->at(0));
@ -3038,7 +3038,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
DCHECK(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
@ -3051,7 +3051,7 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
DCHECK(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
@ -3064,7 +3064,7 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitValueOf");
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0)); // Load the object.

Label done;
@ -3081,8 +3081,8 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {

void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
DCHECK(args->length() == 2);
DCHECK_NE(NULL, args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));

VisitForAccumulatorValue(args->at(0)); // Load the object.
@ -3127,7 +3127,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {

void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
DCHECK_EQ(3, args->length());

Register string = x0;
Register index = x1;
@ -3157,7 +3157,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {

void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
DCHECK_EQ(3, args->length());

Register string = x0;
Register index = x1;
@ -3188,7 +3188,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the MathPow stub.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
MathPowStub stub(isolate(), MathPowStub::ON_STACK);
@ -3199,7 +3199,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {

void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
DCHECK(args->length() == 2);
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
__ Pop(x1);
@ -3228,7 +3228,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {

void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
DCHECK_EQ(args->length(), 1);

// Load the argument into x0 and call the stub.
VisitForAccumulatorValue(args->at(0));
@ -3241,7 +3241,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {

void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

@ -3263,7 +3263,7 @@ void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {

void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
DCHECK(args->length() == 2);

VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
@ -3308,7 +3308,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {

void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
DCHECK(args->length() == 2);

VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
@ -3355,7 +3355,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitStringAdd");
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
DCHECK_EQ(2, args->length());

VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
@ -3370,7 +3370,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {

void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
DCHECK_EQ(2, args->length());
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));

@ -3383,7 +3383,7 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
DCHECK(args->length() >= 2);

int arg_count = args->length() - 2; // 2 ~ receiver and function.
for (int i = 0; i < arg_count + 1; i++) {
@ -3415,7 +3415,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
DCHECK(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(2));
@ -3427,8 +3427,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {

void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
DCHECK_EQ(2, args->length());
DCHECK_NE(NULL, args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();

Handle<FixedArray> jsfunction_result_caches(
@ -3495,7 +3495,7 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {

void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));

__ AssertString(x0);
@ -3511,7 +3511,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin");

ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
DCHECK(args->length() == 2);
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(0));

@ -3724,7 +3724,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {


void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 0);
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
ExternalReference::debug_is_active_address(isolate());
__ Mov(x10, debug_is_active);
@ -3814,7 +3814,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
ASSERT(strict_mode() == SLOPPY || var->is_this());
DCHECK(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ Ldr(x12, GlobalObjectMemOperand());
__ Mov(x11, Operand(var->name()));
@ -3864,7 +3864,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
test->fall_through());
context()->Plug(test->true_label(), test->false_label());
} else {
ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
// TODO(jbramley): This could be much more efficient using (for
// example) the CSEL instruction.
Label materialize_true, materialize_false, done;
@ -3907,7 +3907,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {


void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
ASSERT(expr->expression()->IsValidReferenceExpression());
DCHECK(expr->expression()->IsValidReferenceExpression());

Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@ -3926,7 +3926,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {

// Evaluate expression and get value.
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
@ -4090,8 +4090,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {


void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsEffect());
ASSERT(!context()->IsTest());
DCHECK(!context()->IsEffect());
DCHECK(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
@ -4350,7 +4350,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {

__ Bind(&suspend);
VisitForAccumulatorValue(expr->generator_object());
ASSERT((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
DCHECK((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
__ Mov(x1, Smi::FromInt(continuation.pos()));
__ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
__ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
@ -4428,7 +4428,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
const int generator_object_depth = kPointerSize + handler_size;
__ Peek(x0, generator_object_depth);
__ Push(x0); // g
ASSERT((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
DCHECK((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
__ Mov(x1, Smi::FromInt(l_continuation.pos()));
__ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
__ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
@ -4648,7 +4648,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ Pop(result_value);
__ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
__ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
DCHECK_EQ(map->instance_size(), 5 * kPointerSize);
STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
JSObject::kElementsOffset);
STATIC_ASSERT(JSGeneratorObject::kResultValuePropertyOffset + kPointerSize ==
@ -4688,7 +4688,7 @@ Register FullCodeGenerator::context_register() {


void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
DCHECK(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
__ Str(value, MemOperand(fp, frame_offset));
}

@ -4706,7 +4706,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
ASSERT(kSmiTag == 0);
DCHECK(kSmiTag == 0);
__ Push(xzr);
} else if (declaration_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
@ -4715,7 +4715,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
__ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
__ Push(x10);
} else {
ASSERT(declaration_scope->is_function_scope());
DCHECK(declaration_scope->is_function_scope());
__ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(x10);
}
@ -4724,7 +4724,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {

void FullCodeGenerator::EnterFinallyBlock() {
ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
ASSERT(!result_register().is(x10));
DCHECK(!result_register().is(x10));
// Preserve the result register while executing finally block.
// Also cook the return address in lr to the stack (smi encoded Code* delta).
__ Sub(x10, lr, Operand(masm_->CodeObject()));
@ -4756,7 +4756,7 @@ void FullCodeGenerator::EnterFinallyBlock() {

void FullCodeGenerator::ExitFinallyBlock() {
ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock");
ASSERT(!result_register().is(x10));
DCHECK(!result_register().is(x10));

// Restore pending message from stack.
__ Pop(x10, x11, x12);
@ -4798,7 +4798,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address branch_address = pc - 3 * kInstructionSize;
PatchingAssembler patcher(branch_address, 1);

ASSERT(Instruction::Cast(branch_address)
DCHECK(Instruction::Cast(branch_address)
->IsNop(Assembler::INTERRUPT_CODE_NOP) ||
(Instruction::Cast(branch_address)->IsCondBranchImm() &&
Instruction::Cast(branch_address)->ImmPCOffset() ==
@ -4829,7 +4829,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Instruction* load = Instruction::Cast(pc)->preceding(2);
Address interrupt_address_pointer =
reinterpret_cast<Address>(load) + load->ImmPCOffset();
ASSERT((Memory::uint64_at(interrupt_address_pointer) ==
DCHECK((Memory::uint64_at(interrupt_address_pointer) ==
reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
->builtins()
->OnStackReplacement()

@ -52,8 +52,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Register result,
Register scratch1,
Register scratch2) {
ASSERT(!AreAliased(elements, name, scratch1, scratch2));
ASSERT(!AreAliased(result, scratch1, scratch2));
DCHECK(!AreAliased(elements, name, scratch1, scratch2));
DCHECK(!AreAliased(result, scratch1, scratch2));

Label done;

@ -99,7 +99,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Register value,
Register scratch1,
Register scratch2) {
ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));
DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));

Label done;

@ -147,7 +147,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register scratch,
int interceptor_bit,
Label* slow) {
ASSERT(!AreAliased(map_scratch, scratch));
DCHECK(!AreAliased(map_scratch, scratch));

// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
@ -196,7 +196,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
Register result,
Label* not_fast_array,
Label* slow) {
ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));
DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));

// Check for fast array.
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@ -245,7 +245,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
Register hash_scratch,
Label* index_string,
Label* not_unique) {
ASSERT(!AreAliased(key, map_scratch, hash_scratch));
DCHECK(!AreAliased(key, map_scratch, hash_scratch));

// Is the key a name?
Label unique;
@ -284,7 +284,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register scratch2,
Label* unmapped_case,
Label* slow_case) {
ASSERT(!AreAliased(object, key, map, scratch1, scratch2));
DCHECK(!AreAliased(object, key, map, scratch1, scratch2));

Heap* heap = masm->isolate()->heap();

@ -339,7 +339,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Register parameter_map,
Register scratch,
Label* slow_case) {
ASSERT(!AreAliased(key, parameter_map, scratch));
DCHECK(!AreAliased(key, parameter_map, scratch));

// Element is in arguments backing store, which is referenced by the
// second element of the parameter_map.
@ -365,8 +365,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Register receiver = ReceiverRegister();
Register name = NameRegister();
ASSERT(receiver.is(x1));
ASSERT(name.is(x2));
DCHECK(receiver.is(x1));
DCHECK(name.is(x2));

// Probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
@ -381,8 +381,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {

void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = x0;
ASSERT(!dictionary.is(ReceiverRegister()));
ASSERT(!dictionary.is(NameRegister()));
DCHECK(!dictionary.is(ReceiverRegister()));
DCHECK(!dictionary.is(NameRegister()));
Label slow;

__ Ldr(dictionary,
@ -423,8 +423,8 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
Register result = x0;
Register receiver = ReceiverRegister();
Register key = NameRegister();
ASSERT(receiver.is(x1));
ASSERT(key.is(x2));
DCHECK(receiver.is(x1));
DCHECK(key.is(x2));

Label miss, unmapped;

@ -453,9 +453,9 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
Register value = ValueRegister();
Register key = NameRegister();
Register receiver = ReceiverRegister();
ASSERT(receiver.is(x1));
ASSERT(key.is(x2));
ASSERT(value.is(x0));
DCHECK(receiver.is(x1));
DCHECK(key.is(x2));
DCHECK(value.is(x0));

Register map = x3;

@ -516,13 +516,13 @@ const Register LoadIC::ReceiverRegister() { return x1; }
const Register LoadIC::NameRegister() { return x2; }

const Register LoadIC::SlotRegister() {
ASSERT(FLAG_vector_ics);
DCHECK(FLAG_vector_ics);
return x0;
}


const Register LoadIC::VectorRegister() {
ASSERT(FLAG_vector_ics);
DCHECK(FLAG_vector_ics);
return x3;
}

@ -553,7 +553,7 @@ static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
Register scratch4,
Register scratch5,
Label *slow) {
ASSERT(!AreAliased(
DCHECK(!AreAliased(
key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));

Isolate* isolate = masm->isolate();
@ -594,7 +594,7 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
Register scratch4,
Register scratch5,
Label *slow) {
ASSERT(!AreAliased(
DCHECK(!AreAliased(
key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));

Isolate* isolate = masm->isolate();
@ -713,8 +713,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {

Register key = NameRegister();
Register receiver = ReceiverRegister();
ASSERT(key.is(x2));
ASSERT(receiver.is(x1));
DCHECK(key.is(x2));
DCHECK(receiver.is(x1));

__ JumpIfNotSmi(key, &check_name);
__ Bind(&index_smi);
@ -748,7 +748,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
Register index = NameRegister();
Register result = x0;
Register scratch = x3;
ASSERT(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!scratch.is(receiver) && !scratch.is(index));

StringCharAtGenerator char_at_generator(receiver,
index,
@ -777,7 +777,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
Register key = NameRegister();
Register scratch1 = x3;
Register scratch2 = x4;
ASSERT(!AreAliased(scratch1, scratch2, receiver, key));
DCHECK(!AreAliased(scratch1, scratch2, receiver, key));

// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &slow);
@ -792,7 +792,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
__ Ldrb(scratch2, FieldMemOperand(map, Map::kBitFieldOffset));
ASSERT(kSlowCaseBitFieldMask ==
DCHECK(kSlowCaseBitFieldMask ==
((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
__ Tbnz(scratch2, Map::kIsAccessCheckNeeded, &slow);
__ Tbz(scratch2, Map::kHasIndexedInterceptor, &slow);
@ -863,7 +863,7 @@ static void KeyedStoreGenerateGenericHelper(
Register receiver_map,
Register elements_map,
Register elements) {
ASSERT(!AreAliased(
DCHECK(!AreAliased(
value, key, receiver, receiver_map, elements_map, elements, x10, x11));

Label transition_smi_elements;
@ -1025,9 +1025,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Register value = ValueRegister();
Register key = NameRegister();
Register receiver = ReceiverRegister();
ASSERT(receiver.is(x1));
ASSERT(key.is(x2));
ASSERT(value.is(x0));
DCHECK(receiver.is(x1));
DCHECK(key.is(x2));
DCHECK(value.is(x0));

Register receiver_map = x3;
Register elements = x4;
@ -1115,7 +1115,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Register receiver = ReceiverRegister();
Register name = NameRegister();
ASSERT(!AreAliased(receiver, name, ValueRegister(), x3, x4, x5, x6));
DCHECK(!AreAliased(receiver, name, ValueRegister(), x3, x4, x5, x6));

// Probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
@ -1144,7 +1144,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = ReceiverRegister();
Register name = NameRegister();
Register dictionary = x3;
ASSERT(!AreAliased(value, receiver, name, x3, x4, x5));
DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));

__ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

@ -1253,9 +1253,9 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// tb(!n)z test_reg, #0, <target>
Instruction* to_patch = info.SmiCheck();
PatchingAssembler patcher(to_patch, 1);
ASSERT(to_patch->IsTestBranch());
ASSERT(to_patch->ImmTestBranchBit5() == 0);
ASSERT(to_patch->ImmTestBranchBit40() == 0);
DCHECK(to_patch->IsTestBranch());
DCHECK(to_patch->ImmTestBranchBit5() == 0);
DCHECK(to_patch->ImmTestBranchBit40() == 0);

STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagMask == 1);
@ -1263,11 +1263,11 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
int branch_imm = to_patch->ImmTestBranch();
Register smi_reg;
if (check == ENABLE_INLINED_SMI_CHECK) {
ASSERT(to_patch->Rt() == xzr.code());
DCHECK(to_patch->Rt() == xzr.code());
smi_reg = info.SmiRegister();
} else {
ASSERT(check == DISABLE_INLINED_SMI_CHECK);
ASSERT(to_patch->Rt() != xzr.code());
DCHECK(check == DISABLE_INLINED_SMI_CHECK);
DCHECK(to_patch->Rt() != xzr.code());
smi_reg = xzr;
}

@ -1275,7 +1275,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// This is JumpIfNotSmi(smi_reg, branch_imm).
patcher.tbnz(smi_reg, 0, branch_imm);
} else {
ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
// This is JumpIfSmi(smi_reg, branch_imm).
patcher.tbz(smi_reg, 0, branch_imm);
}

@ -67,7 +67,7 @@ bool Instruction::IsStore() const {
static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
ASSERT(width <= 64);
DCHECK(width <= 64);
rotate &= 63;
return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
(value >> rotate);
@ -77,9 +77,9 @@ static uint64_t RotateRight(uint64_t value,
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
uint64_t value,
unsigned width) {
ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
uint64_t result = value & ((1UL << width) - 1UL);
for (unsigned i = width; i < reg_size; i *= 2) {
result |= (result << i);
@ -193,7 +193,7 @@ ptrdiff_t Instruction::ImmPCOffset() {
offset = ImmBranch() << kInstructionSizeLog2;
} else {
// Load literal (offset from PC).
ASSERT(IsLdrLiteral());
DCHECK(IsLdrLiteral());
// The offset is always shifted by 2 bits, even for loads to 64-bits
// registers.
offset = ImmLLiteral() << kInstructionSizeLog2;
@ -231,7 +231,7 @@ void Instruction::SetImmPCOffsetTarget(Instruction* target) {

void Instruction::SetPCRelImmTarget(Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
ASSERT(IsAdr());
DCHECK(IsAdr());

ptrdiff_t target_offset = DistanceTo(target);
Instr imm;
@ -247,7 +247,7 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {


void Instruction::SetBranchImmTarget(Instruction* target) {
ASSERT(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
Instr branch_imm = 0;
uint32_t imm_mask = 0;
ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
@ -279,7 +279,7 @@ void Instruction::SetBranchImmTarget(Instruction* target) {


void Instruction::SetImmLLiteral(Instruction* source) {
ASSERT(IsAligned(DistanceTo(source), kInstructionSize));
DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
Instr imm = Assembler::ImmLLiteral(offset);
Instr mask = ImmLLiteral_mask;
@ -304,7 +304,7 @@ bool InstructionSequence::IsInlineData() const {
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
ASSERT(IsInlineData());
DCHECK(IsInlineData());
uint64_t payload = ImmMoveWide();
// TODO(all): If we extend ::InlineData() to support bigger data, we need
// to update this method too.

@ -137,7 +137,7 @@ class Instruction {
// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
ASSERT(IsPCRelAddressing());
DCHECK(IsPCRelAddressing());
int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
int const width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width - 1, 0, offset);
@ -364,7 +364,7 @@ class Instruction {
CheckAlignment check = CHECK_ALIGNMENT) {
Address addr = reinterpret_cast<Address>(this) + offset;
// The FUZZ_disasm test relies on no check being done.
ASSERT(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
DCHECK(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
return Cast(addr);
}

@ -9,7 +9,7 @@ namespace internal {

Counter::Counter(const char* name, CounterType type)
: count_(0), enabled_(false), type_(type) {
ASSERT(name != NULL);
DCHECK(name != NULL);
strncpy(name_, name, kCounterNameMaxLength);
}

@ -144,7 +144,7 @@ void Instrument::Update() {
// Increment the instruction counter, and dump all counters if a sample period
// has elapsed.
static Counter* counter = GetCounter("Instruction");
ASSERT(counter->type() == Cumulative);
DCHECK(counter->type() == Cumulative);
counter->Increment();

if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {

|
||||
// outputs because all registers are blocked by the calling convention.
|
||||
// Inputs operands must use a fixed register or use-at-start policy or
|
||||
// a non-register policy.
|
||||
ASSERT(Output() == NULL ||
|
||||
DCHECK(Output() == NULL ||
|
||||
LUnallocated::cast(Output())->HasFixedPolicy() ||
|
||||
!LUnallocated::cast(Output())->HasRegisterPolicy());
|
||||
for (UseIterator it(this); !it.Done(); it.Advance()) {
|
||||
LUnallocated* operand = LUnallocated::cast(it.Current());
|
||||
ASSERT(operand->HasFixedPolicy() ||
|
||||
DCHECK(operand->HasFixedPolicy() ||
|
||||
operand->IsUsedAtStart());
|
||||
}
|
||||
for (TempIterator it(this); !it.Done(); it.Advance()) {
|
||||
LUnallocated* operand = LUnallocated::cast(it.Current());
|
||||
ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
|
||||
DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@ -501,7 +501,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
|
||||
ASSERT(!instr->HasPointerMap());
|
||||
DCHECK(!instr->HasPointerMap());
|
||||
instr->set_pointer_map(new(zone()) LPointerMap(zone()));
|
||||
return instr;
|
||||
}
|
||||
@ -543,7 +543,7 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
|
||||
if (kind == DOUBLE_REGISTERS) {
|
||||
return LDoubleStackSlot::Create(index, zone());
|
||||
} else {
|
||||
ASSERT(kind == GENERAL_REGISTERS);
|
||||
DCHECK(kind == GENERAL_REGISTERS);
|
||||
return LStackSlot::Create(index, zone());
|
||||
}
|
||||
}
|
||||
@ -551,20 +551,20 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
|
||||
|
||||
LOperand* LChunkBuilder::FixedTemp(Register reg) {
|
||||
LUnallocated* operand = ToUnallocated(reg);
|
||||
ASSERT(operand->HasFixedPolicy());
|
||||
DCHECK(operand->HasFixedPolicy());
|
||||
return operand;
|
||||
}
|
||||
|
||||
|
||||
LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
|
||||
LUnallocated* operand = ToUnallocated(reg);
|
||||
ASSERT(operand->HasFixedPolicy());
|
||||
DCHECK(operand->HasFixedPolicy());
|
||||
return operand;
|
||||
}
|
||||
|
||||
|
||||
LPlatformChunk* LChunkBuilder::Build() {
|
||||
ASSERT(is_unused());
|
||||
DCHECK(is_unused());
|
||||
chunk_ = new(zone()) LPlatformChunk(info_, graph_);
|
||||
LPhase phase("L_Building chunk", chunk_);
|
||||
status_ = BUILDING;
|
||||
@ -590,7 +590,7 @@ LPlatformChunk* LChunkBuilder::Build() {
|
||||
|
||||
|
||||
void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
|
||||
ASSERT(is_building());
|
||||
DCHECK(is_building());
|
||||
current_block_ = block;
|
||||
|
||||
if (block->IsStartBlock()) {
|
||||
@ -599,14 +599,14 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
|
||||
} else if (block->predecessors()->length() == 1) {
|
||||
// We have a single predecessor => copy environment and outgoing
|
||||
// argument count from the predecessor.
|
||||
ASSERT(block->phis()->length() == 0);
|
||||
DCHECK(block->phis()->length() == 0);
|
||||
HBasicBlock* pred = block->predecessors()->at(0);
|
||||
HEnvironment* last_environment = pred->last_environment();
|
||||
ASSERT(last_environment != NULL);
|
||||
DCHECK(last_environment != NULL);
|
||||
|
||||
// Only copy the environment, if it is later used again.
|
||||
if (pred->end()->SecondSuccessor() == NULL) {
|
||||
ASSERT(pred->end()->FirstSuccessor() == block);
|
||||
DCHECK(pred->end()->FirstSuccessor() == block);
|
||||
} else {
|
||||
if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) ||
|
||||
(pred->end()->SecondSuccessor()->block_id() > block->block_id())) {
|
||||
@ -614,7 +614,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
|
||||
}
|
||||
}
|
||||
block->UpdateEnvironment(last_environment);
|
||||
ASSERT(pred->argument_count() >= 0);
|
||||
DCHECK(pred->argument_count() >= 0);
|
||||
argument_count_ = pred->argument_count();
|
||||
} else {
|
||||
// We are at a state join => process phis.
|
||||
@ -667,7 +667,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
|
||||
if (current->OperandCount() == 0) {
|
||||
instr = DefineAsRegister(new(zone()) LDummy());
|
||||
} else {
|
||||
ASSERT(!current->OperandAt(0)->IsControlInstruction());
|
||||
DCHECK(!current->OperandAt(0)->IsControlInstruction());
|
||||
instr = DefineAsRegister(new(zone())
|
||||
LDummyUse(UseAny(current->OperandAt(0))));
|
||||
}
|
||||
@ -690,7 +690,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
|
||||
}
|
||||
|
||||
argument_count_ += current->argument_delta();
|
||||
ASSERT(argument_count_ >= 0);
|
||||
DCHECK(argument_count_ >= 0);
|
||||
|
||||
if (instr != NULL) {
|
||||
AddInstruction(instr, current);
|
||||
@ -732,7 +732,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
|
||||
LUnallocated* operand = LUnallocated::cast(it.Current());
|
||||
if (operand->HasFixedPolicy()) ++fixed;
|
||||
}
|
||||
ASSERT(fixed == 0 || used_at_start == 0);
|
||||
DCHECK(fixed == 0 || used_at_start == 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -786,9 +786,9 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
|
||||
|
||||
LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
|
||||
HArithmeticBinaryOperation* instr) {
|
||||
ASSERT(instr->representation().IsDouble());
|
||||
ASSERT(instr->left()->representation().IsDouble());
|
||||
ASSERT(instr->right()->representation().IsDouble());
|
||||
DCHECK(instr->representation().IsDouble());
|
||||
DCHECK(instr->left()->representation().IsDouble());
|
||||
DCHECK(instr->right()->representation().IsDouble());
|
||||
|
||||
if (op == Token::MOD) {
|
||||
LOperand* left = UseFixedDouble(instr->left(), d0);
|
||||
@ -806,7 +806,7 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
|
||||
|
||||
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
|
||||
HBinaryOperation* instr) {
|
||||
ASSERT((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
|
||||
DCHECK((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
|
||||
(op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) ||
|
||||
(op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) ||
|
||||
(op == Token::BIT_OR) || (op == Token::BIT_AND) ||
|
||||
@ -816,9 +816,9 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
|
||||
|
||||
// TODO(jbramley): Once we've implemented smi support for all arithmetic
|
||||
// operations, these assertions should check IsTagged().
|
||||
ASSERT(instr->representation().IsSmiOrTagged());
|
||||
ASSERT(left->representation().IsSmiOrTagged());
|
||||
ASSERT(right->representation().IsSmiOrTagged());
|
||||
DCHECK(instr->representation().IsSmiOrTagged());
|
||||
DCHECK(left->representation().IsSmiOrTagged());
|
||||
DCHECK(right->representation().IsSmiOrTagged());
|
||||
|
||||
LOperand* context = UseFixed(instr->context(), cp);
|
||||
LOperand* left_operand = UseFixed(left, x1);
|
||||
@ -858,8 +858,8 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
|
||||
|
||||
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
|
||||
if (instr->representation().IsSmiOrInteger32()) {
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
|
||||
LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
|
||||
if (shifted_operation != NULL) {
|
||||
@ -877,16 +877,16 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
|
||||
}
|
||||
return result;
|
||||
} else if (instr->representation().IsExternal()) {
|
||||
ASSERT(instr->left()->representation().IsExternal());
|
||||
ASSERT(instr->right()->representation().IsInteger32());
|
||||
ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
|
||||
DCHECK(instr->left()->representation().IsExternal());
|
||||
DCHECK(instr->right()->representation().IsInteger32());
|
||||
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
|
||||
LOperand* left = UseRegisterAtStart(instr->left());
|
||||
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
|
||||
return DefineAsRegister(new(zone()) LAddE(left, right));
|
||||
} else if (instr->representation().IsDouble()) {
|
||||
return DoArithmeticD(Token::ADD, instr);
|
||||
} else {
|
||||
ASSERT(instr->representation().IsTagged());
|
||||
DCHECK(instr->representation().IsTagged());
|
||||
return DoArithmeticT(Token::ADD, instr);
|
||||
}
|
||||
}
|
||||
@ -942,9 +942,9 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
|
||||
|
||||
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
|
||||
if (instr->representation().IsSmiOrInteger32()) {
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
|
||||
|
||||
LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
|
||||
if (shifted_operation != NULL) {
|
||||
@ -994,7 +994,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
|
||||
// These representations have simple checks that cannot deoptimize.
|
||||
return new(zone()) LBranch(UseRegister(value), NULL, NULL);
|
||||
} else {
|
||||
ASSERT(r.IsTagged());
|
||||
DCHECK(r.IsTagged());
|
||||
if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() ||
|
||||
type.IsHeapNumber()) {
|
||||
// These types have simple checks that cannot deoptimize.
|
||||
@ -1014,7 +1014,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
|
||||
if (expected.IsGeneric() || expected.IsEmpty()) {
|
||||
// The generic case cannot deoptimize because it already supports every
|
||||
// possible input type.
|
||||
ASSERT(needs_temps);
|
||||
DCHECK(needs_temps);
|
||||
return new(zone()) LBranch(UseRegister(value), temp1, temp2);
|
||||
} else {
|
||||
return AssignEnvironment(
|
||||
@ -1126,7 +1126,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
|
||||
}
|
||||
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
|
||||
} else {
|
||||
ASSERT(to.IsInteger32());
|
||||
DCHECK(to.IsInteger32());
|
||||
if (val->type().IsSmi() || val->representation().IsSmi()) {
|
||||
LOperand* value = UseRegisterAtStart(val);
|
||||
return DefineAsRegister(new(zone()) LSmiUntag(value, false));
|
||||
@ -1150,7 +1150,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
|
||||
LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
|
||||
return AssignPointerMap(DefineAsRegister(result));
|
||||
} else {
|
||||
ASSERT(to.IsSmi() || to.IsInteger32());
|
||||
DCHECK(to.IsSmi() || to.IsInteger32());
|
||||
if (instr->CanTruncateToInt32()) {
|
||||
LOperand* value = UseRegister(val);
|
||||
return DefineAsRegister(new(zone()) LTruncateDoubleToIntOrSmi(value));
|
||||
@ -1182,7 +1182,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
|
||||
}
|
||||
return result;
|
||||
} else {
|
||||
ASSERT(to.IsDouble());
|
||||
DCHECK(to.IsDouble());
|
||||
if (val->CheckFlag(HInstruction::kUint32)) {
|
||||
return DefineAsRegister(
|
||||
new(zone()) LUint32ToDouble(UseRegisterAtStart(val)));
|
||||
@ -1249,7 +1249,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
|
||||
} else if (input_rep.IsInteger32()) {
|
||||
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
|
||||
} else {
|
||||
ASSERT(input_rep.IsSmiOrTagged());
|
||||
DCHECK(input_rep.IsSmiOrTagged());
|
||||
return AssignEnvironment(
|
||||
DefineAsRegister(new(zone()) LClampTToUint8(reg,
|
||||
TempRegister(),
|
||||
@ -1260,7 +1260,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
|
||||
|
||||
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
|
||||
HClassOfTestAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
return new(zone()) LClassOfTestAndBranch(value,
|
||||
TempRegister(),
|
||||
@ -1272,15 +1272,15 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
|
||||
HCompareNumericAndBranch* instr) {
|
||||
Representation r = instr->representation();
|
||||
if (r.IsSmiOrInteger32()) {
|
||||
ASSERT(instr->left()->representation().Equals(r));
|
||||
ASSERT(instr->right()->representation().Equals(r));
|
||||
DCHECK(instr->left()->representation().Equals(r));
|
||||
DCHECK(instr->right()->representation().Equals(r));
|
||||
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
|
||||
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
|
||||
return new(zone()) LCompareNumericAndBranch(left, right);
|
||||
} else {
|
||||
ASSERT(r.IsDouble());
|
||||
ASSERT(instr->left()->representation().IsDouble());
|
||||
ASSERT(instr->right()->representation().IsDouble());
|
||||
DCHECK(r.IsDouble());
|
||||
DCHECK(instr->left()->representation().IsDouble());
|
||||
DCHECK(instr->right()->representation().IsDouble());
|
||||
if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
|
||||
LOperand* left = UseConstant(instr->left());
|
||||
LOperand* right = UseConstant(instr->right());
|
||||
@ -1294,8 +1294,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
|
||||
ASSERT(instr->left()->representation().IsTagged());
|
||||
ASSERT(instr->right()->representation().IsTagged());
|
||||
DCHECK(instr->left()->representation().IsTagged());
|
||||
DCHECK(instr->right()->representation().IsTagged());
|
||||
LOperand* context = UseFixed(instr->context(), cp);
|
||||
LOperand* left = UseFixed(instr->left(), x1);
|
||||
LOperand* right = UseFixed(instr->right(), x0);
|
||||
@ -1325,7 +1325,7 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
LOperand* temp = TempRegister();
|
||||
return new(zone()) LCmpMapAndBranch(value, temp);
|
||||
@ -1386,9 +1386,9 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
|
||||
ASSERT(instr->representation().IsInteger32());
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->representation().IsInteger32());
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
LOperand* dividend = UseRegister(instr->left());
|
||||
int32_t divisor = instr->right()->GetInteger32Constant();
|
||||
LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
|
||||
@ -1404,9 +1404,9 @@ LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
|
||||
ASSERT(instr->representation().IsInteger32());
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->representation().IsInteger32());
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
LOperand* dividend = UseRegister(instr->left());
|
||||
int32_t divisor = instr->right()->GetInteger32Constant();
|
||||
LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
|
||||
@ -1423,9 +1423,9 @@ LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
|
||||
ASSERT(instr->representation().IsSmiOrInteger32());
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->representation().IsSmiOrInteger32());
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
LOperand* dividend = UseRegister(instr->left());
|
||||
LOperand* divisor = UseRegister(instr->right());
|
||||
LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
|
||||
@ -1506,7 +1506,7 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
|
||||
|
||||
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
|
||||
HGetCachedArrayIndex* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
|
||||
}
|
||||
@ -1519,7 +1519,7 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
|
||||
|
||||
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
|
||||
HHasCachedArrayIndexAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
return new(zone()) LHasCachedArrayIndexAndBranch(
|
||||
UseRegisterAtStart(instr->value()), TempRegister());
|
||||
}
|
||||
@ -1527,7 +1527,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
|
||||
|
||||
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
|
||||
HHasInstanceTypeAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister());
|
||||
}
|
||||
@ -1585,7 +1585,7 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
LOperand* temp1 = TempRegister();
|
||||
LOperand* temp2 = TempRegister();
|
||||
@ -1594,7 +1594,7 @@ LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
LOperand* temp = TempRegister();
|
||||
return new(zone()) LIsStringAndBranch(value, temp);
|
||||
@ -1602,14 +1602,14 @@ LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value()));
|
||||
}
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
|
||||
HIsUndetectableAndBranch* instr) {
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
|
||||
}
|
||||
@ -1622,7 +1622,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
|
||||
if (env->entry()->arguments_pushed()) {
|
||||
int argument_count = env->arguments_environment()->parameter_count();
|
||||
pop = new(zone()) LDrop(argument_count);
|
||||
ASSERT(instr->argument_delta() == -argument_count);
|
||||
DCHECK(instr->argument_delta() == -argument_count);
|
||||
}
|
||||
|
||||
HEnvironment* outer =
|
||||
@ -1677,7 +1677,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
|
||||
ASSERT(instr->key()->representation().IsSmiOrInteger32());
|
||||
DCHECK(instr->key()->representation().IsSmiOrInteger32());
|
||||
ElementsKind elements_kind = instr->elements_kind();
|
||||
LOperand* elements = UseRegister(instr->elements());
|
||||
LOperand* key = UseRegisterOrConstant(instr->key());
|
||||
@ -1695,7 +1695,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
|
||||
? AssignEnvironment(DefineAsRegister(result))
|
||||
: DefineAsRegister(result);
|
||||
} else {
|
||||
ASSERT(instr->representation().IsSmiOrTagged() ||
|
||||
DCHECK(instr->representation().IsSmiOrTagged() ||
|
||||
instr->representation().IsInteger32());
|
||||
LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
|
||||
LLoadKeyedFixed* result =
|
||||
@ -1705,7 +1705,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
|
||||
: DefineAsRegister(result);
|
||||
}
|
||||
} else {
|
||||
ASSERT((instr->representation().IsInteger32() &&
|
||||
DCHECK((instr->representation().IsInteger32() &&
|
||||
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
|
||||
(instr->representation().IsDouble() &&
|
||||
IsDoubleOrFloatElementsKind(instr->elements_kind())));
|
||||
@ -1771,9 +1771,9 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
|
||||
ASSERT(instr->representation().IsInteger32());
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->representation().IsInteger32());
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
LOperand* dividend = UseRegisterAtStart(instr->left());
|
||||
int32_t divisor = instr->right()->GetInteger32Constant();
|
||||
LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
|
||||
@ -1787,9 +1787,9 @@ LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
|
||||
ASSERT(instr->representation().IsInteger32());
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->representation().IsInteger32());
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
LOperand* dividend = UseRegister(instr->left());
|
||||
int32_t divisor = instr->right()->GetInteger32Constant();
|
||||
LOperand* temp =
|
||||
@ -1831,14 +1831,14 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
|
||||
LOperand* left = NULL;
|
||||
LOperand* right = NULL;
|
||||
if (instr->representation().IsSmiOrInteger32()) {
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
left = UseRegisterAtStart(instr->BetterLeftOperand());
|
||||
right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
|
||||
} else {
|
||||
ASSERT(instr->representation().IsDouble());
|
||||
ASSERT(instr->left()->representation().IsDouble());
|
||||
ASSERT(instr->right()->representation().IsDouble());
|
||||
DCHECK(instr->representation().IsDouble());
|
||||
DCHECK(instr->left()->representation().IsDouble());
|
||||
DCHECK(instr->right()->representation().IsDouble());
|
||||
left = UseRegisterAtStart(instr->left());
|
||||
right = UseRegisterAtStart(instr->right());
|
||||
}
|
||||
@ -1847,9 +1847,9 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
|
||||
ASSERT(instr->representation().IsInteger32());
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->representation().IsInteger32());
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
LOperand* dividend = UseRegisterAtStart(instr->left());
|
||||
int32_t divisor = instr->right()->GetInteger32Constant();
|
||||
LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
|
||||
@ -1863,9 +1863,9 @@ LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
|
||||
ASSERT(instr->representation().IsInteger32());
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->representation().IsInteger32());
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
LOperand* dividend = UseRegister(instr->left());
|
||||
int32_t divisor = instr->right()->GetInteger32Constant();
|
||||
LOperand* temp = TempRegister();
|
||||
@ -1879,9 +1879,9 @@ LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoModI(HMod* instr) {
|
||||
ASSERT(instr->representation().IsSmiOrInteger32());
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->representation().IsSmiOrInteger32());
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
LOperand* dividend = UseRegister(instr->left());
|
||||
LOperand* divisor = UseRegister(instr->right());
|
||||
LInstruction* result = DefineAsRegister(new(zone()) LModI(dividend, divisor));
|
||||
@ -1912,8 +1912,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
|
||||
|
||||
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
|
||||
if (instr->representation().IsSmiOrInteger32()) {
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
|
||||
bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
|
||||
bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
|
||||
@ -1971,7 +1971,7 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
|
||||
ASSERT(argument_count_ == 0);
|
||||
DCHECK(argument_count_ == 0);
|
||||
allocator_->MarkAsOsrEntry();
|
||||
current_block_->last_environment()->set_ast_id(instr->ast_id());
|
||||
return AssignEnvironment(new(zone()) LOsrEntry);
|
||||
@ -1984,7 +1984,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
|
||||
int spill_index = chunk_->GetParameterStackSlot(instr->index());
|
||||
return DefineAsSpilled(result, spill_index);
|
||||
} else {
|
||||
ASSERT(info()->IsStub());
|
||||
DCHECK(info()->IsStub());
|
||||
CodeStubInterfaceDescriptor* descriptor =
|
||||
info()->code_stub()->GetInterfaceDescriptor();
|
||||
int index = static_cast<int>(instr->index());
|
||||
@ -1995,11 +1995,11 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
|
||||
ASSERT(instr->representation().IsDouble());
|
||||
DCHECK(instr->representation().IsDouble());
|
||||
// We call a C function for double power. It can't trigger a GC.
|
||||
// We need to use fixed result register for the call.
|
||||
Representation exponent_type = instr->right()->representation();
|
||||
ASSERT(instr->left()->representation().IsDouble());
|
||||
DCHECK(instr->left()->representation().IsDouble());
|
||||
LOperand* left = UseFixedDouble(instr->left(), d0);
|
||||
LOperand* right = exponent_type.IsInteger32()
|
||||
? UseFixed(instr->right(), x12)
|
||||
@ -2040,7 +2040,7 @@ LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
|
||||
|
||||
LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
|
||||
HValue* value = instr->value();
|
||||
ASSERT(value->representation().IsDouble());
|
||||
DCHECK(value->representation().IsDouble());
|
||||
return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
|
||||
}
|
||||
|
||||
@ -2094,8 +2094,8 @@ HBitwiseBinaryOperation* LChunkBuilder::CanTransformToShiftedOp(HValue* val,
|
||||
HBinaryOperation* hinstr = HBinaryOperation::cast(val);
|
||||
HValue* hleft = hinstr->left();
|
||||
HValue* hright = hinstr->right();
|
||||
ASSERT(hleft->representation().Equals(hinstr->representation()));
|
||||
ASSERT(hright->representation().Equals(hinstr->representation()));
|
||||
DCHECK(hleft->representation().Equals(hinstr->representation()));
|
||||
DCHECK(hright->representation().Equals(hinstr->representation()));
|
||||
|
||||
if ((hright->IsConstant() &&
|
||||
LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) ||
|
||||
@ -2167,8 +2167,8 @@ LInstruction* LChunkBuilder::TryDoOpWithShiftedRightOperand(
|
||||
|
||||
LInstruction* LChunkBuilder::DoShiftedBinaryOp(
|
||||
HBinaryOperation* hinstr, HValue* hleft, HBitwiseBinaryOperation* hshift) {
|
||||
ASSERT(hshift->IsBitwiseBinaryShift());
|
||||
ASSERT(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0));
|
||||
DCHECK(hshift->IsBitwiseBinaryShift());
|
||||
DCHECK(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0));
|
||||
|
||||
LTemplateResultInstruction<1>* res;
|
||||
LOperand* left = UseRegisterAtStart(hleft);
|
||||
@ -2187,7 +2187,7 @@ LInstruction* LChunkBuilder::DoShiftedBinaryOp(
|
||||
} else if (hinstr->IsAdd()) {
|
||||
res = new(zone()) LAddI(left, right, shift_op, shift_amount);
|
||||
} else {
|
||||
ASSERT(hinstr->IsSub());
|
||||
DCHECK(hinstr->IsSub());
|
||||
res = new(zone()) LSubI(left, right, shift_op, shift_amount);
|
||||
}
|
||||
if (hinstr->CheckFlag(HValue::kCanOverflow)) {
|
||||
@ -2203,10 +2203,10 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
|
||||
return DoArithmeticT(op, instr);
|
||||
}
|
||||
|
||||
ASSERT(instr->representation().IsInteger32() ||
|
||||
DCHECK(instr->representation().IsInteger32() ||
|
||||
instr->representation().IsSmi());
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
|
||||
if (ShiftCanBeOptimizedAway(instr)) {
|
||||
return NULL;
|
||||
@ -2245,7 +2245,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
|
||||
if (instr->representation().IsInteger32()) {
|
||||
result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
|
||||
} else {
|
||||
ASSERT(instr->representation().IsSmi());
|
||||
DCHECK(instr->representation().IsSmi());
|
||||
result = DefineAsRegister(
|
||||
new(zone()) LShiftS(op, left, right, temp, does_deopt));
|
||||
}
|
||||
@ -2285,7 +2285,7 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
|
||||
LOperand* context = UseFixed(instr->context(), cp);
|
||||
return MarkAsCall(new(zone()) LStackCheck(context), instr);
|
||||
} else {
|
||||
ASSERT(instr->is_backwards_branch());
|
||||
DCHECK(instr->is_backwards_branch());
|
||||
LOperand* context = UseAny(instr->context());
|
||||
return AssignEnvironment(
|
||||
AssignPointerMap(new(zone()) LStackCheck(context)));
|
||||
@ -2354,23 +2354,23 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
|
||||
}
|
||||
|
||||
if (instr->is_typed_elements()) {
|
||||
ASSERT((instr->value()->representation().IsInteger32() &&
|
||||
DCHECK((instr->value()->representation().IsInteger32() &&
|
||||
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
|
||||
(instr->value()->representation().IsDouble() &&
|
||||
IsDoubleOrFloatElementsKind(instr->elements_kind())));
|
||||
ASSERT((instr->is_fixed_typed_array() &&
|
||||
DCHECK((instr->is_fixed_typed_array() &&
|
||||
instr->elements()->representation().IsTagged()) ||
|
||||
(instr->is_external() &&
|
||||
instr->elements()->representation().IsExternal()));
|
||||
return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
|
||||
|
||||
} else if (instr->value()->representation().IsDouble()) {
|
||||
ASSERT(instr->elements()->representation().IsTagged());
|
||||
DCHECK(instr->elements()->representation().IsTagged());
|
||||
return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
|
||||
|
||||
} else {
|
||||
ASSERT(instr->elements()->representation().IsTagged());
|
||||
ASSERT(instr->value()->representation().IsSmiOrTagged() ||
|
||||
DCHECK(instr->elements()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsSmiOrTagged() ||
|
||||
instr->value()->representation().IsInteger32());
|
||||
return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
|
||||
}
|
||||
@ -2384,9 +2384,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
|
||||
LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister());
|
||||
LOperand* value = UseFixed(instr->value(), KeyedStoreIC::ValueRegister());
|
||||
|
||||
ASSERT(instr->object()->representation().IsTagged());
|
||||
ASSERT(instr->key()->representation().IsTagged());
|
||||
ASSERT(instr->value()->representation().IsTagged());
|
||||
DCHECK(instr->object()->representation().IsTagged());
|
||||
DCHECK(instr->key()->representation().IsTagged());
|
||||
DCHECK(instr->value()->representation().IsTagged());
|
||||
|
||||
return MarkAsCall(
|
||||
new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
|
||||
@ -2463,8 +2463,8 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
|
||||
|
||||
LInstruction* LChunkBuilder::DoStringCompareAndBranch(
|
||||
HStringCompareAndBranch* instr) {
|
||||
ASSERT(instr->left()->representation().IsTagged());
|
||||
ASSERT(instr->right()->representation().IsTagged());
|
||||
DCHECK(instr->left()->representation().IsTagged());
|
||||
DCHECK(instr->right()->representation().IsTagged());
|
||||
LOperand* context = UseFixed(instr->context(), cp);
|
||||
LOperand* left = UseFixed(instr->left(), x1);
|
||||
LOperand* right = UseFixed(instr->right(), x0);
|
||||
@ -2476,8 +2476,8 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
|
||||
|
||||
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
|
||||
if (instr->representation().IsSmiOrInteger32()) {
|
||||
ASSERT(instr->left()->representation().Equals(instr->representation()));
|
||||
ASSERT(instr->right()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->left()->representation().Equals(instr->representation()));
|
||||
DCHECK(instr->right()->representation().Equals(instr->representation()));
|
||||
|
||||
LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
|
||||
if (shifted_operation != NULL) {
|
||||
@ -2598,8 +2598,8 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
|
||||
}
|
||||
}
|
||||
case kMathExp: {
|
||||
ASSERT(instr->representation().IsDouble());
|
||||
ASSERT(instr->value()->representation().IsDouble());
|
||||
DCHECK(instr->representation().IsDouble());
|
||||
DCHECK(instr->value()->representation().IsDouble());
|
||||
LOperand* input = UseRegister(instr->value());
|
||||
LOperand* double_temp1 = TempDoubleRegister();
|
||||
LOperand* temp1 = TempRegister();
|
||||
@ -2610,58 +2610,58 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
|
||||
return DefineAsRegister(result);
|
||||
}
|
||||
case kMathFloor: {
|
||||
ASSERT(instr->value()->representation().IsDouble());
|
||||
DCHECK(instr->value()->representation().IsDouble());
|
||||
LOperand* input = UseRegisterAtStart(instr->value());
|
||||
if (instr->representation().IsInteger32()) {
|
||||
LMathFloorI* result = new(zone()) LMathFloorI(input);
|
||||
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
|
||||
} else {
|
||||
ASSERT(instr->representation().IsDouble());
|
||||
DCHECK(instr->representation().IsDouble());
|
||||
LMathFloorD* result = new(zone()) LMathFloorD(input);
|
||||
return DefineAsRegister(result);
|
||||
}
|
||||
}
|
||||
case kMathLog: {
|
||||
ASSERT(instr->representation().IsDouble());
|
||||
ASSERT(instr->value()->representation().IsDouble());
|
||||
DCHECK(instr->representation().IsDouble());
|
||||
DCHECK(instr->value()->representation().IsDouble());
|
||||
LOperand* input = UseFixedDouble(instr->value(), d0);
|
||||
LMathLog* result = new(zone()) LMathLog(input);
|
||||
return MarkAsCall(DefineFixedDouble(result, d0), instr);
|
||||
}
|
||||
case kMathPowHalf: {
|
||||
ASSERT(instr->representation().IsDouble());
|
||||
ASSERT(instr->value()->representation().IsDouble());
|
||||
DCHECK(instr->representation().IsDouble());
|
||||
DCHECK(instr->value()->representation().IsDouble());
|
||||
LOperand* input = UseRegister(instr->value());
|
||||
return DefineAsRegister(new(zone()) LMathPowHalf(input));
|
||||
}
|
||||
case kMathRound: {
|
||||
ASSERT(instr->value()->representation().IsDouble());
|
||||
DCHECK(instr->value()->representation().IsDouble());
|
||||
LOperand* input = UseRegister(instr->value());
|
||||
if (instr->representation().IsInteger32()) {
|
||||
LOperand* temp = TempDoubleRegister();
|
||||
LMathRoundI* result = new(zone()) LMathRoundI(input, temp);
|
||||
return AssignEnvironment(DefineAsRegister(result));
|
||||
} else {
|
||||
ASSERT(instr->representation().IsDouble());
|
||||
DCHECK(instr->representation().IsDouble());
|
||||
LMathRoundD* result = new(zone()) LMathRoundD(input);
|
||||
return DefineAsRegister(result);
|
||||
}
|
||||
}
|
||||
case kMathFround: {
|
||||
ASSERT(instr->value()->representation().IsDouble());
|
||||
DCHECK(instr->value()->representation().IsDouble());
|
||||
LOperand* input = UseRegister(instr->value());
|
||||
LMathFround* result = new (zone()) LMathFround(input);
|
||||
return DefineAsRegister(result);
|
||||
}
|
||||
case kMathSqrt: {
|
||||
ASSERT(instr->representation().IsDouble());
|
||||
ASSERT(instr->value()->representation().IsDouble());
|
||||
DCHECK(instr->representation().IsDouble());
|
||||
DCHECK(instr->value()->representation().IsDouble());
|
||||
LOperand* input = UseRegisterAtStart(instr->value());
|
||||
return DefineAsRegister(new(zone()) LMathSqrt(input));
|
||||
}
|
||||
case kMathClz32: {
|
||||
ASSERT(instr->representation().IsInteger32());
|
||||
ASSERT(instr->value()->representation().IsInteger32());
|
||||
DCHECK(instr->representation().IsInteger32());
|
||||
DCHECK(instr->value()->representation().IsInteger32());
|
||||
LOperand* input = UseRegisterAtStart(instr->value());
|
||||
return DefineAsRegister(new(zone()) LMathClz32(input));
|
||||
}
|
||||
|
@ -186,7 +186,7 @@ class LCodeGen;
|
||||
return mnemonic; \
|
||||
} \
|
||||
static L##type* cast(LInstruction* instr) { \
|
||||
ASSERT(instr->Is##type()); \
|
||||
DCHECK(instr->Is##type()); \
|
||||
return reinterpret_cast<L##type*>(instr); \
|
||||
}
|
||||
|
||||
@ -391,7 +391,7 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
|
||||
virtual bool IsGap() const V8_OVERRIDE { return true; }
|
||||
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
|
||||
static LGap* cast(LInstruction* instr) {
|
||||
ASSERT(instr->IsGap());
|
||||
DCHECK(instr->IsGap());
|
||||
return reinterpret_cast<LGap*>(instr);
|
||||
}
|
||||
|
||||
@ -1528,7 +1528,7 @@ class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
|
||||
Zone* zone)
|
||||
: descriptor_(descriptor),
|
||||
inputs_(descriptor->GetRegisterParameterCount() + 1, zone) {
|
||||
ASSERT(descriptor->GetRegisterParameterCount() + 1 == operands.length());
|
||||
DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length());
|
||||
inputs_.AddAll(operands, zone);
|
||||
}
|
||||
|
||||
@ -2348,7 +2348,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
|
||||
return parameter_count()->IsConstantOperand();
|
||||
}
|
||||
LConstantOperand* constant_parameter_count() {
|
||||
ASSERT(has_constant_parameter_count());
|
||||
DCHECK(has_constant_parameter_count());
|
||||
return LConstantOperand::cast(parameter_count());
|
||||
}
|
||||
|
||||
@ -2491,7 +2491,7 @@ class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
|
||||
}
|
||||
|
||||
if (this->value() == NULL) {
|
||||
ASSERT(hydrogen()->IsConstantHoleStore() &&
|
||||
DCHECK(hydrogen()->IsConstantHoleStore() &&
|
||||
hydrogen()->value()->representation().IsDouble());
|
||||
stream->Add("<the hole(nan)>");
|
||||
} else {
|
||||
@ -3220,7 +3220,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
|
||||
if (instr->IsAdd() || instr->IsSub()) {
|
||||
return Assembler::IsImmAddSub(imm) || Assembler::IsImmAddSub(-imm);
|
||||
} else {
|
||||
ASSERT(instr->IsBitwise());
|
||||
DCHECK(instr->IsBitwise());
|
||||
unsigned unused_n, unused_imm_s, unused_imm_r;
|
||||
return Assembler::IsImmLogical(imm, kWRegSizeInBits,
|
||||
&unused_n, &unused_imm_s, &unused_imm_r);
|
||||
|
File diff suppressed because it is too large
@ -44,7 +44,7 @@ class LCodeGen: public LCodeGenBase {
}

~LCodeGen() {
ASSERT(!after_push_argument_ || inlined_arguments_);
DCHECK(!after_push_argument_ || inlined_arguments_);
}

// Simple accessors.
@ -387,8 +387,8 @@ class LCodeGen: public LCodeGenBase {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
DCHECK(codegen_->info()->is_calling());
DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;

UseScratchRegisterScope temps(codegen_->masm_);
@ -402,7 +402,7 @@ class LCodeGen: public LCodeGenBase {
}

~PushSafepointRegistersScope() {
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
RestoreRegistersStateStub stub(codegen_->isolate());
codegen_->masm_->CallStub(&stub);
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
@ -17,8 +17,8 @@ namespace internal {
void DelayedGapMasm::EndDelayedUse() {
DelayedMasm::EndDelayedUse();
if (scratch_register_used()) {
ASSERT(ScratchRegister().Is(root));
ASSERT(!pending());
DCHECK(ScratchRegister().Is(root));
DCHECK(!pending());
InitializeRootRegister();
reset_scratch_register_used();
}
@ -32,8 +32,8 @@ LGapResolver::LGapResolver(LCodeGen* owner)


void LGapResolver::Resolve(LParallelMove* parallel_move) {
ASSERT(moves_.is_empty());
ASSERT(!masm_.pending());
DCHECK(moves_.is_empty());
DCHECK(!masm_.pending());

// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
@ -56,7 +56,7 @@ void LGapResolver::Resolve(LParallelMove* parallel_move) {
LMoveOperands move = moves_[i];

if (!move.IsEliminated()) {
ASSERT(move.source()->IsConstantOperand());
DCHECK(move.source()->IsConstantOperand());
EmitMove(i);
}
}
@ -88,13 +88,13 @@ void LGapResolver::PerformMove(int index) {
// cycles in the move graph.
LMoveOperands& current_move = moves_[index];

ASSERT(!current_move.IsPending());
ASSERT(!current_move.IsRedundant());
DCHECK(!current_move.IsPending());
DCHECK(!current_move.IsRedundant());

// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
ASSERT(current_move.source() != NULL);  // Otherwise it will look eliminated.
DCHECK(current_move.source() != NULL);  // Otherwise it will look eliminated.
LOperand* destination = current_move.destination();
current_move.set_destination(NULL);

@ -121,7 +121,7 @@ void LGapResolver::PerformMove(int index) {
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
ASSERT(other_move.IsPending());
DCHECK(other_move.IsPending());
BreakCycle(index);
return;
}
@ -132,12 +132,12 @@ void LGapResolver::PerformMove(int index) {


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
#ifdef ENABLE_SLOW_DCHECKS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
}
}
#endif
@ -145,8 +145,8 @@ void LGapResolver::Verify() {


void LGapResolver::BreakCycle(int index) {
ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
ASSERT(!in_cycle_);
DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
DCHECK(!in_cycle_);

// We save in a register the source of that move and we remember its
// destination. Then we mark this move as resolved so the cycle is
@ -177,8 +177,8 @@ void LGapResolver::BreakCycle(int index) {


void LGapResolver::RestoreValue() {
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
DCHECK(in_cycle_);
DCHECK(saved_destination_ != NULL);

if (saved_destination_->IsRegister()) {
__ Mov(cgen_->ToRegister(saved_destination_), SavedValueRegister());
@ -212,7 +212,7 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsRegister()) {
__ Mov(cgen_->ToRegister(destination), source_register);
} else {
ASSERT(destination->IsStackSlot());
DCHECK(destination->IsStackSlot());
__ Store(source_register, cgen_->ToMemOperand(destination));
}

@ -221,7 +221,7 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsRegister()) {
__ Load(cgen_->ToRegister(destination), source_operand);
} else {
ASSERT(destination->IsStackSlot());
DCHECK(destination->IsStackSlot());
EmitStackSlotMove(index);
}

@ -240,8 +240,8 @@ void LGapResolver::EmitMove(int index) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
__ Fmov(result, cgen_->ToDouble(constant_source));
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
DCHECK(destination->IsStackSlot());
DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
if (cgen_->IsSmi(constant_source)) {
Smi* smi = cgen_->ToSmi(constant_source);
__ StoreConstant(reinterpret_cast<intptr_t>(smi),
@ -254,7 +254,7 @@ void LGapResolver::EmitMove(int index) {
AllowDeferredHandleDereference smi_object_check;
if (handle->IsSmi()) {
Object* obj = *handle;
ASSERT(!obj->IsHeapObject());
DCHECK(!obj->IsHeapObject());
__ StoreConstant(reinterpret_cast<intptr_t>(obj),
cgen_->ToMemOperand(destination));
} else {
@ -271,7 +271,7 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsDoubleRegister()) {
__ Fmov(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
DCHECK(destination->IsDoubleStackSlot());
__ Store(src, cgen_->ToMemOperand(destination));
}

@ -280,7 +280,7 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsDoubleRegister()) {
__ Load(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
DCHECK(destination->IsDoubleStackSlot());
EmitStackSlotMove(index);
}

@ -68,7 +68,7 @@ class LGapResolver BASE_EMBEDDED {

// Registers used to solve cycles.
const Register& SavedValueRegister() {
ASSERT(!masm_.ScratchRegister().IsAllocatable());
DCHECK(!masm_.ScratchRegister().IsAllocatable());
return masm_.ScratchRegister();
}
// The scratch register is used to break cycles and to store constant.
@ -79,7 +79,7 @@ class LGapResolver BASE_EMBEDDED {
// We use the Crankshaft floating-point scratch register to break a cycle
// involving double values as the MacroAssembler will not need it for the
// operations performed by the gap resolver.
ASSERT(!crankshaft_fp_scratch.IsAllocatable());
DCHECK(!crankshaft_fp_scratch.IsAllocatable());
return crankshaft_fp_scratch;
}
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -366,7 +366,7 @@ class MacroAssembler : public Assembler {
// Provide a template to allow other types to be converted automatically.
template<typename T>
void Fmov(FPRegister fd, T imm) {
ASSERT(allow_macro_instructions_);
DCHECK(allow_macro_instructions_);
Fmov(fd, static_cast<double>(imm));
}
inline void Fmov(Register rd, FPRegister fn);
@ -619,7 +619,7 @@ class MacroAssembler : public Assembler {
explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }

~PushPopQueue() {
ASSERT(queued_.empty());
DCHECK(queued_.empty());
}

void Queue(const CPURegister& rt) {
@ -771,7 +771,7 @@ class MacroAssembler : public Assembler {

// Set the current stack pointer, but don't generate any code.
inline void SetStackPointer(const Register& stack_pointer) {
ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
sp_ = stack_pointer;
}

@ -785,8 +785,8 @@ class MacroAssembler : public Assembler {
inline void AlignAndSetCSPForFrame() {
int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment.
ASSERT(sp_alignment >= 16);
ASSERT(IsPowerOf2(sp_alignment));
DCHECK(sp_alignment >= 16);
DCHECK(IsPowerOf2(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
SetStackPointer(csp);
}
@ -841,7 +841,7 @@ class MacroAssembler : public Assembler {
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
ASSERT(object->IsSmi());
DCHECK(object->IsSmi());
Mov(result, Operand(object));
}
}
@ -981,7 +981,7 @@ class MacroAssembler : public Assembler {
FPRegister scratch_d,
Label* on_successful_conversion = NULL,
Label* on_failed_conversion = NULL) {
ASSERT(as_int.Is32Bits());
DCHECK(as_int.Is32Bits());
TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
on_failed_conversion);
}
@ -996,7 +996,7 @@ class MacroAssembler : public Assembler {
FPRegister scratch_d,
Label* on_successful_conversion = NULL,
Label* on_failed_conversion = NULL) {
ASSERT(as_int.Is64Bits());
DCHECK(as_int.Is64Bits());
TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
on_failed_conversion);
}
@ -2204,7 +2204,7 @@ class InstructionAccurateScope BASE_EMBEDDED {
masm_->EndBlockPools();
#ifdef DEBUG
if (start_.is_bound()) {
ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
}
masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
@ -2234,8 +2234,8 @@ class UseScratchRegisterScope {
availablefp_(masm->FPTmpList()),
old_available_(available_->list()),
old_availablefp_(availablefp_->list()) {
ASSERT(available_->type() == CPURegister::kRegister);
ASSERT(availablefp_->type() == CPURegister::kFPRegister);
DCHECK(available_->type() == CPURegister::kRegister);
DCHECK(availablefp_->type() == CPURegister::kFPRegister);
}

~UseScratchRegisterScope();
@ -126,7 +126,7 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(
backtrack_label_(),
exit_label_() {
__ SetStackPointer(csp);
ASSERT_EQ(0, registers_to_save % 2);
DCHECK_EQ(0, registers_to_save % 2);
// We can cache at most 16 W registers in x0-x7.
STATIC_ASSERT(kNumCachedRegisters <= 16);
STATIC_ASSERT((kNumCachedRegisters % 2) == 0);
@ -161,7 +161,7 @@ void RegExpMacroAssemblerARM64::AdvanceCurrentPosition(int by) {


void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
ASSERT((reg >= 0) && (reg < num_registers_));
DCHECK((reg >= 0) && (reg < num_registers_));
if (by != 0) {
Register to_advance;
RegisterState register_state = GetRegisterState(reg);
@ -262,7 +262,7 @@ void RegExpMacroAssemblerARM64::CheckCharacters(Vector<const uc16> str,
for (int i = 0; i < str.length(); i++) {
if (mode_ == ASCII) {
__ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
ASSERT(str[i] <= String::kMaxOneByteCharCode);
DCHECK(str[i] <= String::kMaxOneByteCharCode);
} else {
__ Ldrh(w10, MemOperand(characters_address, 2, PostIndex));
}
@ -289,10 +289,10 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
// Save the capture length in a callee-saved register so it will
// be preserved if we call a C helper.
Register capture_length = w19;
ASSERT(kCalleeSaved.IncludesAliasOf(capture_length));
DCHECK(kCalleeSaved.IncludesAliasOf(capture_length));

// Find length of back-referenced capture.
ASSERT((start_reg % 2) == 0);
DCHECK((start_reg % 2) == 0);
if (start_reg < kNumCachedRegisters) {
__ Mov(capture_start_offset.X(), GetCachedRegister(start_reg));
__ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
@ -365,12 +365,12 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Check(le, kOffsetOutOfRange);
}
} else {
ASSERT(mode_ == UC16);
DCHECK(mode_ == UC16);
int argument_count = 4;

// The cached registers need to be retained.
CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
DCHECK((cached_registers.Count() * 2) == kNumCachedRegisters);
__ PushCPURegList(cached_registers);

// Put arguments into arguments registers.
@ -421,7 +421,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
Register capture_length = w15;

// Find length of back-referenced capture.
ASSERT((start_reg % 2) == 0);
DCHECK((start_reg % 2) == 0);
if (start_reg < kNumCachedRegisters) {
__ Mov(x10, GetCachedRegister(start_reg));
__ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
@ -451,7 +451,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
__ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
__ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
} else {
ASSERT(mode_ == UC16);
DCHECK(mode_ == UC16);
__ Ldrh(w10, MemOperand(capture_start_address, 2, PostIndex));
__ Ldrh(w11, MemOperand(current_position_address, 2, PostIndex));
}
@ -499,7 +499,7 @@ void RegExpMacroAssemblerARM64::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
ASSERT(minus < String::kMaxUtf16CodeUnit);
DCHECK(minus < String::kMaxUtf16CodeUnit);
__ Sub(w10, current_character(), minus);
__ And(w10, w10, mask);
CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
@ -681,10 +681,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
CPURegList argument_registers(x0, x5, x6, x7);

CPURegList registers_to_retain = kCalleeSaved;
ASSERT(kCalleeSaved.Count() == 11);
DCHECK(kCalleeSaved.Count() == 11);
registers_to_retain.Combine(lr);

ASSERT(csp.Is(__ StackPointer()));
DCHECK(csp.Is(__ StackPointer()));
__ PushCPURegList(registers_to_retain);
__ PushCPURegList(argument_registers);

@ -708,7 +708,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {

// Make sure the stack alignment will be respected.
int alignment = masm_->ActivationFrameAlignment();
ASSERT_EQ(alignment % 16, 0);
DCHECK_EQ(alignment % 16, 0);
int align_mask = (alignment / kWRegSize) - 1;
num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;

@ -861,7 +861,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
Register base = x10;
// There are always an even number of capture registers. A couple of
// registers determine one match with two offsets.
ASSERT_EQ(0, num_registers_left_on_stack % 2);
DCHECK_EQ(0, num_registers_left_on_stack % 2);
__ Add(base, frame_pointer(), kFirstCaptureOnStack);

// We can unroll the loop here, we should not unroll for less than 2
@ -978,7 +978,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Bind(&return_w0);

// Set stack pointer back to first register to retain
ASSERT(csp.Is(__ StackPointer()));
DCHECK(csp.Is(__ StackPointer()));
__ Mov(csp, fp);
__ AssertStackConsistency();

@ -991,7 +991,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Registers x0 to x7 are used to store the first captures, they need to be
// retained over calls to C++ code.
CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
DCHECK((cached_registers.Count() * 2) == kNumCachedRegisters);

if (check_preempt_label_.is_linked()) {
__ Bind(&check_preempt_label_);
@ -1084,9 +1084,9 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacter(int cp_offset,
int characters) {
// TODO(pielan): Make sure long strings are caught before this, and not
// just asserted in debug mode.
ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
// Be sane! (And ensure that an int32_t can be used to index the string)
ASSERT(cp_offset < (1<<30));
DCHECK(cp_offset < (1<<30));
if (check_bounds) {
CheckPosition(cp_offset + characters - 1, on_end_of_input);
}
@ -1179,7 +1179,7 @@ void RegExpMacroAssemblerARM64::SetCurrentPositionFromEnd(int by) {


void RegExpMacroAssemblerARM64::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
Register set_to = wzr;
if (to != 0) {
set_to = w10;
@ -1207,7 +1207,7 @@ void RegExpMacroAssemblerARM64::WriteCurrentPositionToRegister(int reg,


void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
ASSERT(reg_from <= reg_to);
DCHECK(reg_from <= reg_to);
int num_registers = reg_to - reg_from + 1;

// If the first capture register is cached in a hardware register but not
@ -1220,7 +1220,7 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {

// Clear cached registers in pairs as far as possible.
while ((num_registers >= 2) && (reg_from < kNumCachedRegisters)) {
ASSERT(GetRegisterState(reg_from) == CACHED_LSW);
DCHECK(GetRegisterState(reg_from) == CACHED_LSW);
__ Mov(GetCachedRegister(reg_from), twice_non_position_value());
reg_from += 2;
num_registers -= 2;
@ -1234,7 +1234,7 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {

if (num_registers > 0) {
// If there are some remaining registers, they are stored on the stack.
ASSERT(reg_from >= kNumCachedRegisters);
DCHECK(reg_from >= kNumCachedRegisters);

// Move down the indexes of the registers on stack to get the correct offset
// in memory.
@ -1317,8 +1317,8 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
// Current string.
bool is_ascii = subject->IsOneByteRepresentationUnderneath();

ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
re_code->instruction_start() + re_code->instruction_size());

Object* result = isolate->stack_guard()->HandleInterrupts();
@ -1357,7 +1357,7 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
// be a sequential or external string with the same content.
// Update the start and end pointers in the stack frame to the current
// location (whether it has actually moved or not).
ASSERT(StringShape(*subject_tmp).IsSequential() ||
DCHECK(StringShape(*subject_tmp).IsSequential() ||
StringShape(*subject_tmp).IsExternal());

// The original start address of the characters to match.
@ -1410,11 +1410,11 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
// moved. Allocate extra space for 2 arguments passed by pointers.
// AAPCS64 requires the stack to be 16 byte aligned.
int alignment = masm_->ActivationFrameAlignment();
ASSERT_EQ(alignment % 16, 0);
DCHECK_EQ(alignment % 16, 0);
int align_mask = (alignment / kXRegSize) - 1;
int xreg_to_claim = (3 + align_mask) & ~align_mask;

ASSERT(csp.Is(__ StackPointer()));
DCHECK(csp.Is(__ StackPointer()));
__ Claim(xreg_to_claim);

// CheckStackGuardState needs the end and start addresses of the input string.
@ -1444,7 +1444,7 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
__ Peek(input_start(), kPointerSize);
__ Peek(input_end(), 2 * kPointerSize);

ASSERT(csp.Is(__ StackPointer()));
DCHECK(csp.Is(__ StackPointer()));
__ Drop(xreg_to_claim);

// Reload the Code pointer.
@ -1493,7 +1493,7 @@ void RegExpMacroAssemblerARM64::CheckPreemption() {
ExternalReference::address_of_stack_limit(isolate());
__ Mov(x10, stack_limit);
__ Ldr(x10, MemOperand(x10));
ASSERT(csp.Is(__ StackPointer()));
DCHECK(csp.Is(__ StackPointer()));
__ Cmp(csp, x10);
CallIf(&check_preempt_label_, ls);
}
@ -1510,8 +1510,8 @@ void RegExpMacroAssemblerARM64::CheckStackLimit() {


void RegExpMacroAssemblerARM64::Push(Register source) {
ASSERT(source.Is32Bits());
ASSERT(!source.is(backtrack_stackpointer()));
DCHECK(source.Is32Bits());
DCHECK(!source.is(backtrack_stackpointer()));
__ Str(source,
MemOperand(backtrack_stackpointer(),
-static_cast<int>(kWRegSize),
@ -1520,23 +1520,23 @@ void RegExpMacroAssemblerARM64::Push(Register source) {


void RegExpMacroAssemblerARM64::Pop(Register target) {
ASSERT(target.Is32Bits());
ASSERT(!target.is(backtrack_stackpointer()));
DCHECK(target.Is32Bits());
DCHECK(!target.is(backtrack_stackpointer()));
__ Ldr(target,
MemOperand(backtrack_stackpointer(), kWRegSize, PostIndex));
}


Register RegExpMacroAssemblerARM64::GetCachedRegister(int register_index) {
ASSERT(register_index < kNumCachedRegisters);
DCHECK(register_index < kNumCachedRegisters);
return Register::Create(register_index / 2, kXRegSizeInBits);
}


Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
Register maybe_result) {
ASSERT(maybe_result.Is32Bits());
ASSERT(register_index >= 0);
DCHECK(maybe_result.Is32Bits());
DCHECK(register_index >= 0);
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
@ -1559,15 +1559,15 @@ Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
UNREACHABLE();
break;
}
ASSERT(result.Is32Bits());
DCHECK(result.Is32Bits());
return result;
}


void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
Register source) {
ASSERT(source.Is32Bits());
ASSERT(register_index >= 0);
DCHECK(source.Is32Bits());
DCHECK(register_index >= 0);
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
@ -1604,22 +1604,22 @@ void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {


void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
ASSERT(csp.Is(__ StackPointer()));
DCHECK(csp.Is(__ StackPointer()));
__ Pop(lr, xzr);
__ Add(lr, lr, Operand(masm_->CodeObject()));
}


void RegExpMacroAssemblerARM64::SaveLinkRegister() {
ASSERT(csp.Is(__ StackPointer()));
DCHECK(csp.Is(__ StackPointer()));
__ Sub(lr, lr, Operand(masm_->CodeObject()));
__ Push(xzr, lr);
}


MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {
ASSERT(register_index < (1<<30));
ASSERT(register_index >= kNumCachedRegisters);
DCHECK(register_index < (1<<30));
DCHECK(register_index >= kNumCachedRegisters);
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
@ -1630,10 +1630,10 @@ MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {

MemOperand RegExpMacroAssemblerARM64::capture_location(int register_index,
Register scratch) {
ASSERT(register_index < (1<<30));
ASSERT(register_index < num_saved_registers_);
ASSERT(register_index >= kNumCachedRegisters);
ASSERT_EQ(register_index % 2, 0);
DCHECK(register_index < (1<<30));
DCHECK(register_index < num_saved_registers_);
DCHECK(register_index >= kNumCachedRegisters);
DCHECK_EQ(register_index % 2, 0);
register_index -= kNumCachedRegisters;
int offset = kFirstCaptureOnStack - register_index * kWRegSize;
// capture_location is used with Stp instructions to load/store 2 registers.
@ -1659,7 +1659,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
// disable it.
// TODO(pielan): See whether or not we should disable unaligned accesses.
if (!CanReadUnaligned()) {
ASSERT(characters == 1);
DCHECK(characters == 1);
}

if (cp_offset != 0) {
@ -1681,15 +1681,15 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
} else if (characters == 2) {
__ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
} else {
ASSERT(characters == 1);
DCHECK(characters == 1);
__ Ldrb(current_character(), MemOperand(input_end(), offset, SXTW));
}
} else {
ASSERT(mode_ == UC16);
DCHECK(mode_ == UC16);
if (characters == 2) {
__ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
} else {
ASSERT(characters == 1);
DCHECK(characters == 1);
__ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
}
}
@ -231,7 +231,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
};

RegisterState GetRegisterState(int register_index) {
ASSERT(register_index >= 0);
DCHECK(register_index >= 0);
if (register_index >= kNumCachedRegisters) {
return STACKED;
} else {
@ -73,11 +73,11 @@ const Instruction* Simulator::kEndOfSimAddress = NULL;

void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
int width = msb - lsb + 1;
ASSERT(is_uintn(bits, width) || is_intn(bits, width));
DCHECK(is_uintn(bits, width) || is_intn(bits, width));

bits <<= lsb;
uint32_t mask = ((1 << width) - 1) << lsb;
ASSERT((mask & write_ignore_mask_) == 0);
DCHECK((mask & write_ignore_mask_) == 0);

value_ = (value_ & ~mask) | (bits & mask);
}
@ -107,7 +107,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator* Simulator::current(Isolate* isolate) {
Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
ASSERT(isolate_data != NULL);
DCHECK(isolate_data != NULL);

Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
@ -135,7 +135,7 @@ void Simulator::CallVoid(byte* entry, CallArgument* args) {
} else if (arg.IsD() && (index_d < 8)) {
set_dreg_bits(index_d++, arg.bits());
} else {
ASSERT(arg.IsD() || arg.IsX());
DCHECK(arg.IsD() || arg.IsX());
stack_args.push_back(arg.bits());
}
}
@ -154,7 +154,7 @@ void Simulator::CallVoid(byte* entry, CallArgument* args) {
stack += sizeof(*it);
}

ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack);
DCHECK(reinterpret_cast<uintptr_t>(stack) <= original_stack);
set_sp(entry_stack);

// Call the generated code.
@ -256,7 +256,7 @@ void Simulator::CheckPCSComplianceAndRun() {
CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
}
for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
ASSERT(saved_fpregisters[i] ==
DCHECK(saved_fpregisters[i] ==
dreg_bits(fpregister_list.PopLowestIndex().code()));
}

@ -289,7 +289,7 @@ void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
set_xreg(code, value | code);
}
} else {
ASSERT(list->type() == CPURegister::kFPRegister);
DCHECK(list->type() == CPURegister::kFPRegister);
while (!list->IsEmpty()) {
unsigned code = list->PopLowestIndex().code();
set_dreg_bits(code, value | code);
@ -311,7 +311,7 @@ void Simulator::CorruptAllCallerSavedCPURegisters() {

// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
uintptr_t Simulator::PushAddress(uintptr_t address) {
ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
DCHECK(sizeof(uintptr_t) < 2 * kXRegSize);
intptr_t new_sp = sp() - 2 * kXRegSize;
uintptr_t* alignment_slot =
reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
@ -327,7 +327,7 @@ uintptr_t Simulator::PopAddress() {
intptr_t current_sp = sp();
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
uintptr_t address = *stack_slot;
ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
DCHECK(sizeof(uintptr_t) < 2 * kXRegSize);
set_sp(current_sp + 2 * kXRegSize);
return address;
}
@ -481,7 +481,7 @@ class Redirection {
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
ASSERT_EQ(current->type(), type);
DCHECK_EQ(current->type(), type);
return current;
}
}
@ -765,7 +765,7 @@ const char* Simulator::vreg_names[] = {


const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
ASSERT(code < kNumberOfRegisters);
DCHECK(code < kNumberOfRegisters);
// If the code represents the stack pointer, index the name after zr.
if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
code = kZeroRegCode + 1;
@ -775,7 +775,7 @@ const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {


const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
ASSERT(code < kNumberOfRegisters);
DCHECK(code < kNumberOfRegisters);
// If the code represents the stack pointer, index the name after zr.
if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
code = kZeroRegCode + 1;
@ -785,19 +785,19 @@ const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {


const char* Simulator::SRegNameForCode(unsigned code) {
ASSERT(code < kNumberOfFPRegisters);
DCHECK(code < kNumberOfFPRegisters);
return sreg_names[code];
}


const char* Simulator::DRegNameForCode(unsigned code) {
ASSERT(code < kNumberOfFPRegisters);
DCHECK(code < kNumberOfFPRegisters);
return dreg_names[code];
}


const char* Simulator::VRegNameForCode(unsigned code) {
ASSERT(code < kNumberOfFPRegisters);
DCHECK(code < kNumberOfFPRegisters);
return vreg_names[code];
}

@ -830,7 +830,7 @@ T Simulator::AddWithCarry(bool set_flags,
T src2,
T carry_in) {
typedef typename make_unsigned<T>::type unsignedT;
ASSERT((carry_in == 0) || (carry_in == 1));
DCHECK((carry_in == 0) || (carry_in == 1));

T signed_sum = src1 + src2 + carry_in;
T result = signed_sum;
@ -1065,7 +1065,7 @@ void Simulator::PrintSystemRegisters(bool print_all) {
"0b10 (Round towards Minus Infinity)",
"0b11 (Round towards Zero)"
};
ASSERT(fpcr().RMode() <= (sizeof(rmode) / sizeof(rmode[0])));
DCHECK(fpcr().RMode() <= (sizeof(rmode) / sizeof(rmode[0])));
fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
clr_flag_name,
clr_flag_value,
@ -1205,7 +1205,7 @@ void Simulator::VisitUnconditionalBranch(Instruction* instr) {


void Simulator::VisitConditionalBranch(Instruction* instr) {
ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
DCHECK(instr->Mask(ConditionalBranchMask) == B_cond);
if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
set_pc(instr->ImmPCOffsetTarget());
}
@ -1418,7 +1418,7 @@ void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) {
if (instr->Mask(ConditionalCompareMask) == CCMP) {
AddWithCarry<T>(true, op1, ~op2, 1);
} else {
ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
DCHECK(instr->Mask(ConditionalCompareMask) == CCMN);
AddWithCarry<T>(true, op1, op2, 0);
}
} else {
@ -1451,7 +1451,7 @@ void Simulator::VisitLoadStorePostIndex(Instruction* instr) {

void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
Extend ext = static_cast<Extend>(instr->ExtendMode());
ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
DCHECK((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();

int64_t offset = ExtendValue(xreg(instr->Rm()), ext, shift_amount);
@ -1586,7 +1586,7 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));

// 'rt' and 'rt2' can only be aliased for stores.
ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2));

switch (op) {
case LDP_w: {
@ -1694,7 +1694,7 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
int64_t offset,
AddrMode addrmode) {
if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
ASSERT(offset != 0);
DCHECK(offset != 0);
uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
set_reg(addr_reg, address + offset, Reg31IsStackPointer);
}
@ -1714,8 +1714,8 @@ void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {


uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
ASSERT(address != NULL);
ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
DCHECK(address != NULL);
DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
uint64_t read = 0;
memcpy(&read, address, num_bytes);
return read;
@ -1755,8 +1755,8 @@ double Simulator::MemoryReadFP64(uint8_t* address) {
void Simulator::MemoryWrite(uint8_t* address,
uint64_t value,
unsigned num_bytes) {
ASSERT(address != NULL);
ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
DCHECK(address != NULL);
DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));

LogWrite(address, value, num_bytes);
memcpy(address, &value, num_bytes);
@ -1790,7 +1790,7 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {

bool is_64_bits = instr->SixtyFourBits() == 1;
// Shift is limited for W operations.
ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
DCHECK(is_64_bits || (instr->ShiftMoveWide() < 2));

// Get the shifted immediate.
int64_t shift = instr->ShiftMoveWide() * 16;
@ -1880,7 +1880,7 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {


uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
ASSERT((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
DCHECK((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
uint64_t result = 0;
for (unsigned i = 0; i < num_bits; i++) {
result = (result << 1) | (value & 1);
@ -1904,7 +1904,7 @@ uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
// permute_table[Reverse16] is used by REV16_x, REV16_w
// permute_table[Reverse32] is used by REV32_x, REV_w
// permute_table[Reverse64] is used by REV_x
ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
DCHECK((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
{4, 5, 6, 7, 0, 1, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7} };
@ -2027,7 +2027,7 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
case SMULH_x:
ASSERT(instr->Ra() == kZeroRegCode);
DCHECK(instr->Ra() == kZeroRegCode);
result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
break;
default: UNIMPLEMENTED();
@ -2407,10 +2407,10 @@ void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
template <class T, int ebits, int mbits>
static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
FPRounding round_mode) {
ASSERT((sign == 0) || (sign == 1));
DCHECK((sign == 0) || (sign == 1));

// Only the FPTieEven rounding mode is implemented.
ASSERT(round_mode == FPTieEven);
DCHECK(round_mode == FPTieEven);
USE(round_mode);

// Rounding can promote subnormals to normals, and normals to infinities. For
@ -2727,7 +2727,7 @@ double Simulator::FPToDouble(float value) {

float Simulator::FPToFloat(double value, FPRounding round_mode) {
// Only the FPTieEven rounding mode is implemented.
ASSERT(round_mode == FPTieEven);
DCHECK(round_mode == FPTieEven);
USE(round_mode);

switch (std::fpclassify(value)) {
@ -2856,7 +2856,7 @@ void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
template <typename T>
T Simulator::FPAdd(T op1, T op2) {
// NaNs should be handled elsewhere.
ASSERT(!std::isnan(op1) && !std::isnan(op2));
DCHECK(!std::isnan(op1) && !std::isnan(op2));

if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
// inf + -inf returns the default NaN.
@ -2871,7 +2871,7 @@ T Simulator::FPAdd(T op1, T op2) {
template <typename T>
T Simulator::FPDiv(T op1, T op2) {
// NaNs should be handled elsewhere.
ASSERT(!std::isnan(op1) && !std::isnan(op2));
DCHECK(!std::isnan(op1) && !std::isnan(op2));

if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
// inf / inf and 0.0 / 0.0 return the default NaN.
@ -2886,7 +2886,7 @@ T Simulator::FPDiv(T op1, T op2) {
template <typename T>
T Simulator::FPMax(T a, T b) {
// NaNs should be handled elsewhere.
ASSERT(!std::isnan(a) && !std::isnan(b));
DCHECK(!std::isnan(a) && !std::isnan(b));

if ((a == 0.0) && (b == 0.0) &&
(copysign(1.0, a) != copysign(1.0, b))) {
@ -2913,7 +2913,7 @@ T Simulator::FPMaxNM(T a, T b) {
template <typename T>
T Simulator::FPMin(T a, T b) {
// NaNs should be handled elsewhere.
ASSERT(!std::isnan(a) && !std::isnan(b));
DCHECK(!std::isnan(a) && !std::isnan(b));

if ((a == 0.0) && (b == 0.0) &&
(copysign(1.0, a) != copysign(1.0, b))) {
@ -2941,7 +2941,7 @@ T Simulator::FPMinNM(T a, T b) {
template <typename T>
T Simulator::FPMul(T op1, T op2) {
// NaNs should be handled elsewhere.
ASSERT(!std::isnan(op1) && !std::isnan(op2));
DCHECK(!std::isnan(op1) && !std::isnan(op2));

if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
// inf * 0.0 returns the default NaN.
@ -2986,7 +2986,7 @@ T Simulator::FPMulAdd(T a, T op1, T op2) {
}

result = FusedMultiplyAdd(op1, op2, a);
ASSERT(!std::isnan(result));
DCHECK(!std::isnan(result));

// Work around broken fma implementations for rounded zero results: If a is
// 0.0, the sign of the result is the sign of op1 * op2 before rounding.
@ -3013,7 +3013,7 @@ T Simulator::FPSqrt(T op) {
template <typename T>
T Simulator::FPSub(T op1, T op2) {
// NaNs should be handled elsewhere.
ASSERT(!std::isnan(op1) && !std::isnan(op2));
DCHECK(!std::isnan(op1) && !std::isnan(op2));

if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
// inf - inf returns the default NaN.
@ -3027,7 +3027,7 @@ T Simulator::FPSub(T op1, T op2) {

template <typename T>
T Simulator::FPProcessNaN(T op) {
ASSERT(std::isnan(op));
DCHECK(std::isnan(op));
return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
}

@ -3039,10 +3039,10 @@ T Simulator::FPProcessNaNs(T op1, T op2) {
} else if (IsSignallingNaN(op2)) {
return FPProcessNaN(op2);
} else if (std::isnan(op1)) {
ASSERT(IsQuietNaN(op1));
DCHECK(IsQuietNaN(op1));
return FPProcessNaN(op1);
} else if (std::isnan(op2)) {
ASSERT(IsQuietNaN(op2));
DCHECK(IsQuietNaN(op2));
return FPProcessNaN(op2);
} else {
return 0.0;
@ -3059,13 +3059,13 @@ T Simulator::FPProcessNaNs3(T op1, T op2, T op3) {
} else if (IsSignallingNaN(op3)) {
return FPProcessNaN(op3);
} else if (std::isnan(op1)) {
ASSERT(IsQuietNaN(op1));
DCHECK(IsQuietNaN(op1));
return FPProcessNaN(op1);
} else if (std::isnan(op2)) {
ASSERT(IsQuietNaN(op2));
DCHECK(IsQuietNaN(op2));
return FPProcessNaN(op2);
} else if (std::isnan(op3)) {
ASSERT(IsQuietNaN(op3));
DCHECK(IsQuietNaN(op3));
return FPProcessNaN(op3);
} else {
return 0.0;
@ -3121,7 +3121,7 @@ void Simulator::VisitSystem(Instruction* instr) {
}
}
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
ASSERT(instr->Mask(SystemHintMask) == HINT);
DCHECK(instr->Mask(SystemHintMask) == HINT);
switch (instr->ImmHint()) {
case NOP: break;
default: UNIMPLEMENTED();
@ -3164,12 +3164,12 @@ bool Simulator::GetValue(const char* desc, int64_t* value) {

bool Simulator::PrintValue(const char* desc) {
if (strcmp(desc, "csp") == 0) {
ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
PrintF(stream_, "%s csp:%s 0x%016" PRIx64 "%s\n",
clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
return true;
} else if (strcmp(desc, "wcsp") == 0) {
ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n",
clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
return true;
@ -3561,7 +3561,7 @@ void Simulator::VisitException(Instruction* instr) {
break;
default:
// We don't support a one-shot LOG_DISASM.
ASSERT((parameters & LOG_DISASM) == 0);
DCHECK((parameters & LOG_DISASM) == 0);
// Don't print information that is already being traced.
parameters &= ~log_parameters();
// Print the requested information.
@ -3575,8 +3575,8 @@ void Simulator::VisitException(Instruction* instr) {
size_t size = kDebugMessageOffset + strlen(message) + 1;
pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
// - Verify that the unreachable marker is present.
ASSERT(pc_->Mask(ExceptionMask) == HLT);
ASSERT(pc_->ImmException() == kImmExceptionIsUnreachable);
DCHECK(pc_->Mask(ExceptionMask) == HLT);
DCHECK(pc_->ImmException() == kImmExceptionIsUnreachable);
// - Skip past the unreachable marker.
set_pc(pc_->following());

@ -3606,7 +3606,7 @@ void Simulator::VisitException(Instruction* instr) {


void Simulator::DoPrintf(Instruction* instr) {
ASSERT((instr->Mask(ExceptionMask) == HLT) &&
DCHECK((instr->Mask(ExceptionMask) == HLT) &&
(instr->ImmException() == kImmExceptionIsPrintf));

// Read the arguments encoded inline in the instruction stream.
@ -3620,8 +3620,8 @@ void Simulator::DoPrintf(Instruction* instr) {
instr + kPrintfArgPatternListOffset,
sizeof(arg_pattern_list));

ASSERT(arg_count <= kPrintfMaxArgCount);
ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
DCHECK(arg_count <= kPrintfMaxArgCount);
DCHECK((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);

// We need to call the host printf function with a set of arguments defined by
// arg_pattern_list. Because we don't know the types and sizes of the
@ -3633,7 +3633,7 @@ void Simulator::DoPrintf(Instruction* instr) {
// Leave enough space for one extra character per expected argument (plus the
// '\0' termination).
const char * format_base = reg<const char *>(0);
ASSERT(format_base != NULL);
DCHECK(format_base != NULL);
size_t length = strlen(format_base) + 1;
char * const format = new char[length + arg_count];

@ -3668,7 +3668,7 @@ void Simulator::DoPrintf(Instruction* instr) {
}
}
}
ASSERT(format_scratch <= (format + length + arg_count));
DCHECK(format_scratch <= (format + length + arg_count));
CHECK(placeholder_count == arg_count);

// Finally, call printf with each chunk, passing the appropriate register
@ -212,13 +212,13 @@ class Simulator : public DecoderVisitor {
template<typename T>
explicit CallArgument(T argument) {
bits_ = 0;
ASSERT(sizeof(argument) <= sizeof(bits_));
DCHECK(sizeof(argument) <= sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = X_ARG;
}

explicit CallArgument(double argument) {
ASSERT(sizeof(argument) == sizeof(bits_));
DCHECK(sizeof(argument) == sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = D_ARG;
}
@ -229,10 +229,10 @@ class Simulator : public DecoderVisitor {
UNIMPLEMENTED();
// Make the D register a NaN to try to trap errors if the callee expects a
// double. If it expects a float, the callee should ignore the top word.
ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
DCHECK(sizeof(kFP64SignallingNaN) == sizeof(bits_));
memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
// Write the float payload to the S register.
ASSERT(sizeof(argument) <= sizeof(bits_));
DCHECK(sizeof(argument) <= sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = D_ARG;
}
@ -290,7 +290,7 @@ class Simulator : public DecoderVisitor {
// Simulation helpers.
template <typename T>
void set_pc(T new_pc) {
ASSERT(sizeof(T) == sizeof(pc_));
DCHECK(sizeof(T) == sizeof(pc_));
memcpy(&pc_, &new_pc, sizeof(T));
pc_modified_ = true;
}
@ -309,7 +309,7 @@ class Simulator : public DecoderVisitor {
}

void ExecuteInstruction() {
ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
CheckBreakNext();
Decode(pc_);
LogProcessorState();
@ -332,7 +332,7 @@ class Simulator : public DecoderVisitor {
//
template<typename T>
T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
ASSERT(code < kNumberOfRegisters);
DCHECK(code < kNumberOfRegisters);
if (IsZeroRegister(code, r31mode)) {
return 0;
}
@ -355,7 +355,7 @@ class Simulator : public DecoderVisitor {
template<typename T>
void set_reg(unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
ASSERT(code < kNumberOfRegisters);
DCHECK(code < kNumberOfRegisters);
if (!IsZeroRegister(code, r31mode))
registers_[code].Set(value);
}
@ -374,13 +374,13 @@ class Simulator : public DecoderVisitor {
// Commonly-used special cases.
template<typename T>
void set_lr(T value) {
ASSERT(sizeof(T) == kPointerSize);
DCHECK(sizeof(T) == kPointerSize);
set_reg(kLinkRegCode, value);
}

template<typename T>
void set_sp(T value) {
ASSERT(sizeof(T) == kPointerSize);
DCHECK(sizeof(T) == kPointerSize);
set_reg(31, value, Reg31IsStackPointer);
}

@ -395,7 +395,7 @@ class Simulator : public DecoderVisitor {

template<typename T>
T fpreg(unsigned code) const {
ASSERT(code < kNumberOfRegisters);
DCHECK(code < kNumberOfRegisters);
return fpregisters_[code].Get<T>();
}

@ -430,8 +430,8 @@ class Simulator : public DecoderVisitor {
// This behaviour matches AArch64 register writes.
template<typename T>
void set_fpreg(unsigned code, T value) {
ASSERT((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
ASSERT(code < kNumberOfFPRegisters);
DCHECK((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
DCHECK(code < kNumberOfFPRegisters);
fpregisters_[code].Set(value);
}

@ -744,8 +744,8 @@ class Simulator : public DecoderVisitor {
// functions, or to save and restore it when entering and leaving generated
// code.
void AssertSupportedFPCR() {
ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
DCHECK(fpcr().FZ() == 0); // No flush-to-zero support.
DCHECK(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.

// The simulator does not support half-precision operations so fpcr().AHP()
// is irrelevant, and is not checked here.
@ -20,8 +20,8 @@ namespace internal {
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
ASSERT(!AreAliased(receiver, scratch0, scratch1));
ASSERT(name->IsUniqueName());
DCHECK(!AreAliased(receiver, scratch0, scratch1));
DCHECK(name->IsUniqueName());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@ -93,7 +93,7 @@ static void ProbeTable(Isolate* isolate,

Label miss;

ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3));
DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));

// Multiply by 3 because there are 3 fields per entry.
__ Add(scratch3, offset, Operand(offset, LSL, 1));
@ -151,15 +151,15 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Label miss;

// Make sure the flags does not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);

// Make sure that there are no register conflicts.
ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));

// Make sure extra and extra2 registers are valid.
ASSERT(!extra.is(no_reg));
ASSERT(!extra2.is(no_reg));
ASSERT(!extra3.is(no_reg));
DCHECK(!extra.is(no_reg));
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));

Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
@ -240,7 +240,7 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
DCHECK(cell->value()->IsTheHole());
__ Mov(scratch, Operand(cell));
__ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
@ -258,7 +258,7 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,

__ Push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
__ Mov(scratch, Operand(interceptor));
__ Push(scratch, receiver, holder);
@ -280,19 +280,19 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
MacroAssembler* masm, const CallOptimization& optimization,
Handle<Map> receiver_map, Register receiver, Register scratch,
bool is_store, int argc, Register* values) {
ASSERT(!AreAliased(receiver, scratch));
DCHECK(!AreAliased(receiver, scratch));

MacroAssembler::PushPopQueue queue(masm);
queue.Queue(receiver);
// Write the arguments to the stack frame.
for (int i = 0; i < argc; i++) {
Register arg = values[argc - 1 - i];
ASSERT(!AreAliased(receiver, scratch, arg));
DCHECK(!AreAliased(receiver, scratch, arg));
queue.Queue(arg);
}
queue.PushQueued();

ASSERT(optimization.is_simple_api_call());
DCHECK(optimization.is_simple_api_call());

// Abi for CallApiFunctionStub.
Register callee = x0;
@ -378,7 +378,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
Label exit;

ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg,
DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg,
scratch1, scratch2, scratch3));

// We don't need scratch3.
@ -388,7 +388,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
ASSERT(!representation.IsNone());
DCHECK(!representation.IsNone());

if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
@ -433,7 +433,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
}

// Stub never generated for objects that require access checks.
ASSERT(!transition->is_access_check_needed());
DCHECK(!transition->is_access_check_needed());

// Perform map transition for the receiver if necessary.
if (details.type() == FIELD &&
@ -464,7 +464,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
OMIT_SMI_CHECK);

if (details.type() == CONSTANT) {
ASSERT(value_reg.is(x0));
DCHECK(value_reg.is(x0));
__ Ret();
return;
}
@ -526,7 +526,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(

__ Bind(&exit);
// Return the value (register x0).
ASSERT(value_reg.is(x0));
DCHECK(value_reg.is(x0));
__ Ret();
}

@ -543,13 +543,13 @@ void NamedStoreHandlerCompiler::GenerateStoreField(
Label exit;

// Stub never generated for objects that require access checks.
ASSERT(!object->IsAccessCheckNeeded());
ASSERT(!object->IsJSGlobalProxy());
DCHECK(!object->IsAccessCheckNeeded());
DCHECK(!object->IsJSGlobalProxy());

FieldIndex index = lookup->GetFieldIndex();

Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
DCHECK(!representation.IsNone());
if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
@ -598,7 +598,7 @@ void NamedStoreHandlerCompiler::GenerateStoreField(
__ Str(temp_double, FieldMemOperand(scratch1, HeapNumber::kValueOffset));

// Return the value (register x0).
ASSERT(value_reg.is(x0));
DCHECK(value_reg.is(x0));
__ Ret();
return;
}
@ -653,7 +653,7 @@ void NamedStoreHandlerCompiler::GenerateStoreField(

__ Bind(&exit);
// Return the value (register x0).
ASSERT(value_reg.is(x0));
DCHECK(value_reg.is(x0));
__ Ret();
}

@ -665,8 +665,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));

// object_reg and holder_reg registers can alias.
ASSERT(!AreAliased(object_reg, scratch1, scratch2));
ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
DCHECK(!AreAliased(object_reg, scratch1, scratch2));
DCHECK(!AreAliased(holder_reg, scratch1, scratch2));

// Keep track of the current object in register reg.
Register reg = object_reg;
@ -686,7 +686,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(

// Only global objects and objects that do not require access
// checks are allowed in stubs.
ASSERT(current_map->IsJSGlobalProxyMap() ||
DCHECK(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());

prototype = handle(JSObject::cast(current_map->prototype()));
@ -694,10 +694,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
!current_map->IsJSGlobalObjectMap() &&
!current_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
DCHECK(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current.is_null() ||
DCHECK(current.is_null() ||
(current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound));

@ -757,7 +757,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
}

// Perform security check for access to the global object.
ASSERT(current_map->IsJSGlobalProxyMap() ||
DCHECK(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
@ -805,10 +805,10 @@ Register NamedLoadHandlerCompiler::CallbackFrontend(Register object_reg,
Register scratch2 = this->scratch2();
Register scratch3 = this->scratch3();
Register dictionary = this->scratch4();
ASSERT(!AreAliased(reg, scratch2, scratch3, dictionary));
DCHECK(!AreAliased(reg, scratch2, scratch3, dictionary));

if (!holder()->HasFastProperties()) {
ASSERT(!holder()->IsGlobalObject());
DCHECK(!holder()->IsGlobalObject());
// Load the properties dictionary.
__ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));

@ -856,7 +856,7 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {

void NamedLoadHandlerCompiler::GenerateLoadCallback(
Register reg, Handle<ExecutableAccessorInfo> callback) {
ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));

// Build ExecutableAccessorInfo::args_ list on the stack and push property
// name below the exit frame to make GC aware of them and store pointers to
@ -911,10 +911,10 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
LookupResult* lookup,
Handle<Name> name) {
ASSERT(!AreAliased(receiver(), this->name(),
DCHECK(!AreAliased(receiver(), this->name(),
scratch1(), scratch2(), scratch3()));
ASSERT(holder()->HasNamedInterceptor());
ASSERT(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());

// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added later.
@ -937,7 +937,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));

// Preserve the receiver register explicitly whenever it is different from
// the holder and it is needed should the interceptor return without any
@ -1003,11 +1003,11 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Register holder_reg = Frontend(receiver(), name);

// Stub never generated for non-global objects that require access checks.
ASSERT(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded());
DCHECK(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded());

// receiver() and holder_reg can alias.
ASSERT(!AreAliased(receiver(), scratch1(), scratch2(), value()));
ASSERT(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
DCHECK(!AreAliased(receiver(), scratch1(), scratch2(), value()));
DCHECK(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
__ Mov(scratch1(), Operand(callback));
__ Mov(scratch2(), Operand(name));
__ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
@ -1110,7 +1110,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, value, scratch1, scratch2, scratch3.
Register receiver = StoreIC::ReceiverRegister();
Register name = StoreIC::NameRegister();
ASSERT(x3.is(KeyedStoreIC::MapRegister()));
DCHECK(x3.is(KeyedStoreIC::MapRegister()));
static Register registers[] = { receiver, name, x3, x4, x5 };
return registers;
}
@ -1207,7 +1207,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,

// Polymorphic keyed stores may use the map register
Register map_reg = scratch1();
ASSERT(kind() != Code::KEYED_STORE_IC ||
DCHECK(kind() != Code::KEYED_STORE_IC ||
map_reg.is(KeyedStoreIC::MapRegister()));
__ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = types->length();
@ -1221,14 +1221,14 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
__ Cmp(map_reg, Operand(map));
__ B(ne, &try_next);
if (type->Is(HeapType::Number())) {
ASSERT(!number_case.is_unused());
DCHECK(!number_case.is_unused());
__ Bind(&number_case);
}
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
__ Bind(&try_next);
}
}
ASSERT(number_of_handled_maps != 0);
DCHECK(number_of_handled_maps != 0);

__ Bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
@ -1283,8 +1283,8 @@ void ElementHandlerCompiler::GenerateLoadDictionaryElement(
Register result = x0;
Register key = LoadIC::NameRegister();
Register receiver = LoadIC::ReceiverRegister();
ASSERT(receiver.is(x1));
ASSERT(key.is(x2));
DCHECK(receiver.is(x1));
DCHECK(key.is(x2));

__ JumpIfNotSmi(key, &miss);
__ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
@ -15,7 +15,7 @@ namespace internal {

int CountLeadingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
ASSERT((width == 32) || (width == 64));
DCHECK((width == 32) || (width == 64));
int count = 0;
uint64_t bit_test = 1UL << (width - 1);
while ((count < width) && ((bit_test & value) == 0)) {
@ -28,7 +28,7 @@ int CountLeadingZeros(uint64_t value, int width) {

int CountLeadingSignBits(int64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
ASSERT((width == 32) || (width == 64));
DCHECK((width == 32) || (width == 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
} else {
@ -39,7 +39,7 @@ int CountLeadingSignBits(int64_t value, int width) {

int CountTrailingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
ASSERT((width == 32) || (width == 64));
DCHECK((width == 32) || (width == 64));
int count = 0;
while ((count < width) && (((value >> count) & 1) == 0)) {
count++;
@ -51,7 +51,7 @@ int CountTrailingZeros(uint64_t value, int width) {
int CountSetBits(uint64_t value, int width) {
// TODO(jbramley): Would it be useful to allow other widths? The
// implementation already supports them.
ASSERT((width == 32) || (width == 64));
DCHECK((width == 32) || (width == 64));

// Mask out unused bits to ensure that they are not counted.
value &= (0xffffffffffffffffUL >> (64-width));
@ -84,7 +84,7 @@ uint64_t LargestPowerOf2Divisor(uint64_t value) {


int MaskToBit(uint64_t mask) {
ASSERT(CountSetBits(mask, 64) == 1);
DCHECK(CountSetBits(mask, 64) == 1);
return CountTrailingZeros(mask, 64);
}
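// --- Illustration (not part of this commit): a minimal standalone sketch of
// the contract these bit utilities are expected to satisfy, useful when
// reading the DCHECK conditions above. Names ending in "Sketch" are
// hypothetical stand-ins, not V8 identifiers.
#include <cassert>
#include <cstdint>

static int CountLeadingZerosSketch(uint64_t value, int width) {
  assert(width == 32 || width == 64);  // same precondition as the DCHECK
  int count = 0;
  uint64_t bit_test = UINT64_C(1) << (width - 1);
  while (count < width && (bit_test & value) == 0) {
    count++;
    bit_test >>= 1;
  }
  return count;
}

// Usage: CountLeadingZerosSketch(1, 32) == 31, and a single-bit mask such as
// 0x80 maps back to bit index 7 via the trailing-zero count (cf. MaskToBit,
// whose DCHECK requires exactly one set bit).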
@ -88,13 +88,13 @@ inline bool IsQuietNaN(T num) {

// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
ASSERT(std::isnan(num));
DCHECK(std::isnan(num));
return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
}


inline float ToQuietNaN(float num) {
ASSERT(std::isnan(num));
DCHECK(std::isnan(num));
return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
}
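// --- Illustration (not part of this commit): quieting a NaN just sets the
// most significant mantissa bit, so kDQuietNanMask is presumably bit 51 for
// IEEE-754 doubles (bit 22 for floats). A memcpy-based sketch of what the
// rawbits helpers are assumed to accomplish:
#include <cstdint>
#include <cstring>

static double ToQuietNaNSketch(double num) {
  // Precondition mirrors the DCHECK above: num must already be a NaN.
  uint64_t bits;
  std::memcpy(&bits, &num, sizeof(bits));
  bits |= UINT64_C(1) << 51;  // assumed quiet-NaN bit for doubles
  std::memcpy(&num, &bits, sizeof(num));
  return num;
}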
@ -150,7 +150,7 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
own_buffer_ = true;
} else {
// Use externally provided buffer instead.
ASSERT(buffer_size > 0);
DCHECK(buffer_size > 0);
own_buffer_ = false;
}
buffer_ = static_cast<byte*>(buffer);
@ -201,7 +201,7 @@ PredictableCodeSizeScope::~PredictableCodeSizeScope() {
#ifdef DEBUG
CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
: assembler_(assembler) {
ASSERT(CpuFeatures::IsSupported(f));
DCHECK(CpuFeatures::IsSupported(f));
old_enabled_ = assembler_->enabled_cpu_features();
uint64_t mask = static_cast<uint64_t>(1) << f;
// TODO(svenpanne) This special case below doesn't belong here!
@ -358,7 +358,7 @@ uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag);
uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
ASSERT(pc_jump > 0);
DCHECK(pc_jump > 0);
// Write kChunkBits size chunks of the pc_jump.
for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
byte b = pc_jump & kChunkMask;
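// --- Illustration (not part of this commit): the loop above emits pc_jump
// low-bits-first in groups of kChunkBits payload bits, a varint-style
// encoding. A decoder sketch with kChunkBits assumed to be 7 for
// concreteness (the real writer also tags the final chunk, omitted here):
#include <cstdint>
#include <vector>

static uint32_t DecodeChunkedSketch(const std::vector<uint8_t>& chunks) {
  const int kChunkBitsSketch = 7;  // assumption; V8 defines its own constant
  uint32_t value = 0;
  int shift = 0;
  for (uint8_t chunk : chunks) {  // each chunk carries kChunkBits payload bits
    value |= static_cast<uint32_t>(chunk) << shift;
    shift += kChunkBitsSketch;
  }
  return value;
}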
@ -432,9 +432,9 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#ifdef DEBUG
byte* begin_pos = pos_;
#endif
ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
DCHECK(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
DCHECK(rinfo->pc() - last_pc_ >= 0);
DCHECK(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
<= kMaxStandardNonCompactModes);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
@ -445,10 +445,10 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteTaggedPC(pc_delta, kCodeTargetTag);
ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
DCHECK(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
} else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
// Use signed delta-encoding for id.
ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
DCHECK(static_cast<int>(rinfo->data()) == rinfo->data());
int id_delta = static_cast<int>(rinfo->data()) - last_id_;
// Check if delta is small enough to fit in a tagged byte.
if (is_intn(id_delta, kSmallDataBits)) {
@ -462,7 +462,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
last_id_ = static_cast<int>(rinfo->data());
} else if (RelocInfo::IsPosition(rmode)) {
// Use signed delta-encoding for position.
ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
DCHECK(static_cast<int>(rinfo->data()) == rinfo->data());
int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
: kStatementPositionTag;
@ -480,23 +480,23 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
// Comments are normally not generated, so we use the costly encoding.
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedData(rinfo->data(), kCommentTag);
ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
DCHECK(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
} else if (RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedPoolData(static_cast<int>(rinfo->data()),
RelocInfo::IsConstPool(rmode) ? kConstPoolTag
: kVeneerPoolTag);
} else {
ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
DCHECK(rmode > RelocInfo::LAST_COMPACT_ENUM);
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
// For all other modes we simply use the mode as the extra tag.
// None of these modes need a data component.
ASSERT(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
DCHECK(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
WriteExtraTaggedPC(pc_delta, saved_mode);
}
last_pc_ = rinfo->pc();
#ifdef DEBUG
ASSERT(begin_pos - pos_ <= kMaxSize);
DCHECK(begin_pos - pos_ <= kMaxSize);
#endif
}

@ -602,7 +602,7 @@ inline void RelocIterator::ReadTaggedPosition() {


static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
ASSERT(tag == kNonstatementPositionTag ||
DCHECK(tag == kNonstatementPositionTag ||
tag == kStatementPositionTag);
return (tag == kNonstatementPositionTag) ?
RelocInfo::POSITION :
@ -611,7 +611,7 @@ static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {


void RelocIterator::next() {
ASSERT(!done());
DCHECK(!done());
// Basically, do the opposite of RelocInfoWriter::Write.
// Reading of data is as far as possible avoided for unwanted modes,
// but we must always update the pc.
@ -637,7 +637,7 @@ void RelocIterator::next() {
} else {
// Compact encoding is never used for comments,
// so it must be a position.
ASSERT(locatable_tag == kNonstatementPositionTag ||
DCHECK(locatable_tag == kNonstatementPositionTag ||
locatable_tag == kStatementPositionTag);
if (mode_mask_ & RelocInfo::kPositionMask) {
ReadTaggedPosition();
@ -645,7 +645,7 @@ void RelocIterator::next() {
}
}
} else {
ASSERT(tag == kDefaultTag);
DCHECK(tag == kDefaultTag);
int extra_tag = GetExtraTag();
if (extra_tag == kPCJumpExtraTag) {
if (GetTopTag() == kVariableLengthPCJumpTopTag) {
@ -662,7 +662,7 @@ void RelocIterator::next() {
}
Advance(kIntSize);
} else if (locatable_tag != kCommentTag) {
ASSERT(locatable_tag == kNonstatementPositionTag ||
DCHECK(locatable_tag == kNonstatementPositionTag ||
locatable_tag == kStatementPositionTag);
if (mode_mask_ & RelocInfo::kPositionMask) {
AdvanceReadPosition();
@ -671,7 +671,7 @@ void RelocIterator::next() {
Advance(kIntSize);
}
} else {
ASSERT(locatable_tag == kCommentTag);
DCHECK(locatable_tag == kCommentTag);
if (SetMode(RelocInfo::COMMENT)) {
AdvanceReadData();
return;
@ -680,7 +680,7 @@ void RelocIterator::next() {
}
} else if (extra_tag == kPoolExtraTag) {
int pool_type = GetTopTag();
ASSERT(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag);
DCHECK(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag);
RelocInfo::Mode rmode = (pool_type == kConstPoolTag) ?
RelocInfo::CONST_POOL : RelocInfo::VENEER_POOL;
if (SetMode(rmode)) {
@ -891,7 +891,7 @@ void RelocInfo::Verify(Isolate* isolate) {
UNREACHABLE();
break;
case CODE_AGE_SEQUENCE:
ASSERT(Code::IsYoungSequence(isolate, pc_) || code_age_stub()->IsCode());
DCHECK(Code::IsYoungSequence(isolate, pc_) || code_age_stub()->IsCode());
break;
}
}
@ -1403,14 +1403,14 @@ ExternalReference ExternalReference::math_log_double_function(


ExternalReference ExternalReference::math_exp_constants(int constant_index) {
ASSERT(math_exp_data_initialized);
DCHECK(math_exp_data_initialized);
return ExternalReference(
reinterpret_cast<void*>(math_exp_constants_array + constant_index));
}


ExternalReference ExternalReference::math_exp_log_table() {
ASSERT(math_exp_data_initialized);
DCHECK(math_exp_data_initialized);
return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
}

@ -1427,7 +1427,7 @@ ExternalReference ExternalReference::ForDeoptEntry(Address entry) {


ExternalReference ExternalReference::cpu_features() {
ASSERT(CpuFeatures::initialized_);
DCHECK(CpuFeatures::initialized_);
return ExternalReference(&CpuFeatures::supported_);
}

@ -1533,7 +1533,7 @@ ExternalReference ExternalReference::power_double_int_function(


bool EvalComparison(Token::Value op, double op1, double op2) {
ASSERT(Token::IsCompareOp(op));
DCHECK(Token::IsCompareOp(op));
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: return (op1 == op2);
@ -1569,8 +1569,8 @@ ExternalReference ExternalReference::debug_step_in_fp_address(


void PositionsRecorder::RecordPosition(int pos) {
ASSERT(pos != RelocInfo::kNoPosition);
ASSERT(pos >= 0);
DCHECK(pos != RelocInfo::kNoPosition);
DCHECK(pos >= 0);
state_.current_position = pos;
LOG_CODE_EVENT(assembler_->isolate(),
CodeLinePosInfoAddPositionEvent(jit_handler_data_,
@ -1580,8 +1580,8 @@ void PositionsRecorder::RecordPosition(int pos) {


void PositionsRecorder::RecordStatementPosition(int pos) {
ASSERT(pos != RelocInfo::kNoPosition);
ASSERT(pos >= 0);
DCHECK(pos != RelocInfo::kNoPosition);
DCHECK(pos >= 0);
state_.current_statement_position = pos;
LOG_CODE_EVENT(assembler_->isolate(),
CodeLinePosInfoAddStatementPositionEvent(
@ -1620,7 +1620,7 @@ bool PositionsRecorder::WriteRecordedPositions() {


MultiplierAndShift::MultiplierAndShift(int32_t d) {
ASSERT(d <= -2 || 2 <= d);
DCHECK(d <= -2 || 2 <= d);
const uint32_t two31 = 0x80000000;
uint32_t ad = Abs(d);
uint32_t t = two31 + (uint32_t(d) >> 31);
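// --- Illustration (not part of this commit): MultiplierAndShift computes a
// "magic number" so that division by a constant becomes a multiply plus a
// shift (Hacker's Delight, ch. 10), which is why the DCHECK excludes
// -1..1. For d == 5 the well-known pair is multiplier 0x66666667, shift 1;
// this sketch applies it for non-negative dividends only:
#include <cassert>
#include <cstdint>

static int32_t DivideBy5Sketch(int32_t n) {
  assert(n >= 0);  // sign correction for negative n is omitted in this sketch
  int64_t product = static_cast<int64_t>(n) * INT64_C(0x66666667);
  return static_cast<int32_t>(product >> (32 + 1));
}
// DivideBy5Sketch(37) == 7, matching 37 / 5.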
@ -183,7 +183,7 @@ class CpuFeatures : public AllStatic {
static inline bool SupportsCrankshaft();

static inline unsigned cache_line_size() {
ASSERT(cache_line_size_ != 0);
DCHECK(cache_line_size_ != 0);
return cache_line_size_;
}

@ -223,8 +223,8 @@ class Label BASE_EMBEDDED {
}

INLINE(~Label()) {
ASSERT(!is_linked());
ASSERT(!is_near_linked());
DCHECK(!is_linked());
DCHECK(!is_near_linked());
}

INLINE(void Unuse()) { pos_ = 0; }
@ -254,15 +254,15 @@ class Label BASE_EMBEDDED {

void bind_to(int pos) {
pos_ = -pos - 1;
ASSERT(is_bound());
DCHECK(is_bound());
}
void link_to(int pos, Distance distance = kFar) {
if (distance == kNear) {
near_link_pos_ = pos + 1;
ASSERT(is_near_linked());
DCHECK(is_near_linked());
} else {
pos_ = pos + 1;
ASSERT(is_linked());
DCHECK(is_linked());
}
}

@ -384,7 +384,7 @@ class RelocInfo {
mode <= LAST_REAL_RELOC_MODE;
}
static inline bool IsPseudoRelocMode(Mode mode) {
ASSERT(!IsRealRelocMode(mode));
DCHECK(!IsRealRelocMode(mode));
return mode >= FIRST_PSEUDO_RELOC_MODE &&
mode <= LAST_PSEUDO_RELOC_MODE;
}
@ -681,7 +681,7 @@ class RelocIterator: public Malloced {

// Return pointer valid until next next().
RelocInfo* rinfo() {
ASSERT(!done());
DCHECK(!done());
return &rinfo_;
}

@ -954,7 +954,7 @@ class ExternalReference BASE_EMBEDDED {
static void set_redirector(Isolate* isolate,
ExternalReferenceRedirector* redirector) {
// We can't stack them.
ASSERT(isolate->external_reference_redirector() == NULL);
DCHECK(isolate->external_reference_redirector() == NULL);
isolate->set_external_reference_redirector(
reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
}
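// --- Illustration (not part of this commit): Label packs its state into the
// single pos_ field, which is what bind_to/link_to above manipulate: 0 means
// unused, pos + 1 means linked at pos, and -pos - 1 means bound at pos, so
// the DCHECK'd predicates reduce to sign tests. A condensed sketch:
#include <cassert>

struct LabelStateSketch {
  int pos_ = 0;                                // 0 <=> unused
  bool is_bound() const { return pos_ < 0; }   // bound at -pos_ - 1
  bool is_linked() const { return pos_ > 0; }  // linked at pos_ - 1
  void bind_to(int pos) { pos_ = -pos - 1; assert(is_bound()); }
  void link_to(int pos) { pos_ = pos + 1; assert(is_linked()); }
};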
@ -149,7 +149,7 @@ bool AstValue::IsPropertyName() const {
bool AstValue::BooleanValue() const {
switch (type_) {
case STRING:
ASSERT(string_ != NULL);
DCHECK(string_ != NULL);
return !string_->IsEmpty();
case SYMBOL:
UNREACHABLE();
@ -179,9 +179,9 @@ bool AstValue::BooleanValue() const {
void AstValue::Internalize(Isolate* isolate) {
switch (type_) {
case STRING:
ASSERT(string_ != NULL);
DCHECK(string_ != NULL);
// Strings are already internalized.
ASSERT(!string_->string().is_null());
DCHECK(!string_->string().is_null());
break;
case SYMBOL:
value_ = Object::GetProperty(
@ -202,7 +202,7 @@ void AstValue::Internalize(Isolate* isolate) {
}
break;
case STRING_ARRAY: {
ASSERT(strings_ != NULL);
DCHECK(strings_ != NULL);
Factory* factory = isolate->factory();
int len = strings_->length();
Handle<FixedArray> elements = factory->NewFixedArray(len, TENURED);
@ -210,7 +210,7 @@ void AstValue::Internalize(Isolate* isolate) {
const AstRawString* string = (*strings_)[i];
Handle<Object> element = string->string();
// Strings are already internalized.
ASSERT(!element.is_null());
DCHECK(!element.is_null());
elements->set(i, *element);
}
value_ =
@ -252,7 +252,7 @@ const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
if (content.IsAscii()) {
return GetOneByteString(content.ToOneByteVector());
}
ASSERT(content.IsTwoByte());
DCHECK(content.IsTwoByte());
return GetTwoByteString(content.ToUC16Vector());
}

@ -289,7 +289,7 @@ void AstValueFactory::Internalize(Isolate* isolate) {

const AstValue* AstValueFactory::NewString(const AstRawString* string) {
AstValue* value = new (zone_) AstValue(string);
ASSERT(string != NULL);
DCHECK(string != NULL);
if (isolate_) {
value->Internalize(isolate_);
}
@ -52,7 +52,7 @@ class AstString : public ZoneObject {

// This function can be called after internalizing.
V8_INLINE Handle<String> string() const {
ASSERT(!string_.is_null());
DCHECK(!string_.is_null());
return string_;
}

@ -177,7 +177,7 @@ class AstValue : public ZoneObject {
if (type_ == STRING) {
return string_->string();
}
ASSERT(!value_.is_null());
DCHECK(!value_.is_null());
return value_;
}

@ -203,7 +203,7 @@ class AstValue : public ZoneObject {
explicit AstValue(double n) : type_(NUMBER) { number_ = n; }

AstValue(Type t, int i) : type_(t) {
ASSERT(type_ == SMI);
DCHECK(type_ == SMI);
smi_ = i;
}

@ -214,7 +214,7 @@ class AstValue : public ZoneObject {
}

explicit AstValue(Type t) : type_(t) {
ASSERT(t == NULL_TYPE || t == UNDEFINED || t == THE_HOLE);
DCHECK(t == NULL_TYPE || t == UNDEFINED || t == THE_HOLE);
}

Type type_;

20 src/ast.cc
@ -87,10 +87,10 @@ VariableProxy::VariableProxy(Zone* zone,


void VariableProxy::BindTo(Variable* var) {
ASSERT(var_ == NULL); // must be bound only once
ASSERT(var != NULL); // must bind
ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
ASSERT((is_this() && var->is_this()) || name_ == var->raw_name());
DCHECK(var_ == NULL); // must be bound only once
DCHECK(var != NULL); // must bind
DCHECK(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
DCHECK((is_this() && var->is_this()) || name_ == var->raw_name());
// Ideally CONST-ness should match. However, this is very hard to achieve
// because we don't know the exact semantics of conflicting (const and
// non-const) multiple variable declarations, const vars introduced via
@ -402,8 +402,8 @@ void MaterializedLiteral::BuildConstants(Isolate* isolate) {
if (IsObjectLiteral()) {
return AsObjectLiteral()->BuildConstantProperties(isolate);
}
ASSERT(IsRegExpLiteral());
ASSERT(depth() >= 1); // Depth should be initialized.
DCHECK(IsRegExpLiteral());
DCHECK(depth() >= 1); // Depth should be initialized.
}


@ -593,7 +593,7 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
LookupResult* lookup) {
target_ = Handle<JSFunction>::null();
cell_ = Handle<Cell>::null();
ASSERT(lookup->IsFound() &&
DCHECK(lookup->IsFound() &&
lookup->type() == NORMAL &&
lookup->holder() == *global);
cell_ = Handle<Cell>(global->GetPropertyCell(lookup));
@ -960,7 +960,7 @@ OStream& RegExpTree::Print(OStream& os, Zone* zone) { // NOLINT

RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
: alternatives_(alternatives) {
ASSERT(alternatives->length() > 1);
DCHECK(alternatives->length() > 1);
RegExpTree* first_alternative = alternatives->at(0);
min_match_ = first_alternative->min_match();
max_match_ = first_alternative->max_match();
@ -982,7 +982,7 @@ static int IncreaseBy(int previous, int increase) {

RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
: nodes_(nodes) {
ASSERT(nodes->length() > 1);
DCHECK(nodes->length() > 1);
min_match_ = 0;
max_match_ = 0;
for (int i = 0; i < nodes->length(); i++) {
@ -1127,7 +1127,7 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {

Handle<String> Literal::ToString() {
if (value_->IsString()) return value_->AsString()->string();
ASSERT(value_->IsNumber());
DCHECK(value_->IsNumber());
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str;

54 src/ast.h
@ -428,7 +428,7 @@ class BreakableStatement : public Statement {
breakable_type_(breakable_type),
entry_id_(GetNextId(zone)),
exit_id_(GetNextId(zone)) {
ASSERT(labels == NULL || labels->length() > 0);
DCHECK(labels == NULL || labels->length() > 0);
}


@ -501,7 +501,7 @@ class Declaration : public AstNode {
proxy_(proxy),
mode_(mode),
scope_(scope) {
ASSERT(IsDeclaredVariableMode(mode));
DCHECK(IsDeclaredVariableMode(mode));
}

private:
@ -552,8 +552,8 @@ class FunctionDeclaration V8_FINAL : public Declaration {
: Declaration(zone, proxy, mode, scope, pos),
fun_(fun) {
// At the moment there are no "const functions" in JavaScript...
ASSERT(mode == VAR || mode == LET);
ASSERT(fun != NULL);
DCHECK(mode == VAR || mode == LET);
DCHECK(fun != NULL);
}

private:
@ -925,7 +925,7 @@ class ForInStatement V8_FINAL : public ForEachStatement,
virtual void SetFirstFeedbackSlot(int slot) { for_in_feedback_slot_ = slot; }

int ForInFeedbackSlot() {
ASSERT(for_in_feedback_slot_ != kInvalidFeedbackSlot);
DCHECK(for_in_feedback_slot_ != kInvalidFeedbackSlot);
return for_in_feedback_slot_;
}

@ -1346,12 +1346,12 @@ class Literal V8_FINAL : public Expression {
}

Handle<String> AsPropertyName() {
ASSERT(IsPropertyName());
DCHECK(IsPropertyName());
return Handle<String>::cast(value());
}

const AstRawString* AsRawPropertyName() {
ASSERT(IsPropertyName());
DCHECK(IsPropertyName());
return value_->AsString();
}

@ -1401,7 +1401,7 @@ class MaterializedLiteral : public Expression {

int depth() const {
// only callable after initialization.
ASSERT(depth_ >= 1);
DCHECK(depth_ >= 1);
return depth_;
}

@ -1421,7 +1421,7 @@ class MaterializedLiteral : public Expression {
friend class CompileTimeValue;

void set_depth(int depth) {
ASSERT(depth >= 1);
DCHECK(depth >= 1);
depth_ = depth;
}

@ -1859,12 +1859,12 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
}

int CallNewFeedbackSlot() {
ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
DCHECK(callnew_feedback_slot_ != kInvalidFeedbackSlot);
return callnew_feedback_slot_;
}
int AllocationSiteFeedbackSlot() {
ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
ASSERT(FLAG_pretenuring_call_new);
DCHECK(callnew_feedback_slot_ != kInvalidFeedbackSlot);
DCHECK(FLAG_pretenuring_call_new);
return callnew_feedback_slot_ + 1;
}

@ -1930,7 +1930,7 @@ class CallRuntime V8_FINAL : public Expression, public FeedbackSlotInterface {
}

int CallRuntimeFeedbackSlot() {
ASSERT(!is_jsruntime() ||
DCHECK(!is_jsruntime() ||
callruntime_feedback_slot_ != kInvalidFeedbackSlot);
return callruntime_feedback_slot_;
}
@ -1979,7 +1979,7 @@ class UnaryOperation V8_FINAL : public Expression {
expression_(expression),
materialize_true_id_(GetNextId(zone)),
materialize_false_id_(GetNextId(zone)) {
ASSERT(Token::IsUnaryOp(op));
DCHECK(Token::IsUnaryOp(op));
}

private:
@ -2027,7 +2027,7 @@ class BinaryOperation V8_FINAL : public Expression {
left_(left),
right_(right),
right_id_(GetNextId(zone)) {
ASSERT(Token::IsBinaryOp(op));
DCHECK(Token::IsBinaryOp(op));
}

private:
@ -2135,7 +2135,7 @@ class CompareOperation V8_FINAL : public Expression {
left_(left),
right_(right),
combined_type_(Type::None(zone)) {
ASSERT(Token::IsCompareOp(op));
DCHECK(Token::IsCompareOp(op));
}

private:
@ -2225,7 +2225,7 @@ class Assignment V8_FINAL : public Expression {

template<class Visitor>
void Init(Zone* zone, AstNodeFactory<Visitor>* factory) {
ASSERT(Token::IsAssignmentOp(op_));
DCHECK(Token::IsAssignmentOp(op_));
if (is_compound()) {
binary_operation_ = factory->NewBinaryOperation(
binary_op(), target_, value_, position() + 1);
@ -2265,11 +2265,11 @@ class Yield V8_FINAL : public Expression, public FeedbackSlotInterface {
// locates the catch handler in the handler table, and is equivalent to
// TryCatchStatement::index().
int index() const {
ASSERT(yield_kind() == DELEGATING);
DCHECK(yield_kind() == DELEGATING);
return index_;
}
void set_index(int index) {
ASSERT(yield_kind() == DELEGATING);
DCHECK(yield_kind() == DELEGATING);
index_ = index;
}

@ -2282,17 +2282,17 @@ class Yield V8_FINAL : public Expression, public FeedbackSlotInterface {
}

int KeyedLoadFeedbackSlot() {
ASSERT(yield_first_feedback_slot_ != kInvalidFeedbackSlot);
DCHECK(yield_first_feedback_slot_ != kInvalidFeedbackSlot);
return yield_first_feedback_slot_;
}

int DoneFeedbackSlot() {
ASSERT(yield_first_feedback_slot_ != kInvalidFeedbackSlot);
DCHECK(yield_first_feedback_slot_ != kInvalidFeedbackSlot);
return yield_first_feedback_slot_ + 1;
}

int ValueFeedbackSlot() {
ASSERT(yield_first_feedback_slot_ != kInvalidFeedbackSlot);
DCHECK(yield_first_feedback_slot_ != kInvalidFeedbackSlot);
return yield_first_feedback_slot_ + 2;
}

@ -2402,7 +2402,7 @@ class FunctionLiteral V8_FINAL : public Expression {

Handle<String> inferred_name() const {
if (!inferred_name_.is_null()) {
ASSERT(raw_inferred_name_ == NULL);
DCHECK(raw_inferred_name_ == NULL);
return inferred_name_;
}
if (raw_inferred_name_ != NULL) {
@ -2414,16 +2414,16 @@ class FunctionLiteral V8_FINAL : public Expression {

// Only one of {set_inferred_name, set_raw_inferred_name} should be called.
void set_inferred_name(Handle<String> inferred_name) {
ASSERT(!inferred_name.is_null());
DCHECK(!inferred_name.is_null());
inferred_name_ = inferred_name;
ASSERT(raw_inferred_name_== NULL || raw_inferred_name_->IsEmpty());
DCHECK(raw_inferred_name_== NULL || raw_inferred_name_->IsEmpty());
raw_inferred_name_ = NULL;
}

void set_raw_inferred_name(const AstString* raw_inferred_name) {
ASSERT(raw_inferred_name != NULL);
DCHECK(raw_inferred_name != NULL);
raw_inferred_name_ = raw_inferred_name;
ASSERT(inferred_name_.is_null());
DCHECK(inferred_name_.is_null());
inferred_name_ = Handle<String>();
}
@ -163,7 +163,7 @@ class CPUInfo V8_FINAL {
// string that must be freed by the caller using delete[].
// Return NULL if not found.
char* ExtractField(const char* field) const {
ASSERT(field != NULL);
DCHECK(field != NULL);

// Look for first field occurence, and ensure it starts the line.
size_t fieldlen = strlen(field);
@ -441,7 +441,7 @@ CPU::CPU() : stepping_(0),
// QNX doesn't say if Thumb2 is available.
// Assume false for the architectures older than ARMv7.
}
ASSERT(architecture_ >= 6);
DCHECK(architecture_ >= 6);
has_fpu_ = (cpu_flags & CPU_FLAG_FPU) != 0;
has_vfp_ = has_fpu_;
if (cpu_flags & ARM_CPU_FLAG_NEON) {
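// --- Illustration (not part of this commit): the comments above describe
// ExtractField's contract (scan a cpuinfo dump for a "field : value" line,
// return a heap copy of the value, NULL if absent). A rough standalone
// sketch of that contract; everything here is hypothetical, not V8 code:
#include <cstddef>
#include <cstring>

static char* ExtractFieldSketch(const char* data, const char* field) {
  const char* p = std::strstr(data, field);  // simplification: first match
  if (p == NULL) return NULL;
  p = std::strchr(p, ':');
  if (p == NULL) return NULL;
  ++p;
  while (*p == ' ' || *p == '\t') ++p;       // skip the separator whitespace
  size_t len = std::strcspn(p, "\n");
  char* result = new char[len + 1];          // caller frees with delete[]
  std::memcpy(result, p, len);
  result[len] = '\0';
  return result;
}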
@ -190,27 +190,27 @@ void DumpBacktrace();
} } // namespace v8::base


// The ASSERT macro is equivalent to CHECK except that it only
// The DCHECK macro is equivalent to CHECK except that it only
// generates code in debug builds.
#ifdef DEBUG
#define ASSERT_RESULT(expr) CHECK(expr)
#define ASSERT(condition) CHECK(condition)
#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
#define DCHECK_RESULT(expr) CHECK(expr)
#define DCHECK(condition) CHECK(condition)
#define DCHECK_EQ(v1, v2) CHECK_EQ(v1, v2)
#define DCHECK_NE(v1, v2) CHECK_NE(v1, v2)
#define DCHECK_GE(v1, v2) CHECK_GE(v1, v2)
#define DCHECK_LT(v1, v2) CHECK_LT(v1, v2)
#define DCHECK_LE(v1, v2) CHECK_LE(v1, v2)
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
#define ASSERT_EQ(v1, v2) ((void) 0)
#define ASSERT_NE(v1, v2) ((void) 0)
#define ASSERT_GE(v1, v2) ((void) 0)
#define ASSERT_LT(v1, v2) ((void) 0)
#define ASSERT_LE(v1, v2) ((void) 0)
#define DCHECK_RESULT(expr) (expr)
#define DCHECK(condition) ((void) 0)
#define DCHECK_EQ(v1, v2) ((void) 0)
#define DCHECK_NE(v1, v2) ((void) 0)
#define DCHECK_GE(v1, v2) ((void) 0)
#define DCHECK_LT(v1, v2) ((void) 0)
#define DCHECK_LE(v1, v2) ((void) 0)
#endif

#define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p)
#define DCHECK_NOT_NULL(p) DCHECK_NE(NULL, p)

// "Extra checks" are lightweight checks that are enabled in some release
// builds.
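// --- Illustration (not part of this commit): after the rename, debug-only
// invariants are spelled DCHECK* and compile to ((void) 0) in release
// builds, while CHECK* stays active in all builds. Hypothetical usage:
//
//   void Enqueue(Queue* q, Item* item) {
//     DCHECK_NOT_NULL(item);     // debug builds: CHECK_NE(NULL, item)
//                                // release builds: no code generated
//     DCHECK_LE(0, q->size());   // cheap sanity check, free in release
//     CHECK(q->capacity() > 0);  // hard invariant, kept in every build
//   }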
@ -195,7 +195,7 @@ inline T AddressFrom(intptr_t x) {
// Return the largest multiple of m which is <= x.
template <typename T>
inline T RoundDown(T x, intptr_t m) {
ASSERT(IsPowerOf2(m));
DCHECK(IsPowerOf2(m));
return AddressFrom<T>(OffsetFrom(x) & -m);
}

@ -212,7 +212,7 @@ inline T RoundUp(T x, intptr_t m) {
// sizeof(*pointer) might not be 1.
template<class T>
T AlignUp(T pointer, size_t alignment) {
ASSERT(sizeof(pointer) == sizeof(uintptr_t));
DCHECK(sizeof(pointer) == sizeof(uintptr_t));
uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
}
@ -229,7 +229,7 @@ inline bool IsAligned(T value, U alignment) {
// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
// figure 3-3, page 48, where the function is called clp2.
inline uint32_t RoundUpToPowerOf2(uint32_t x) {
ASSERT(x <= 0x80000000u);
DCHECK(x <= 0x80000000u);
x = x - 1;
x = x | (x >> 1);
x = x | (x >> 2);
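// --- Illustration (not part of this commit): the clp2 trick smears the top
// set bit of x - 1 into every lower position, then adds one. A complete
// self-contained sketch with a worked example:
#include <cstdint>

static uint32_t RoundUpToPowerOf2Sketch(uint32_t x) {
  x = x - 1;  // e.g. 37 - 1 = 36 = 0b100100
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;   // all bits below the top set bit are now 1: 0b111111 = 63
  return x + 1;   // RoundUpToPowerOf2Sketch(37) == 64
}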
@ -24,37 +24,37 @@ ConditionVariable::ConditionVariable() {
// source for pthread_cond_timedwait() to use the monotonic clock.
pthread_condattr_t attr;
int result = pthread_condattr_init(&attr);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
result = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
result = pthread_cond_init(&native_handle_, &attr);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
result = pthread_condattr_destroy(&attr);
#else
int result = pthread_cond_init(&native_handle_, NULL);
#endif
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}


ConditionVariable::~ConditionVariable() {
int result = pthread_cond_destroy(&native_handle_);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}


void ConditionVariable::NotifyOne() {
int result = pthread_cond_signal(&native_handle_);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}


void ConditionVariable::NotifyAll() {
int result = pthread_cond_broadcast(&native_handle_);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}

@ -62,7 +62,7 @@ void ConditionVariable::NotifyAll() {
void ConditionVariable::Wait(Mutex* mutex) {
mutex->AssertHeldAndUnmark();
int result = pthread_cond_wait(&native_handle_, &mutex->native_handle());
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
mutex->AssertUnheldAndMark();
}
@ -76,8 +76,8 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
// Mac OS X provides pthread_cond_timedwait_relative_np(), which does
// not depend on the real time clock, which is what you really WANT here!
ts = rel_time.ToTimespec();
ASSERT_GE(ts.tv_sec, 0);
ASSERT_GE(ts.tv_nsec, 0);
DCHECK_GE(ts.tv_sec, 0);
DCHECK_GE(ts.tv_nsec, 0);
result = pthread_cond_timedwait_relative_np(
&native_handle_, &mutex->native_handle(), &ts);
#else
@ -89,14 +89,14 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
// On Free/Net/OpenBSD and Linux with glibc we can change the time
// source for pthread_cond_timedwait() to use the monotonic clock.
result = clock_gettime(CLOCK_MONOTONIC, &ts);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
Time now = Time::FromTimespec(ts);
#else
// The timeout argument to pthread_cond_timedwait() is in absolute time.
Time now = Time::NowFromSystemTime();
#endif
Time end_time = now + rel_time;
ASSERT_GE(end_time, now);
DCHECK_GE(end_time, now);
ts = end_time.ToTimespec();
result = pthread_cond_timedwait(
&native_handle_, &mutex->native_handle(), &ts);
@ -105,7 +105,7 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
if (result == ETIMEDOUT) {
return false;
}
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
return true;
}

@ -113,12 +113,12 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {

struct ConditionVariable::Event {
Event() : handle_(::CreateEventA(NULL, true, false, NULL)) {
ASSERT(handle_ != NULL);
DCHECK(handle_ != NULL);
}

~Event() {
BOOL ok = ::CloseHandle(handle_);
ASSERT(ok);
DCHECK(ok);
USE(ok);
}

@ -127,7 +127,7 @@ struct ConditionVariable::Event {
if (result == WAIT_OBJECT_0) {
return true;
}
ASSERT(result == WAIT_TIMEOUT);
DCHECK(result == WAIT_TIMEOUT);
return false;
}

@ -139,7 +139,7 @@ struct ConditionVariable::Event {


ConditionVariable::NativeHandle::~NativeHandle() {
ASSERT(waitlist_ == NULL);
DCHECK(waitlist_ == NULL);

while (freelist_ != NULL) {
Event* event = freelist_;
@ -165,7 +165,7 @@ ConditionVariable::Event* ConditionVariable::NativeHandle::Pre() {
#ifdef DEBUG
// The event must not be on the wait list.
for (Event* we = waitlist_; we != NULL; we = we->next_) {
ASSERT_NE(event, we);
DCHECK_NE(event, we);
}
#endif

@ -182,7 +182,7 @@ void ConditionVariable::NativeHandle::Post(Event* event, bool result) {

// Remove the event from the wait list.
for (Event** wep = &waitlist_;; wep = &(*wep)->next_) {
ASSERT_NE(NULL, *wep);
DCHECK_NE(NULL, *wep);
if (*wep == event) {
*wep = event->next_;
break;
@ -192,13 +192,13 @@ void ConditionVariable::NativeHandle::Post(Event* event, bool result) {
#ifdef DEBUG
// The event must not be on the free list.
for (Event* fe = freelist_; fe != NULL; fe = fe->next_) {
ASSERT_NE(event, fe);
DCHECK_NE(event, fe);
}
#endif

// Reset the event.
BOOL ok = ::ResetEvent(event->handle_);
ASSERT(ok);
DCHECK(ok);
USE(ok);

// Insert the event into the free list.
@ -208,7 +208,7 @@ void ConditionVariable::NativeHandle::Post(Event* event, bool result) {
// Forward signals delivered after the timeout to the next waiting event.
if (!result && event->notified_ && waitlist_ != NULL) {
ok = ::SetEvent(waitlist_->handle_);
ASSERT(ok);
DCHECK(ok);
USE(ok);
waitlist_->notified_ = true;
}
@ -234,14 +234,14 @@ void ConditionVariable::NotifyOne() {
continue;
}
int priority = GetThreadPriority(event->thread_);
ASSERT_NE(THREAD_PRIORITY_ERROR_RETURN, priority);
DCHECK_NE(THREAD_PRIORITY_ERROR_RETURN, priority);
if (priority >= highest_priority) {
highest_priority = priority;
highest_event = event;
}
}
if (highest_event != NULL) {
ASSERT(!highest_event->notified_);
DCHECK(!highest_event->notified_);
::SetEvent(highest_event->handle_);
highest_event->notified_ = true;
}
@ -277,7 +277,7 @@ void ConditionVariable::Wait(Mutex* mutex) {
mutex->Lock();

// Release the wait event (we must have been notified).
ASSERT(event->notified_);
DCHECK(event->notified_);
native_handle_.Post(event, true);
}

@ -311,7 +311,7 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
mutex->Lock();

// Release the wait event.
ASSERT(!result || event->notified_);
DCHECK(!result || event->notified_);
native_handle_.Post(event, result);

return result;
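// --- Illustration (not part of this commit): typical use of this interface
// pairs WaitFor() with a predicate re-check, since condition waits can wake
// spuriously. A hedged sketch against the API visible above (WorkQueue is a
// hypothetical caller-side type):
//
//   bool WaitForWork(Mutex* mutex, ConditionVariable* cv, WorkQueue* queue,
//                    const TimeDelta& timeout) {
//     // Caller holds |mutex|. Re-test the predicate on every wakeup.
//     while (queue->empty()) {
//       if (!cv->WaitFor(mutex, timeout)) return false;  // timed out
//     }
//     return true;
//   }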
@ -21,29 +21,29 @@ class ElapsedTimer V8_FINAL {
// |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|.
// This method must not be called on an already started timer.
void Start() {
ASSERT(!IsStarted());
DCHECK(!IsStarted());
start_ticks_ = Now();
#ifdef DEBUG
started_ = true;
#endif
ASSERT(IsStarted());
DCHECK(IsStarted());
}

// Stops this timer. Must not be called on a timer that was not
// started before.
void Stop() {
ASSERT(IsStarted());
DCHECK(IsStarted());
start_ticks_ = TimeTicks();
#ifdef DEBUG
started_ = false;
#endif
ASSERT(!IsStarted());
DCHECK(!IsStarted());
}

// Returns |true| if this timer was started previously.
bool IsStarted() const {
ASSERT(started_ || start_ticks_.IsNull());
ASSERT(!started_ || !start_ticks_.IsNull());
DCHECK(started_ || start_ticks_.IsNull());
DCHECK(!started_ || !start_ticks_.IsNull());
return !start_ticks_.IsNull();
}

@ -53,21 +53,21 @@ class ElapsedTimer V8_FINAL {
// avoiding the need to obtain the clock value twice. It may only be called
// on a previously started timer.
TimeDelta Restart() {
ASSERT(IsStarted());
DCHECK(IsStarted());
TimeTicks ticks = Now();
TimeDelta elapsed = ticks - start_ticks_;
ASSERT(elapsed.InMicroseconds() >= 0);
DCHECK(elapsed.InMicroseconds() >= 0);
start_ticks_ = ticks;
ASSERT(IsStarted());
DCHECK(IsStarted());
return elapsed;
}

// Returns the time elapsed since the previous start. This method may only
// be called on a previously started timer.
TimeDelta Elapsed() const {
ASSERT(IsStarted());
DCHECK(IsStarted());
TimeDelta elapsed = Now() - start_ticks_;
ASSERT(elapsed.InMicroseconds() >= 0);
DCHECK(elapsed.InMicroseconds() >= 0);
return elapsed;
}

@ -75,14 +75,14 @@ class ElapsedTimer V8_FINAL {
// previous start, or |false| if not. This method may only be called on
// a previously started timer.
bool HasExpired(TimeDelta time_delta) const {
ASSERT(IsStarted());
DCHECK(IsStarted());
return Elapsed() >= time_delta;
}

private:
static V8_INLINE TimeTicks Now() {
TimeTicks now = TimeTicks::HighResolutionNow();
ASSERT(!now.IsNull());
DCHECK(!now.IsNull());
return now;
}
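// --- Illustration (not part of this commit): the Start()/Elapsed()/
// HasExpired() protocol that the DCHECKs above enforce, as hypothetical
// usage (TimeDelta::FromMilliseconds is assumed from the companion time
// header):
//
//   ElapsedTimer timer;
//   timer.Start();                          // DCHECK(!IsStarted()) passes
//   DoSomeWork();
//   if (timer.HasExpired(TimeDelta::FromMilliseconds(10))) {
//     // budget exceeded; Elapsed() would report the same measurement
//   }
//   timer.Stop();                           // only legal while started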
@ -17,17 +17,17 @@ static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
// Use an error checking mutex in debug mode.
pthread_mutexattr_t attr;
result = pthread_mutexattr_init(&attr);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
result = pthread_mutex_init(mutex, &attr);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
result = pthread_mutexattr_destroy(&attr);
#else
// Use a fast mutex (default attributes).
result = pthread_mutex_init(mutex, NULL);
#endif // defined(DEBUG)
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}

@ -35,34 +35,34 @@ static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) {
pthread_mutexattr_t attr;
int result = pthread_mutexattr_init(&attr);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
result = pthread_mutex_init(mutex, &attr);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
result = pthread_mutexattr_destroy(&attr);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}


static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) {
int result = pthread_mutex_destroy(mutex);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}


static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) {
int result = pthread_mutex_lock(mutex);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}


static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) {
int result = pthread_mutex_unlock(mutex);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}

@ -72,7 +72,7 @@ static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) {
if (result == EBUSY) {
return false;
}
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
return true;
}

@ -120,7 +120,7 @@ Mutex::Mutex() {

Mutex::~Mutex() {
DestroyNativeHandle(&native_handle_);
ASSERT_EQ(0, level_);
DCHECK_EQ(0, level_);
}


@ -155,14 +155,14 @@ RecursiveMutex::RecursiveMutex() {

RecursiveMutex::~RecursiveMutex() {
DestroyNativeHandle(&native_handle_);
ASSERT_EQ(0, level_);
DCHECK_EQ(0, level_);
}


void RecursiveMutex::Lock() {
LockNativeHandle(&native_handle_);
#ifdef DEBUG
ASSERT_LE(0, level_);
DCHECK_LE(0, level_);
level_++;
#endif
}
@ -170,7 +170,7 @@ void RecursiveMutex::Lock() {

void RecursiveMutex::Unlock() {
#ifdef DEBUG
ASSERT_LT(0, level_);
DCHECK_LT(0, level_);
level_--;
#endif
UnlockNativeHandle(&native_handle_);
@ -182,7 +182,7 @@ bool RecursiveMutex::TryLock() {
return false;
}
#ifdef DEBUG
ASSERT_LE(0, level_);
DCHECK_LE(0, level_);
level_++;
#endif
return true;
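// --- Illustration (not part of this commit): PTHREAD_MUTEX_ERRORCHECK makes
// misuse observable instead of undefined behaviour, which is why the debug
// build selects it above. A minimal sketch of the failure mode it catches:
#include <cassert>
#include <cerrno>
#include <pthread.h>

static void ErrorCheckDemo() {
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
  pthread_mutex_t mutex;
  pthread_mutex_init(&mutex, &attr);
  pthread_mutexattr_destroy(&attr);

  assert(pthread_mutex_lock(&mutex) == 0);
  assert(pthread_mutex_lock(&mutex) == EDEADLK);  // relock detected, not UB
  pthread_mutex_unlock(&mutex);
  pthread_mutex_destroy(&mutex);
}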
@ -74,14 +74,14 @@ class Mutex V8_FINAL {

V8_INLINE void AssertHeldAndUnmark() {
#ifdef DEBUG
ASSERT_EQ(1, level_);
DCHECK_EQ(1, level_);
level_--;
#endif
}

V8_INLINE void AssertUnheldAndMark() {
#ifdef DEBUG
ASSERT_EQ(0, level_);
DCHECK_EQ(0, level_);
level_++;
#endif
}
@ -38,9 +38,9 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
double OS::LocalTimeOffset(TimezoneCache* cache) {
// On Cygwin, struct tm does not contain a tm_gmtoff field.
time_t utc = time(NULL);
ASSERT(utc != -1);
DCHECK(utc != -1);
struct tm* loc = localtime(&utc);
ASSERT(loc != NULL);
DCHECK(loc != NULL);
// time - localtime includes any daylight savings offset, so subtract it.
return static_cast<double>((mktime(loc) - utc) * msPerSecond -
(loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
@ -205,7 +205,7 @@ VirtualMemory::VirtualMemory(size_t size)

VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* address = ReserveRegion(request_size);
@ -214,11 +214,11 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
// Try reducing the size by freeing and then reallocating a specific area.
bool result = ReleaseRegion(address, request_size);
USE(result);
ASSERT(result);
DCHECK(result);
address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
if (address != NULL) {
request_size = size;
ASSERT(base == static_cast<uint8_t*>(address));
DCHECK(base == static_cast<uint8_t*>(address));
} else {
// Resizing failed, just go with a bigger area.
address = ReserveRegion(request_size);
@ -232,7 +232,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address_, size_);
ASSERT(result);
DCHECK(result);
USE(result);
}
}
@ -255,7 +255,7 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {


bool VirtualMemory::Uncommit(void* address, size_t size) {
ASSERT(IsReserved());
DCHECK(IsReserved());
return UncommitRegion(address, size);
}
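// --- Illustration (not part of this commit): the POSIX ports below obtain an
// aligned block by over-reserving size + alignment, rounding the base up, and
// unmapping the slack on both sides. A pointer-arithmetic sketch of the plan,
// assuming alignment is a power of two as the DCHECKs require (page-size
// rounding omitted):
#include <cstddef>
#include <cstdint>

struct TrimmedRegionSketch { uintptr_t base; size_t prefix, suffix; };

static TrimmedRegionSketch PlanTrim(uintptr_t raw, size_t size,
                                    size_t alignment, size_t request) {
  uintptr_t aligned = (raw + alignment - 1) & ~(alignment - 1);
  size_t prefix = aligned - raw;            // unmapped before the block
  size_t suffix = request - prefix - size;  // unmapped after the block
  return { aligned, prefix, suffix };
}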
@ -182,7 +182,7 @@ VirtualMemory::VirtualMemory(size_t size)

VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
@ -195,7 +195,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)

uint8_t* base = static_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
ASSERT_LE(base, aligned_base);
DCHECK_LE(base, aligned_base);

// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
@ -205,7 +205,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
}

size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
ASSERT_LE(aligned_size, request_size);
DCHECK_LE(aligned_size, request_size);

if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
@ -213,7 +213,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
request_size -= suffix_size;
}

ASSERT(aligned_size == request_size);
DCHECK(aligned_size == request_size);

address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
@ -223,7 +223,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
DCHECK(result);
USE(result);
}
}
@ -272,7 +272,7 @@ void OS::SignalCodeMovingGC() {
MAP_PRIVATE,
fileno(f),
0);
ASSERT(addr != MAP_FAILED);
DCHECK(addr != MAP_FAILED);
OS::Free(addr, size);
fclose(f);
}
@ -292,7 +292,7 @@ VirtualMemory::VirtualMemory(size_t size)

VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
@ -305,7 +305,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)

uint8_t* base = static_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
ASSERT_LE(base, aligned_base);
DCHECK_LE(base, aligned_base);

// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
@ -315,7 +315,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
}

size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
ASSERT_LE(aligned_size, request_size);
DCHECK_LE(aligned_size, request_size);

if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
@ -323,7 +323,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
request_size -= suffix_size;
}

ASSERT(aligned_size == request_size);
DCHECK(aligned_size == request_size);

address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
@ -336,7 +336,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
DCHECK(result);
USE(result);
}
}
@ -184,7 +184,7 @@ VirtualMemory::VirtualMemory(size_t size)

VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
@ -197,7 +197,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)

uint8_t* base = static_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
ASSERT_LE(base, aligned_base);
DCHECK_LE(base, aligned_base);

// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
@ -207,7 +207,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
}

size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
ASSERT_LE(aligned_size, request_size);
DCHECK_LE(aligned_size, request_size);

if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
@ -215,7 +215,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
request_size -= suffix_size;
}

ASSERT(aligned_size == request_size);
DCHECK(aligned_size == request_size);

address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
@ -225,7 +225,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
DCHECK(result);
USE(result);
}
}
@ -192,7 +192,7 @@ void OS::SignalCodeMovingGC() {
}
void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
fileno(f), 0);
ASSERT(addr != MAP_FAILED);
DCHECK(addr != MAP_FAILED);
OS::Free(addr, size);
fclose(f);
}
@ -213,7 +213,7 @@ VirtualMemory::VirtualMemory(size_t size)

VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
@ -226,7 +226,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)

uint8_t* base = static_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
ASSERT_LE(base, aligned_base);
DCHECK_LE(base, aligned_base);

// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
@ -236,7 +236,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
}

size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
ASSERT_LE(aligned_size, request_size);
DCHECK_LE(aligned_size, request_size);

if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
@ -244,7 +244,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
request_size -= suffix_size;
}

ASSERT(aligned_size == request_size);
DCHECK(aligned_size == request_size);

address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
@ -254,7 +254,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
DCHECK(result);
USE(result);
}
}
@ -169,7 +169,7 @@ void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
int result = munmap(address, size);
USE(result);
ASSERT(result == 0);
DCHECK(result == 0);
}


@ -357,12 +357,12 @@ TimezoneCache* OS::CreateTimezoneCache() {


void OS::DisposeTimezoneCache(TimezoneCache* cache) {
ASSERT(cache == NULL);
DCHECK(cache == NULL);
}


void OS::ClearTimezoneCache(TimezoneCache* cache) {
ASSERT(cache == NULL);
DCHECK(cache == NULL);
}


@ -561,7 +561,7 @@ static void* ThreadEntry(void* arg) {
// one).
{ LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
SetThreadName(thread->name());
ASSERT(thread->data()->thread_ != kNoThread);
DCHECK(thread->data()->thread_ != kNoThread);
thread->NotifyStartedAndRun();
return NULL;
}
@ -578,22 +578,22 @@ void Thread::Start() {
pthread_attr_t attr;
memset(&attr, 0, sizeof(attr));
result = pthread_attr_init(&attr);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
// Native client uses default stack size.
#if !V8_OS_NACL
if (stack_size_ > 0) {
result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
}
#endif
{
LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
}
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
result = pthread_attr_destroy(&attr);
ASSERT_EQ(0, result);
ASSERT(data_->thread_ != kNoThread);
DCHECK_EQ(0, result);
DCHECK(data_->thread_ != kNoThread);
USE(result);
}

@ -605,7 +605,7 @@ void Thread::Join() {

void Thread::YieldCPU() {
int result = sched_yield();
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}

@ -701,7 +701,7 @@ Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
#endif
pthread_key_t key;
int result = pthread_key_create(&key, NULL);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
LocalStorageKey local_key = PthreadKeyToLocalKey(key);
#ifdef V8_FAST_TLS_SUPPORTED
@ -715,7 +715,7 @@ Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
int result = pthread_key_delete(pthread_key);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}

@ -729,7 +729,7 @@ void* Thread::GetThreadLocal(LocalStorageKey key) {
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
int result = pthread_setspecific(pthread_key, value);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}
@ -249,7 +249,7 @@ VirtualMemory::VirtualMemory(size_t size)

VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
@ -262,7 +262,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)

uint8_t* base = static_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
ASSERT_LE(base, aligned_base);
DCHECK_LE(base, aligned_base);

// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
@ -272,7 +272,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
}

size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
ASSERT_LE(aligned_size, request_size);
DCHECK_LE(aligned_size, request_size);

if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
@ -280,7 +280,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
request_size -= suffix_size;
}

ASSERT(aligned_size == request_size);
DCHECK(aligned_size == request_size);

address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
@ -290,7 +290,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
DCHECK(result);
USE(result);
}
}

@ -154,7 +154,7 @@ VirtualMemory::VirtualMemory(size_t size)

VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
@ -167,7 +167,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)

uint8_t* base = static_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
ASSERT_LE(base, aligned_base);
DCHECK_LE(base, aligned_base);

// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
@ -177,7 +177,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
}

size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
ASSERT_LE(aligned_size, request_size);
DCHECK_LE(aligned_size, request_size);

if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
@ -185,7 +185,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
request_size -= suffix_size;
}

ASSERT(aligned_size == request_size);
DCHECK(aligned_size == request_size);

address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
@ -195,7 +195,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
DCHECK(result);
USE(result);
}
}

@ -71,7 +71,7 @@ int fopen_s(FILE** pFile, const char* filename, const char* mode) {

int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
const char* format, va_list argptr) {
ASSERT(count == _TRUNCATE);
DCHECK(count == _TRUNCATE);
return _vsnprintf(buffer, sizeOfBuffer, format, argptr);
}

@ -689,7 +689,7 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
n = _TRUNCATE;
int result = strncpy_s(dest, length, src, n);
USE(result);
ASSERT(result == 0 || (n == _TRUNCATE && result == STRUNCATE));
DCHECK(result == 0 || (n == _TRUNCATE && result == STRUNCATE));
}


@ -790,7 +790,7 @@ void* OS::Allocate(const size_t requested,

if (mbase == NULL) return NULL;

ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
DCHECK(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));

*allocated = msize;
return mbase;
@ -1228,7 +1228,7 @@ VirtualMemory::VirtualMemory(size_t size)

VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* address = ReserveRegion(request_size);
@ -1237,11 +1237,11 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
// Try reducing the size by freeing and then reallocating a specific area.
bool result = ReleaseRegion(address, request_size);
USE(result);
ASSERT(result);
DCHECK(result);
address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
if (address != NULL) {
request_size = size;
ASSERT(base == static_cast<uint8_t*>(address));
DCHECK(base == static_cast<uint8_t*>(address));
} else {
// Resizing failed, just go with a bigger area.
address = ReserveRegion(request_size);
@ -1255,7 +1255,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
DCHECK(result);
USE(result);
}
}
@ -1278,7 +1278,7 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {


bool VirtualMemory::Uncommit(void* address, size_t size) {
ASSERT(IsReserved());
DCHECK(IsReserved());
return UncommitRegion(address, size);
}

@ -1397,7 +1397,7 @@ void Thread::Join() {

Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
DWORD result = TlsAlloc();
ASSERT(result != TLS_OUT_OF_INDEXES);
DCHECK(result != TLS_OUT_OF_INDEXES);
return static_cast<LocalStorageKey>(result);
}

@ -1405,7 +1405,7 @@ Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
BOOL result = TlsFree(static_cast<DWORD>(key));
USE(result);
ASSERT(result);
DCHECK(result);
}


@ -1417,7 +1417,7 @@ void* Thread::GetThreadLocal(LocalStorageKey key) {
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
BOOL result = TlsSetValue(static_cast<DWORD>(key), value);
USE(result);
ASSERT(result);
DCHECK(result);
}


@ -91,13 +91,13 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
const intptr_t kMaxInlineSlots = 64;
const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
const intptr_t kPointerSize = sizeof(void*);
ASSERT(0 <= index && index < kMaxSlots);
DCHECK(0 <= index && index < kMaxSlots);
if (index < kMaxInlineSlots) {
return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
kPointerSize * index));
}
intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
ASSERT(extra != 0);
DCHECK(extra != 0);
return *reinterpret_cast<intptr_t*>(extra +
kPointerSize * (index - kMaxInlineSlots));
}
@ -351,7 +351,7 @@ class VirtualMemory {
// necessarily aligned. The user might need to round it up to a multiple of
// the alignment to get the start of the aligned block.
void* address() {
ASSERT(IsReserved());
DCHECK(IsReserved());
return address_;
}

@ -371,7 +371,7 @@ class VirtualMemory {
bool Guard(void* address);

void Release() {
ASSERT(IsReserved());
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
void* address = address_;
@ -379,13 +379,13 @@ class VirtualMemory {
Reset();
bool result = ReleaseRegion(address, size);
USE(result);
ASSERT(result);
DCHECK(result);
}

// Assign control of the reserved region to a different VirtualMemory object.
// The old object is no longer functional (IsReserved() returns false).
void TakeControl(VirtualMemory* from) {
ASSERT(!IsReserved());
DCHECK(!IsReserved());
address_ = from->address_;
size_ = from->size_;
from->Reset();
@ -484,7 +484,7 @@ class Thread {
static inline void* GetExistingThreadLocal(LocalStorageKey key) {
void* result = reinterpret_cast<void*>(
InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
ASSERT(result == GetThreadLocal(key));
DCHECK(result == GetThreadLocal(key));
return result;
}
#else

@ -22,21 +22,21 @@ namespace base {
Semaphore::Semaphore(int count) {
kern_return_t result = semaphore_create(
mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count);
ASSERT_EQ(KERN_SUCCESS, result);
DCHECK_EQ(KERN_SUCCESS, result);
USE(result);
}


Semaphore::~Semaphore() {
kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_);
ASSERT_EQ(KERN_SUCCESS, result);
DCHECK_EQ(KERN_SUCCESS, result);
USE(result);
}


void Semaphore::Signal() {
kern_return_t result = semaphore_signal(native_handle_);
ASSERT_EQ(KERN_SUCCESS, result);
DCHECK_EQ(KERN_SUCCESS, result);
USE(result);
}

@ -45,7 +45,7 @@ void Semaphore::Wait() {
while (true) {
kern_return_t result = semaphore_wait(native_handle_);
if (result == KERN_SUCCESS) return; // Semaphore was signalled.
ASSERT_EQ(KERN_ABORTED, result);
DCHECK_EQ(KERN_ABORTED, result);
}
}

@ -65,7 +65,7 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
kern_return_t result = semaphore_timedwait(native_handle_, ts);
if (result == KERN_SUCCESS) return true; // Semaphore was signalled.
if (result == KERN_OPERATION_TIMED_OUT) return false; // Timeout.
ASSERT_EQ(KERN_ABORTED, result);
DCHECK_EQ(KERN_ABORTED, result);
now = TimeTicks::Now();
}
}
@ -73,23 +73,23 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
#elif V8_OS_POSIX

Semaphore::Semaphore(int count) {
ASSERT(count >= 0);
DCHECK(count >= 0);
int result = sem_init(&native_handle_, 0, count);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}


Semaphore::~Semaphore() {
int result = sem_destroy(&native_handle_);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}


void Semaphore::Signal() {
int result = sem_post(&native_handle_);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
}

@ -99,8 +99,8 @@ void Semaphore::Wait() {
int result = sem_wait(&native_handle_);
if (result == 0) return; // Semaphore was signalled.
// Signal caused spurious wakeup.
ASSERT_EQ(-1, result);
ASSERT_EQ(EINTR, errno);
DCHECK_EQ(-1, result);
DCHECK_EQ(EINTR, errno);
}
}

@ -126,23 +126,23 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
return false;
}
// Signal caused spurious wakeup.
ASSERT_EQ(-1, result);
ASSERT_EQ(EINTR, errno);
DCHECK_EQ(-1, result);
DCHECK_EQ(EINTR, errno);
}
}

#elif V8_OS_WIN

Semaphore::Semaphore(int count) {
ASSERT(count >= 0);
DCHECK(count >= 0);
native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
ASSERT(native_handle_ != NULL);
DCHECK(native_handle_ != NULL);
}


Semaphore::~Semaphore() {
BOOL result = CloseHandle(native_handle_);
ASSERT(result);
DCHECK(result);
USE(result);
}

@ -150,14 +150,14 @@ Semaphore::~Semaphore() {
void Semaphore::Signal() {
LONG dummy;
BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy);
ASSERT(result);
DCHECK(result);
USE(result);
}


void Semaphore::Wait() {
DWORD result = WaitForSingleObject(native_handle_, INFINITE);
ASSERT(result == WAIT_OBJECT_0);
DCHECK(result == WAIT_OBJECT_0);
USE(result);
}

@ -172,7 +172,7 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
if (result == WAIT_OBJECT_0) {
return true;
}
ASSERT(result == WAIT_TIMEOUT);
DCHECK(result == WAIT_TIMEOUT);
now = TimeTicks::Now();
} else {
DWORD result = WaitForSingleObject(
@ -180,7 +180,7 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
if (result == WAIT_TIMEOUT) {
return false;
}
ASSERT(result == WAIT_OBJECT_0);
DCHECK(result == WAIT_OBJECT_0);
return true;
}
}

@ -99,8 +99,8 @@ int64_t TimeDelta::InNanoseconds() const {
#if V8_OS_MACOSX

TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
ASSERT_GE(ts.tv_nsec, 0);
ASSERT_LT(ts.tv_nsec,
DCHECK_GE(ts.tv_nsec, 0);
DCHECK_LT(ts.tv_nsec,
static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
@ -109,7 +109,7 @@ TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {

struct mach_timespec TimeDelta::ToMachTimespec() const {
struct mach_timespec ts;
ASSERT(delta_ >= 0);
DCHECK(delta_ >= 0);
ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
Time::kNanosecondsPerMicrosecond;
@ -122,8 +122,8 @@ struct mach_timespec TimeDelta::ToMachTimespec() const {
#if V8_OS_POSIX

TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
ASSERT_GE(ts.tv_nsec, 0);
ASSERT_LT(ts.tv_nsec,
DCHECK_GE(ts.tv_nsec, 0);
DCHECK_LT(ts.tv_nsec,
static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
@ -230,7 +230,7 @@ Time Time::FromFiletime(FILETIME ft) {


FILETIME Time::ToFiletime() const {
ASSERT(us_ >= 0);
DCHECK(us_ >= 0);
FILETIME ft;
if (IsNull()) {
ft.dwLowDateTime = 0;
@ -253,7 +253,7 @@ FILETIME Time::ToFiletime() const {
Time Time::Now() {
struct timeval tv;
int result = gettimeofday(&tv, NULL);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
return FromTimeval(tv);
}
@ -265,8 +265,8 @@ Time Time::NowFromSystemTime() {


Time Time::FromTimespec(struct timespec ts) {
ASSERT(ts.tv_nsec >= 0);
ASSERT(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond)); // NOLINT
DCHECK(ts.tv_nsec >= 0);
DCHECK(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond)); // NOLINT
if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
return Time();
}
@ -298,8 +298,8 @@ struct timespec Time::ToTimespec() const {


Time Time::FromTimeval(struct timeval tv) {
ASSERT(tv.tv_usec >= 0);
ASSERT(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
DCHECK(tv.tv_usec >= 0);
DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
if (tv.tv_usec == 0 && tv.tv_sec == 0) {
return Time();
}
@ -397,14 +397,14 @@ class HighResolutionTickClock V8_FINAL : public TickClock {
public:
explicit HighResolutionTickClock(int64_t ticks_per_second)
: ticks_per_second_(ticks_per_second) {
ASSERT_LT(0, ticks_per_second);
DCHECK_LT(0, ticks_per_second);
}
virtual ~HighResolutionTickClock() {}

virtual int64_t Now() V8_OVERRIDE {
LARGE_INTEGER now;
BOOL result = QueryPerformanceCounter(&now);
ASSERT(result);
DCHECK(result);
USE(result);

// Intentionally calculate microseconds in a round about manner to avoid
@ -500,7 +500,7 @@ static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
TimeTicks TimeTicks::Now() {
// Make sure we never return 0 here.
TimeTicks ticks(tick_clock.Pointer()->Now());
ASSERT(!ticks.IsNull());
DCHECK(!ticks.IsNull());
return ticks;
}

@ -508,7 +508,7 @@ TimeTicks TimeTicks::Now() {
TimeTicks TimeTicks::HighResolutionNow() {
// Make sure we never return 0 here.
TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
ASSERT(!ticks.IsNull());
DCHECK(!ticks.IsNull());
return ticks;
}

@ -539,7 +539,7 @@ TimeTicks TimeTicks::HighResolutionNow() {
static struct mach_timebase_info info;
if (info.denom == 0) {
kern_return_t result = mach_timebase_info(&info);
ASSERT_EQ(KERN_SUCCESS, result);
DCHECK_EQ(KERN_SUCCESS, result);
USE(result);
}
ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
@ -552,13 +552,13 @@ TimeTicks TimeTicks::HighResolutionNow() {
// cleanup the tools/gyp/v8.gyp file.
struct timeval tv;
int result = gettimeofday(&tv, NULL);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec);
#elif V8_OS_POSIX
struct timespec ts;
int result = clock_gettime(CLOCK_MONOTONIC, &ts);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
USE(result);
ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
ts.tv_nsec / Time::kNanosecondsPerMicrosecond);

@ -99,7 +99,7 @@ enum RangeConstraint {

// Helper function for coercing an int back to a RangeContraint.
inline RangeConstraint GetRangeConstraint(int integer_range_constraint) {
ASSERT(integer_range_constraint >= RANGE_VALID &&
DCHECK(integer_range_constraint >= RANGE_VALID &&
integer_range_constraint <= RANGE_INVALID);
return static_cast<RangeConstraint>(integer_range_constraint);
}

@ -45,9 +45,9 @@ RandomNumberGenerator::RandomNumberGenerator() {
// https://code.google.com/p/v8/issues/detail?id=2905
unsigned first_half, second_half;
errno_t result = rand_s(&first_half);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
result = rand_s(&second_half);
ASSERT_EQ(0, result);
DCHECK_EQ(0, result);
SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
#else
// Gather entropy from /dev/urandom if available.
@ -79,7 +79,7 @@ RandomNumberGenerator::RandomNumberGenerator() {


int RandomNumberGenerator::NextInt(int max) {
ASSERT_LE(0, max);
DCHECK_LE(0, max);

// Fast path if max is a power of 2.
if (IS_POWER_OF_TWO(max)) {
@ -110,8 +110,8 @@ void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {


int RandomNumberGenerator::Next(int bits) {
ASSERT_LT(0, bits);
ASSERT_GE(32, bits);
DCHECK_LT(0, bits);
DCHECK_GE(32, bits);
// Do unsigned multiplication, which has the intended modulo semantics, while
// signed multiplication would expose undefined behavior.
uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier;

@ -62,7 +62,7 @@ int fpclassify(double x) {
if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;

// All cases should be covered by the code above.
ASSERT(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
DCHECK(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
return FP_NAN;
}


@ -17,7 +17,7 @@ namespace v8 {
namespace internal {

static int NormalizedExponent(uint64_t significand, int exponent) {
ASSERT(significand != 0);
DCHECK(significand != 0);
while ((significand & Double::kHiddenBit) == 0) {
significand = significand << 1;
exponent = exponent - 1;
@ -68,8 +68,8 @@ static void GenerateCountedDigits(int count, int* decimal_point,

void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
Vector<char> buffer, int* length, int* decimal_point) {
ASSERT(v > 0);
ASSERT(!Double(v).IsSpecial());
DCHECK(v > 0);
DCHECK(!Double(v).IsSpecial());
uint64_t significand = Double(v).Significand();
bool is_even = (significand & 1) == 0;
int exponent = Double(v).Exponent();
@ -99,7 +99,7 @@ void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
// 4e-324. In this case the denominator needs fewer than 324*4 binary digits.
// The maximum double is 1.7976931348623157e308 which needs fewer than
// 308*4 binary digits.
ASSERT(Bignum::kMaxSignificantBits >= 324*4);
DCHECK(Bignum::kMaxSignificantBits >= 324*4);
bool need_boundary_deltas = (mode == BIGNUM_DTOA_SHORTEST);
InitialScaledStartValues(v, estimated_power, need_boundary_deltas,
&numerator, &denominator,
@ -159,7 +159,7 @@ static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
while (true) {
uint16_t digit;
digit = numerator->DivideModuloIntBignum(*denominator);
ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
DCHECK(digit <= 9); // digit is a uint16_t and therefore always positive.
// digit = numerator / denominator (integer division).
// numerator = numerator % denominator.
buffer[(*length)++] = digit + '0';
@ -205,7 +205,7 @@ static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
// loop would have stopped earlier.
// We still have an assert here in case the preconditions were not
// satisfied.
ASSERT(buffer[(*length) - 1] != '9');
DCHECK(buffer[(*length) - 1] != '9');
buffer[(*length) - 1]++;
} else {
// Halfway case.
@ -216,7 +216,7 @@ static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
if ((buffer[(*length) - 1] - '0') % 2 == 0) {
// Round down => Do nothing.
} else {
ASSERT(buffer[(*length) - 1] != '9');
DCHECK(buffer[(*length) - 1] != '9');
buffer[(*length) - 1]++;
}
}
@ -228,9 +228,9 @@ static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
// Round up.
// Note again that the last digit could not be '9' since this would have
// stopped the loop earlier.
// We still have an ASSERT here, in case the preconditions were not
// We still have an DCHECK here, in case the preconditions were not
// satisfied.
ASSERT(buffer[(*length) -1] != '9');
DCHECK(buffer[(*length) -1] != '9');
buffer[(*length) - 1]++;
return;
}
@ -247,11 +247,11 @@ static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
static void GenerateCountedDigits(int count, int* decimal_point,
Bignum* numerator, Bignum* denominator,
Vector<char>(buffer), int* length) {
ASSERT(count >= 0);
DCHECK(count >= 0);
for (int i = 0; i < count - 1; ++i) {
uint16_t digit;
digit = numerator->DivideModuloIntBignum(*denominator);
ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
DCHECK(digit <= 9); // digit is a uint16_t and therefore always positive.
// digit = numerator / denominator (integer division).
// numerator = numerator % denominator.
buffer[i] = digit + '0';
@ -304,7 +304,7 @@ static void BignumToFixed(int requested_digits, int* decimal_point,
} else if (-(*decimal_point) == requested_digits) {
// We only need to verify if the number rounds down or up.
// Ex: 0.04 and 0.06 with requested_digits == 1.
ASSERT(*decimal_point == -requested_digits);
DCHECK(*decimal_point == -requested_digits);
// Initially the fraction lies in range (1, 10]. Multiply the denominator
// by 10 so that we can compare more easily.
denominator->Times10();
@ -383,7 +383,7 @@ static void InitialScaledStartValuesPositiveExponent(
Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus) {
// A positive exponent implies a positive power.
ASSERT(estimated_power >= 0);
DCHECK(estimated_power >= 0);
// Since the estimated_power is positive we simply multiply the denominator
// by 10^estimated_power.

@ -502,7 +502,7 @@ static void InitialScaledStartValuesNegativeExponentNegativePower(
// numerator = v * 10^-estimated_power * 2 * 2^-exponent.
// Remember: numerator has been abused as power_ten. So no need to assign it
// to itself.
ASSERT(numerator == power_ten);
DCHECK(numerator == power_ten);
numerator->MultiplyByUInt64(significand);

// denominator = 2 * 2^-exponent with exponent < 0.

Some files were not shown because too many files have changed in this diff.