Define DEBUG for v8_optimized_debug=2

This ensures that optimized debug builds suffer only a minimal performance regression vs. NDEBUG builds (now only about 10% slower rather than ~2x).
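
In rough terms, a build now lands in one of three assert regimes: release keeps NDEBUG and no asserts at all, optimized debug keeps regular ASSERTs but drops slow asserts, and full debug keeps both. The following condensed sketch of the new checks.h logic (restating the diff below, not adding anything beyond it) shows how:

    // Full debug (v8_optimized_debug != 2): DEBUG is defined and
    // OPTIMIZED_DEBUG is not, so slow asserts are compiled in, gated on
    // the --enable-slow-asserts runtime flag.
    #ifdef DEBUG
    #ifndef OPTIMIZED_DEBUG
    #define ENABLE_SLOW_ASSERTS 1
    #endif
    #endif

    #ifdef ENABLE_SLOW_ASSERTS
    #define SLOW_ASSERT(condition) \
      CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
    #else
    // Optimized debug (v8_optimized_debug==2): DEBUG and OPTIMIZED_DEBUG
    // are both defined, so regular ASSERTs stay enabled while SLOW_ASSERT
    // compiles away and the flag collapses to a compile-time false.
    #define SLOW_ASSERT(condition) ((void) 0)
    #endif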

R=jkummerow@chromium.org, mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/39183004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17392 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: danno@chromium.org
Date:   2013-10-25 11:10:28 +00:00
Parent: f8a4bd7c4e
Commit: 7339d45a9f

14 changed files with 51 additions and 33 deletions


@@ -436,6 +436,7 @@
'V8_ENABLE_CHECKS',
'OBJECT_PRINT',
'VERIFY_HEAP',
+'DEBUG'
],
'msvs_settings': {
'VCCLCompilerTool': {
@@ -503,15 +504,6 @@
},
},
'conditions': [
-['v8_optimized_debug==2', {
-'defines': [
-'NDEBUG',
-],
-}, {
-'defines': [
-'DEBUG',
-],
-}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual',
@@ -553,6 +545,9 @@
'-fdata-sections',
'-ffunction-sections',
],
+'defines': [
+'OPTIMIZED_DEBUG'
+],
'conditions': [
# TODO(crbug.com/272548): Avoid -O3 in NaCl
['nacl_target_arch=="none"', {


@@ -627,7 +627,7 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
receiver_types_.Add(handle(holder_->map()), oracle->zone());
}
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
for (int i = 0; i < length; i++) {


@@ -129,8 +129,6 @@ void API_Fatal(const char* location, const char* format, ...) {
namespace v8 { namespace internal {
-bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; }
intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
} } // namespace v8::internal


@@ -272,7 +272,24 @@ template <int> class StaticAssertionHelper { };
#endif
+#ifdef DEBUG
+#ifndef OPTIMIZED_DEBUG
+#define ENABLE_SLOW_ASSERTS 1
+#endif
+#endif
+namespace v8 {
+namespace internal {
+#ifdef ENABLE_SLOW_ASSERTS
+#define SLOW_ASSERT(condition) \
+CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
+extern bool FLAG_enable_slow_asserts;
+#else
+#define SLOW_ASSERT(condition) ((void) 0)
+const bool FLAG_enable_slow_asserts = false;
+#endif
+} // namespace internal
+} // namespace v8
// The ASSERT macro is equivalent to CHECK except that it only
@@ -285,7 +302,6 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
@@ -294,7 +310,6 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) ((void) 0)
#define ASSERT_LT(v1, v2) ((void) 0)
#define ASSERT_LE(v1, v2) ((void) 0)
-#define SLOW_ASSERT(condition) ((void) 0)
#endif
// Static asserts has no impact on runtime performance, so they can be
// safely enabled in release mode. Moreover, the ((void) 0) expression


@@ -259,7 +259,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsNativeContext());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined()) {


@@ -355,7 +355,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
return JunkStringValue();
}
-ASSERT(buffer_pos < kBufferSize);
+SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
Vector<const char> buffer_vector(buffer, buffer_pos);
return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
@@ -692,7 +692,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
exponent--;
}
-ASSERT(buffer_pos < kBufferSize);
+SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);


@@ -506,7 +506,15 @@ class FrameDescription {
void SetCallerFp(unsigned offset, intptr_t value);
intptr_t GetRegister(unsigned n) const {
-ASSERT(n < ARRAY_SIZE(registers_));
+#if DEBUG
+// This convoluted ASSERT is needed to work around a gcc problem that
+// improperly detects an array bounds overflow in optimized debug builds
+// when using a plain ASSERT.
+if (n >= ARRAY_SIZE(registers_)) {
+ASSERT(false);
+return 0;
+}
+#endif
return registers_[n];
}


@@ -792,7 +792,7 @@ class ElementsAccessorBase : public ElementsAccessor {
FixedArray* to,
FixedArrayBase* from) {
int len0 = to->length();
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < len0; i++) {
ASSERT(!to->get(i)->IsTheHole());


@@ -696,8 +696,10 @@ DEFINE_bool(stress_compaction, false,
#endif
// checks.cc
+#ifndef OPTIMIZED_DEBUG
DEFINE_bool(enable_slow_asserts, false,
"enable asserts that are slow to execute")
+#endif
// codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
DEFINE_bool(print_source, false, "pretty print source code")


@@ -728,7 +728,7 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
MarkBit mark_bit = Marking::MarkBitFrom(obj);
-#ifdef DEBUG
+#if ENABLE_SLOW_ASSERTS
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(mark_bit)) ||


@@ -84,7 +84,7 @@ class List {
// backing store (e.g. Add).
inline T& operator[](int i) const {
ASSERT(0 <= i);
-ASSERT(i < length_);
+SLOW_ASSERT(i < length_);
return data_[i];
}
inline T& at(int i) const { return operator[](i); }


@@ -80,7 +80,7 @@ PropertyDetails PropertyDetails::AsDeleted() {
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
-ASSERT(object->Is##type()); \
+SLOW_ASSERT(object->Is##type()); \
return reinterpret_cast<type*>(object); \
}
@@ -1190,7 +1190,7 @@ void HeapObject::VerifySmiField(int offset) {
Heap* HeapObject::GetHeap() {
Heap* heap =
MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
-ASSERT(heap != NULL);
+SLOW_ASSERT(heap != NULL);
return heap;
}
@@ -1307,7 +1307,7 @@ FixedArrayBase* JSObject::elements() {
void JSObject::ValidateElements() {
-#if DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
ElementsAccessor* accessor = GetElementsAccessor();
accessor->Validate(this);
@@ -1901,7 +1901,7 @@ FixedArrayBase* FixedArrayBase::cast(Object* object) {
Object* FixedArray::get(int index) {
-ASSERT(index >= 0 && index < this->length());
+SLOW_ASSERT(index >= 0 && index < this->length());
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}


@@ -1196,7 +1196,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
ASSERT(!this->IsExternalString());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -1253,7 +1253,7 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -4483,7 +4483,7 @@ Handle<Map> NormalizedMapCache::Get(Handle<NormalizedMapCache> cache,
Handle<Map>::cast(result)->SharedMapVerify();
}
#endif
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit,
// except for the code cache, which can contain some ics which can be
@@ -7828,7 +7828,7 @@ MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
accessor->AddElementsToFixedArray(array, array, this);
FixedArray* result;
if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
@@ -7846,7 +7846,7 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
accessor->AddElementsToFixedArray(NULL, NULL, this, other);
FixedArray* result;
if (!maybe_result->To(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
@@ -8901,7 +8901,7 @@ bool String::SlowEquals(String* other) {
// Fast check: if hash code is computed for both strings
// a fast negative check can be performed.
if (HasHashCode() && other->HasHashCode()) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
if (Hash() != other->Hash()) {
bool found_difference = false;


@@ -419,8 +419,8 @@ class Vector {
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
Vector<T> SubVector(int from, int to) {
-ASSERT(to <= length_);
-ASSERT(from < to);
+SLOW_ASSERT(to <= length_);
+SLOW_ASSERT(from < to);
ASSERT(0 <= from);
return Vector<T>(start() + from, to - from);
}