From c65ae4f10c7273956e88db433f626b26a1377caf Mon Sep 17 00:00:00 2001 From: bmeurer Date: Fri, 30 Jan 2015 01:29:25 -0800 Subject: [PATCH] Reland "Initial switch to Chromium-style CHECK_* and DCHECK_* macros.". R=svenpanne@chromium.org Review URL: https://codereview.chromium.org/877753007 Cr-Commit-Position: refs/heads/master@{#26346} --- include/v8config.h | 2 + src/api.cc | 7 +- src/arm/code-stubs-arm.cc | 2 +- src/arm/full-codegen-arm.cc | 4 +- src/arm/macro-assembler-arm.h | 2 +- src/arm64/full-codegen-arm64.cc | 4 +- src/arm64/lithium-codegen-arm64.cc | 2 +- src/arm64/macro-assembler-arm64.cc | 2 +- src/base/compiler-specific.h | 23 + src/base/logging.cc | 38 +- src/base/logging.h | 261 +++---- src/base/platform/condition-variable.cc | 2 +- src/base/platform/time.cc | 8 +- src/base/platform/time.h | 5 +- src/bootstrapper.cc | 2 +- src/checks.cc | 83 +-- src/checks.h | 32 +- src/compiler.cc | 2 +- src/compiler/arm/code-generator-arm.cc | 2 +- src/compiler/arm/instruction-selector-arm.cc | 29 +- src/compiler/arm64/code-generator-arm64.cc | 2 +- .../arm64/instruction-selector-arm64.cc | 12 +- src/compiler/code-generator.cc | 2 +- src/compiler/control-reducer.cc | 4 +- src/compiler/ia32/code-generator-ia32.cc | 2 +- .../ia32/instruction-selector-ia32.cc | 4 +- src/compiler/instruction-selector.cc | 8 +- src/compiler/instruction.cc | 2 +- src/compiler/instruction.h | 2 +- src/compiler/js-inlining.cc | 6 +- src/compiler/loop-peeling.cc | 2 +- src/compiler/machine-operator-reducer.cc | 6 +- src/compiler/move-optimizer.cc | 4 +- src/compiler/node.cc | 2 +- src/compiler/osr.cc | 6 +- src/compiler/pipeline.cc | 12 +- src/compiler/register-allocator-verifier.cc | 6 +- src/compiler/register-allocator.cc | 2 +- src/compiler/scheduler.cc | 18 +- src/compiler/verifier.cc | 8 +- src/compiler/zone-pool.cc | 4 +- src/deoptimizer.cc | 14 +- src/deoptimizer.h | 5 +- src/heap/heap.cc | 2 +- src/heap/spaces.h | 8 +- src/hydrogen-check-elimination.cc | 6 +- src/hydrogen.cc | 6 +- src/ia32/code-stubs-ia32.cc | 2 +- src/ia32/full-codegen-ia32.cc | 4 +- src/ia32/lithium-codegen-ia32.cc | 2 +- src/ic/handler-compiler.cc | 2 +- src/ic/ic-state.cc | 8 + src/ic/ic.cc | 2 +- src/ic/x64/stub-cache-x64.cc | 2 +- src/isolate.cc | 2 +- src/jsregexp.cc | 10 +- src/jsregexp.h | 2 +- src/log.cc | 2 +- src/mips/full-codegen-mips.cc | 4 +- src/mips64/full-codegen-mips64.cc | 4 +- src/objects-debug.cc | 2 +- src/objects-inl.h | 8 +- src/objects.cc | 4 +- src/optimizing-compiler-thread.cc | 6 +- src/perf-jit.cc | 2 +- src/ppc/full-codegen-ppc.cc | 4 +- src/runtime/runtime-array.cc | 2 +- src/serialize.cc | 16 +- src/serialize.h | 4 +- src/unique.h | 2 +- src/v8.h | 2 +- src/x64/disasm-x64.cc | 2 +- src/x64/full-codegen-x64.cc | 4 +- src/x64/macro-assembler-x64.cc | 2 +- src/x87/full-codegen-x87.cc | 4 +- test/cctest/cctest.gyp | 1 - test/cctest/cctest.h | 4 +- test/cctest/compiler/codegen-tester.cc | 8 +- test/cctest/compiler/codegen-tester.h | 10 + test/cctest/compiler/function-tester.h | 2 +- test/cctest/compiler/graph-builder-tester.cc | 4 +- .../compiler/test-basic-block-profiler.cc | 2 +- test/cctest/compiler/test-changes-lowering.cc | 4 +- test/cctest/compiler/test-control-reducer.cc | 22 +- test/cctest/compiler/test-instruction.cc | 4 +- .../cctest/compiler/test-js-constant-cache.cc | 8 +- .../cctest/compiler/test-js-typed-lowering.cc | 2 +- test/cctest/compiler/test-linkage.cc | 6 +- test/cctest/compiler/test-loop-analysis.cc | 4 +- .../compiler/test-loop-assignment-analysis.cc | 6 +- 
.../compiler/test-machine-operator-reducer.cc | 142 +--- test/cctest/compiler/test-node-cache.cc | 4 +- test/cctest/compiler/test-node.cc | 8 +- test/cctest/compiler/test-operator.cc | 20 +- .../compiler/test-representation-change.cc | 5 +- test/cctest/compiler/test-run-machops.cc | 220 +++--- test/cctest/compiler/value-helper.h | 2 +- test/cctest/test-accessors.cc | 18 +- test/cctest/test-api.cc | 698 +++++++++--------- test/cctest/test-assembler-arm.cc | 58 +- test/cctest/test-assembler-arm64.cc | 8 +- test/cctest/test-assembler-ia32.cc | 8 +- test/cctest/test-assembler-x64.cc | 52 +- test/cctest/test-bignum-dtoa.cc | 72 +- test/cctest/test-bignum.cc | 620 ++++++++-------- test/cctest/test-checks.cc | 26 - test/cctest/test-circular-queue.cc | 42 +- test/cctest/test-conversions.cc | 4 +- test/cctest/test-cpu-profiler.cc | 64 +- test/cctest/test-debug.cc | 25 +- test/cctest/test-decls.cc | 8 +- test/cctest/test-deoptimization.cc | 2 +- test/cctest/test-dtoa.cc | 74 +- test/cctest/test-fast-dtoa.cc | 48 +- test/cctest/test-feedback-vector.cc | 4 +- test/cctest/test-fixed-dtoa.cc | 220 +++--- test/cctest/test-flags.cc | 2 +- test/cctest/test-func-name-inference.cc | 2 +- test/cctest/test-global-object.cc | 2 +- test/cctest/test-hashmap.cc | 16 +- test/cctest/test-heap-profiler.cc | 524 +++++++------ test/cctest/test-lockers.cc | 9 +- test/cctest/test-log-stack-tracer.cc | 8 +- test/cctest/test-log.cc | 17 +- test/cctest/test-macro-assembler-arm.cc | 2 +- test/cctest/test-parsing.cc | 11 +- test/cctest/test-platform.cc | 3 +- test/cctest/test-profile-generator.cc | 228 +++--- test/cctest/test-regexp.cc | 10 +- test/cctest/test-sampler-api.cc | 6 +- test/cctest/test-serialize.cc | 2 +- test/cctest/test-strings.cc | 15 +- test/cctest/test-symbols.cc | 2 +- test/cctest/test-version.cc | 8 +- test/cctest/trace-extension.cc | 6 +- test/unittests/base/logging-unittest.cc | 19 + .../compiler/instruction-sequence-unittest.cc | 4 +- test/unittests/compiler/scheduler-unittest.cc | 6 +- test/unittests/unittests.gyp | 1 + 139 files changed, 2033 insertions(+), 2186 deletions(-) delete mode 100644 test/cctest/test-checks.cc create mode 100644 test/unittests/base/logging-unittest.cc diff --git a/include/v8config.h b/include/v8config.h index 8497abd81f..f9c3ffde17 100644 --- a/include/v8config.h +++ b/include/v8config.h @@ -185,6 +185,7 @@ // V8_HAS_DECLSPEC_ALIGN - __declspec(align(n)) supported // V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported // V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported +// V8_HAS_DECLSPEC_SELECTANY - __declspec(selectany) supported // V8_HAS___FINAL - __final supported in non-C++11 mode // V8_HAS___FORCEINLINE - __forceinline supported // @@ -289,6 +290,7 @@ # define V8_HAS_DECLSPEC_ALIGN 1 # define V8_HAS_DECLSPEC_DEPRECATED 1 # define V8_HAS_DECLSPEC_NOINLINE 1 +# define V8_HAS_DECLSPEC_SELECTANY 1 # define V8_HAS___FORCEINLINE 1 diff --git a/src/api.cc b/src/api.cc index fd84341b1e..6caa82a5ea 100644 --- a/src/api.cc +++ b/src/api.cc @@ -4996,7 +4996,7 @@ void v8::Object::SetInternalField(int index, v8::Handle value) { if (!InternalFieldOK(obj, index, location)) return; i::Handle val = Utils::OpenHandle(*value); obj->SetInternalField(index, *val); - DCHECK_EQ(value, GetInternalField(index)); + DCHECK(value->Equals(GetInternalField(index))); } @@ -7357,6 +7357,11 @@ void HeapSnapshot::Serialize(OutputStream* stream, } +// static +STATIC_CONST_MEMBER_DEFINITION const SnapshotObjectId + HeapProfiler::kUnknownObjectId; + + int 
HeapProfiler::GetSnapshotCount() { return reinterpret_cast(this)->GetSnapshotsCount(); } diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc index 2e585ba53a..a8c374fd1d 100644 --- a/src/arm/code-stubs-arm.cc +++ b/src/arm/code-stubs-arm.cc @@ -561,7 +561,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) { // If either is a Smi (we know that not both are), then they can only // be strictly equal if the other is a HeapNumber. STATIC_ASSERT(kSmiTag == 0); - DCHECK_EQ(0, Smi::FromInt(0)); + DCHECK_EQ(static_cast(0), Smi::FromInt(0)); __ and_(r2, lhs, Operand(rhs)); __ JumpIfNotSmi(r2, ¬_smis); // One operand is a smi. EmitSmiNonsmiComparison generates code that can: diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc index 47525c9cf1..ad0c8174f1 100644 --- a/src/arm/full-codegen-arm.cc +++ b/src/arm/full-codegen-arm.cc @@ -3813,7 +3813,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NE(NULL, args->at(1)->AsLiteral()); + DCHECK_NOT_NULL(args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. @@ -4161,7 +4161,7 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_NOT_NULL(args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h index 88a68a90c2..9d46165eca 100644 --- a/src/arm/macro-assembler-arm.h +++ b/src/arm/macro-assembler-arm.h @@ -944,7 +944,7 @@ class MacroAssembler: public Assembler { ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); tst(type, Operand(kIsNotStringMask), cond); - DCHECK_EQ(0, kStringTag); + DCHECK_EQ(0u, kStringTag); return eq; } diff --git a/src/arm64/full-codegen-arm64.cc b/src/arm64/full-codegen-arm64.cc index fd07d744a1..a013543998 100644 --- a/src/arm64/full-codegen-arm64.cc +++ b/src/arm64/full-codegen-arm64.cc @@ -3522,7 +3522,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NE(NULL, args->at(1)->AsLiteral()); + DCHECK_NOT_NULL(args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. 
@@ -3868,7 +3868,7 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_NOT_NULL(args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc index 8a300e29b8..c49ecb761f 100644 --- a/src/arm64/lithium-codegen-arm64.cc +++ b/src/arm64/lithium-codegen-arm64.cc @@ -2820,7 +2820,7 @@ void LCodeGen::DoDivI(LDivI* instr) { __ Sdiv(result, dividend, divisor); if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - DCHECK_EQ(NULL, instr->temp()); + DCHECK(!instr->temp()); return; } diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc index 0644c341cb..37739d9c16 100644 --- a/src/arm64/macro-assembler-arm64.cc +++ b/src/arm64/macro-assembler-arm64.cc @@ -3936,7 +3936,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck( Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); Check(lt, kIndexIsTooLarge); - DCHECK_EQ(0, Smi::FromInt(0)); + DCHECK_EQ(static_cast(0), Smi::FromInt(0)); Cmp(index, 0); Check(ge, kIndexIsNegative); } diff --git a/src/base/compiler-specific.h b/src/base/compiler-specific.h index 9755fc13ce..ac1eb55fdd 100644 --- a/src/base/compiler-specific.h +++ b/src/base/compiler-specific.h @@ -51,4 +51,27 @@ #define WARN_UNUSED_RESULT /* NOT SUPPORTED */ #endif + +// The C++ standard requires that static const members have an out-of-class +// definition (in a single compilation unit), but MSVC chokes on this (when +// language extensions, which are required, are enabled). (You're only likely to +// notice the need for a definition if you take the address of the member or, +// more commonly, pass it to a function that takes it as a reference argument -- +// probably an STL function.) This macro makes MSVC do the right thing. See +// http://msdn.microsoft.com/en-us/library/34h23df8(v=vs.100).aspx for more +// information. Use like: +// +// In .h file: +// struct Foo { +// static const int kBar = 5; +// }; +// +// In .cc file: +// STATIC_CONST_MEMBER_DEFINITION const int Foo::kBar; +#if V8_HAS_DECLSPEC_SELECTANY +#define STATIC_CONST_MEMBER_DEFINITION __declspec(selectany) +#else +#define STATIC_CONST_MEMBER_DEFINITION +#endif + #endif // V8_BASE_COMPILER_SPECIFIC_H_ diff --git a/src/base/logging.cc b/src/base/logging.cc index c3f609f980..25d77bb1ec 100644 --- a/src/base/logging.cc +++ b/src/base/logging.cc @@ -10,14 +10,45 @@ #elif V8_OS_QNX # include #endif // V8_LIBC_GLIBC || V8_OS_BSD -#include -#include + +#include +#include #include "src/base/platform/platform.h" namespace v8 { namespace base { +// Explicit instantiations for commonly used comparisons. 
+#define DEFINE_MAKE_CHECK_OP_STRING(type) \ + template std::string* MakeCheckOpString( \ + type const&, type const&, char const*); +DEFINE_MAKE_CHECK_OP_STRING(int) +DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int) +DEFINE_MAKE_CHECK_OP_STRING(long long) // NOLINT(runtime/int) +DEFINE_MAKE_CHECK_OP_STRING(unsigned int) +DEFINE_MAKE_CHECK_OP_STRING(unsigned long) // NOLINT(runtime/int) +DEFINE_MAKE_CHECK_OP_STRING(unsigned long long) // NOLINT(runtime/int) +DEFINE_MAKE_CHECK_OP_STRING(char const*) +DEFINE_MAKE_CHECK_OP_STRING(void const*) +#undef DEFINE_MAKE_CHECK_OP_STRING + + +// Explicit instantiations for floating point checks. +#define DEFINE_CHECK_OP_IMPL(NAME) \ + template std::string* Check##NAME##Impl( \ + float const& lhs, float const& rhs, char const* msg); \ + template std::string* Check##NAME##Impl( \ + double const& lhs, double const& rhs, char const* msg); +DEFINE_CHECK_OP_IMPL(EQ) +DEFINE_CHECK_OP_IMPL(NE) +DEFINE_CHECK_OP_IMPL(LE) +DEFINE_CHECK_OP_IMPL(LT) +DEFINE_CHECK_OP_IMPL(GE) +DEFINE_CHECK_OP_IMPL(GT) +#undef DEFINE_CHECK_OP_IMPL + + // Attempts to dump a backtrace (if supported). void DumpBacktrace() { #if V8_LIBC_GLIBC || V8_OS_BSD @@ -68,7 +99,8 @@ void DumpBacktrace() { #endif // V8_LIBC_GLIBC || V8_OS_BSD } -} } // namespace v8::base +} // namespace base +} // namespace v8 // Contains protection against recursive calls (faults while handling faults). diff --git a/src/base/logging.h b/src/base/logging.h index d228eb0bf4..f54f10c1cd 100644 --- a/src/base/logging.h +++ b/src/base/logging.h @@ -5,8 +5,9 @@ #ifndef V8_BASE_LOGGING_H_ #define V8_BASE_LOGGING_H_ -#include -#include +#include +#include +#include #include "src/base/build_config.h" @@ -31,186 +32,124 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...); #endif -// The CHECK macro checks that the given condition is true; if not, it -// prints a message to stderr and aborts. -#define CHECK(condition) \ - do { \ - if (V8_UNLIKELY(!(condition))) { \ - V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \ - } \ +namespace v8 { +namespace base { + +// CHECK dies with a fatal error if condition is not true. It is *not* +// controlled by DEBUG, so the check will be executed regardless of +// compilation mode. +// +// We make sure CHECK et al. always evaluates their arguments, as +// doing CHECK(FunctionWithSideEffect()) is a common idiom. +#define CHECK(condition) \ + do { \ + if (V8_UNLIKELY(!(condition))) { \ + V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", #condition); \ + } \ } while (0) -// Helper function used by the CHECK_EQ function when given int -// arguments. Should not be called directly. -inline void CheckEqualsHelper(const char* file, int line, - const char* expected_source, int expected, - const char* value_source, int value) { - if (V8_UNLIKELY(expected != value)) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %i\n# Found: %i", - expected_source, value_source, expected, value); - } -} +#ifdef DEBUG +// Helper macro for binary operators. +// Don't use this macro directly in your code, use CHECK_EQ et al below. +#define CHECK_OP(name, op, lhs, rhs) \ + do { \ + if (std::string* _msg = ::v8::base::Check##name##Impl( \ + (lhs), (rhs), #lhs " " #op " " #rhs)) { \ + V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \ + delete _msg; \ + } \ + } while (0) -// Helper function used by the CHECK_EQ function when given int64_t -// arguments. Should not be called directly. 
-inline void CheckEqualsHelper(const char* file, int line, - const char* expected_source, - int64_t expected, - const char* value_source, - int64_t value) { - if (V8_UNLIKELY(expected != value)) { - // Print int64_t values in hex, as two int32s, - // to avoid platform-dependencies. - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n#" - " Expected: 0x%08x%08x\n# Found: 0x%08x%08x", - expected_source, value_source, - static_cast(expected >> 32), - static_cast(expected), - static_cast(value >> 32), - static_cast(value)); - } -} +#else +// Make all CHECK functions discard their log strings to reduce code +// bloat for official release builds. + +#define CHECK_OP(name, op, lhs, rhs) CHECK((lhs)op(rhs)) -// 32-bit AIX defines intptr_t as long int. -#if V8_OS_AIX && V8_HOST_ARCH_32_BIT -// Helper function used by the CHECK_EQ function when given intptr_t -// arguments. Should not be called directly. -inline void CheckEqualsHelper(const char* file, int line, - const char* expected_source, intptr_t expected, - const char* value_source, intptr_t value) { - if (expected != value) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n#" - " Expected: 0x%lx\n# Found: 0x%lx", - expected_source, value_source, expected, value); - } -} #endif -// Helper function used by the CHECK_NE function when given int -// arguments. Should not be called directly. -inline void CheckNonEqualsHelper(const char* file, - int line, - const char* unexpected_source, - int unexpected, - const char* value_source, - int value) { - if (V8_UNLIKELY(unexpected == value)) { - V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i", - unexpected_source, value_source, value); - } +// Build the error message string. This is separate from the "Impl" +// function template because it is not performance critical and so can +// be out of line, while the "Impl" code should be inline. Caller +// takes ownership of the returned string. +template +std::string* MakeCheckOpString(Lhs const& lhs, Rhs const& rhs, + char const* msg) { + std::ostringstream ss; + ss << msg << " (" << lhs << " vs. " << rhs << ")"; + return new std::string(ss.str()); } - -// Helper function used by the CHECK function when given string -// arguments. Should not be called directly. -inline void CheckEqualsHelper(const char* file, - int line, - const char* expected_source, - const char* expected, - const char* value_source, - const char* value) { - if (V8_UNLIKELY((expected == NULL && value != NULL) || - (expected != NULL && value == NULL) || - (expected != NULL && value != NULL && - strcmp(expected, value) != 0))) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s", - expected_source, value_source, expected, value); - } -} +// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated +// in logging.cc. 
+#define DEFINE_MAKE_CHECK_OP_STRING(type) \ + extern template std::string* MakeCheckOpString( \ + type const&, type const&, char const*); +DEFINE_MAKE_CHECK_OP_STRING(int) +DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int) +DEFINE_MAKE_CHECK_OP_STRING(long long) // NOLINT(runtime/int) +DEFINE_MAKE_CHECK_OP_STRING(unsigned int) +DEFINE_MAKE_CHECK_OP_STRING(unsigned long) // NOLINT(runtime/int) +DEFINE_MAKE_CHECK_OP_STRING(unsigned long long) // NOLINT(runtime/int) +DEFINE_MAKE_CHECK_OP_STRING(char const*) +DEFINE_MAKE_CHECK_OP_STRING(void const*) +#undef DEFINE_MAKE_CHECK_OP_STRING -inline void CheckNonEqualsHelper(const char* file, - int line, - const char* expected_source, - const char* expected, - const char* value_source, - const char* value) { - if (V8_UNLIKELY(expected == value || (expected != NULL && value != NULL && - strcmp(expected, value) == 0))) { - V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s", - expected_source, value_source, value); - } -} +// Helper functions for CHECK_OP macro. +// The (int, int) specialization works around the issue that the compiler +// will not instantiate the template version of the function on values of +// unnamed enum type - see comment below. +// The (float, float) and (double, double) instantiations are explicitly +// externialized to ensure proper 32/64-bit comparisons on x86. +#define DEFINE_CHECK_OP_IMPL(NAME, op) \ + template \ + V8_INLINE std::string* Check##NAME##Impl(Lhs const& lhs, Rhs const& rhs, \ + char const* msg) { \ + return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \ + } \ + V8_INLINE std::string* Check##NAME##Impl(int lhs, int rhs, \ + char const* msg) { \ + return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \ + } \ + extern template std::string* Check##NAME##Impl( \ + float const& lhs, float const& rhs, char const* msg); \ + extern template std::string* Check##NAME##Impl( \ + double const& lhs, double const& rhs, char const* msg); +DEFINE_CHECK_OP_IMPL(EQ, ==) +DEFINE_CHECK_OP_IMPL(NE, !=) +DEFINE_CHECK_OP_IMPL(LE, <=) +DEFINE_CHECK_OP_IMPL(LT, < ) +DEFINE_CHECK_OP_IMPL(GE, >=) +DEFINE_CHECK_OP_IMPL(GT, > ) +#undef DEFINE_CHECK_OP_IMPL +#define CHECK_EQ(lhs, rhs) CHECK_OP(EQ, ==, lhs, rhs) +#define CHECK_NE(lhs, rhs) CHECK_OP(NE, !=, lhs, rhs) +#define CHECK_LE(lhs, rhs) CHECK_OP(LE, <=, lhs, rhs) +#define CHECK_LT(lhs, rhs) CHECK_OP(LT, <, lhs, rhs) +#define CHECK_GE(lhs, rhs) CHECK_OP(GE, >=, lhs, rhs) +#define CHECK_GT(lhs, rhs) CHECK_OP(GT, >, lhs, rhs) +#define CHECK_NULL(val) CHECK((val) == nullptr) +#define CHECK_NOT_NULL(val) CHECK((val) != nullptr) +#define CHECK_IMPLIES(lhs, rhs) CHECK(!(lhs) || (rhs)) -// Helper function used by the CHECK function when given pointer -// arguments. Should not be called directly. 
-inline void CheckEqualsHelper(const char* file, - int line, - const char* expected_source, - const void* expected, - const char* value_source, - const void* value) { - if (V8_UNLIKELY(expected != value)) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p", - expected_source, value_source, - expected, value); - } -} - - -inline void CheckNonEqualsHelper(const char* file, - int line, - const char* expected_source, - const void* expected, - const char* value_source, - const void* value) { - if (V8_UNLIKELY(expected == value)) { - V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p", - expected_source, value_source, value); - } -} - - -inline void CheckNonEqualsHelper(const char* file, - int line, - const char* expected_source, - int64_t expected, - const char* value_source, - int64_t value) { - if (V8_UNLIKELY(expected == value)) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f", - expected_source, value_source, expected, value); - } -} - - -#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \ - #expected, expected, #value, value) - - -#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \ - #unexpected, unexpected, #value, value) - - -#define CHECK_GT(a, b) CHECK((a) > (b)) -#define CHECK_GE(a, b) CHECK((a) >= (b)) -#define CHECK_LT(a, b) CHECK((a) < (b)) -#define CHECK_LE(a, b) CHECK((a) <= (b)) - - -namespace v8 { -namespace base { // Exposed for making debugging easier (to see where your function is being // called, just add a call to DumpBacktrace). void DumpBacktrace(); -} } // namespace v8::base +} // namespace base +} // namespace v8 // The DCHECK macro is equivalent to CHECK except that it only // generates code in debug builds. +// TODO(bmeurer): DCHECK_RESULT(expr) must die! #ifdef DEBUG #define DCHECK_RESULT(expr) CHECK(expr) #define DCHECK(condition) CHECK(condition) @@ -219,6 +158,9 @@ void DumpBacktrace(); #define DCHECK_GE(v1, v2) CHECK_GE(v1, v2) #define DCHECK_LT(v1, v2) CHECK_LT(v1, v2) #define DCHECK_LE(v1, v2) CHECK_LE(v1, v2) +#define DCHECK_NULL(val) CHECK_NULL(val) +#define DCHECK_NOT_NULL(val) CHECK_NOT_NULL(val) +#define DCHECK_IMPLIES(v1, v2) CHECK_IMPLIES(v1, v2) #else #define DCHECK_RESULT(expr) (expr) #define DCHECK(condition) ((void) 0) @@ -227,8 +169,9 @@ void DumpBacktrace(); #define DCHECK_GE(v1, v2) ((void) 0) #define DCHECK_LT(v1, v2) ((void) 0) #define DCHECK_LE(v1, v2) ((void) 0) +#define DCHECK_NULL(val) ((void) 0) +#define DCHECK_NOT_NULL(val) ((void) 0) +#define DCHECK_IMPLIES(v1, v2) ((void) 0) #endif -#define DCHECK_NOT_NULL(p) DCHECK_NE(NULL, p) - #endif // V8_BASE_LOGGING_H_ diff --git a/src/base/platform/condition-variable.cc b/src/base/platform/condition-variable.cc index 4547b66f7a..b91025a3db 100644 --- a/src/base/platform/condition-variable.cc +++ b/src/base/platform/condition-variable.cc @@ -182,7 +182,7 @@ void ConditionVariable::NativeHandle::Post(Event* event, bool result) { // Remove the event from the wait list. 
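An aside for readers skimming the patch (not part of the change itself): the logging.h and logging.cc hunks above replace the hand-written CheckEqualsHelper/CheckNonEqualsHelper overloads with a single template-based CHECK_OP path that stringifies the failing expression and streams both operand values. A rough sketch of how the new macros behave at a call site, assuming only what is defined in those hunks (the function and its arguments are invented for illustration):

#include "src/base/logging.h"

void Example(int argc, int* arg_count_ptr) {
  // CHECK_EQ(2, argc) expands to CHECK_OP(EQ, ==, 2, argc). In debug builds
  // CHECK_OP calls CheckEQImpl(2, argc, "2 == argc"); on failure that returns
  // a string built by MakeCheckOpString, and V8_Fatal reports roughly
  //
  //   Check failed: 2 == argc (2 vs. 3).
  //
  // In release builds CHECK_OP degrades to CHECK((2) == (argc)), so the
  // operands are still evaluated but their values are no longer printed.
  CHECK_EQ(2, argc);

  // The null and implication helpers added at the end of the logging.h hunk:
  DCHECK_NOT_NULL(arg_count_ptr);                     // DCHECK((arg_count_ptr) != nullptr)
  DCHECK_IMPLIES(argc > 0, arg_count_ptr != nullptr); // DCHECK(!(argc > 0) || (...))
}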
for (Event** wep = &waitlist_;; wep = &(*wep)->next_) { - DCHECK_NE(NULL, *wep); + DCHECK(*wep); if (*wep == event) { *wep = event->next_; break; diff --git a/src/base/platform/time.cc b/src/base/platform/time.cc index 40dd188db3..6734218e50 100644 --- a/src/base/platform/time.cc +++ b/src/base/platform/time.cc @@ -13,7 +13,8 @@ #include #endif -#include +#include +#include #if V8_OS_WIN #include "src/base/lazy-instance.h" @@ -355,6 +356,11 @@ double Time::ToJsTime() const { } +std::ostream& operator<<(std::ostream& os, const Time& time) { + return os << time.ToJsTime(); +} + + #if V8_OS_WIN class TickClock { diff --git a/src/base/platform/time.h b/src/base/platform/time.h index 9dfa47d4e5..887664e7ba 100644 --- a/src/base/platform/time.h +++ b/src/base/platform/time.h @@ -5,7 +5,8 @@ #ifndef V8_BASE_PLATFORM_TIME_H_ #define V8_BASE_PLATFORM_TIME_H_ -#include +#include +#include #include #include "src/base/macros.h" @@ -280,6 +281,8 @@ class Time FINAL { int64_t us_; }; +std::ostream& operator<<(std::ostream&, const Time&); + inline Time operator+(const TimeDelta& delta, const Time& time) { return time + delta; } diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc index aef94282ca..f5baafc42f 100644 --- a/src/bootstrapper.cc +++ b/src/bootstrapper.cc @@ -1491,7 +1491,7 @@ static Handle ResolveBuiltinIdHolder(Handle native_context, .ToHandleChecked()); } const char* inner = period_pos + 1; - DCHECK_EQ(NULL, strchr(inner, '.')); + DCHECK(!strchr(inner, '.')); Vector property(holder_expr, static_cast(period_pos - holder_expr)); Handle property_string = factory->InternalizeUtf8String(property); diff --git a/src/checks.cc b/src/checks.cc index e5a4caa6c8..2871a66c64 100644 --- a/src/checks.cc +++ b/src/checks.cc @@ -4,85 +4,6 @@ #include "src/checks.h" -#include "src/v8.h" - namespace v8 { -namespace internal { - -intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; } - -} } // namespace v8::internal - - -static bool CheckEqualsStrict(volatile double* exp, volatile double* val) { - v8::internal::DoubleRepresentation exp_rep(*exp); - v8::internal::DoubleRepresentation val_rep(*val); - if (std::isnan(exp_rep.value) && std::isnan(val_rep.value)) return true; - return exp_rep.bits == val_rep.bits; -} - - -void CheckEqualsHelper(const char* file, int line, const char* expected_source, - double expected, const char* value_source, - double value) { - // Force values to 64 bit memory to truncate 80 bit precision on IA32. - volatile double* exp = new double[1]; - *exp = expected; - volatile double* val = new double[1]; - *val = value; - if (!CheckEqualsStrict(exp, val)) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f", - expected_source, value_source, *exp, *val); - } - delete[] exp; - delete[] val; -} - - -void CheckNonEqualsHelper(const char* file, int line, - const char* expected_source, double expected, - const char* value_source, double value) { - // Force values to 64 bit memory to truncate 80 bit precision on IA32. 
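A small but easy-to-miss piece of the time.h/time.cc hunks above: base::Time gains an operator<< (forwarding to ToJsTime()), which is what lets the new value-printing CHECK_*/DCHECK_* macros stream Time operands. A minimal sketch, not part of the patch (variable names invented):

#include <sstream>

#include "src/base/logging.h"
#include "src/base/platform/time.h"

void TimeSketch() {
  v8::base::Time start = v8::base::Time::Now();
  v8::base::Time deadline = start;
  std::ostringstream os;
  os << start;                // prints milliseconds since the epoch (ToJsTime)
  CHECK_EQ(start, deadline);  // on failure, both times would now be printed
}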
- volatile double* exp = new double[1]; - *exp = expected; - volatile double* val = new double[1]; - *val = value; - if (CheckEqualsStrict(exp, val)) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f", - expected_source, value_source, *exp, *val); - } - delete[] exp; - delete[] val; -} - - -void CheckEqualsHelper(const char* file, - int line, - const char* expected_source, - v8::Handle expected, - const char* value_source, - v8::Handle value) { - if (!expected->Equals(value)) { - v8::String::Utf8Value value_str(value); - v8::String::Utf8Value expected_str(expected); - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s", - expected_source, value_source, *expected_str, *value_str); - } -} - - -void CheckNonEqualsHelper(const char* file, - int line, - const char* unexpected_source, - v8::Handle unexpected, - const char* value_source, - v8::Handle value) { - if (unexpected->Equals(value)) { - v8::String::Utf8Value value_str(value); - V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s", - unexpected_source, value_source, *value_str); - } -} +namespace internal {} // namespace internal +} // namespace v8 diff --git a/src/checks.h b/src/checks.h index 6ba64c1225..54ac92649b 100644 --- a/src/checks.h +++ b/src/checks.h @@ -5,6 +5,7 @@ #ifndef V8_CHECKS_H_ #define V8_CHECKS_H_ +#include "include/v8.h" #include "src/base/logging.h" namespace v8 { @@ -14,8 +15,6 @@ template class Handle; namespace internal { -intptr_t HeapObjectTagMask(); - #ifdef ENABLE_SLOW_DCHECKS #define SLOW_DCHECK(condition) \ CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition)) @@ -27,30 +26,11 @@ const bool FLAG_enable_slow_asserts = false; } } // namespace v8::internal +#define DCHECK_TAG_ALIGNED(address) \ + DCHECK((reinterpret_cast(address) & \ + ::v8::internal::kHeapObjectTagMask) == 0) -void CheckNonEqualsHelper(const char* file, int line, - const char* expected_source, double expected, - const char* value_source, double value); - -void CheckEqualsHelper(const char* file, int line, const char* expected_source, - double expected, const char* value_source, double value); - -void CheckNonEqualsHelper(const char* file, int line, - const char* unexpected_source, - v8::Handle unexpected, - const char* value_source, - v8::Handle value); - -void CheckEqualsHelper(const char* file, - int line, - const char* expected_source, - v8::Handle expected, - const char* value_source, - v8::Handle value); - -#define DCHECK_TAG_ALIGNED(address) \ - DCHECK((reinterpret_cast(address) & HeapObjectTagMask()) == 0) - -#define DCHECK_SIZE_TAG_ALIGNED(size) DCHECK((size & HeapObjectTagMask()) == 0) +#define DCHECK_SIZE_TAG_ALIGNED(size) \ + DCHECK((size & ::v8::internal::kHeapObjectTagMask) == 0) #endif // V8_CHECKS_H_ diff --git a/src/compiler.cc b/src/compiler.cc index 7766778d47..d31ca9e11b 100644 --- a/src/compiler.cc +++ b/src/compiler.cc @@ -208,7 +208,7 @@ CompilationInfo::~CompilationInfo() { // Check that no dependent maps have been added or added dependent maps have // been rolled back or committed. 
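The checks.cc/checks.h hunks above also drop the HeapObjectTagMask() helper function; the tag-alignment macros now read the v8::internal::kHeapObjectTagMask constant directly (which is why checks.h starts including include/v8.h). A sketch of what the rewritten macro asserts, with a made-up argument (not part of the patch):

#include "src/checks.h"

void TagAlignmentSketch(char* raw_allocation_top) {
  // DCHECK_TAG_ALIGNED now checks that the low kHeapObjectTagMask bits of the
  // address are zero, i.e. that the value is an untagged, allocation-aligned
  // address rather than a tagged HeapObject pointer.
  DCHECK_TAG_ALIGNED(raw_allocation_top);
}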
for (int i = 0; i < DependentCode::kGroupCount; i++) { - DCHECK_EQ(NULL, dependencies_[i]); + DCHECK(!dependencies_[i]); } #endif // DEBUG } diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc index 51ac2070c8..894584b8d3 100644 --- a/src/compiler/arm/code-generator-arm.cc +++ b/src/compiler/arm/code-generator-arm.cc @@ -745,7 +745,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, // Materialize a full 32-bit 1 or 0 value. The result register is always the // last output of the instruction. - DCHECK_NE(0, instr->OutputCount()); + DCHECK_NE(0u, instr->OutputCount()); Register reg = i.OutputRegister(instr->OutputCount() - 1); Condition cc = FlagsConditionToCondition(condition); __ mov(reg, Operand(0)); diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc index 64393dd91f..7219ca3b30 100644 --- a/src/compiler/arm/instruction-selector-arm.cc +++ b/src/compiler/arm/instruction-selector-arm.cc @@ -233,8 +233,8 @@ void VisitBinop(InstructionSelector* selector, Node* node, outputs[output_count++] = g.DefineAsRegister(cont->result()); } - DCHECK_NE(0, input_count); - DCHECK_NE(0, output_count); + DCHECK_NE(0u, input_count); + DCHECK_NE(0u, output_count); DCHECK_GE(arraysize(inputs), input_count); DCHECK_GE(arraysize(outputs), output_count); DCHECK_NE(kMode_None, AddressingModeField::decode(opcode)); @@ -448,8 +448,8 @@ void EmitBic(InstructionSelector* selector, Node* node, Node* left, void EmitUbfx(InstructionSelector* selector, Node* node, Node* left, uint32_t lsb, uint32_t width) { - DCHECK_LE(1, width); - DCHECK_LE(width, 32 - lsb); + DCHECK_LE(1u, width); + DCHECK_LE(width, 32u - lsb); ArmOperandGenerator g(selector); selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left), g.TempImmediate(lsb), g.TempImmediate(width)); @@ -481,7 +481,7 @@ void InstructionSelector::VisitWord32And(Node* node) { uint32_t msb = base::bits::CountLeadingZeros32(value); // Try to interpret this AND as UBFX. 
if (IsSupported(ARMv7) && width != 0 && msb + width == 32) { - DCHECK_EQ(0, base::bits::CountTrailingZeros32(value)); + DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value)); if (m.left().IsWord32Shr()) { Int32BinopMatcher mleft(m.left().node()); if (mleft.right().IsInRange(0, 31)) { @@ -550,10 +550,11 @@ void InstructionSelector::VisitWord32Xor(Node* node) { } +namespace { + template -static inline void VisitShift(InstructionSelector* selector, Node* node, - TryMatchShift try_match_shift, - FlagsContinuation* cont) { +void VisitShift(InstructionSelector* selector, Node* node, + TryMatchShift try_match_shift, FlagsContinuation* cont) { ArmOperandGenerator g(selector); InstructionCode opcode = kArmMov; InstructionOperand* inputs[4]; @@ -573,8 +574,8 @@ static inline void VisitShift(InstructionSelector* selector, Node* node, outputs[output_count++] = g.DefineAsRegister(cont->result()); } - DCHECK_NE(0, input_count); - DCHECK_NE(0, output_count); + DCHECK_NE(0u, input_count); + DCHECK_NE(0u, output_count); DCHECK_GE(arraysize(inputs), input_count); DCHECK_GE(arraysize(outputs), output_count); DCHECK_NE(kMode_None, AddressingModeField::decode(opcode)); @@ -586,12 +587,14 @@ static inline void VisitShift(InstructionSelector* selector, Node* node, template -static inline void VisitShift(InstructionSelector* selector, Node* node, +void VisitShift(InstructionSelector* selector, Node* node, TryMatchShift try_match_shift) { FlagsContinuation cont; VisitShift(selector, node, try_match_shift, &cont); } +} // namespace + void InstructionSelector::VisitWord32Shl(Node* node) { VisitShift(this, node, TryMatchLSL); @@ -603,7 +606,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) { Int32BinopMatcher m(node); if (IsSupported(ARMv7) && m.left().IsWord32And() && m.right().IsInRange(0, 31)) { - int32_t lsb = m.right().Value(); + uint32_t lsb = m.right().Value(); Int32BinopMatcher mleft(m.left().node()); if (mleft.right().HasValue()) { uint32_t value = (mleft.right().Value() >> lsb) << lsb; @@ -1123,7 +1126,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node, outputs[output_count++] = g.DefineAsRegister(cont->result()); } - DCHECK_NE(0, input_count); + DCHECK_NE(0u, input_count); DCHECK_GE(arraysize(inputs), input_count); DCHECK_GE(arraysize(outputs), output_count); diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc index 27cb4e7ae0..bea4805f56 100644 --- a/src/compiler/arm64/code-generator-arm64.cc +++ b/src/compiler/arm64/code-generator-arm64.cc @@ -846,7 +846,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, // Materialize a full 64-bit 1 or 0 value. The result register is always the // last output of the instruction. 
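Most of the mechanical churn in this patch follows the pattern visible in the code-generator and instruction-selector hunks above: integer literals grow a u suffix (DCHECK_NE(0, ...) becomes DCHECK_NE(0u, ...)) and a few locals such as lsb switch from int32_t to uint32_t. The old helpers funnelled everything through fixed int/int64_t overloads; the new Check*Impl templates deduce both operand types and compare them directly, so a signed literal against an unsigned expression would now surface as a signed/unsigned-comparison warning in V8's warnings-as-errors build. A contrived sketch, not part of the patch (names invented):

#include <vector>

#include "src/base/logging.h"

size_t CountInputs(const std::vector<int>& inputs) {
  size_t input_count = inputs.size();
  // Previously: DCHECK_NE(0, input_count) -- silently routed through the int
  // helper. Now the comparison is done on the deduced types, so the literal
  // is written as unsigned to keep it homogeneous with size_t.
  DCHECK_NE(0u, input_count);
  return input_count;
}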
- DCHECK_NE(0, instr->OutputCount()); + DCHECK_NE(0u, instr->OutputCount()); Register reg = i.OutputRegister(instr->OutputCount() - 1); Condition cc = FlagsConditionToCondition(condition); __ Cset(reg, cc); diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc index 82c0bea1ce..4063f8a995 100644 --- a/src/compiler/arm64/instruction-selector-arm64.cc +++ b/src/compiler/arm64/instruction-selector-arm64.cc @@ -215,8 +215,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node, outputs[output_count++] = g.DefineAsRegister(cont->result()); } - DCHECK_NE(0, input_count); - DCHECK_NE(0, output_count); + DCHECK_NE(0u, input_count); + DCHECK_NE(0u, output_count); DCHECK_GE(arraysize(inputs), input_count); DCHECK_GE(arraysize(outputs), output_count); @@ -507,7 +507,7 @@ void InstructionSelector::VisitWord32And(Node* node) { uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); if ((mask_width != 0) && (mask_msb + mask_width == 32)) { // The mask must be contiguous, and occupy the least-significant bits. - DCHECK_EQ(0, base::bits::CountTrailingZeros32(mask)); + DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask)); // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least // significant bits. @@ -544,7 +544,7 @@ void InstructionSelector::VisitWord64And(Node* node) { uint64_t mask_msb = base::bits::CountLeadingZeros64(mask); if ((mask_width != 0) && (mask_msb + mask_width == 64)) { // The mask must be contiguous, and occupy the least-significant bits. - DCHECK_EQ(0, base::bits::CountTrailingZeros64(mask)); + DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least // significant bits. @@ -628,7 +628,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) { Arm64OperandGenerator g(this); Int32BinopMatcher m(node); if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) { - int32_t lsb = m.right().Value(); + uint32_t lsb = m.right().Value(); Int32BinopMatcher mleft(m.left().node()); if (mleft.right().HasValue()) { uint32_t mask = (mleft.right().Value() >> lsb) << lsb; @@ -653,7 +653,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) { Arm64OperandGenerator g(this); Int64BinopMatcher m(node); if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) { - int64_t lsb = m.right().Value(); + uint64_t lsb = m.right().Value(); Int64BinopMatcher mleft(m.left().node()); if (mleft.right().HasValue()) { // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc index 72c6ec984a..3a56ece0c0 100644 --- a/src/compiler/code-generator.cc +++ b/src/compiler/code-generator.cc @@ -281,7 +281,7 @@ void CodeGenerator::PopulateDeoptimizationData(Handle code_object) { for (int i = 0; i < deopt_count; i++) { DeoptimizationState* deoptimization_state = deoptimization_states_[i]; data->SetAstId(i, deoptimization_state->bailout_id()); - CHECK_NE(NULL, deoptimization_states_[i]); + CHECK(deoptimization_states_[i]); data->SetTranslationIndex( i, Smi::FromInt(deoptimization_states_[i]->translation_id())); data->SetArgumentsStackHeight(i, Smi::FromInt(0)); diff --git a/src/compiler/control-reducer.cc b/src/compiler/control-reducer.cc index b7dec3622f..105bdfe201 100644 --- a/src/compiler/control-reducer.cc +++ b/src/compiler/control-reducer.cc @@ -296,7 +296,7 @@ class ControlReducerImpl { for (size_t j = 0; j < nodes.size(); j++) { Node* node = nodes[j]; for (Node* const 
input : node->inputs()) { - CHECK_NE(NULL, input); + CHECK(input); } for (Node* const use : node->uses()) { CHECK(marked.IsReachableFromEnd(use)); @@ -319,7 +319,7 @@ class ControlReducerImpl { // Recurse on an input if necessary. for (Node* const input : node->inputs()) { - CHECK_NE(NULL, input); + DCHECK(input); if (Recurse(input)) return; } diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc index fd9b8049f5..525451a13b 100644 --- a/src/compiler/ia32/code-generator-ia32.cc +++ b/src/compiler/ia32/code-generator-ia32.cc @@ -768,7 +768,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, // Materialize a full 32-bit 1 or 0 value. The result register is always the // last output of the instruction. Label check; - DCHECK_NE(0, instr->OutputCount()); + DCHECK_NE(0u, instr->OutputCount()); Register reg = i.OutputRegister(instr->OutputCount() - 1); Condition cc = no_condition; switch (condition) { diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc index 233fa37670..b81210a705 100644 --- a/src/compiler/ia32/instruction-selector-ia32.cc +++ b/src/compiler/ia32/instruction-selector-ia32.cc @@ -370,8 +370,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node, outputs[output_count++] = g.DefineAsRegister(cont->result()); } - DCHECK_NE(0, input_count); - DCHECK_NE(0, output_count); + DCHECK_NE(0u, input_count); + DCHECK_NE(0u, output_count); DCHECK_GE(arraysize(inputs), input_count); DCHECK_GE(arraysize(outputs), output_count); diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc index da58698835..fe9c287bc7 100644 --- a/src/compiler/instruction-selector.cc +++ b/src/compiler/instruction-selector.cc @@ -40,7 +40,7 @@ void InstructionSelector::SelectInstructions() { BasicBlockVector* blocks = schedule()->rpo_order(); for (auto const block : *blocks) { if (!block->IsLoopHeader()) continue; - DCHECK_LE(2, block->PredecessorCount()); + DCHECK_LE(2u, block->PredecessorCount()); for (Node* const phi : *block) { if (phi->opcode() != IrOpcode::kPhi) continue; @@ -342,7 +342,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, if (use->opcode() != IrOpcode::kProjection) continue; size_t const index = ProjectionIndexOf(use->op()); DCHECK_LT(index, buffer->output_nodes.size()); - DCHECK_EQ(nullptr, buffer->output_nodes[index]); + DCHECK(!buffer->output_nodes[index]); buffer->output_nodes[index] = use; } } @@ -435,7 +435,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, if (static_cast(stack_index) >= buffer->pushed_nodes.size()) { buffer->pushed_nodes.resize(stack_index + 1, NULL); } - DCHECK_EQ(NULL, buffer->pushed_nodes[stack_index]); + DCHECK(!buffer->pushed_nodes[stack_index]); buffer->pushed_nodes[stack_index] = *iter; pushed_count++; } else { @@ -450,7 +450,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, void InstructionSelector::VisitBlock(BasicBlock* block) { - DCHECK_EQ(NULL, current_block_); + DCHECK(!current_block_); current_block_ = block; int current_block_end = static_cast(instructions_.size()); diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc index ce2c076615..34a3151009 100644 --- a/src/compiler/instruction.cc +++ b/src/compiler/instruction.cc @@ -447,7 +447,7 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor( size_t rpo_number = 0; for (BasicBlockVector::const_iterator it = 
schedule->rpo_order()->begin(); it != schedule->rpo_order()->end(); ++it, ++rpo_number) { - DCHECK_EQ(NULL, (*blocks)[rpo_number]); + DCHECK(!(*blocks)[rpo_number]); DCHECK((*it)->GetRpoNumber().ToSize() == rpo_number); (*blocks)[rpo_number] = InstructionBlockFor(zone, *it); } diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h index 3dda702d69..21750e7257 100644 --- a/src/compiler/instruction.h +++ b/src/compiler/instruction.h @@ -520,7 +520,7 @@ class Instruction : public ZoneObject { void set_pointer_map(PointerMap* map) { DCHECK(NeedsPointerMap()); - DCHECK_EQ(NULL, pointer_map_); + DCHECK(!pointer_map_); pointer_map_ = map; } diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc index d45877b901..42fc58ec38 100644 --- a/src/compiler/js-inlining.cc +++ b/src/compiler/js-inlining.cc @@ -84,7 +84,7 @@ class Inlinee { // Counts only formal parameters. size_t formal_parameters() { - DCHECK_GE(total_parameters(), 3); + DCHECK_GE(total_parameters(), 3u); return total_parameters() - 3; } @@ -176,7 +176,7 @@ class CopyVisitor : public NullNodeVisitor { if (copy == NULL) { copy = GetSentinel(original); } - DCHECK_NE(NULL, copy); + DCHECK(copy); return copy; } @@ -193,7 +193,7 @@ class CopyVisitor : public NullNodeVisitor { Node* sentinel = sentinels_[id]; if (sentinel == NULL) continue; Node* copy = copies_[id]; - DCHECK_NE(NULL, copy); + DCHECK(copy); sentinel->ReplaceUses(copy); } } diff --git a/src/compiler/loop-peeling.cc b/src/compiler/loop-peeling.cc index 1bdf71a7d1..39f487f854 100644 --- a/src/compiler/loop-peeling.cc +++ b/src/compiler/loop-peeling.cc @@ -268,7 +268,7 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common, } } // There should be a merge or a return for each exit. - CHECK_NE(NULL, found); + CHECK(found); } // Return nodes, the end merge, and the phis associated with the end merge // must be duplicated as well. diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc index 95c6eaa318..8f91d49f81 100644 --- a/src/compiler/machine-operator-reducer.cc +++ b/src/compiler/machine-operator-reducer.cc @@ -103,7 +103,7 @@ Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) { Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) { - DCHECK_LT(0, divisor); + DCHECK_LT(0u, divisor); // If the divisor is even, we can avoid using the expensive fixup by shifting // the dividend upfront. 
unsigned const shift = base::bits::CountTrailingZeros32(divisor); @@ -115,7 +115,7 @@ Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) { Node* quotient = graph()->NewNode(machine()->Uint32MulHigh(), dividend, Uint32Constant(mag.multiplier)); if (mag.add) { - DCHECK_LE(1, mag.shift); + DCHECK_LE(1u, mag.shift); quotient = Word32Shr( Int32Add(Word32Shr(Int32Sub(dividend, quotient), 1), quotient), mag.shift - 1); @@ -520,7 +520,7 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) { Node* quotient = dividend; if (base::bits::IsPowerOfTwo32(Abs(divisor))) { uint32_t const shift = WhichPowerOf2Abs(divisor); - DCHECK_NE(0, shift); + DCHECK_NE(0u, shift); if (shift > 1) { quotient = Word32Sar(quotient, 31); } diff --git a/src/compiler/move-optimizer.cc b/src/compiler/move-optimizer.cc index 330f32f65d..855256e92c 100644 --- a/src/compiler/move-optimizer.cc +++ b/src/compiler/move-optimizer.cc @@ -83,11 +83,11 @@ static MoveOperands* PrepareInsertAfter(ParallelMove* left, MoveOperands* move, for (auto curr = move_ops->begin(); curr != move_ops->end(); ++curr) { if (curr->IsEliminated()) continue; if (curr->destination()->Equals(move->source())) { - DCHECK_EQ(nullptr, replacement); + DCHECK(!replacement); replacement = curr; if (to_eliminate != nullptr) break; } else if (curr->destination()->Equals(move->destination())) { - DCHECK_EQ(nullptr, to_eliminate); + DCHECK(!to_eliminate); to_eliminate = curr; if (replacement != nullptr) break; } diff --git a/src/compiler/node.cc b/src/compiler/node.cc index a4680e4636..d38e9ceff7 100644 --- a/src/compiler/node.cc +++ b/src/compiler/node.cc @@ -134,7 +134,7 @@ void Node::ReplaceUses(Node* replace_to) { use->from->GetInputRecordPtr(use->input_index)->to = replace_to; } if (!replace_to->last_use_) { - DCHECK_EQ(nullptr, replace_to->first_use_); + DCHECK(!replace_to->first_use_); replace_to->first_use_ = first_use_; replace_to->last_use_ = last_use_; } else if (first_use_) { diff --git a/src/compiler/osr.cc b/src/compiler/osr.cc index 6f30963067..a4b845249f 100644 --- a/src/compiler/osr.cc +++ b/src/compiler/osr.cc @@ -40,18 +40,18 @@ bool OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common, if (osr_loop_entry == nullptr) { // No OSR entry found, do nothing. - CHECK_NE(nullptr, osr_normal_entry); + CHECK(osr_normal_entry); return true; } for (Node* use : osr_loop_entry->uses()) { if (use->opcode() == IrOpcode::kLoop) { - CHECK_EQ(nullptr, osr_loop); // should be only one OSR loop. + CHECK(!osr_loop); // should be only one OSR loop. osr_loop = use; // found the OSR loop. } } - CHECK_NE(nullptr, osr_loop); // Should have found the OSR loop. + CHECK(osr_loop); // Should have found the OSR loop. // Analyze the graph to determine how deeply nested the OSR loop is. 
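The other recurring rewrite, visible in the osr.cc hunk just above and throughout the compiler changes, is CHECK_EQ(NULL, x) / CHECK_NE(NULL, x) turning into CHECK(!x) / CHECK(x) or the new *_NULL / *_NOT_NULL macros. The dedicated pointer overloads of the old helpers are gone, and NULL handed to the templated CHECK_OP deduces an integral type rather than a pointer, so explicit nullptr comparisons are used instead. Illustrative only, not part of the patch:

#include "src/base/logging.h"

void NullSketch(int* ptr) {
  // Before: CHECK_NE(NULL, ptr);  -- relied on the removed pointer overload.
  // After:  either a plain boolean check or the explicit nullptr macros.
  CHECK_NOT_NULL(ptr);   // CHECK((ptr) != nullptr), active in release too
  DCHECK_NOT_NULL(ptr);  // debug-only variant
  int* other = nullptr;
  DCHECK_NULL(other);    // DCHECK((other) == nullptr)
}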
LoopTree* loop_tree = LoopFinder::BuildLoopTree(graph, tmp_zone); diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc index cf1007873b..e0667edd93 100644 --- a/src/compiler/pipeline.cc +++ b/src/compiler/pipeline.cc @@ -145,19 +145,19 @@ class PipelineData { LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; } void set_loop_assignment(LoopAssignmentAnalysis* loop_assignment) { - DCHECK_EQ(nullptr, loop_assignment_); + DCHECK(!loop_assignment_); loop_assignment_ = loop_assignment; } Node* context_node() const { return context_node_; } void set_context_node(Node* context_node) { - DCHECK_EQ(nullptr, context_node_); + DCHECK(!context_node_); context_node_ = context_node; } Schedule* schedule() const { return schedule_; } void set_schedule(Schedule* schedule) { - DCHECK_EQ(nullptr, schedule_); + DCHECK(!schedule_); schedule_ = schedule; } @@ -194,7 +194,7 @@ class PipelineData { } void InitializeInstructionSequence() { - DCHECK_EQ(nullptr, sequence_); + DCHECK(!sequence_); InstructionBlocks* instruction_blocks = InstructionSequence::InstructionBlocksFor(instruction_zone(), schedule()); @@ -205,8 +205,8 @@ class PipelineData { void InitializeRegisterAllocator(Zone* local_zone, const RegisterConfiguration* config, const char* debug_name) { - DCHECK_EQ(nullptr, register_allocator_); - DCHECK_EQ(nullptr, frame_); + DCHECK(!register_allocator_); + DCHECK(!frame_); frame_ = new (instruction_zone()) Frame(); register_allocator_ = new (instruction_zone()) RegisterAllocator(config, local_zone, frame(), sequence(), debug_name); diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc index d4471ff899..0f053331a6 100644 --- a/src/compiler/register-allocator-verifier.cc +++ b/src/compiler/register-allocator-verifier.cc @@ -20,7 +20,7 @@ static void VerifyGapEmpty(const GapInstruction* gap) { i <= GapInstruction::LAST_INNER_POSITION; i++) { GapInstruction::InnerPosition inner_pos = static_cast(i); - CHECK_EQ(NULL, gap->GetParallelMove(inner_pos)); + CHECK(!gap->GetParallelMove(inner_pos)); } } @@ -432,14 +432,14 @@ class OperandMap : public ZoneObject { for (; p != nullptr; p = p->first_pred_phi) { if (p->virtual_register == v->use_vreg) break; } - CHECK_NE(nullptr, p); + CHECK(p); } // Mark the use. it->second->use_vreg = use_vreg; return; } // Use of a phi value without definition. - CHECK(false); + UNREACHABLE(); } private: diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc index e0487fa8e3..4e5e53b2e6 100644 --- a/src/compiler/register-allocator.cc +++ b/src/compiler/register-allocator.cc @@ -183,7 +183,7 @@ void LiveRange::SetSpillOperand(InstructionOperand* operand) { void LiveRange::SetSpillRange(SpillRange* spill_range) { DCHECK(HasNoSpillType() || HasSpillRange()); - DCHECK_NE(spill_range, nullptr); + DCHECK(spill_range); spill_type_ = SpillType::kSpillRange; spill_range_ = spill_range; } diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc index f512cd2433..6281371aba 100644 --- a/src/compiler/scheduler.cc +++ b/src/compiler/scheduler.cc @@ -266,7 +266,7 @@ class CFGBuilder : public ZoneObject { // single-exit region that makes up a minimal component to be scheduled. 
if (IsSingleEntrySingleExitRegion(node, exit)) { Trace("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic()); - DCHECK_EQ(NULL, component_entry_); + DCHECK(!component_entry_); component_entry_ = node; continue; } @@ -276,7 +276,7 @@ class CFGBuilder : public ZoneObject { Queue(node->InputAt(i)); } } - DCHECK_NE(NULL, component_entry_); + DCHECK(component_entry_); for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) { ConnectBlocks(*i); // Connect block to its predecessor/successors. @@ -370,16 +370,16 @@ class CFGBuilder : public ZoneObject { buffer[1] = NULL; for (Node* use : node->uses()) { if (use->opcode() == true_opcode) { - DCHECK_EQ(NULL, buffer[0]); + DCHECK(!buffer[0]); buffer[0] = use; } if (use->opcode() == false_opcode) { - DCHECK_EQ(NULL, buffer[1]); + DCHECK(!buffer[1]); buffer[1] = use; } } - DCHECK_NE(NULL, buffer[0]); - DCHECK_NE(NULL, buffer[1]); + DCHECK(buffer[0]); + DCHECK(buffer[1]); } void CollectSuccessorBlocks(Node* node, BasicBlock** buffer, @@ -448,7 +448,7 @@ class CFGBuilder : public ZoneObject { } void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) { - DCHECK_NE(NULL, block); + DCHECK(block); if (succ == NULL) { Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(), block->id().ToInt()); @@ -533,7 +533,7 @@ class SpecialRPONumberer : public ZoneObject { // that is for the graph spanned between the schedule's start and end blocks. void ComputeSpecialRPO() { DCHECK(schedule_->end()->SuccessorCount() == 0); - DCHECK_EQ(NULL, order_); // Main order does not exist yet. + DCHECK(!order_); // Main order does not exist yet. ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end()); } @@ -541,7 +541,7 @@ class SpecialRPONumberer : public ZoneObject { // that is for the graph spanned between the given {entry} and {end} blocks, // then updates the existing ordering with this new information. void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) { - DCHECK_NE(NULL, order_); // Main order to be updated is present. + DCHECK(order_); // Main order to be updated is present. ComputeAndInsertSpecialRPO(entry, end); } diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc index 3aad6ea2b9..e040cd296c 100644 --- a/src/compiler/verifier.cc +++ b/src/compiler/verifier.cc @@ -763,8 +763,8 @@ void Verifier::Visitor::Check(Node* node) { void Verifier::Run(Graph* graph, Typing typing) { - CHECK_NE(NULL, graph->start()); - CHECK_NE(NULL, graph->end()); + CHECK_NOT_NULL(graph->start()); + CHECK_NOT_NULL(graph->end()); Zone zone; Visitor visitor(&zone, typing); for (Node* node : AllNodes(&zone, graph).live) visitor.Check(node); @@ -868,10 +868,10 @@ void ScheduleVerifier::Run(Schedule* schedule) { BasicBlock* dom = block->dominator(); if (b == 0) { // All blocks except start should have a dominator. - CHECK_EQ(NULL, dom); + CHECK_NULL(dom); } else { // Check that the immediate dominator appears somewhere before the block. 
- CHECK_NE(NULL, dom); + CHECK_NOT_NULL(dom); CHECK_LT(dom->rpo_number(), block->rpo_number()); } } diff --git a/src/compiler/zone-pool.cc b/src/compiler/zone-pool.cc index 05907a969d..2006a79d2c 100644 --- a/src/compiler/zone-pool.cc +++ b/src/compiler/zone-pool.cc @@ -106,7 +106,7 @@ Zone* ZonePool::NewEmptyZone() { zone = new Zone(); } used_.push_back(zone); - DCHECK_EQ(0, zone->allocation_size()); + DCHECK_EQ(0u, zone->allocation_size()); return zone; } @@ -129,7 +129,7 @@ void ZonePool::ReturnZone(Zone* zone) { delete zone; } else { zone->DeleteAll(); - DCHECK_EQ(0, zone->allocation_size()); + DCHECK_EQ(0u, zone->allocation_size()); unused_.push_back(zone); } } diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc index 7ddd928861..4fc36d1b9f 100644 --- a/src/deoptimizer.cc +++ b/src/deoptimizer.cc @@ -110,7 +110,7 @@ size_t Deoptimizer::GetMaxDeoptTableSize() { Deoptimizer* Deoptimizer::Grab(Isolate* isolate) { Deoptimizer* result = isolate->deoptimizer_data()->current_; - CHECK_NE(result, NULL); + CHECK_NOT_NULL(result); result->DeleteFrameDescriptions(); isolate->deoptimizer_data()->current_ = NULL; return result; @@ -901,7 +901,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, bool is_bottommost = (0 == frame_index); bool is_topmost = (output_count_ - 1 == frame_index); CHECK(frame_index >= 0 && frame_index < output_count_); - CHECK_EQ(output_[frame_index], NULL); + CHECK_NULL(output_[frame_index]); output_[frame_index] = output_frame; // The top address for the bottommost output frame can be computed from @@ -1060,7 +1060,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, output_offset -= kPointerSize; DoTranslateCommand(iterator, frame_index, output_offset); } - CHECK_EQ(0, output_offset); + CHECK_EQ(0u, output_offset); // Compute this frame's PC, state, and continuation. Code* non_optimized_code = function->shared()->code(); @@ -1382,7 +1382,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator, top_address + output_offset, output_offset, value); } - CHECK_EQ(0, output_offset); + CHECK_EQ(0u, output_offset); intptr_t pc = reinterpret_cast( construct_stub->instruction_start() + @@ -1429,7 +1429,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator, // A frame for an accessor stub can not be the topmost or bottommost one. CHECK(frame_index > 0 && frame_index < output_count_ - 1); - CHECK_EQ(output_[frame_index], NULL); + CHECK_NULL(output_[frame_index]); output_[frame_index] = output_frame; // The top address of the frame is computed from the previous frame's top and @@ -1522,7 +1522,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator, DoTranslateCommand(iterator, frame_index, output_offset); } - CHECK_EQ(output_offset, 0); + CHECK_EQ(0u, output_offset); Smi* offset = is_setter_stub_frame ? isolate_->heap()->setter_stub_deopt_pc_offset() : @@ -1735,7 +1735,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator, } } - CHECK_EQ(output_frame_offset, 0); + CHECK_EQ(0u, output_frame_offset); if (!arg_count_known) { CHECK_GE(arguments_length_offset, 0); diff --git a/src/deoptimizer.h b/src/deoptimizer.h index b14c369741..47412f4752 100644 --- a/src/deoptimizer.h +++ b/src/deoptimizer.h @@ -95,11 +95,10 @@ class Deoptimizer : public Malloced { SOFT, // This last bailout type is not really a bailout, but used by the // debugger to deoptimize stack frames to allow inspection. 
- DEBUGGER + DEBUGGER, + kBailoutTypesWithCodeEntry = SOFT + 1 }; - static const int kBailoutTypesWithCodeEntry = SOFT + 1; - struct Reason { Reason(int r, const char* m, const char* d) : raw_position(r), mnemonic(m), detail(d) {} diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 10701c4bcc..cec871403a 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -5458,7 +5458,7 @@ bool Heap::CreateHeapObjects() { // Create initial objects CreateInitialObjects(); - CHECK_EQ(0, gc_count_); + CHECK_EQ(0u, gc_count_); set_native_contexts_list(undefined_value()); set_array_buffers_list(undefined_value()); diff --git a/src/heap/spaces.h b/src/heap/spaces.h index e21876be51..efb784ca69 100644 --- a/src/heap/spaces.h +++ b/src/heap/spaces.h @@ -1276,13 +1276,13 @@ class AllocationInfo { INLINE(void set_top(Address top)) { SLOW_DCHECK(top == NULL || - (reinterpret_cast(top) & HeapObjectTagMask()) == 0); + (reinterpret_cast(top) & kHeapObjectTagMask) == 0); top_ = top; } INLINE(Address top()) const { SLOW_DCHECK(top_ == NULL || - (reinterpret_cast(top_) & HeapObjectTagMask()) == 0); + (reinterpret_cast(top_) & kHeapObjectTagMask) == 0); return top_; } @@ -1290,13 +1290,13 @@ class AllocationInfo { INLINE(void set_limit(Address limit)) { SLOW_DCHECK(limit == NULL || - (reinterpret_cast(limit) & HeapObjectTagMask()) == 0); + (reinterpret_cast(limit) & kHeapObjectTagMask) == 0); limit_ = limit; } INLINE(Address limit()) const { SLOW_DCHECK(limit_ == NULL || - (reinterpret_cast(limit_) & HeapObjectTagMask()) == + (reinterpret_cast(limit_) & kHeapObjectTagMask) == 0); return limit_; } diff --git a/src/hydrogen-check-elimination.cc b/src/hydrogen-check-elimination.cc index 1530fe1cf5..3542fa601a 100644 --- a/src/hydrogen-check-elimination.cc +++ b/src/hydrogen-check-elimination.cc @@ -373,7 +373,7 @@ class HCheckTable : public ZoneObject { instr->DeleteAndReplaceWith(entry->check_); INC_STAT(redundant_); } else if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) { - DCHECK_EQ(NULL, entry->check_); + DCHECK_NULL(entry->check_); TRACE(("Marking redundant CheckMaps #%d at B%d as stability check\n", instr->id(), instr->block()->block_id())); instr->set_maps(entry->maps_->Copy(graph->zone())); @@ -684,14 +684,14 @@ class HCheckTable : public ZoneObject { bool compact = false; for (int i = 0; i < size_; i++) { HCheckTableEntry* entry = &entries_[i]; - DCHECK(entry->object_ != NULL); + DCHECK_NOT_NULL(entry->object_); if (phase_->aliasing_->MayAlias(entry->object_, object)) { entry->object_ = NULL; compact = true; } } if (compact) Compact(); - DCHECK(Find(object) == NULL); + DCHECK_NULL(Find(object)); } void Compact() { diff --git a/src/hydrogen.cc b/src/hydrogen.cc index cbed39efb1..9771d3f052 100644 --- a/src/hydrogen.cc +++ b/src/hydrogen.cc @@ -8721,7 +8721,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle function, case kCallApiGetter: // Receiver and prototype chain cannot have changed. DCHECK_EQ(0, argc); - DCHECK_EQ(NULL, receiver); + DCHECK_NULL(receiver); // Receiver is on expression stack. receiver = Pop(); Add(receiver); @@ -8731,7 +8731,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle function, is_store = true; // Receiver and prototype chain cannot have changed. DCHECK_EQ(1, argc); - DCHECK_EQ(NULL, receiver); + DCHECK_NULL(receiver); // Receiver and value are on expression stack. 
HValue* value = Pop(); receiver = Pop(); @@ -11812,7 +11812,7 @@ void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) { void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) { DCHECK(call->arguments()->length() == 2); - DCHECK_NE(NULL, call->arguments()->at(1)->AsLiteral()); + DCHECK_NOT_NULL(call->arguments()->at(1)->AsLiteral()); Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value())); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* date = Pop(); diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc index ca1afcda97..1c74450c6c 100644 --- a/src/ia32/code-stubs-ia32.cc +++ b/src/ia32/code-stubs-ia32.cc @@ -1706,7 +1706,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) { // If either is a Smi (we know that not both are), then they can only // be equal if the other is a HeapNumber. If so, use the slow case. STATIC_ASSERT(kSmiTag == 0); - DCHECK_EQ(0, Smi::FromInt(0)); + DCHECK_EQ(static_cast(0), Smi::FromInt(0)); __ mov(ecx, Immediate(kSmiTagMask)); __ and_(ecx, eax); __ test(ecx, edx); diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc index 81cd11f469..af5fe45e25 100644 --- a/src/ia32/full-codegen-ia32.cc +++ b/src/ia32/full-codegen-ia32.cc @@ -3708,7 +3708,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NE(NULL, args->at(1)->AsLiteral()); + DCHECK_NOT_NULL(args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. @@ -4064,7 +4064,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_NOT_NULL(args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc index 357928193c..854e5edfb1 100644 --- a/src/ia32/lithium-codegen-ia32.cc +++ b/src/ia32/lithium-codegen-ia32.cc @@ -4387,7 +4387,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), Immediate(to_map)); // Write barrier. 
- DCHECK_NE(instr->temp(), NULL); + DCHECK_NOT_NULL(instr->temp()); __ RecordWriteForMap(object_reg, to_map, new_map_reg, ToRegister(instr->temp()), kDontSaveFPRegs); diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc index 16e42cebd6..3af36fac20 100644 --- a/src/ic/handler-compiler.cc +++ b/src/ic/handler-compiler.cc @@ -347,7 +347,7 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor( case LookupIterator::ACCESSOR: Handle info = Handle::cast(it->GetAccessors()); - DCHECK_NE(NULL, info->getter()); + DCHECK_NOT_NULL(info->getter()); GenerateLoadCallback(reg, info); } } diff --git a/src/ic/ic-state.cc b/src/ic/ic-state.cc index 9c883ad5e3..37822e523a 100644 --- a/src/ic/ic-state.cc +++ b/src/ic/ic-state.cc @@ -36,6 +36,14 @@ std::ostream& operator<<(std::ostream& os, const CallICState& s) { } +// static +STATIC_CONST_MEMBER_DEFINITION const int BinaryOpICState::FIRST_TOKEN; + + +// static +STATIC_CONST_MEMBER_DEFINITION const int BinaryOpICState::LAST_TOKEN; + + BinaryOpICState::BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state) : isolate_(isolate) { op_ = diff --git a/src/ic/ic.cc b/src/ic/ic.cc index 8fd6e180b2..a4dce6a66a 100644 --- a/src/ic/ic.cc +++ b/src/ic/ic.cc @@ -2550,7 +2550,7 @@ MaybeHandle BinaryOpIC::Transition( target = stub.GetCode(); // Sanity check the generic stub. - DCHECK_EQ(NULL, target->FindFirstAllocationSite()); + DCHECK_NULL(target->FindFirstAllocationSite()); } set_target(*target); diff --git a/src/ic/x64/stub-cache-x64.cc b/src/ic/x64/stub-cache-x64.cc index f15635c6b9..4be0d5b330 100644 --- a/src/ic/x64/stub-cache-x64.cc +++ b/src/ic/x64/stub-cache-x64.cc @@ -30,7 +30,7 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm, : kPointerSizeLog2 == StubCache::kCacheIndexShift); ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1; - DCHECK_EQ(3 * kPointerSize, sizeof(StubCache::Entry)); + DCHECK_EQ(3u * kPointerSize, sizeof(StubCache::Entry)); // The offset register holds the entry offset times four (due to masking // and shifting optimizations). ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); diff --git a/src/isolate.cc b/src/isolate.cc index ea5e3a95c9..3479ae2392 100644 --- a/src/isolate.cc +++ b/src/isolate.cc @@ -1560,7 +1560,7 @@ Isolate::ThreadDataTable::~ThreadDataTable() { // TODO(svenpanne) The assertion below would fire if an embedder does not // cleanly dispose all Isolates before disposing v8, so we are conservative // and leave it out for now. 
- // DCHECK_EQ(NULL, list_); + // DCHECK_NULL(list_); } diff --git a/src/jsregexp.cc b/src/jsregexp.cc index 1843597e74..63c7a504de 100644 --- a/src/jsregexp.cc +++ b/src/jsregexp.cc @@ -3446,14 +3446,14 @@ int ChoiceNode::GreedyLoopTextLengthForAlternative( void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) { - DCHECK_EQ(loop_node_, NULL); + DCHECK_NULL(loop_node_); AddAlternative(alt); loop_node_ = alt.node(); } void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) { - DCHECK_EQ(continue_node_, NULL); + DCHECK_NULL(continue_node_); AddAlternative(alt); continue_node_ = alt.node(); } @@ -3473,7 +3473,7 @@ void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) { macro_assembler->GoTo(trace->loop_label()); return; } - DCHECK(trace->stop_node() == NULL); + DCHECK_NULL(trace->stop_node()); if (!trace->is_trivial()) { trace->Flush(compiler, this); return; @@ -5294,8 +5294,8 @@ void CharacterRange::Split(ZoneList* base, ZoneList** included, ZoneList** excluded, Zone* zone) { - DCHECK_EQ(NULL, *included); - DCHECK_EQ(NULL, *excluded); + DCHECK_NULL(*included); + DCHECK_NULL(*excluded); DispatchTable table(zone); for (int i = 0; i < base->length(); i++) table.AddRange(base->at(i), CharacterRangeSplitter::kInBase, zone); diff --git a/src/jsregexp.h b/src/jsregexp.h index d74b3bcbbe..0b4f39dc49 100644 --- a/src/jsregexp.h +++ b/src/jsregexp.h @@ -239,7 +239,7 @@ class CharacterRange { public: CharacterRange() : from_(0), to_(0) { } // For compatibility with the CHECK_OK macro - CharacterRange(void* null) { DCHECK_EQ(NULL, null); } //NOLINT + CharacterRange(void* null) { DCHECK_NULL(null); } // NOLINT CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { } static void AddClassEscape(uc16 type, ZoneList* ranges, Zone* zone); diff --git a/src/log.cc b/src/log.cc index 31460b6ce5..3f1c970885 100644 --- a/src/log.cc +++ b/src/log.cc @@ -271,7 +271,7 @@ PerfBasicLogger::PerfBasicLogger() CHECK_NE(size, -1); perf_output_handle_ = base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode); - CHECK_NE(perf_output_handle_, NULL); + CHECK_NOT_NULL(perf_output_handle_); setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize); } diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc index b9c34ca7e0..dc85538c3e 100644 --- a/src/mips/full-codegen-mips.cc +++ b/src/mips/full-codegen-mips.cc @@ -3795,7 +3795,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NE(NULL, args->at(1)->AsLiteral()); + DCHECK_NOT_NULL(args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. 
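Note on the null-check conversions in the surrounding hunks: two-argument forms such as DCHECK_NE(NULL, x) and DCHECK_EQ(NULL, x) become the dedicated single-argument macros. One likely reason for the dedicated macros is that a bare NULL would be deduced as an integer type by the new template-based comparisons. A minimal sketch of the intended usage, assuming the Chromium-style CHECK_NOT_NULL / DCHECK_NOT_NULL definitions this change introduces in src/base/logging.h (illustrative only, not part of the patch):

// Illustrative sketch only; assumes src/base/logging.h from this change.
#include "src/base/logging.h"

static int ReadSlot(const int* slot) {
  DCHECK_NOT_NULL(slot);  // debug builds only; typically compiled out otherwise
  CHECK_NOT_NULL(slot);   // always active; aborts with file/line if slot is NULL
  return *slot;
}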
@@ -4161,7 +4161,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_NOT_NULL(args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/mips64/full-codegen-mips64.cc b/src/mips64/full-codegen-mips64.cc index f86de38d4b..00791c0c9d 100644 --- a/src/mips64/full-codegen-mips64.cc +++ b/src/mips64/full-codegen-mips64.cc @@ -3794,7 +3794,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NE(NULL, args->at(1)->AsLiteral()); + DCHECK_NOT_NULL(args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. @@ -4161,7 +4161,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_NOT_NULL(args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/objects-debug.cc b/src/objects-debug.cc index e6c231ecc6..a8cbe9cf15 100644 --- a/src/objects-debug.cc +++ b/src/objects-debug.cc @@ -207,7 +207,7 @@ void HeapObject::VerifyHeapPointer(Object* p) { void Symbol::SymbolVerify() { CHECK(IsSymbol()); CHECK(HasHashCode()); - CHECK_GT(Hash(), 0); + CHECK_GT(Hash(), 0u); CHECK(name()->IsUndefined() || name()->IsString()); CHECK(flags()->IsSmi()); } diff --git a/src/objects-inl.h b/src/objects-inl.h index a0e2a38bc0..1dde60df54 100644 --- a/src/objects-inl.h +++ b/src/objects-inl.h @@ -2951,7 +2951,7 @@ int LinearSearch(T* array, Name* name, int len, int valid_entries, return T::kNotFound; } else { DCHECK(len >= valid_entries); - DCHECK_EQ(NULL, out_insertion_index); // Not supported here. + DCHECK_NULL(out_insertion_index); // Not supported here. 
for (int number = 0; number < valid_entries; number++) { Name* entry = array->GetKey(number); uint32_t current_hash = entry->Hash(); @@ -3392,6 +3392,12 @@ CAST_ACCESSOR(WeakFixedArray) CAST_ACCESSOR(WeakHashTable) +// static +template +STATIC_CONST_MEMBER_DEFINITION const InstanceType + FixedTypedArray::kInstanceType; + + template FixedTypedArray* FixedTypedArray::cast(Object* object) { SLOW_DCHECK(object->IsHeapObject() && diff --git a/src/objects.cc b/src/objects.cc index 94a122800b..ac1a1b2cf1 100644 --- a/src/objects.cc +++ b/src/objects.cc @@ -9589,7 +9589,7 @@ FixedArray* SharedFunctionInfo::GetLiteralsFromOptimizedCodeMap(int index) { FixedArray* code_map = FixedArray::cast(optimized_code_map()); if (!bound()) { FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1)); - DCHECK_NE(NULL, cached_literals); + DCHECK_NOT_NULL(cached_literals); return cached_literals; } return NULL; @@ -9600,7 +9600,7 @@ Code* SharedFunctionInfo::GetCodeFromOptimizedCodeMap(int index) { DCHECK(index > kEntriesStart); FixedArray* code_map = FixedArray::cast(optimized_code_map()); Code* code = Code::cast(code_map->get(index)); - DCHECK_NE(NULL, code); + DCHECK_NOT_NULL(code); return code; } diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc index ed1e56acd5..5999df9d6d 100644 --- a/src/optimizing-compiler-thread.cc +++ b/src/optimizing-compiler-thread.cc @@ -102,7 +102,7 @@ OptimizingCompilerThread::~OptimizingCompilerThread() { if (FLAG_concurrent_osr) { #ifdef DEBUG for (int i = 0; i < osr_buffer_capacity_; i++) { - CHECK_EQ(NULL, osr_buffer_[i]); + CHECK_NULL(osr_buffer_[i]); } #endif DeleteArray(osr_buffer_); @@ -178,7 +178,7 @@ OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) { return NULL; } OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; - DCHECK_NE(NULL, job); + DCHECK_NOT_NULL(job); input_queue_shift_ = InputQueueIndex(1); input_queue_length_--; if (flag) { @@ -189,7 +189,7 @@ OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) { void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) { - DCHECK_NE(NULL, job); + DCHECK_NOT_NULL(job); // The function may have already been optimized by OSR. Simply continue. OptimizedCompileJob::Status status = job->OptimizeGraph(); diff --git a/src/perf-jit.cc b/src/perf-jit.cc index 3f30e38467..819fe4eef4 100644 --- a/src/perf-jit.cc +++ b/src/perf-jit.cc @@ -57,7 +57,7 @@ PerfJitLogger::PerfJitLogger() : perf_output_handle_(NULL), code_index_(0) { CHECK_NE(size, -1); perf_output_handle_ = base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode); - CHECK_NE(perf_output_handle_, NULL); + CHECK_NOT_NULL(perf_output_handle_); setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize); LogWriteHeader(); diff --git a/src/ppc/full-codegen-ppc.cc b/src/ppc/full-codegen-ppc.cc index a762d00da0..e926d6ee27 100644 --- a/src/ppc/full-codegen-ppc.cc +++ b/src/ppc/full-codegen-ppc.cc @@ -3753,7 +3753,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NE(NULL, args->at(1)->AsLiteral()); + DCHECK_NOT_NULL(args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. 
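The // static definitions added via STATIC_CONST_MEMBER_DEFINITION in the objects-inl.h hunk above supply out-of-class definitions for static const data members. A likely reason: the new CHECK_EQ/DCHECK_EQ implementations take their operands by const reference, which ODR-uses the member, so an in-class initializer alone no longer suffices. A rough sketch of the idiom with hypothetical names (illustrative only, not part of the patch):

// Illustrative sketch; Config and kLimit are hypothetical names.
#include "src/base/logging.h"

struct Config {
  static const int kLimit = 16;  // in-class declaration with initializer
};

// Out-of-class definition, needed once kLimit is bound to a reference,
// e.g. by a template-based CHECK_EQ:
const int Config::kLimit;

void CheckLimit(int n) {
  CHECK_EQ(Config::kLimit, n);
}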
@@ -4089,7 +4089,7 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_NOT_NULL(args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/runtime/runtime-array.cc b/src/runtime/runtime-array.cc index 0ba1fa6cce..d1c1f30208 100644 --- a/src/runtime/runtime-array.cc +++ b/src/runtime/runtime-array.cc @@ -851,7 +851,7 @@ RUNTIME_FUNCTION(Runtime_ArrayConcat) { case FAST_HOLEY_ELEMENTS: case FAST_ELEMENTS: case DICTIONARY_ELEMENTS: - DCHECK_EQ(0, length); + DCHECK_EQ(0u, length); break; default: UNREACHABLE(); diff --git a/src/serialize.cc b/src/serialize.cc index fcc0b190a4..0b9e9387c3 100644 --- a/src/serialize.cc +++ b/src/serialize.cc @@ -95,12 +95,12 @@ void ExternalReferenceTable::Add(Address address, TypeCode type, uint16_t id, const char* name) { - DCHECK_NE(NULL, address); + DCHECK_NOT_NULL(address); ExternalReferenceEntry entry; entry.address = address; entry.code = EncodeExternal(type, id); entry.name = name; - DCHECK_NE(0, entry.code); + DCHECK_NE(0u, entry.code); // Assert that the code is added in ascending order to rule out duplicates. DCHECK((size() == 0) || (code(size() - 1) < entry.code)); refs_.Add(entry); @@ -647,10 +647,10 @@ bool Deserializer::ReserveSpace() { void Deserializer::Initialize(Isolate* isolate) { - DCHECK_EQ(NULL, isolate_); - DCHECK_NE(NULL, isolate); + DCHECK_NULL(isolate_); + DCHECK_NOT_NULL(isolate); isolate_ = isolate; - DCHECK_EQ(NULL, external_reference_decoder_); + DCHECK_NULL(external_reference_decoder_); external_reference_decoder_ = new ExternalReferenceDecoder(isolate); } @@ -659,7 +659,7 @@ void Deserializer::Deserialize(Isolate* isolate) { Initialize(isolate); if (!ReserveSpace()) FatalProcessOutOfMemory("deserializing context"); // No active threads. - DCHECK_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); + DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse()); // No active handles. DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty()); isolate_->heap()->IterateSmiRoots(this); @@ -942,7 +942,7 @@ Address Deserializer::Allocate(int space_index, int size) { } else { DCHECK(space_index < kNumberOfPreallocatedSpaces); Address address = high_water_[space_index]; - DCHECK_NE(NULL, address); + DCHECK_NOT_NULL(address); high_water_[space_index] += size; #ifdef DEBUG // Assert that the current reserved chunk is still big enough. @@ -1383,7 +1383,7 @@ Serializer::~Serializer() { void StartupSerializer::SerializeStrongReferences() { Isolate* isolate = this->isolate(); // No active threads. - CHECK_EQ(NULL, isolate->thread_manager()->FirstThreadStateInUse()); + CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse()); // No active or weak handles. 
CHECK(isolate->handle_scope_implementer()->blocks()->is_empty()); CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles()); diff --git a/src/serialize.h b/src/serialize.h index cc267aa5fa..9aaf38182d 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -275,7 +275,7 @@ class BackReferenceMap : public AddressMapBase { void Add(HeapObject* obj, BackReference b) { DCHECK(b.is_valid()); - DCHECK_EQ(NULL, LookupEntry(map_, obj, false)); + DCHECK_NULL(LookupEntry(map_, obj, false)); HashMap::Entry* entry = LookupEntry(map_, obj, true); SetValue(entry, b.bitfield()); } @@ -307,7 +307,7 @@ class HotObjectsList { } HeapObject* Get(int index) { - DCHECK_NE(NULL, circular_queue_[index]); + DCHECK_NOT_NULL(circular_queue_[index]); return circular_queue_[index]; } diff --git a/src/unique.h b/src/unique.h index 321eb3683d..b56ee84a33 100644 --- a/src/unique.h +++ b/src/unique.h @@ -49,7 +49,7 @@ class Unique { // TODO(titzer): other immortable immovable objects are also fine. DCHECK(!AllowHeapAllocation::IsAllowed() || handle->IsMap()); raw_address_ = reinterpret_cast
(*handle); - DCHECK_NE(raw_address_, NULL); // Non-null should imply non-zero address. + DCHECK_NOT_NULL(raw_address_); // Non-null should imply non-zero address. } handle_ = handle; } diff --git a/src/v8.h b/src/v8.h index 4922a4db1e..17398edc0f 100644 --- a/src/v8.h +++ b/src/v8.h @@ -73,7 +73,7 @@ class V8 : public AllStatic { } static void SetArrayBufferAllocator(v8::ArrayBuffer::Allocator *allocator) { - CHECK_EQ(NULL, array_buffer_allocator_); + CHECK_NULL(array_buffer_allocator_); array_buffer_allocator_ = allocator; } diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc index 75adc8988f..ded6c331ec 100644 --- a/src/x64/disasm-x64.cc +++ b/src/x64/disasm-x64.cc @@ -803,7 +803,7 @@ int DisassemblerX64::ShiftInstruction(byte* data) { UnimplementedInstruction(); return count + 1; } - DCHECK_NE(NULL, mnem); + DCHECK_NOT_NULL(mnem); AppendToBuffer("%s%c ", mnem, operand_size_code()); } count += PrintRightOperand(data + count); diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc index acdb08fd0a..db49e75d49 100644 --- a/src/x64/full-codegen-x64.cc +++ b/src/x64/full-codegen-x64.cc @@ -3706,7 +3706,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NE(NULL, args->at(1)->AsLiteral()); + DCHECK_NOT_NULL(args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. @@ -4058,7 +4058,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_NOT_NULL(args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc index 3a477fc793..d81a00fc92 100644 --- a/src/x64/macro-assembler-x64.cc +++ b/src/x64/macro-assembler-x64.cc @@ -2198,7 +2198,7 @@ void MacroAssembler::SelectNonSmi(Register dst, Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi); #endif STATIC_ASSERT(kSmiTag == 0); - DCHECK_EQ(0, Smi::FromInt(0)); + DCHECK_EQ(static_cast(0), Smi::FromInt(0)); movl(kScratchRegister, Immediate(kSmiTagMask)); andp(kScratchRegister, src1); testl(kScratchRegister, src2); diff --git a/src/x87/full-codegen-x87.cc b/src/x87/full-codegen-x87.cc index b6b501b5d1..96809744fc 100644 --- a/src/x87/full-codegen-x87.cc +++ b/src/x87/full-codegen-x87.cc @@ -3648,7 +3648,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NE(NULL, args->at(1)->AsLiteral()); + DCHECK_NOT_NULL(args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. 
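Several literal tweaks in the surrounding hunks (0u instead of 0, a static_cast on the null constant, strcmp instead of comparing char pointers) follow from the new macros deducing both operand types: a signed literal against an unsigned value can trip -Wsign-compare, and a plain 0 deduced as int no longer converts to a pointer. A rough sketch under that assumption (illustrative only, not part of the patch):

// Illustrative sketch; assumes the template-based CHECK_EQ from this change.
#include <cstring>
#include "src/base/logging.h"

void Sketch(size_t count, const char* name, int* ptr) {
  CHECK_EQ(0u, count);                    // unsigned literal matches the unsigned operand
  CHECK_EQ(0, strcmp("expected", name));  // compare C strings by content, not by address
  CHECK_EQ(static_cast<int*>(0), ptr);    // cast keeps the null constant a pointer type
}

Where the intent really is a null check, the hunks prefer CHECK_NULL / DCHECK_NULL over such a cast.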
@@ -4003,7 +4003,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_NOT_NULL(args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp index de5fedb0fc..fc98256142 100644 --- a/test/cctest/cctest.gyp +++ b/test/cctest/cctest.gyp @@ -103,7 +103,6 @@ 'test-bignum.cc', 'test-bignum-dtoa.cc', 'test-bit-vector.cc', - 'test-checks.cc', 'test-circular-queue.cc', 'test-compiler.cc', 'test-constantpool.cc', diff --git a/test/cctest/cctest.h b/test/cctest/cctest.h index 81a0dd9a4e..2f33d13394 100644 --- a/test/cctest/cctest.h +++ b/test/cctest/cctest.h @@ -440,7 +440,7 @@ static inline void ExpectString(const char* code, const char* expected) { v8::Local result = CompileRun(code); CHECK(result->IsString()); v8::String::Utf8Value utf8(result); - CHECK_EQ(expected, *utf8); + CHECK_EQ(0, strcmp(expected, *utf8)); } @@ -557,7 +557,7 @@ class HeapObjectsTracker { public: HeapObjectsTracker() { heap_profiler_ = i::Isolate::Current()->heap_profiler(); - CHECK_NE(NULL, heap_profiler_); + CHECK_NOT_NULL(heap_profiler_); heap_profiler_->StartHeapObjectsTracking(true); } diff --git a/test/cctest/compiler/codegen-tester.cc b/test/cctest/compiler/codegen-tester.cc index b20da872eb..d05b282293 100644 --- a/test/cctest/compiler/codegen-tester.cc +++ b/test/cctest/compiler/codegen-tester.cc @@ -373,9 +373,9 @@ void Int32BinopInputShapeTester::RunRight( TEST(ParametersEqual) { RawMachineAssemblerTester m(kMachInt32, kMachInt32); Node* p1 = m.Parameter(1); - CHECK_NE(NULL, p1); + CHECK(p1); Node* p0 = m.Parameter(0); - CHECK_NE(NULL, p0); + CHECK(p0); CHECK_EQ(p0, m.Parameter(0)); CHECK_EQ(p1, m.Parameter(1)); } @@ -561,7 +561,7 @@ TEST(RunBinopTester) { Float64BinopTester bt(&m); bt.AddReturn(bt.param0); - FOR_FLOAT64_INPUTS(i) { CHECK_EQ(*i, bt.call(*i, 9.0)); } + FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, bt.call(*i, 9.0)); } } { @@ -569,7 +569,7 @@ TEST(RunBinopTester) { Float64BinopTester bt(&m); bt.AddReturn(bt.param1); - FOR_FLOAT64_INPUTS(i) { CHECK_EQ(*i, bt.call(-11.25, *i)); } + FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, bt.call(-11.25, *i)); } } } diff --git a/test/cctest/compiler/codegen-tester.h b/test/cctest/compiler/codegen-tester.h index 18903fd85e..d45d1fdc33 100644 --- a/test/cctest/compiler/codegen-tester.h +++ b/test/cctest/compiler/codegen-tester.h @@ -332,6 +332,16 @@ class Int32BinopInputShapeTester { void RunLeft(RawMachineAssemblerTester* m); void RunRight(RawMachineAssemblerTester* m); }; + +// TODO(bmeurer): Drop this crap once we switch to GTest/Gmock. 
+static inline void CheckDoubleEq(volatile double x, volatile double y) { + if (std::isnan(x)) { + CHECK(std::isnan(y)); + } else { + CHECK_EQ(x, y); + } +} + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/test/cctest/compiler/function-tester.h b/test/cctest/compiler/function-tester.h index 7e16eead38..7cb118ac5b 100644 --- a/test/cctest/compiler/function-tester.h +++ b/test/cctest/compiler/function-tester.h @@ -36,7 +36,7 @@ class FunctionTester : public InitializedHandleScope { const uint32_t supported_flags = CompilationInfo::kContextSpecializing | CompilationInfo::kInliningEnabled | CompilationInfo::kTypingEnabled; - CHECK_EQ(0, flags_ & ~supported_flags); + CHECK_EQ(0u, flags_ & ~supported_flags); } explicit FunctionTester(Graph* graph) diff --git a/test/cctest/compiler/graph-builder-tester.cc b/test/cctest/compiler/graph-builder-tester.cc index de39410d32..38bc633711 100644 --- a/test/cctest/compiler/graph-builder-tester.cc +++ b/test/cctest/compiler/graph-builder-tester.cc @@ -21,7 +21,7 @@ MachineCallHelper::MachineCallHelper(Isolate* isolate, void MachineCallHelper::InitParameters(GraphBuilder* builder, CommonOperatorBuilder* common) { - DCHECK_EQ(NULL, parameters_); + DCHECK(!parameters_); graph_ = builder->graph(); int param_count = static_cast(parameter_count()); if (param_count == 0) return; @@ -46,7 +46,7 @@ byte* MachineCallHelper::Generate() { Node* MachineCallHelper::Parameter(size_t index) { - DCHECK_NE(NULL, parameters_); + DCHECK(parameters_); DCHECK(index < parameter_count()); return parameters_[index]; } diff --git a/test/cctest/compiler/test-basic-block-profiler.cc b/test/cctest/compiler/test-basic-block-profiler.cc index 703fc176ad..fa4da9a736 100644 --- a/test/cctest/compiler/test-basic-block-profiler.cc +++ b/test/cctest/compiler/test-basic-block-profiler.cc @@ -24,7 +24,7 @@ class BasicBlockProfilerTest : public RawMachineAssemblerTester { void ResetCounts() { isolate()->basic_block_profiler()->ResetCounts(); } void Expect(size_t size, uint32_t* expected) { - CHECK_NE(NULL, isolate()->basic_block_profiler()); + CHECK(isolate()->basic_block_profiler()); const BasicBlockProfiler::DataList* l = isolate()->basic_block_profiler()->data_list(); CHECK_NE(0, static_cast(l->size())); diff --git a/test/cctest/compiler/test-changes-lowering.cc b/test/cctest/compiler/test-changes-lowering.cc index 2c76461427..c416420079 100644 --- a/test/cctest/compiler/test-changes-lowering.cc +++ b/test/cctest/compiler/test-changes-lowering.cc @@ -242,13 +242,13 @@ TEST(RunChangeTaggedToFloat64) { { Handle number = t.factory()->NewNumber(input); t.Call(*number); - CHECK_EQ(input, result); + CheckDoubleEq(input, result); } { Handle number = t.factory()->NewHeapNumber(input); t.Call(*number); - CHECK_EQ(input, result); + CheckDoubleEq(input, result); } } } diff --git a/test/cctest/compiler/test-control-reducer.cc b/test/cctest/compiler/test-control-reducer.cc index 5f56a8fabc..84b6b4bea5 100644 --- a/test/cctest/compiler/test-control-reducer.cc +++ b/test/cctest/compiler/test-control-reducer.cc @@ -221,7 +221,7 @@ TEST(Trim1_dead) { CHECK(IsUsedBy(T.start, T.p0)); T.Trim(); CHECK(!IsUsedBy(T.start, T.p0)); - CHECK_EQ(NULL, T.p0->InputAt(0)); + CHECK(!T.p0->InputAt(0)); } @@ -252,9 +252,9 @@ TEST(Trim2_dead) { CHECK(!IsUsedBy(T.one, phi)); CHECK(!IsUsedBy(T.half, phi)); CHECK(!IsUsedBy(T.start, phi)); - CHECK_EQ(NULL, phi->InputAt(0)); - CHECK_EQ(NULL, phi->InputAt(1)); - CHECK_EQ(NULL, phi->InputAt(2)); + CHECK(!phi->InputAt(0)); + CHECK(!phi->InputAt(1)); + 
CHECK(!phi->InputAt(2)); } @@ -274,7 +274,7 @@ TEST(Trim_chain1) { T.Trim(); for (int i = 0; i < kDepth; i++) { CHECK(!IsUsedBy(live[i], dead[i])); - CHECK_EQ(NULL, dead[i]->InputAt(0)); + CHECK(!dead[i]->InputAt(0)); CHECK_EQ(i == 0 ? T.start : live[i - 1], live[i]->InputAt(0)); } } @@ -354,9 +354,9 @@ TEST(Trim_cycle2) { CHECK(!IsUsedBy(loop, phi)); CHECK(!IsUsedBy(T.one, phi)); CHECK(!IsUsedBy(T.half, phi)); - CHECK_EQ(NULL, phi->InputAt(0)); - CHECK_EQ(NULL, phi->InputAt(1)); - CHECK_EQ(NULL, phi->InputAt(2)); + CHECK(!phi->InputAt(0)); + CHECK(!phi->InputAt(1)); + CHECK(!phi->InputAt(2)); } @@ -365,8 +365,8 @@ void CheckTrimConstant(ControlReducerTester* T, Node* k) { CHECK(IsUsedBy(k, phi)); T->Trim(); CHECK(!IsUsedBy(k, phi)); - CHECK_EQ(NULL, phi->InputAt(0)); - CHECK_EQ(NULL, phi->InputAt(1)); + CHECK(!phi->InputAt(0)); + CHECK(!phi->InputAt(1)); } @@ -954,7 +954,7 @@ TEST(CMergeReduce_dead_chain1) { R.graph.SetEnd(end); R.ReduceGraph(); CHECK(merge->IsDead()); - CHECK_EQ(NULL, end->InputAt(0)); // end dies. + CHECK(!end->InputAt(0)); // end dies. } } diff --git a/test/cctest/compiler/test-instruction.cc b/test/cctest/compiler/test-instruction.cc index 47e661028c..a884d28c18 100644 --- a/test/cctest/compiler/test-instruction.cc +++ b/test/cctest/compiler/test-instruction.cc @@ -135,7 +135,7 @@ TEST(InstructionBasic) { for (auto block : *blocks) { CHECK_EQ(block->rpo_number(), R.BlockAt(block)->rpo_number().ToInt()); CHECK_EQ(block->id().ToInt(), R.BlockAt(block)->id().ToInt()); - CHECK_EQ(NULL, block->loop_end()); + CHECK(!block->loop_end()); } } @@ -278,7 +278,7 @@ TEST(InstructionAddGapMove) { R.code->AddGapMove(index, op1, op2); GapInstruction* gap = R.code->GapAt(index); ParallelMove* move = gap->GetParallelMove(GapInstruction::START); - CHECK_NE(NULL, move); + CHECK(move); const ZoneList* move_operands = move->move_operands(); CHECK_EQ(1, move_operands->length()); MoveOperands* cur = &move_operands->at(0); diff --git a/test/cctest/compiler/test-js-constant-cache.cc b/test/cctest/compiler/test-js-constant-cache.cc index 60cbb8d9db..630f911c5e 100644 --- a/test/cctest/compiler/test-js-constant-cache.cc +++ b/test/cctest/compiler/test-js-constant-cache.cc @@ -103,10 +103,10 @@ TEST(MinusZeroConstant) { double zero_value = OpParameter(zero); double minus_zero_value = OpParameter(minus_zero); - CHECK_EQ(0.0, zero_value); - CHECK_NE(-0.0, zero_value); - CHECK_EQ(-0.0, minus_zero_value); - CHECK_NE(0.0, minus_zero_value); + CHECK(bit_cast(0.0) == bit_cast(zero_value)); + CHECK(bit_cast(-0.0) != bit_cast(zero_value)); + CHECK(bit_cast(0.0) != bit_cast(minus_zero_value)); + CHECK(bit_cast(-0.0) == bit_cast(minus_zero_value)); } diff --git a/test/cctest/compiler/test-js-typed-lowering.cc b/test/cctest/compiler/test-js-typed-lowering.cc index e1b8a150a9..cf252c4c6c 100644 --- a/test/cctest/compiler/test-js-typed-lowering.cc +++ b/test/cctest/compiler/test-js-typed-lowering.cc @@ -797,7 +797,7 @@ TEST(RemoveToNumberEffects) { } } - CHECK_EQ(NULL, effect_use); // should have done all cases above. + CHECK(!effect_use); // should have done all cases above. 
} diff --git a/test/cctest/compiler/test-linkage.cc b/test/cctest/compiler/test-linkage.cc index b21965df55..cb74deb2b1 100644 --- a/test/cctest/compiler/test-linkage.cc +++ b/test/cctest/compiler/test-linkage.cc @@ -62,7 +62,7 @@ TEST(TestLinkageJSFunctionIncoming) { Linkage linkage(info.zone(), &info); CallDescriptor* descriptor = linkage.GetIncomingDescriptor(); - CHECK_NE(NULL, descriptor); + CHECK(descriptor); CHECK_EQ(1 + i, static_cast(descriptor->JSParameterCount())); CHECK_EQ(1, static_cast(descriptor->ReturnCount())); @@ -78,7 +78,7 @@ TEST(TestLinkageCodeStubIncoming) { Linkage linkage(info.zone(), &info); // TODO(titzer): test linkage creation with a bonafide code stub. // this just checks current behavior. - CHECK_EQ(NULL, linkage.GetIncomingDescriptor()); + CHECK(!linkage.GetIncomingDescriptor()); } @@ -91,7 +91,7 @@ TEST(TestLinkageJSCall) { for (int i = 0; i < 32; i++) { CallDescriptor* descriptor = linkage.GetJSCallDescriptor(i, CallDescriptor::kNoFlags); - CHECK_NE(NULL, descriptor); + CHECK(descriptor); CHECK_EQ(i, static_cast(descriptor->JSParameterCount())); CHECK_EQ(1, static_cast(descriptor->ReturnCount())); CHECK_EQ(Operator::kNoProperties, descriptor->properties()); diff --git a/test/cctest/compiler/test-loop-analysis.cc b/test/cctest/compiler/test-loop-analysis.cc index 71708aad81..87746ecf21 100644 --- a/test/cctest/compiler/test-loop-analysis.cc +++ b/test/cctest/compiler/test-loop-analysis.cc @@ -136,7 +136,7 @@ class LoopFinderTester : HandleAndZoneScope { void CheckLoop(Node** header, int header_count, Node** body, int body_count) { LoopTree* tree = GetLoopTree(); LoopTree::Loop* loop = tree->ContainingLoop(header[0]); - CHECK_NE(NULL, loop); + CHECK(loop); CHECK(header_count == static_cast(loop->HeaderSize())); for (int i = 0; i < header_count; i++) { @@ -164,7 +164,7 @@ class LoopFinderTester : HandleAndZoneScope { Node* header = chain[i]; // Each header should be in a loop. LoopTree::Loop* loop = tree->ContainingLoop(header); - CHECK_NE(NULL, loop); + CHECK(loop); // Check parentage. LoopTree::Loop* parent = i == 0 ? NULL : tree->ContainingLoop(chain[i - 1]); diff --git a/test/cctest/compiler/test-loop-assignment-analysis.cc b/test/cctest/compiler/test-loop-assignment-analysis.cc index aabd95bc23..6f379641f2 100644 --- a/test/cctest/compiler/test-loop-assignment-analysis.cc +++ b/test/cctest/compiler/test-loop-assignment-analysis.cc @@ -37,18 +37,18 @@ struct TestHelper : public HandleAndZoneScope { Scope* scope = info.function()->scope(); AstValueFactory* factory = info.ast_value_factory(); - CHECK_NE(NULL, scope); + CHECK(scope); if (result == NULL) { AstLoopAssignmentAnalyzer analyzer(main_zone(), &info); result = analyzer.Analyze(); - CHECK_NE(NULL, result); + CHECK(result); } const i::AstRawString* name = factory->GetOneByteString(var_name); i::Variable* var = scope->Lookup(name); - CHECK_NE(NULL, var); + CHECK(var); if (var->location() == Variable::UNALLOCATED) { CHECK_EQ(0, expected); diff --git a/test/cctest/compiler/test-machine-operator-reducer.cc b/test/cctest/compiler/test-machine-operator-reducer.cc index c46045635d..7ee5751875 100644 --- a/test/cctest/compiler/test-machine-operator-reducer.cc +++ b/test/cctest/compiler/test-machine-operator-reducer.cc @@ -100,7 +100,7 @@ class ReducerTester : public HandleAndZoneScope { // the {expect} value. 
template void CheckFoldBinop(volatile T expect, Node* a, Node* b) { - CHECK_NE(NULL, binop); + CHECK(binop); Node* n = CreateBinopNode(a, b); MachineOperatorReducer reducer(&jsgraph); Reduction reduction = reducer.Reduce(n); @@ -112,7 +112,7 @@ class ReducerTester : public HandleAndZoneScope { // Check that the reduction of this binop applied to {a} and {b} yields // the {expect} node. void CheckBinop(Node* expect, Node* a, Node* b) { - CHECK_NE(NULL, binop); + CHECK(binop); Node* n = CreateBinopNode(a, b); MachineOperatorReducer reducer(&jsgraph); Reduction reduction = reducer.Reduce(n); @@ -124,7 +124,7 @@ class ReducerTester : public HandleAndZoneScope { // this binop applied to {left_expect} and {right_expect}. void CheckFoldBinop(Node* left_expect, Node* right_expect, Node* left, Node* right) { - CHECK_NE(NULL, binop); + CHECK(binop); Node* n = CreateBinopNode(left, right); MachineOperatorReducer reducer(&jsgraph); Reduction reduction = reducer.Reduce(n); @@ -139,7 +139,7 @@ class ReducerTester : public HandleAndZoneScope { template void CheckFoldBinop(volatile T left_expect, const Operator* op_expect, Node* right_expect, Node* left, Node* right) { - CHECK_NE(NULL, binop); + CHECK(binop); Node* n = CreateBinopNode(left, right); MachineOperatorReducer reducer(&jsgraph); Reduction r = reducer.Reduce(n); @@ -154,7 +154,7 @@ class ReducerTester : public HandleAndZoneScope { template void CheckFoldBinop(Node* left_expect, const Operator* op_expect, volatile T right_expect, Node* left, Node* right) { - CHECK_NE(NULL, binop); + CHECK(binop); Node* n = CreateBinopNode(left, right); MachineOperatorReducer reducer(&jsgraph); Reduction r = reducer.Reduce(n); @@ -723,133 +723,6 @@ TEST(ReduceLoadStore) { } -static void CheckNans(ReducerTester* R) { - Node* x = R->Parameter(); - std::vector nans = ValueHelper::nan_vector(); - for (std::vector::const_iterator pl = nans.begin(); pl != nans.end(); - ++pl) { - for (std::vector::const_iterator pr = nans.begin(); - pr != nans.end(); ++pr) { - Node* nan1 = R->Constant(*pl); - Node* nan2 = R->Constant(*pr); - R->CheckBinop(nan1, x, nan1); // x op NaN => NaN - R->CheckBinop(nan1, nan1, x); // NaN op x => NaN - R->CheckBinop(nan1, nan2, nan1); // NaN op NaN => NaN - } - } -} - - -TEST(ReduceFloat64Add) { - ReducerTester R; - R.binop = R.machine.Float64Add(); - - FOR_FLOAT64_INPUTS(pl) { - FOR_FLOAT64_INPUTS(pr) { - double x = *pl, y = *pr; - R.CheckFoldBinop(x + y, x, y); - } - } - - FOR_FLOAT64_INPUTS(i) { - Double tmp(*i); - if (!tmp.IsSpecial() || tmp.IsInfinite()) { - // Don't check NaNs as they are reduced more. 
- R.CheckPutConstantOnRight(*i); - } - } - - CheckNans(&R); -} - - -TEST(ReduceFloat64Sub) { - ReducerTester R; - R.binop = R.machine.Float64Sub(); - - FOR_FLOAT64_INPUTS(pl) { - FOR_FLOAT64_INPUTS(pr) { - double x = *pl, y = *pr; - R.CheckFoldBinop(x - y, x, y); - } - } - - Node* zero = R.Constant(0.0); - Node* x = R.Parameter(); - - R.CheckBinop(x, x, zero); // x - 0.0 => x - - CheckNans(&R); -} - - -TEST(ReduceFloat64Mul) { - ReducerTester R; - R.binop = R.machine.Float64Mul(); - - FOR_FLOAT64_INPUTS(pl) { - FOR_FLOAT64_INPUTS(pr) { - double x = *pl, y = *pr; - R.CheckFoldBinop(x * y, x, y); - } - } - - double inf = V8_INFINITY; - R.CheckPutConstantOnRight(-inf); - R.CheckPutConstantOnRight(-0.1); - R.CheckPutConstantOnRight(0.1); - R.CheckPutConstantOnRight(inf); - - Node* x = R.Parameter(); - Node* one = R.Constant(1.0); - - R.CheckBinop(x, x, one); // x * 1.0 => x - R.CheckBinop(x, one, x); // 1.0 * x => x - - CheckNans(&R); -} - - -TEST(ReduceFloat64Div) { - ReducerTester R; - R.binop = R.machine.Float64Div(); - - FOR_FLOAT64_INPUTS(pl) { - FOR_FLOAT64_INPUTS(pr) { - double x = *pl, y = *pr; - R.CheckFoldBinop(x / y, x, y); - } - } - - Node* x = R.Parameter(); - Node* one = R.Constant(1.0); - - R.CheckBinop(x, x, one); // x / 1.0 => x - - CheckNans(&R); -} - - -TEST(ReduceFloat64Mod) { - ReducerTester R; - R.binop = R.machine.Float64Mod(); - - FOR_FLOAT64_INPUTS(pl) { - FOR_FLOAT64_INPUTS(pr) { - double x = *pl, y = *pr; - R.CheckFoldBinop(modulo(x, y), x, y); - } - } - - Node* x = R.Parameter(); - Node* zero = R.Constant(0.0); - - R.CheckFoldBinop(std::numeric_limits::quiet_NaN(), x, zero); - - CheckNans(&R); -} - - // TODO(titzer): test MachineOperatorReducer for Word64And // TODO(titzer): test MachineOperatorReducer for Word64Or // TODO(titzer): test MachineOperatorReducer for Word64Xor @@ -870,3 +743,8 @@ TEST(ReduceFloat64Mod) { // TODO(titzer): test MachineOperatorReducer for ChangeInt32ToFloat64 // TODO(titzer): test MachineOperatorReducer for ChangeFloat64ToInt32 // TODO(titzer): test MachineOperatorReducer for Float64Compare +// TODO(titzer): test MachineOperatorReducer for Float64Add +// TODO(titzer): test MachineOperatorReducer for Float64Sub +// TODO(titzer): test MachineOperatorReducer for Float64Mul +// TODO(titzer): test MachineOperatorReducer for Float64Div +// TODO(titzer): test MachineOperatorReducer for Float64Mod diff --git a/test/cctest/compiler/test-node-cache.cc b/test/cctest/compiler/test-node-cache.cc index 835c028b38..b11e859cbc 100644 --- a/test/cctest/compiler/test-node-cache.cc +++ b/test/cctest/compiler/test-node-cache.cc @@ -17,7 +17,7 @@ TEST(Int32Constant_back_to_back) { for (int i = -2000000000; i < 2000000000; i += 3315177) { Node** pos = cache.Find(graph.zone(), i); - CHECK_NE(NULL, pos); + CHECK(pos); for (int j = 0; j < 3; j++) { Node** npos = cache.Find(graph.zone(), i); CHECK_EQ(pos, npos); @@ -80,7 +80,7 @@ TEST(Int64Constant_back_to_back) { for (int64_t i = -2000000000; i < 2000000000; i += 3315177) { Node** pos = cache.Find(graph.zone(), i); - CHECK_NE(NULL, pos); + CHECK(pos); for (int j = 0; j < 3; j++) { Node** npos = cache.Find(graph.zone(), i); CHECK_EQ(pos, npos); diff --git a/test/cctest/compiler/test-node.cc b/test/cctest/compiler/test-node.cc index 23238dac53..2c51e26f86 100644 --- a/test/cctest/compiler/test-node.cc +++ b/test/cctest/compiler/test-node.cc @@ -632,15 +632,15 @@ TEST(RemoveAllInputs) { n1->RemoveAllInputs(); CHECK_EQ(1, n1->InputCount()); CHECK_EQ(1, n0->UseCount()); - CHECK_EQ(NULL, n1->InputAt(0)); + 
CHECK(!n1->InputAt(0)); CHECK_EQ(1, n1->UseCount()); n2->RemoveAllInputs(); CHECK_EQ(2, n2->InputCount()); CHECK_EQ(0, n0->UseCount()); CHECK_EQ(0, n1->UseCount()); - CHECK_EQ(NULL, n2->InputAt(0)); - CHECK_EQ(NULL, n2->InputAt(1)); + CHECK(!n2->InputAt(0)); + CHECK(!n2->InputAt(1)); } { @@ -653,6 +653,6 @@ TEST(RemoveAllInputs) { n1->RemoveAllInputs(); CHECK_EQ(1, n1->InputCount()); CHECK_EQ(0, n1->UseCount()); - CHECK_EQ(NULL, n1->InputAt(0)); + CHECK(!n1->InputAt(0)); } } diff --git a/test/cctest/compiler/test-operator.cc b/test/cctest/compiler/test-operator.cc index 39f660fef9..e635da797d 100644 --- a/test/cctest/compiler/test-operator.cc +++ b/test/cctest/compiler/test-operator.cc @@ -80,14 +80,14 @@ TEST(TestOperator_Print) { Operator op1a(19, NONE, "Another1", 0, 0, 0, 0, 0, 0); Operator op1b(19, FOLD, "Another2", 2, 0, 0, 2, 0, 0); - CHECK_EQ("Another1", OperatorToString(&op1a).get()); - CHECK_EQ("Another2", OperatorToString(&op1b).get()); + CHECK_EQ(0, strcmp("Another1", OperatorToString(&op1a).get())); + CHECK_EQ(0, strcmp("Another2", OperatorToString(&op1b).get())); Operator op2a(20, NONE, "Flog1", 0, 0, 0, 0, 0, 0); Operator op2b(20, FOLD, "Flog2", 1, 0, 0, 1, 0, 0); - CHECK_EQ("Flog1", OperatorToString(&op2a).get()); - CHECK_EQ("Flog2", OperatorToString(&op2b).get()); + CHECK_EQ(0, strcmp("Flog1", OperatorToString(&op2a).get())); + CHECK_EQ(0, strcmp("Flog2", OperatorToString(&op2b).get())); } @@ -148,16 +148,16 @@ TEST(TestOperator1int_Equals) { TEST(TestOperator1int_Print) { Operator1 op1(12, NONE, "Op1Test", 0, 0, 0, 1, 0, 0, 0); - CHECK_EQ("Op1Test[0]", OperatorToString(&op1).get()); + CHECK_EQ(0, strcmp("Op1Test[0]", OperatorToString(&op1).get())); Operator1 op2(12, NONE, "Op1Test", 0, 0, 0, 1, 0, 0, 66666666); - CHECK_EQ("Op1Test[66666666]", OperatorToString(&op2).get()); + CHECK_EQ(0, strcmp("Op1Test[66666666]", OperatorToString(&op2).get())); Operator1 op3(12, NONE, "FooBar", 0, 0, 0, 1, 0, 0, 2347); - CHECK_EQ("FooBar[2347]", OperatorToString(&op3).get()); + CHECK_EQ(0, strcmp("FooBar[2347]", OperatorToString(&op3).get())); Operator1 op4(12, NONE, "BarFoo", 0, 0, 0, 1, 0, 0, -879); - CHECK_EQ("BarFoo[-879]", OperatorToString(&op4).get()); + CHECK_EQ(0, strcmp("BarFoo[-879]", OperatorToString(&op4).get())); } @@ -179,8 +179,8 @@ TEST(TestOperator1doublePrint) { Operator1 op1a(23, NONE, "Canary", 0, 0, 0, 0, 0, 0, 0.5); Operator1 op1b(23, FOLD, "Finch", 2, 0, 0, 2, 0, 0, -1.5); - CHECK_EQ("Canary[0.5]", OperatorToString(&op1a).get()); - CHECK_EQ("Finch[-1.5]", OperatorToString(&op1b).get()); + CHECK_EQ(0, strcmp("Canary[0.5]", OperatorToString(&op1a).get())); + CHECK_EQ(0, strcmp("Finch[-1.5]", OperatorToString(&op1b).get())); } diff --git a/test/cctest/compiler/test-representation-change.cc b/test/cctest/compiler/test-representation-change.cc index f44648e651..55f054a74b 100644 --- a/test/cctest/compiler/test-representation-change.cc +++ b/test/cctest/compiler/test-representation-change.cc @@ -6,6 +6,7 @@ #include "src/v8.h" #include "test/cctest/cctest.h" +#include "test/cctest/compiler/codegen-tester.h" #include "test/cctest/compiler/graph-builder-tester.h" #include "test/cctest/compiler/value-helper.h" @@ -58,7 +59,7 @@ class RepresentationChangerTester : public HandleAndZoneScope, void CheckFloat64Constant(Node* n, double expected) { Float64Matcher m(n); CHECK(m.HasValue()); - CHECK_EQ(expected, m.Value()); + CheckDoubleEq(expected, m.Value()); } void CheckFloat32Constant(Node* n, float expected) { @@ -77,7 +78,7 @@ class RepresentationChangerTester : public 
HandleAndZoneScope, NumberMatcher m(n); CHECK_EQ(IrOpcode::kNumberConstant, n->opcode()); CHECK(m.HasValue()); - CHECK_EQ(expected, m.Value()); + CheckDoubleEq(expected, m.Value()); } Node* Parameter(int index = 0) { diff --git a/test/cctest/compiler/test-run-machops.cc b/test/cctest/compiler/test-run-machops.cc index a5a6823303..2d205758aa 100644 --- a/test/cctest/compiler/test-run-machops.cc +++ b/test/cctest/compiler/test-run-machops.cc @@ -15,10 +15,6 @@ #if V8_TURBOFAN_TARGET using namespace v8::base; - -#define CHECK_UINT32_EQ(x, y) \ - CHECK_EQ(static_cast(x), static_cast(y)) - using namespace v8::internal; using namespace v8::internal::compiler; @@ -505,7 +501,7 @@ TEST(RunLoadStoreFloat64Offset) { p1 = *j; p2 = *j - 5; CHECK_EQ(magic, m.Call()); - CHECK_EQ(p1, p2); + CheckDoubleEq(p1, p2); } } } @@ -763,7 +759,7 @@ TEST(RunInt32AddInBranch) { static const int32_t constant = 987654321; { RawMachineAssemblerTester m; - Uint32BinopTester bt(&m); + Int32BinopTester bt(&m); MLabel blocka, blockb; m.Branch( m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)), @@ -781,7 +777,7 @@ TEST(RunInt32AddInBranch) { } { RawMachineAssemblerTester m; - Uint32BinopTester bt(&m); + Int32BinopTester bt(&m); MLabel blocka, blockb; m.Branch( m.Word32NotEqual(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)), @@ -810,7 +806,7 @@ TEST(RunInt32AddInBranch) { m.Return(m.Int32Constant(0 - constant)); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i + *j) == 0 ? constant : 0 - constant; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -827,7 +823,7 @@ TEST(RunInt32AddInBranch) { m.Return(m.Int32Constant(0 - constant)); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i + *j) != 0 ? constant : 0 - constant; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -885,7 +881,7 @@ TEST(RunInt32AddInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i + *j) == 0; - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -897,7 +893,7 @@ TEST(RunInt32AddInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i + *j) == 0; - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -908,7 +904,7 @@ TEST(RunInt32AddInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i + *j) == 0; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -919,7 +915,7 @@ TEST(RunInt32AddInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*j + *i) == 0; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -971,7 +967,7 @@ TEST(RunInt32SubP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = static_cast(*i - *j); - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -984,7 +980,7 @@ TEST(RunInt32SubImm) { m.Return(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i - *j; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -994,7 +990,7 @@ TEST(RunInt32SubImm) { m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i))); FOR_UINT32_INPUTS(j) { uint32_t expected = *j - *i; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -1072,8 +1068,8 @@ TEST(RunInt32SubAndWord32ShrP) { FOR_UINT32_INPUTS(j) { FOR_UINT32_SHIFTS(shift) { // Use uint32_t because signed 
overflow is UB in C. - int32_t expected = *i - (*j >> shift); - CHECK_UINT32_EQ(expected, m.Call(*i, *j, shift)); + uint32_t expected = *i - (*j >> shift); + CHECK_EQ(expected, m.Call(*i, *j, shift)); } } } @@ -1087,7 +1083,7 @@ TEST(RunInt32SubAndWord32ShrP) { FOR_UINT32_SHIFTS(shift) { FOR_UINT32_INPUTS(k) { // Use uint32_t because signed overflow is UB in C. - int32_t expected = (*i >> shift) - *k; + uint32_t expected = (*i >> shift) - *k; CHECK_EQ(expected, m.Call(*i, shift, *k)); } } @@ -1100,7 +1096,7 @@ TEST(RunInt32SubInBranch) { static const int constant = 987654321; { RawMachineAssemblerTester m; - Uint32BinopTester bt(&m); + Int32BinopTester bt(&m); MLabel blocka, blockb; m.Branch( m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)), @@ -1118,7 +1114,7 @@ TEST(RunInt32SubInBranch) { } { RawMachineAssemblerTester m; - Uint32BinopTester bt(&m); + Int32BinopTester bt(&m); MLabel blocka, blockb; m.Branch( m.Word32NotEqual(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)), @@ -1146,7 +1142,7 @@ TEST(RunInt32SubInBranch) { m.Bind(&blockb); m.Return(m.Int32Constant(0 - constant)); FOR_UINT32_INPUTS(j) { - int32_t expected = (*i - *j) == 0 ? constant : 0 - constant; + uint32_t expected = (*i - *j) == 0 ? constant : 0 - constant; CHECK_EQ(expected, m.Call(*j)); } } @@ -1222,7 +1218,7 @@ TEST(RunInt32SubInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i - *j) == 0; - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -1234,7 +1230,7 @@ TEST(RunInt32SubInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i - *j) == 0; - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -1245,7 +1241,7 @@ TEST(RunInt32SubInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i - *j) == 0; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -1256,7 +1252,7 @@ TEST(RunInt32SubInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*j - *i) == 0; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -1318,7 +1314,7 @@ TEST(RunInt32MulP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i * *j; - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -1346,7 +1342,7 @@ TEST(RunInt32MulImm) { m.Return(m.Int32Mul(m.Int32Constant(*i), m.Parameter(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i * *j; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -1356,7 +1352,7 @@ TEST(RunInt32MulImm) { m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant(*i))); FOR_UINT32_INPUTS(j) { uint32_t expected = *j * *i; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -1527,7 +1523,7 @@ TEST(RunUint32DivP) { uint32_t p0 = *i; uint32_t p1 = *j; if (p1 != 0) { - uint32_t expected = static_cast(p0 / p1); + int32_t expected = bit_cast(p0 / p1); CHECK_EQ(expected, bt.call(p0, p1)); } } @@ -1542,7 +1538,7 @@ TEST(RunUint32DivP) { uint32_t p0 = *i; uint32_t p1 = *j; if (p1 != 0) { - uint32_t expected = static_cast(p0 + (p0 / p1)); + int32_t expected = bit_cast(p0 + (p0 / p1)); CHECK_EQ(expected, bt.call(p0, p1)); } } @@ -1588,7 +1584,7 @@ TEST(RunInt32ModP) { TEST(RunUint32ModP) { { RawMachineAssemblerTester m; - Int32BinopTester bt(&m); + Uint32BinopTester bt(&m); bt.AddReturn(m.Uint32Mod(bt.param0, bt.param1)); FOR_UINT32_INPUTS(i) { 
FOR_UINT32_INPUTS(j) { @@ -1603,7 +1599,7 @@ TEST(RunUint32ModP) { } { RawMachineAssemblerTester m; - Int32BinopTester bt(&m); + Uint32BinopTester bt(&m); bt.AddReturn(m.Int32Add(bt.param0, m.Uint32Mod(bt.param0, bt.param1))); FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { @@ -1626,7 +1622,7 @@ TEST(RunWord32AndP) { bt.AddReturn(m.Word32And(bt.param0, bt.param1)); FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { - uint32_t expected = *i & *j; + int32_t expected = *i & *j; CHECK_EQ(expected, bt.call(*i, *j)); } } @@ -1637,7 +1633,7 @@ TEST(RunWord32AndP) { bt.AddReturn(m.Word32And(bt.param0, m.Word32Not(bt.param1))); FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { - uint32_t expected = *i & ~(*j); + int32_t expected = *i & ~(*j); CHECK_EQ(expected, bt.call(*i, *j)); } } @@ -1648,7 +1644,7 @@ TEST(RunWord32AndP) { bt.AddReturn(m.Word32And(m.Word32Not(bt.param0), bt.param1)); FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { - uint32_t expected = ~(*i) & *j; + int32_t expected = ~(*i) & *j; CHECK_EQ(expected, bt.call(*i, *j)); } } @@ -1665,7 +1661,7 @@ TEST(RunWord32AndAndWord32ShlP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i << (*j & 0x1f); - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -1677,7 +1673,7 @@ TEST(RunWord32AndAndWord32ShlP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i << (0x1f & *j); - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -1693,7 +1689,7 @@ TEST(RunWord32AndAndWord32ShrP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i >> (*j & 0x1f); - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -1705,7 +1701,7 @@ TEST(RunWord32AndAndWord32ShrP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i >> (0x1f & *j); - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -1732,7 +1728,7 @@ TEST(RunWord32AndAndWord32SarP) { m.Word32Sar(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1))); FOR_INT32_INPUTS(i) { FOR_INT32_INPUTS(j) { - uint32_t expected = *i >> (0x1f & *j); + int32_t expected = *i >> (0x1f & *j); CHECK_EQ(expected, bt.call(*i, *j)); } } @@ -1747,7 +1743,7 @@ TEST(RunWord32AndImm) { m.Return(m.Word32And(m.Int32Constant(*i), m.Parameter(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i & *j; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -1757,7 +1753,7 @@ TEST(RunWord32AndImm) { m.Return(m.Word32And(m.Int32Constant(*i), m.Word32Not(m.Parameter(0)))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i & ~(*j); - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -1768,7 +1764,7 @@ TEST(RunWord32AndInBranch) { static const int constant = 987654321; { RawMachineAssemblerTester m; - Uint32BinopTester bt(&m); + Int32BinopTester bt(&m); MLabel blocka, blockb; m.Branch( m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)), @@ -1786,7 +1782,7 @@ TEST(RunWord32AndInBranch) { } { RawMachineAssemblerTester m; - Uint32BinopTester bt(&m); + Int32BinopTester bt(&m); MLabel blocka, blockb; m.Branch( m.Word32NotEqual(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)), @@ -1891,7 +1887,7 @@ TEST(RunWord32AndInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i & *j) == 0; - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -1903,7 +1899,7 @@ TEST(RunWord32AndInComparison) 
{ FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i & *j) == 0; - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -1914,7 +1910,7 @@ TEST(RunWord32AndInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i & *j) == 0; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -1925,7 +1921,7 @@ TEST(RunWord32AndInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*j & *i) == 0; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -1940,7 +1936,7 @@ TEST(RunWord32OrP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i | *j; - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -1951,7 +1947,7 @@ TEST(RunWord32OrP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i | ~(*j); - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -1962,7 +1958,7 @@ TEST(RunWord32OrP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = ~(*i) | *j; - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -1976,7 +1972,7 @@ TEST(RunWord32OrImm) { m.Return(m.Word32Or(m.Int32Constant(*i), m.Parameter(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i | *j; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -1986,7 +1982,7 @@ TEST(RunWord32OrImm) { m.Return(m.Word32Or(m.Int32Constant(*i), m.Word32Not(m.Parameter(0)))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i | ~(*j); - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -2113,7 +2109,7 @@ TEST(RunWord32OrInBranch) { TEST(RunWord32OrInComparison) { { RawMachineAssemblerTester m; - Uint32BinopTester bt(&m); + Int32BinopTester bt(&m); bt.AddReturn( m.Word32Equal(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0))); FOR_UINT32_INPUTS(i) { @@ -2125,7 +2121,7 @@ TEST(RunWord32OrInComparison) { } { RawMachineAssemblerTester m; - Uint32BinopTester bt(&m); + Int32BinopTester bt(&m); bt.AddReturn( m.Word32Equal(m.Int32Constant(0), m.Word32Or(bt.param0, bt.param1))); FOR_UINT32_INPUTS(i) { @@ -2142,7 +2138,7 @@ TEST(RunWord32OrInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i | *j) == 0; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -2153,7 +2149,7 @@ TEST(RunWord32OrInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*j | *i) == 0; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -2163,11 +2159,11 @@ TEST(RunWord32OrInComparison) { TEST(RunWord32XorP) { { FOR_UINT32_INPUTS(i) { - RawMachineAssemblerTester m(kMachUint32); + RawMachineAssemblerTester m(kMachUint32); m.Return(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i ^ *j; - CHECK_UINT32_EQ(expected, m.Call(*j)); + CHECK_EQ(expected, m.Call(*j)); } } } @@ -2177,8 +2173,8 @@ TEST(RunWord32XorP) { bt.AddReturn(m.Word32Xor(bt.param0, bt.param1)); FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { - int32_t expected = *i ^ *j; - CHECK_UINT32_EQ(expected, bt.call(*i, *j)); + uint32_t expected = *i ^ *j; + CHECK_EQ(expected, bt.call(*i, *j)); } } } @@ -2210,7 +2206,7 @@ TEST(RunWord32XorP) { m.Return(m.Word32Xor(m.Int32Constant(*i), m.Word32Not(m.Parameter(0)))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i ^ ~(*j); - 
+        CHECK_EQ(expected, m.Call(*j));
       }
     }
   }
@@ -2233,7 +2229,7 @@ TEST(RunWord32XorInBranch) {
     FOR_UINT32_INPUTS(i) {
       FOR_UINT32_INPUTS(j) {
         uint32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
-        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+        CHECK_EQ(expected, bt.call(*i, *j));
       }
     }
   }
@@ -2251,7 +2247,7 @@ TEST(RunWord32XorInBranch) {
     FOR_UINT32_INPUTS(i) {
       FOR_UINT32_INPUTS(j) {
         uint32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
-        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+        CHECK_EQ(expected, bt.call(*i, *j));
       }
     }
   }
@@ -2268,7 +2264,7 @@ TEST(RunWord32XorInBranch) {
       m.Return(m.Int32Constant(0 - constant));
       FOR_UINT32_INPUTS(j) {
         uint32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
-        CHECK_UINT32_EQ(expected, m.Call(*j));
+        CHECK_EQ(expected, m.Call(*j));
       }
     }
   }
@@ -2286,7 +2282,7 @@ TEST(RunWord32XorInBranch) {
       m.Return(m.Int32Constant(0 - constant));
      FOR_UINT32_INPUTS(j) {
        uint32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
-        CHECK_UINT32_EQ(expected, m.Call(*j));
+        CHECK_EQ(expected, m.Call(*j));
      }
    }
  }
@@ -2342,7 +2338,7 @@ TEST(RunWord32ShlP) {
      m.Return(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)));
      FOR_UINT32_INPUTS(j) {
        uint32_t expected = *j << shift;
-        CHECK_UINT32_EQ(expected, m.Call(*j));
+        CHECK_EQ(expected, m.Call(*j));
      }
    }
  }
@@ -2353,7 +2349,7 @@ TEST(RunWord32ShlP) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = *i << shift;
-        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+        CHECK_EQ(expected, bt.call(*i, shift));
      }
    }
  }
@@ -2369,7 +2365,7 @@ TEST(RunWord32ShlInComparison) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = 0 == (*i << shift);
-        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+        CHECK_EQ(expected, bt.call(*i, shift));
      }
    }
  }
@@ -2381,31 +2377,31 @@ TEST(RunWord32ShlInComparison) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = 0 == (*i << shift);
-        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+        CHECK_EQ(expected, bt.call(*i, shift));
      }
    }
  }
  {
    FOR_UINT32_SHIFTS(shift) {
-      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
      m.Return(
          m.Word32Equal(m.Int32Constant(0),
                        m.Word32Shl(m.Parameter(0), m.Int32Constant(shift))));
      FOR_UINT32_INPUTS(i) {
        uint32_t expected = 0 == (*i << shift);
-        CHECK_UINT32_EQ(expected, m.Call(*i));
+        CHECK_EQ(expected, m.Call(*i));
      }
    }
  }
  {
    FOR_UINT32_SHIFTS(shift) {
-      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
      m.Return(
          m.Word32Equal(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)),
                        m.Int32Constant(0)));
      FOR_UINT32_INPUTS(i) {
        uint32_t expected = 0 == (*i << shift);
-        CHECK_UINT32_EQ(expected, m.Call(*i));
+        CHECK_EQ(expected, m.Call(*i));
      }
    }
  }
@@ -2419,7 +2415,7 @@ TEST(RunWord32ShrP) {
      m.Return(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)));
      FOR_UINT32_INPUTS(j) {
        uint32_t expected = *j >> shift;
-        CHECK_UINT32_EQ(expected, m.Call(*j));
+        CHECK_EQ(expected, m.Call(*j));
      }
    }
  }
@@ -2430,10 +2426,10 @@ TEST(RunWord32ShrP) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = *i >> shift;
-        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+        CHECK_EQ(expected, bt.call(*i, shift));
      }
    }
-    CHECK_EQ(0x00010000, bt.call(0x80000000, 15));
+    CHECK_EQ(0x00010000u, bt.call(0x80000000, 15));
  }
 }
@@ -2447,7 +2443,7 @@ TEST(RunWord32ShrInComparison) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = 0 == (*i >> shift);
-        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+        CHECK_EQ(expected, bt.call(*i, shift));
      }
    }
  }
@@ -2459,31 +2455,31 @@ TEST(RunWord32ShrInComparison) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = 0 == (*i >> shift);
-        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+        CHECK_EQ(expected, bt.call(*i, shift));
      }
    }
  }
  {
    FOR_UINT32_SHIFTS(shift) {
-      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
      m.Return(
          m.Word32Equal(m.Int32Constant(0),
                        m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
      FOR_UINT32_INPUTS(i) {
        uint32_t expected = 0 == (*i >> shift);
-        CHECK_UINT32_EQ(expected, m.Call(*i));
+        CHECK_EQ(expected, m.Call(*i));
      }
    }
  }
  {
    FOR_UINT32_SHIFTS(shift) {
-      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
      m.Return(
          m.Word32Equal(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
                        m.Int32Constant(0)));
      FOR_UINT32_INPUTS(i) {
        uint32_t expected = 0 == (*i >> shift);
-        CHECK_UINT32_EQ(expected, m.Call(*i));
+        CHECK_EQ(expected, m.Call(*i));
      }
    }
  }
@@ -2511,7 +2507,7 @@ TEST(RunWord32SarP) {
        CHECK_EQ(expected, bt.call(*i, shift));
      }
    }
-    CHECK_EQ(0xFFFF0000, bt.call(0x80000000, 15));
+    CHECK_EQ(bit_cast<int32_t>(0xFFFF0000), bt.call(0x80000000, 15));
  }
 }
@@ -2560,7 +2556,7 @@ TEST(RunWord32SarInComparison) {
          m.Word32Equal(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)),
                        m.Int32Constant(0)));
      FOR_INT32_INPUTS(i) {
-        uint32_t expected = 0 == (*i >> shift);
+        int32_t expected = 0 == (*i >> shift);
        CHECK_EQ(expected, m.Call(*i));
      }
    }
  }
@@ -2586,7 +2582,7 @@ TEST(RunWord32RorP) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = bits::RotateRight32(*i, shift);
-        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+        CHECK_EQ(expected, bt.call(*i, shift));
      }
    }
  }
@@ -2602,7 +2598,7 @@ TEST(RunWord32RorInComparison) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = 0 == bits::RotateRight32(*i, shift);
-        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+        CHECK_EQ(expected, bt.call(*i, shift));
      }
    }
  }
@@ -2614,31 +2610,31 @@ TEST(RunWord32RorInComparison) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = 0 == bits::RotateRight32(*i, shift);
-        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+        CHECK_EQ(expected, bt.call(*i, shift));
      }
    }
  }
  {
    FOR_UINT32_SHIFTS(shift) {
-      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
      m.Return(
          m.Word32Equal(m.Int32Constant(0),
                        m.Word32Ror(m.Parameter(0), m.Int32Constant(shift))));
      FOR_UINT32_INPUTS(i) {
        uint32_t expected = 0 == bits::RotateRight32(*i, shift);
-        CHECK_UINT32_EQ(expected, m.Call(*i));
+        CHECK_EQ(expected, m.Call(*i));
      }
    }
  }
  {
    FOR_UINT32_SHIFTS(shift) {
-      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
      m.Return(
          m.Word32Equal(m.Word32Ror(m.Parameter(0), m.Int32Constant(shift)),
                        m.Int32Constant(0)));
      FOR_UINT32_INPUTS(i) {
        uint32_t expected = 0 == bits::RotateRight32(*i, shift);
-        CHECK_UINT32_EQ(expected, m.Call(*i));
+        CHECK_EQ(expected, m.Call(*i));
      }
    }
  }
@@ -2964,7 +2960,7 @@ TEST(RunFloat64AddP) {
  FOR_FLOAT64_INPUTS(pl) {
    FOR_FLOAT64_INPUTS(pr) {
      double expected = *pl + *pr;
-      CHECK_EQ(expected, bt.call(*pl, *pr));
+      CheckDoubleEq(expected, bt.call(*pl, *pr));
    }
  }
 }
@@ -2979,7 +2975,7 @@ TEST(RunFloat64SubP) {
  FOR_FLOAT64_INPUTS(pl) {
    FOR_FLOAT64_INPUTS(pr) {
      double expected = *pl - *pr;
-      CHECK_EQ(expected, bt.call(*pl, *pr));
+      CheckDoubleEq(expected, bt.call(*pl, *pr));
    }
  }
 }
@@ -2999,7 +2995,7 @@ TEST(RunFloat64SubImm1) {
      input = *j;
      double expected = *i - input;
      CHECK_EQ(0, m.Call());
-      CHECK_EQ(expected, output);
+      CheckDoubleEq(expected, output);
    }
  }
 }
@@ -3019,7 +3015,7 @@ TEST(RunFloat64SubImm2) {
      input = *j;
      double expected = input - *i;
      CHECK_EQ(0, m.Call());
-      CHECK_EQ(expected, output);
+      CheckDoubleEq(expected, output);
    }
  }
 }
@@ -3034,7 +3030,7 @@ TEST(RunFloat64MulP) {
  FOR_FLOAT64_INPUTS(pl) {
    FOR_FLOAT64_INPUTS(pr) {
      double expected = *pl * *pr;
-      CHECK_EQ(expected, bt.call(*pl, *pr));
+      CheckDoubleEq(expected, bt.call(*pl, *pr));
    }
  }
 }
@@ -3063,7 +3059,7 @@ TEST(RunFloat64MulAndFloat64AddP) {
        volatile double temp = input_a * input_b;
        volatile double expected = temp + input_c;
        CHECK_EQ(0, m.Call());
-        CHECK_EQ(expected, output);
+        CheckDoubleEq(expected, output);
      }
    }
  }
@@ -3085,7 +3081,7 @@ TEST(RunFloat64MulAndFloat64AddP) {
        volatile double temp = input_b * input_c;
        volatile double expected = input_a + temp;
        CHECK_EQ(0, m.Call());
-        CHECK_EQ(expected, output);
+        CheckDoubleEq(expected, output);
      }
    }
  }
@@ -3115,7 +3111,7 @@ TEST(RunFloat64MulAndFloat64SubP) {
        volatile double temp = input_b * input_c;
        volatile double expected = input_a - temp;
        CHECK_EQ(0, m.Call());
-        CHECK_EQ(expected, output);
+        CheckDoubleEq(expected, output);
      }
    }
  }
@@ -3137,7 +3133,7 @@ TEST(RunFloat64MulImm) {
      input = *j;
      double expected = *i * input;
      CHECK_EQ(0, m.Call());
-      CHECK_EQ(expected, output);
+      CheckDoubleEq(expected, output);
    }
  }
 }
@@ -3152,7 +3148,7 @@ TEST(RunFloat64MulImm) {
      input = *j;
      double expected = input * *i;
      CHECK_EQ(0, m.Call());
-      CHECK_EQ(expected, output);
+      CheckDoubleEq(expected, output);
    }
  }
 }
@@ -3168,7 +3164,7 @@ TEST(RunFloat64DivP) {
  FOR_FLOAT64_INPUTS(pl) {
    FOR_FLOAT64_INPUTS(pr) {
      double expected = *pl / *pr;
-      CHECK_EQ(expected, bt.call(*pl, *pr));
+      CheckDoubleEq(expected, bt.call(*pl, *pr));
    }
  }
 }
@@ -3184,7 +3180,7 @@ TEST(RunFloat64ModP) {
    FOR_FLOAT64_INPUTS(j) {
      double expected = modulo(*i, *j);
      double found = bt.call(*i, *j);
-      CHECK_EQ(expected, found);
+      CheckDoubleEq(expected, found);
    }
  }
 }
@@ -3223,7 +3219,7 @@ TEST(RunChangeInt32ToFloat64_B) {
 TEST(RunChangeUint32ToFloat64_B) {
-  RawMachineAssemblerTester<int32_t> m(kMachUint32);
+  RawMachineAssemblerTester<uint32_t> m(kMachUint32);
   double output = 0;
 
   Node* convert = m.ChangeUint32ToFloat64(m.Parameter(0));
@@ -3404,7 +3400,7 @@ TEST(RunChangeFloat64ToInt32_spilled) {
 TEST(RunChangeFloat64ToUint32_spilled) {
   RawMachineAssemblerTester m;
   const int kNumInputs = 32;
-  int32_t magic = 0x786234;
+  uint32_t magic = 0x786234;
   double input[kNumInputs];
   uint32_t result[kNumInputs];
   Node* input_node[kNumInputs];
@@ -3433,9 +3429,9 @@ TEST(RunChangeFloat64ToUint32_spilled) {
   for (int i = 0; i < kNumInputs; i++) {
     if (i % 2) {
-      CHECK_UINT32_EQ(result[i], static_cast<uint32_t>(100 + i + 2147483648u));
+      CHECK_EQ(result[i], static_cast<uint32_t>(100 + i + 2147483648u));
     } else {
-      CHECK_UINT32_EQ(result[i], static_cast<uint32_t>(100 + i));
+      CHECK_EQ(result[i], static_cast<uint32_t>(100 + i));
     }
   }
 }
@@ -3444,7 +3440,7 @@ TEST(RunChangeFloat64ToUint32_spilled) {
 TEST(RunTruncateFloat64ToFloat32_spilled) {
   RawMachineAssemblerTester m;
   const int kNumInputs = 32;
-  int32_t magic = 0x786234;
+  uint32_t magic = 0x786234;
   double input[kNumInputs];
   float result[kNumInputs];
   Node* input_node[kNumInputs];
@@ -4368,7 +4364,7 @@ TEST(RunTruncateInt64ToInt32P) {
   FOR_UINT32_INPUTS(i) {
     FOR_UINT32_INPUTS(j) {
       expected = (static_cast(*j) << 32) | *i;
-      CHECK_UINT32_EQ(expected, m.Call());
+      CHECK_EQ(static_cast<int32_t>(expected), m.Call());
     }
   }
 }
@@ -4504,7 +4500,7 @@ TEST(RunTruncateFloat64ToFloat32) {
    input = *i;
    volatile double expected = DoubleToFloat32(input);
    CHECK_EQ(0, m.Call());
-    CHECK_EQ(expected, actual);
+    CheckDoubleEq(expected, actual);
  }
 }
diff --git a/test/cctest/compiler/value-helper.h b/test/cctest/compiler/value-helper.h
index caf1daf3a7..208fa437c2 100644
--- a/test/cctest/compiler/value-helper.h
+++ b/test/cctest/compiler/value-helper.h
@@ -44,7 +44,7 @@ class ValueHelper {
 
   void CheckUint32Constant(int32_t expected, Node* node) {
     CHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
-    CHECK_EQ(expected, OpParameter(node));
+    CHECK_EQ(expected, OpParameter(node));
   }
 
   void CheckHeapConstant(Object* expected, Node* node) {
diff --git a/test/cctest/test-accessors.cc b/test/cctest/test-accessors.cc
index 5f452ead07..bbb74c0a71 100644
--- a/test/cctest/test-accessors.cc
+++ b/test/cctest/test-accessors.cc
@@ -150,20 +150,20 @@ static void XGetter(const Info& info, int offset) {
   ApiTestFuzzer::Fuzz();
   v8::Isolate* isolate = CcTest::isolate();
   CHECK_EQ(isolate, info.GetIsolate());
-  CHECK_EQ(x_receiver, info.This());
+  CHECK(x_receiver->Equals(info.This()));
   info.GetReturnValue().Set(v8_num(x_register[offset]));
 }
 
 static void XGetter(Local<String> name,
                     const v8::PropertyCallbackInfo<v8::Value>& info) {
-  CHECK_EQ(x_holder, info.Holder());
+  CHECK(x_holder->Equals(info.Holder()));
   XGetter(info, 0);
 }
 
 static void XGetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
-  CHECK_EQ(x_receiver, info.Holder());
+  CHECK(x_receiver->Equals(info.Holder()));
   XGetter(info, 1);
 }
@@ -172,8 +172,8 @@ template <class Info>
 static void XSetter(Local<Value> value, const Info& info, int offset) {
   v8::Isolate* isolate = CcTest::isolate();
   CHECK_EQ(isolate, info.GetIsolate());
-  CHECK_EQ(x_holder, info.This());
-  CHECK_EQ(x_holder, info.Holder());
+  CHECK(x_holder->Equals(info.This()));
+  CHECK(x_holder->Equals(info.Holder()));
   x_register[offset] = value->Int32Value();
   info.GetReturnValue().Set(v8_num(-1));
 }
@@ -222,10 +222,10 @@ THREADED_TEST(AccessorIC) {
      " result.push(obj[key_1]);"
      "}"
      "result"));
-  CHECK_EQ(80, array->Length());
+  CHECK_EQ(80u, array->Length());
   for (int i = 0; i < 80; i++) {
     v8::Handle<v8::Value> entry = array->Get(v8::Integer::New(isolate, i));
-    CHECK_EQ(v8::Integer::New(isolate, i/2), entry);
+    CHECK(v8::Integer::New(isolate, i / 2)->Equals(entry));
   }
 }
@@ -407,7 +407,7 @@ THREADED_TEST(Regress1054726) {
      "for (var i = 0; i < 5; i++) {"
      " try { obj.x; } catch (e) { result += e; }"
      "}; result"))->Run();
-  CHECK_EQ(v8_str("ggggg"), result);
+  CHECK(v8_str("ggggg")->Equals(result));
   result = Script::Compile(String::NewFromUtf8(
      isolate,
@@ -415,7 +415,7 @@ THREADED_TEST(Regress1054726) {
      "for (var i = 0; i < 5; i++) {"
      " try { obj.x = i; } catch (e) { result += e; }"
      "}; result"))->Run();
-  CHECK_EQ(v8_str("01234"), result);
+  CHECK(v8_str("01234")->Equals(result));
 }
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 6306db91fd..a043b36499 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -107,8 +107,8 @@ static void IncrementingSignatureCallback(
     const v8::FunctionCallbackInfo<v8::Value>& args) {
   ApiTestFuzzer::Fuzz();
   signature_callback_count++;
-  CHECK_EQ(signature_expected_receiver, args.Holder());
-  CHECK_EQ(signature_expected_receiver, args.This());
+  CHECK(signature_expected_receiver->Equals(args.Holder()));
+  CHECK(signature_expected_receiver->Equals(args.This()));
   v8::Handle<v8::Array> result =
       v8::Array::New(args.GetIsolate(), args.Length());
   for (int i = 0; i < args.Length(); i++)
@@ -190,8 +190,8 @@ static void TestSignature(const char* loop_js, Local receiver,
   if (!expected_to_throw) {
     CHECK_EQ(10, signature_callback_count);
   } else {
-    CHECK_EQ(v8_str("TypeError: Illegal invocation"),
-             try_catch.Exception()->ToString(isolate));
+    CHECK(v8_str("TypeError: Illegal invocation")
+              ->Equals(try_catch.Exception()->ToString(isolate)));
   }
 }
@@ -296,7 +296,7 @@ THREADED_TEST(Access) {
   Local<Value> foo_after = obj->Get(v8_str("foo"));
   CHECK(!foo_after->IsUndefined());
   CHECK(foo_after->IsString());
-  CHECK_EQ(bar_str, foo_after);
+  CHECK(bar_str->Equals(foo_after));
 }
@@ -311,7 +311,7 @@ THREADED_TEST(AccessElement) {
   Local<Value> after = obj->Get(1);
   CHECK(!after->IsUndefined());
   CHECK(after->IsString());
-  CHECK_EQ(bar_str, after);
+  CHECK(bar_str->Equals(after));
 
   Local<v8::Array> value = CompileRun("[\"a\", \"b\"]").As<v8::Array>();
-  CHECK_EQ(v8_str("a"), value->Get(0));
-  CHECK_EQ(v8_str("b"), value->Get(1));
+  CHECK(v8_str("a")->Equals(value->Get(0)));
+  CHECK(v8_str("b")->Equals(value->Get(1)));
 }
@@ -459,7 +459,7 @@ THREADED_TEST(ScriptMakingExternalString) {
    CHECK_EQ(source->IsExternal(), false);
    CHECK_EQ(source->IsExternalOneByte(), false);
    String::Encoding encoding = String::UNKNOWN_ENCODING;
-    CHECK_EQ(NULL, source->GetExternalStringResourceBase(&encoding));
+    CHECK(!source->GetExternalStringResourceBase(&encoding));
    CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
    bool success =
        source->MakeExternal(new TestResource(two_byte_source, &dispose_count));
@@ -697,7 +697,7 @@ THREADED_TEST(NewExternalForVeryLongString) {
    CHECK(str.IsEmpty());
    CHECK(try_catch.HasCaught());
    String::Utf8Value exception_value(try_catch.Exception());
-    CHECK_EQ("RangeError: Invalid string length", *exception_value);
+    CHECK_EQ(0, strcmp("RangeError: Invalid string length", *exception_value));
  }
 
  {
@@ -709,7 +709,7 @@ THREADED_TEST(NewExternalForVeryLongString) {
    CHECK(str.IsEmpty());
    CHECK(try_catch.HasCaught());
    String::Utf8Value exception_value(try_catch.Exception());
-    CHECK_EQ("RangeError: Invalid string length", *exception_value);
+    CHECK_EQ(0, strcmp("RangeError: Invalid string length", *exception_value));
  }
 }
@@ -1001,7 +1001,7 @@ static void TestFunctionTemplateAccessor(Constructor constructor,
   Local<Function> fun = fun_templ->GetFunction();
   env->Global()->Set(v8_str("obj"), fun);
   Local<Value> result = v8_compile("(new obj()).toString()")->Run();
-  CHECK_EQ(v8_str("[object funky]"), result);
+  CHECK(v8_str("[object funky]")->Equals(result));
   CompileRun("var obj_instance = new obj();");
   Local