[turbofan] Drop V8_TURBOFAN_BACKEND and V8_TURBOFAN_TARGET defines.

TurboFan is now a requirement and supported by all backends, so we don't
need those macros (plus all the machinery on top) anymore.

R=jarin@chromium.org

Review URL: https://codereview.chromium.org/1282763002

Cr-Commit-Position: refs/heads/master@{#30082}
Author: bmeurer (committed by Commit bot)
Date: 2015-08-10 00:17:16 -07:00
Parent: 0c674828dc
Commit: 6db78c8065
21 changed files with 16 additions and 382 deletions
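
For context, a minimal sketch (not part of the diff) of why the deleted guards were dead code. The two Pipeline predicates appear verbatim in the class Pipeline hunk below; the comments are explanatory only:

// The predicates this commit deletes (see the class Pipeline hunk below):
static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; }
static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }

// The deleted macro block (further down) defined V8_TURBOFAN_BACKEND as 1 on
// every remaining target architecture, and V8_TURBOFAN_TARGET as 1 whenever
// the backend macro was 1, so both predicates were constant-true. Every
// "if (Pipeline::SupportedTarget())" guard and every UNIMPLEMENTED() stub
// behind "#if !V8_TURBOFAN_BACKEND" was therefore unreachable, which is why
// this patch deletes them outright.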


@@ -654,61 +654,6 @@ void CodeGenerator::MarkLazyDeoptSite() {
last_lazy_deopt_pc_ = masm()->pc_offset();
}
#if !V8_TURBOFAN_BACKEND
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
UNIMPLEMENTED();
}
void CodeGenerator::AssembleArchBranch(Instruction* instr,
BranchInfo* branch) {
UNIMPLEMENTED();
}
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
UNIMPLEMENTED();
}
void CodeGenerator::AssembleArchJump(RpoNumber target) { UNIMPLEMENTED(); }
void CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
UNIMPLEMENTED();
}
void CodeGenerator::AssemblePrologue() { UNIMPLEMENTED(); }
void CodeGenerator::AssembleReturn() { UNIMPLEMENTED(); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
UNIMPLEMENTED();
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
UNIMPLEMENTED();
}
void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
UNIMPLEMENTED();
}
#endif // !V8_TURBOFAN_BACKEND
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: masm_(gen->masm()), next_(gen->ools_) {


@@ -777,8 +777,6 @@ void InstructionSelector::VisitNode(Node* node) {
}
#if V8_TURBOFAN_BACKEND
void InstructionSelector::VisitLoadStackPointer(Node* node) {
OperandGenerator g(this);
Emit(kArchStackPointer, g.DefineAsRegister(node));
@@ -827,10 +825,8 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
}
#endif // V8_TURBOFAN_BACKEND
// 32 bit targets do not implement the following instructions.
#if !V8_TURBOFAN_BACKEND_64
#if V8_TARGET_ARCH_32_BIT
void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
@@ -907,7 +903,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
#endif // V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
#endif // V8_TARGET_ARCH_32_BIT
void InstructionSelector::VisitFinish(Node* node) {
@@ -1124,42 +1120,6 @@ void InstructionSelector::AddFrameStateInputs(
DCHECK(value_index == descriptor->GetSize());
}
#if !V8_TURBOFAN_BACKEND
#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
#undef DECLARE_UNIMPLEMENTED_SELECTOR
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitTailCall(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
UNIMPLEMENTED();
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
return MachineOperatorBuilder::Flag::kNoFlags;
}
#endif // !V8_TURBOFAN_BACKEND
} // namespace compiler
} // namespace internal
} // namespace v8


@@ -1076,9 +1076,6 @@ Handle<Code> Pipeline::GenerateCode() {
GraphReplayPrinter::PrintReplay(data.graph());
}
// Bailout here in case target architecture is not supported.
if (!SupportedTarget()) return Handle<Code>::null();
base::SmartPointer<Typer> typer;
if (info()->is_typing_enabled()) {
// Type the graph.
@@ -1203,7 +1200,6 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
PipelineData* data = this->data_;
DCHECK_NOT_NULL(data->graph());
CHECK(SupportedBackend());
if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
TraceSchedule(data->info(), data->schedule());


@@ -46,9 +46,6 @@ class Pipeline {
InstructionSequence* sequence,
bool run_verifier);
static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; }
static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }
private:
static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
CallDescriptor* call_descriptor,


@@ -30,29 +30,6 @@
# define V8_INFINITY INFINITY
#endif
#if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_X87
#define V8_TURBOFAN_BACKEND 1
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS64 || \
V8_TARGET_ARCH_PPC64
// 64-bit TurboFan backends support 64-bit integer arithmetic.
#define V8_TURBOFAN_BACKEND_64 1
#else
#define V8_TURBOFAN_BACKEND_64 0
#endif
#else
#define V8_TURBOFAN_BACKEND 0
#endif
#if V8_TURBOFAN_BACKEND
#define V8_TURBOFAN_TARGET 1
#else
#define V8_TURBOFAN_TARGET 0
#endif
namespace v8 {
namespace base {


@@ -368,8 +368,6 @@ void Int32BinopInputShapeTester::RunRight(
}
#if V8_TURBOFAN_TARGET
TEST(ParametersEqual) {
RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
Node* p1 = m.Parameter(1);
@@ -572,5 +570,3 @@ TEST(RunBinopTester) {
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, bt.call(-11.25, *i)); }
}
}
#endif // V8_TURBOFAN_TARGET


@@ -20,8 +20,6 @@
#include "src/rewriter.h"
#include "src/scopes.h"
#define USE_CRANKSHAFT 0
namespace v8 {
namespace internal {
namespace compiler {
@@ -156,7 +154,6 @@ class FunctionTester : public InitializedHandleScope {
Handle<JSFunction> Compile(Handle<JSFunction> function) {
// TODO(titzer): make this method private.
#if V8_TURBOFAN_TARGET
Zone zone;
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
@@ -181,19 +178,6 @@ class FunctionTester : public InitializedHandleScope {
CHECK(!code.is_null());
info.context()->native_context()->AddOptimizedCode(*code);
function->ReplaceCode(*code);
#elif USE_CRANKSHAFT
Handle<Code> unoptimized = Handle<Code>(function->code());
Handle<Code> code = Compiler::GetOptimizedCode(function, unoptimized,
Compiler::NOT_CONCURRENT);
CHECK(!code.is_null());
#if ENABLE_DISASSEMBLER
if (FLAG_print_opt_code) {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
code->Disassemble("test code", tracing_scope.file());
}
#endif
function->ReplaceCode(*code);
#endif
return function;
}
@@ -212,7 +196,6 @@ class FunctionTester : public InitializedHandleScope {
// Compile the given machine graph instead of the source of the function
// and replace the JSFunction's code with the result.
Handle<JSFunction> CompileGraph(Graph* graph) {
CHECK(Pipeline::SupportedTarget());
Zone zone;
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);


@@ -268,7 +268,6 @@ class GraphBuilderTester : public HandleAndZoneScope,
}
virtual byte* Generate() {
if (!Pipeline::SupportedBackend()) return NULL;
if (code_.is_null()) {
Zone* zone = graph()->zone();
CallDescriptor* desc =


@@ -8,8 +8,6 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#if V8_TURBOFAN_TARGET
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -110,5 +108,3 @@ TEST(ProfileLoop) {
m.Expect(arraysize(expected), expected);
}
}
#endif // V8_TURBOFAN_TARGET


@@ -8,8 +8,6 @@
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
#if V8_TURBOFAN_TARGET
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -459,4 +457,3 @@ TEST(BranchCombineFloat64Compares) {
}
}
}
#endif // V8_TURBOFAN_TARGET


@@ -147,7 +147,6 @@ TEST(RunChangeTaggedToInt32) {
ChangesLoweringTester<int32_t> t(kMachAnyTagged);
t.BuildAndLower(t.simplified()->ChangeTaggedToInt32());
if (Pipeline::SupportedTarget()) {
FOR_INT32_INPUTS(i) {
int32_t input = *i;
@@ -167,7 +166,6 @@ TEST(RunChangeTaggedToInt32) {
int32_t result = t.Call(*number);
CHECK_EQ(input, result);
}
}
}
}
@@ -177,7 +175,6 @@ TEST(RunChangeTaggedToUint32) {
ChangesLoweringTester<uint32_t> t(kMachAnyTagged);
t.BuildAndLower(t.simplified()->ChangeTaggedToUint32());
if (Pipeline::SupportedTarget()) {
FOR_UINT32_INPUTS(i) {
uint32_t input = *i;
@@ -198,7 +195,6 @@ TEST(RunChangeTaggedToUint32) {
CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result));
}
}
}
}
@@ -211,7 +207,7 @@ TEST(RunChangeTaggedToFloat64) {
t.machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
&result);
if (Pipeline::SupportedTarget()) {
{
FOR_INT32_INPUTS(i) {
int32_t input = *i;
@@ -234,7 +230,7 @@ TEST(RunChangeTaggedToFloat64) {
}
}
if (Pipeline::SupportedTarget()) {
{
FOR_FLOAT64_INPUTS(i) {
double input = *i;
{
@@ -257,13 +253,13 @@ TEST(RunChangeBoolToBit) {
ChangesLoweringTester<int32_t> t(kMachAnyTagged);
t.BuildAndLower(t.simplified()->ChangeBoolToBit());
if (Pipeline::SupportedTarget()) {
{
Object* true_obj = t.heap()->true_value();
int32_t result = t.Call(true_obj);
CHECK_EQ(1, result);
}
if (Pipeline::SupportedTarget()) {
{
Object* false_obj = t.heap()->false_value();
int32_t result = t.Call(false_obj);
CHECK_EQ(0, result);
@@ -275,122 +271,15 @@ TEST(RunChangeBitToBool) {
ChangesLoweringTester<Object*> t(kMachInt32);
t.BuildAndLower(t.simplified()->ChangeBitToBool());
if (Pipeline::SupportedTarget()) {
{
Object* result = t.Call(1);
Object* true_obj = t.heap()->true_value();
CHECK_EQ(true_obj, result);
}
if (Pipeline::SupportedTarget()) {
{
Object* result = t.Call(0);
Object* false_obj = t.heap()->false_value();
CHECK_EQ(false_obj, result);
}
}
#if V8_TURBOFAN_BACKEND
// TODO(titzer): disabled on ARM
TEST(RunChangeInt32ToTaggedSmi) {
ChangesLoweringTester<Object*> t;
int32_t input;
t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(),
t.machine()->Load(kMachInt32), &input);
if (Pipeline::SupportedTarget()) {
FOR_INT32_INPUTS(i) {
input = *i;
if (!Smi::IsValid(input)) continue;
Object* result = t.Call();
t.CheckNumber(static_cast<double>(input), result);
}
}
}
TEST(RunChangeUint32ToTaggedSmi) {
ChangesLoweringTester<Object*> t;
uint32_t input;
t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(),
t.machine()->Load(kMachUint32), &input);
if (Pipeline::SupportedTarget()) {
FOR_UINT32_INPUTS(i) {
input = *i;
if (input > static_cast<uint32_t>(Smi::kMaxValue)) continue;
Object* result = t.Call();
double expected = static_cast<double>(input);
t.CheckNumber(expected, result);
}
}
}
TEST(RunChangeInt32ToTagged) {
ChangesLoweringTester<Object*> t;
int32_t input;
t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(),
t.machine()->Load(kMachInt32), &input);
if (Pipeline::SupportedTarget()) {
for (int m = 0; m < 3; m++) { // Try 3 GC modes.
FOR_INT32_INPUTS(i) {
if (m == 0) CcTest::heap()->EnableInlineAllocation();
if (m == 1) CcTest::heap()->DisableInlineAllocation();
if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
input = *i;
Object* result = t.CallWithPotentialGC<Object>();
t.CheckNumber(static_cast<double>(input), result);
}
}
}
}
TEST(RunChangeUint32ToTagged) {
ChangesLoweringTester<Object*> t;
uint32_t input;
t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(),
t.machine()->Load(kMachUint32), &input);
if (Pipeline::SupportedTarget()) {
for (int m = 0; m < 3; m++) { // Try 3 GC modes.
FOR_UINT32_INPUTS(i) {
if (m == 0) CcTest::heap()->EnableInlineAllocation();
if (m == 1) CcTest::heap()->DisableInlineAllocation();
if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
input = *i;
Object* result = t.CallWithPotentialGC<Object>();
double expected = static_cast<double>(input);
t.CheckNumber(expected, result);
}
}
}
}
TEST(RunChangeFloat64ToTagged) {
ChangesLoweringTester<Object*> t;
double input;
t.BuildLoadAndLower(t.simplified()->ChangeFloat64ToTagged(),
t.machine()->Load(kMachFloat64), &input);
if (Pipeline::SupportedTarget()) {
for (int m = 0; m < 3; m++) { // Try 3 GC modes.
FOR_FLOAT64_INPUTS(i) {
if (m == 0) CcTest::heap()->EnableInlineAllocation();
if (m == 1) CcTest::heap()->DisableInlineAllocation();
if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
input = *i;
Object* result = t.CallWithPotentialGC<Object>();
t.CheckNumber(input, result);
}
}
}
}
#endif // V8_TURBOFAN_BACKEND


@@ -19,8 +19,6 @@
#include "src/compiler/schedule.h"
#include "test/cctest/cctest.h"
#if V8_TURBOFAN_TARGET
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -113,5 +111,3 @@ TEST(TestLinkageRuntimeCall) {
TEST(TestLinkageStubCall) {
// TODO(titzer): test linkage creation for outgoing stub calls.
}
#endif // V8_TURBOFAN_TARGET


@@ -21,13 +21,8 @@ static void RunPipeline(Zone* zone, const char* source) {
CompilationInfo info(&parse_info);
Pipeline pipeline(&info);
#if V8_TURBOFAN_TARGET
Handle<Code> code = pipeline.GenerateCode();
CHECK(Pipeline::SupportedTarget());
CHECK(!code.is_null());
#else
USE(pipeline);
#endif
}


@@ -10,8 +10,6 @@
using namespace v8::internal;
using namespace v8::internal::compiler;
#if V8_TURBOFAN_TARGET
static void IsOptimized(const v8::FunctionCallbackInfo<v8::Value>& args) {
JavaScriptFrameIterator it(CcTest::i_isolate());
JavaScriptFrame* frame = it.frame();
@@ -103,7 +101,6 @@ TEST(DeoptExceptionHandlerFinally) {
#endif
}
#endif
TEST(DeoptTrivial) {
FLAG_allow_natives_syntax = true;


@@ -6,8 +6,6 @@
#include "test/cctest/compiler/function-tester.h"
#if V8_TURBOFAN_TARGET
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -574,5 +572,3 @@ TEST(InlineMutuallyRecursive) {
InstallAssertInlineCountHelper(CcTest::isolate());
T.CheckCall(T.Val(42), T.Val(1));
}
#endif // V8_TURBOFAN_TARGET


@@ -13,8 +13,6 @@
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
#if V8_TURBOFAN_TARGET
using namespace v8::base;
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -89,7 +87,7 @@ TEST(CodeGenNop) {
}
#if V8_TURBOFAN_BACKEND_64
#if V8_TARGET_ARCH_64_BIT
static Node* Int64Input(RawMachineAssemblerTester<int64_t>* m, int index) {
switch (index) {
case 0:
@@ -143,7 +141,7 @@ TEST(CodeGenInt64Binop) {
// TODO(titzer): add tests that run 64-bit integer operations.
#endif // V8_TURBOFAN_BACKEND_64
#endif // V8_TARGET_ARCH_64_BIT
TEST(RunGoto) {
@@ -5281,5 +5279,3 @@ TEST(RunCallCFunction8) {
}
#endif // USE_SIMULATOR
#endif // V8_TURBOFAN_TARGET


@@ -14,8 +14,6 @@
#include "src/parser.h"
#include "test/cctest/compiler/function-tester.h"
#if V8_TURBOFAN_TARGET
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -146,5 +144,3 @@ TEST(RunStringAddTFStub) {
Handle<Object> result = ft.Call(leftArg, rightArg).ToHandleChecked();
CHECK(String::Equals(ft.Val("linksrechts"), Handle<String>::cast(result)));
}
#endif // V8_TURBOFAN_TARGET


@@ -110,14 +110,12 @@ TEST(RunNumberToInt32_float64) {
t.LowerAllNodes();
t.GenerateCode();
if (Pipeline::SupportedTarget()) {
FOR_FLOAT64_INPUTS(i) {
input = *i;
int32_t expected = DoubleToInt32(*i);
t.Call();
CHECK_EQ(expected, result);
}
}
}
@@ -139,7 +137,6 @@ TEST(RunNumberToUint32_float64) {
t.LowerAllNodes();
t.GenerateCode();
if (Pipeline::SupportedTarget()) {
FOR_FLOAT64_INPUTS(i) {
input = *i;
uint32_t expected = DoubleToUint32(*i);
@@ -147,7 +144,6 @@ TEST(RunNumberToUint32_float64) {
CHECK_EQ(static_cast<int32_t>(expected), static_cast<int32_t>(result));
}
}
}
// Create a simple JSObject with a unique map.
@@ -168,12 +164,10 @@ TEST(RunLoadMap) {
t.LowerAllNodes();
t.GenerateCode();
if (Pipeline::SupportedTarget()) {
Handle<JSObject> src = TestObject();
Handle<Map> src_map(src->map());
Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
CHECK_EQ(*src_map, result);
}
Handle<JSObject> src = TestObject();
Handle<Map> src_map(src->map());
Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
CHECK_EQ(*src_map, result);
}
@@ -186,7 +180,6 @@ TEST(RunStoreMap) {
t.LowerAllNodes();
t.GenerateCode();
if (Pipeline::SupportedTarget()) {
Handle<JSObject> src = TestObject();
Handle<Map> src_map(src->map());
Handle<JSObject> dst = TestObject();
@@ -194,7 +187,6 @@ TEST(RunStoreMap) {
t.Call(*src_map, *dst); // TODO(titzer): raw pointers in call
CHECK(*src_map == dst->map());
}
}
TEST(RunLoadProperties) {
@@ -206,12 +198,10 @@ TEST(RunLoadProperties) {
t.LowerAllNodes();
t.GenerateCode();
if (Pipeline::SupportedTarget()) {
Handle<JSObject> src = TestObject();
Handle<FixedArray> src_props(src->properties());
Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
CHECK_EQ(*src_props, result);
}
}
@@ -225,7 +215,6 @@ TEST(RunLoadStoreMap) {
t.LowerAllNodes();
t.GenerateCode();
if (Pipeline::SupportedTarget()) {
Handle<JSObject> src = TestObject();
Handle<Map> src_map(src->map());
Handle<JSObject> dst = TestObject();
@@ -234,7 +223,6 @@ TEST(RunLoadStoreMap) {
CHECK(result->IsMap());
CHECK_EQ(*src_map, result);
CHECK(*src_map == dst->map());
}
}
@@ -248,7 +236,6 @@ TEST(RunLoadStoreFixedArrayIndex) {
t.LowerAllNodes();
t.GenerateCode();
if (Pipeline::SupportedTarget()) {
Handle<FixedArray> array = t.factory()->NewFixedArray(2);
Handle<JSObject> src = TestObject();
Handle<JSObject> dst = TestObject();
@@ -258,7 +245,6 @@ TEST(RunLoadStoreFixedArrayIndex) {
CHECK_EQ(*src, result);
CHECK_EQ(*src, array->get(0));
CHECK_EQ(*src, array->get(1));
}
}
@@ -279,7 +265,6 @@ TEST(RunLoadStoreArrayBuffer) {
t.LowerAllNodes();
t.GenerateCode();
if (Pipeline::SupportedTarget()) {
Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length);
uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
@@ -296,7 +281,6 @@ TEST(RunLoadStoreArrayBuffer) {
CHECK_EQ(data[i], expected);
}
}
}
TEST(RunLoadFieldFromUntaggedBase) {
@@ -312,8 +296,6 @@ TEST(RunLoadFieldFromUntaggedBase) {
t.Return(load);
t.LowerAllNodes();
if (!Pipeline::SupportedTarget()) continue;
for (int j = -5; j <= 5; j++) {
Smi* expected = Smi::FromInt(j);
smis[i] = expected;
@@ -337,8 +319,6 @@ TEST(RunStoreFieldToUntaggedBase) {
t.Return(p0);
t.LowerAllNodes();
if (!Pipeline::SupportedTarget()) continue;
for (int j = -5; j <= 5; j++) {
Smi* expected = Smi::FromInt(j);
smis[i] = Smi::FromInt(-100);
@@ -365,8 +345,6 @@ TEST(RunLoadElementFromUntaggedBase) {
t.Return(load);
t.LowerAllNodes();
if (!Pipeline::SupportedTarget()) continue;
for (int k = -5; k <= 5; k++) {
Smi* expected = Smi::FromInt(k);
smis[i + j] = expected;
@@ -394,8 +372,6 @@ TEST(RunStoreElementFromUntaggedBase) {
t.Return(p0);
t.LowerAllNodes();
if (!Pipeline::SupportedTarget()) continue;
for (int k = -5; k <= 5; k++) {
Smi* expected = Smi::FromInt(k);
smis[i + j] = Smi::FromInt(-100);
@@ -462,10 +438,8 @@ class AccessTester : public HandleAndZoneScope {
t.LowerAllNodes();
t.GenerateCode();
if (Pipeline::SupportedTarget()) {
Object* result = t.Call();
CHECK_EQ(t.isolate()->heap()->true_value(), result);
}
}
// Create and run code that copies the field in either {untagged_array}
@@ -484,10 +458,8 @@ class AccessTester : public HandleAndZoneScope {
t.LowerAllNodes();
t.GenerateCode();
if (Pipeline::SupportedTarget()) {
Object* result = t.Call();
CHECK_EQ(t.isolate()->heap()->true_value(), result);
}
}
// Create and run code that copies the elements from {this} to {that}.
@@ -525,10 +497,8 @@ class AccessTester : public HandleAndZoneScope {
t.LowerAllNodes();
t.GenerateCode();
if (Pipeline::SupportedTarget()) {
Object* result = t.Call();
CHECK_EQ(t.isolate()->heap()->true_value(), result);
}
#endif
}
@@ -596,13 +566,11 @@ static void RunAccessTest(MachineType rep, E* original_elements, size_t num) {
} else {
a.RunCopyElement(i, i + 1); // Test element read/write.
}
if (Pipeline::SupportedTarget()) { // verify.
for (int j = 0; j < num_elements; j++) {
E expect =
j == (i + 1) ? original_elements[i] : original_elements[j];
CHECK_EQ(expect, a.GetElement(j));
}
}
}
}
}
@@ -612,10 +580,8 @@ static void RunAccessTest(MachineType rep, E* original_elements, size_t num) {
AccessTester<E> a(tf == 1, rep, original_elements, num);
AccessTester<E> b(tt == 1, rep, original_elements, num);
a.RunCopyElements(&b);
if (Pipeline::SupportedTarget()) { // verify.
for (int i = 0; i < num_elements; i++) {
CHECK_EQ(a.GetElement(i), b.GetElement(i));
}
}
}
}
@@ -668,7 +634,7 @@ TEST(RunAccessTests_Smi) {
RunAccessTest<Smi*>(kMachAnyTagged, data, arraysize(data));
}
#if V8_TURBOFAN_TARGET
TEST(RunAllocate) {
PretenureFlag flag[] = {NOT_TENURED, TENURED};
@@ -684,15 +650,13 @@ TEST(RunAllocate) {
t.LowerAllNodes();
t.GenerateCode();
if (Pipeline::SupportedTarget()) {
HeapObject* result = t.CallWithPotentialGC<HeapObject>();
CHECK(t.heap()->new_space()->Contains(result) || flag[i] == TENURED);
CHECK(t.heap()->old_space()->Contains(result) || flag[i] == NOT_TENURED);
CHECK(result->IsHeapNumber());
}
}
}
#endif
// Fills in most of the nodes of the graph in order to make tests shorter.
class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
@@ -1264,7 +1228,6 @@ TEST(LowerReferenceEqual_to_wordeq) {
TEST(LowerStringOps_to_call_and_compare) {
if (Pipeline::SupportedTarget()) {
// These tests need linkage for the calls.
TestingGraph t(Type::String(), Type::String());
IrOpcode::Value compare_eq =
@@ -1277,7 +1240,6 @@ TEST(LowerStringOps_to_call_and_compare) {
t.CheckLoweringBinop(compare_lt, t.simplified()->StringLessThan());
t.CheckLoweringBinop(compare_le, t.simplified()->StringLessThanOrEqual());
}
}
void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
@@ -1708,7 +1670,6 @@ TEST(RunNumberDivide_minus_1_TruncatingToInt32) {
Node* trunc = t.NumberToInt32(div);
t.Return(trunc);
if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
@@ -1716,7 +1677,6 @@ TEST(RunNumberDivide_minus_1_TruncatingToInt32) {
int32_t x = 0 - *i;
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
}
}
}
@@ -1747,7 +1707,6 @@ TEST(RunNumberMultiply_TruncatingToInt32) {
Node* trunc = t.NumberToInt32(mul);
t.Return(trunc);
if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
@@ -1756,7 +1715,6 @@ TEST(RunNumberMultiply_TruncatingToInt32) {
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
}
}
}
}
@@ -1771,14 +1729,12 @@ TEST(RunNumberMultiply_TruncatingToUint32) {
Node* trunc = t.NumberToUint32(mul);
t.Return(trunc);
if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
FOR_UINT32_INPUTS(i) {
uint32_t x = DoubleToUint32(static_cast<double>(*i) * k);
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
}
}
}
}
@@ -1791,7 +1747,6 @@ TEST(RunNumberDivide_2_TruncatingToUint32) {
Node* trunc = t.NumberToUint32(div);
t.Return(trunc);
if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
@@ -1799,7 +1754,6 @@ TEST(RunNumberDivide_2_TruncatingToUint32) {
uint32_t x = DoubleToUint32(static_cast<double>(*i / 2.0));
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
}
}
}
@@ -1853,7 +1807,6 @@ TEST(RunNumberDivide_TruncatingToInt32) {
Node* trunc = t.NumberToInt32(div);
t.Return(trunc);
if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
@@ -1861,7 +1814,6 @@ TEST(RunNumberDivide_TruncatingToInt32) {
if (*i == INT_MAX) continue; // exclude max int.
int32_t x = DoubleToInt32(static_cast<double>(*i) / k);
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
}
}
}
}
@@ -1894,14 +1846,12 @@ TEST(RunNumberDivide_TruncatingToUint32) {
Node* trunc = t.NumberToUint32(div);
t.Return(trunc);
if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
FOR_UINT32_INPUTS(i) {
uint32_t x = *i / k;
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
}
}
}
}
@@ -1972,7 +1922,6 @@ TEST(RunNumberModulus_TruncatingToInt32) {
Node* trunc = t.NumberToInt32(mod);
t.Return(trunc);
if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
@@ -1980,7 +1929,6 @@ TEST(RunNumberModulus_TruncatingToInt32) {
if (*i == INT_MAX) continue; // exclude max int.
int32_t x = DoubleToInt32(std::fmod(static_cast<double>(*i), k));
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
}
}
}
}
@@ -2014,14 +1962,12 @@ TEST(RunNumberModulus_TruncatingToUint32) {
Node* trunc = t.NumberToUint32(mod);
t.Return(trunc);
if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
FOR_UINT32_INPUTS(i) {
uint32_t x = *i % k;
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
}
}
}
}


@@ -21178,11 +21178,7 @@ TEST(TurboAsmDisablesNeuter) {
v8::V8::Initialize();
v8::HandleScope scope(CcTest::isolate());
LocalContext context;
#if V8_TURBOFAN_TARGET
bool should_be_neuterable = !i::FLAG_turbo_asm;
#else
bool should_be_neuterable = true;
#endif
const char* load =
"function Module(stdlib, foreign, heap) {"
" 'use asm';"


@@ -14,41 +14,25 @@ namespace compiler {
// The TARGET_TEST(Case, Name) macro works just like
// TEST(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
#if V8_TURBOFAN_TARGET
#define TARGET_TEST(Case, Name) TEST(Case, Name)
#else
#define TARGET_TEST(Case, Name) TEST(Case, DISABLED_##Name)
#endif
// The TARGET_TEST_F(Case, Name) macro works just like
// TEST_F(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
#if V8_TURBOFAN_TARGET
#define TARGET_TEST_F(Case, Name) TEST_F(Case, Name)
#else
#define TARGET_TEST_F(Case, Name) TEST_F(Case, DISABLED_##Name)
#endif
// The TARGET_TEST_P(Case, Name) macro works just like
// TEST_P(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
#if V8_TURBOFAN_TARGET
#define TARGET_TEST_P(Case, Name) TEST_P(Case, Name)
#else
#define TARGET_TEST_P(Case, Name) TEST_P(Case, DISABLED_##Name)
#endif
// The TARGET_TYPED_TEST(Case, Name) macro works just like
// TYPED_TEST(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
#if V8_TURBOFAN_TARGET
#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, Name)
#else
#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, DISABLED_##Name)
#endif
} // namespace compiler
} // namespace internal
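
For illustration (not part of the diff): the dropped TARGET_TEST* fallbacks relied on GoogleTest's DISABLED_ name prefix, which compiles and registers a test but skips it by default. A hypothetical expansion, with an invented suite and test name:

// TARGET_TEST(InstructionSelectorTest, SelectsAdd) expanded to one of:
TEST(InstructionSelectorTest, SelectsAdd)           // supported TurboFan target
TEST(InstructionSelectorTest, DISABLED_SelectsAdd)  // unsupported target: built and registered, but skipped

// With TurboFan required on every target, only the first form was ever
// reachable, so the #if/#else branches above collapse to the plain TEST,
// TEST_F, TEST_P, and TYPED_TEST forms.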


@@ -1004,7 +1004,6 @@ TEST_F(JSTypedLoweringTest, JSLoadDynamicContext) {
}
}
#if V8_TURBOFAN_TARGET
// -----------------------------------------------------------------------------
// JSAdd
@@ -1104,8 +1103,6 @@ TEST_F(JSTypedLoweringTest, JSCreateLiteralObject) {
input0, input1, input2, _, context, frame_state, effect, control));
}
#endif // V8_TURBOFAN_TARGET
// -----------------------------------------------------------------------------
// JSCreateWithContext