2014-02-17 15:09:46 +00:00
|
|
|
// Copyright 2013 the V8 project authors. All rights reserved.
|
|
|
|
// Redistribution and use in source and binary forms, with or without
|
|
|
|
// modification, are permitted provided that the following conditions are
|
|
|
|
// met:
|
|
|
|
//
|
|
|
|
//     * Redistributions of source code must retain the above copyright
|
|
|
|
// notice, this list of conditions and the following disclaimer.
|
|
|
|
//     * Redistributions in binary form must reproduce the above
|
|
|
|
// copyright notice, this list of conditions and the following
|
|
|
|
// disclaimer in the documentation and/or other materials provided
|
|
|
|
// with the distribution.
|
|
|
|
// * Neither the name of Google Inc. nor the names of its
|
|
|
|
// contributors may be used to endorse or promote products derived
|
|
|
|
// from this software without specific prior written permission.
|
|
|
|
//
|
|
|
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
|
|
|
|
#include <stdlib.h>
|
|
|
|
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "src/v8.h"
|
2014-02-17 15:09:46 +00:00
|
|
|
|
2017-03-15 11:38:48 +00:00
|
|
|
#include "src/arm64/macro-assembler-arm64-inl.h"
|
2014-06-30 13:25:46 +00:00
|
|
|
#include "src/base/platform/platform.h"
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "src/code-stubs.h"
|
2018-04-09 19:11:22 +00:00
|
|
|
#include "src/heap/factory.h"
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "src/macro-assembler.h"
|
|
|
|
#include "src/simulator.h"
|
|
|
|
#include "test/cctest/cctest.h"
|
|
|
|
#include "test/cctest/test-code-stubs.h"
|
Reland "[turboassembler] Introduce hard-abort mode"
This is a reland of a462a7854a081f4f34bb4c112ee33f3d69efa309
Original change's description:
> [turboassembler] Introduce hard-abort mode
>
> For checks and assertions (mostly for debug code, like stack alignment
> or zero extension), we had two modes: Emit a call to the {Abort}
> runtime function (the default), and emit a debug break (used for
> testing, enabled via --trap-on-abort).
> In wasm, where we cannot just call a runtime function because code must
> be isolate independent, we always used the trap-on-abort behaviour.
> This causes problems for our fuzzers, which do not catch SIGTRAP, and
> hence do not detect debug code failures.
>
> This CL introduces a third mode ("hard abort"), which calls a C
> function via {ExternalReference}. The C function still outputs the
> abort reason, but does not print the stack trace. It then aborts via
> "OS::Abort", just like the runtime function.
> This will allow fuzzers to detect the crash and even find a nice error
> message.
>
> Even though this looks like a lot of code churn, it is actually not.
> Most added lines are new tests, and other changes are minimal.
>
> R=mstarzinger@chromium.org
>
> Bug: chromium:863799
> Change-Id: I77c58ff72db552d49014614436259ccfb49ba87b
> Reviewed-on: https://chromium-review.googlesource.com/1142163
> Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#54592}
Bug: chromium:863799
Change-Id: I7729a47b4823a982a8e201df36520aa2b6ef5326
Reviewed-on: https://chromium-review.googlesource.com/1146100
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54656}
2018-07-24 14:12:47 +00:00
|
|
|
#include "test/common/assembler-tester.h"
|
2014-02-17 15:09:46 +00:00
|
|
|
|
2017-08-31 12:34:55 +00:00
|
|
|
namespace v8 {
|
|
|
|
namespace internal {
|
2014-02-17 15:09:46 +00:00
|
|
|
|
|
|
|
#define __ masm.
|
|
|
|
|
|
|
|
// Builds a trampoline around the DoubleToI builtin so it can be exercised as
// a plain ConvertDToIFunc (double in d0, int32_t result in x0). The
// trampoline:
//  - saves the callee-saved registers and every allocatable general register,
//  - pushes the d0 argument (twice, to keep sp 16-byte aligned),
//  - calls the DoubleToI code, which leaves its result on top of the stack,
//  - reads the result into |destination_reg|,
//  - asserts (in debug code) that no saved register was clobbered,
//  - moves the result to x0 and restores the callee-saved registers.
// Returns a pointer to the generated code, cast to ConvertDToIFunc.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
                                              Register destination_reg) {
  HandleScope handles(isolate);

  // Allocate an executable buffer large enough for the trampoline.
  size_t allocated;
  byte* buffer =
      AllocateAssemblerBuffer(&allocated, 4 * Assembler::kMinimalBufferSize);
  MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
                      v8::internal::CodeObjectRequired::kYes);

  Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
  Address start = code->InstructionStart();

  __ PushCalleeSavedRegisters();

  MacroAssembler::PushPopQueue queue(&masm);

  // Save registers to make sure they don't get clobbered. Every allocatable
  // general register is queued so it can be checked after the call.
  for (int reg_num = 0; reg_num < Register::kNumRegisters; ++reg_num) {
    if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
      queue.Queue(Register::from_code(reg_num));
    }
  }

  // Push the double argument. We push a second copy to maintain sp alignment.
  queue.Queue(d0);
  queue.Queue(d0);

  queue.PushQueued();

  // Call through to the actual stub.
  __ IndirectCall(start, RelocInfo::CODE_TARGET);
  // The stub leaves its result on top of the stack.
  __ Peek(destination_reg, 0);

  // Drop both copies of the double argument.
  __ Drop(2, kDoubleSize);

  // Make sure no registers have been unexpectedly clobbered: pop each saved
  // register and compare it against its live value.
  {
    const RegisterConfiguration* config(RegisterConfiguration::Default());
    int allocatable_register_count =
        config->num_allocatable_general_registers();
    UseScratchRegisterScope temps(&masm);
    Register temp0 = temps.AcquireX();
    Register temp1 = temps.AcquireX();
    // Pop in pairs, walking the allocatable codes from highest to lowest.
    for (int i = allocatable_register_count - 1; i > 0; i -= 2) {
      int code0 = config->GetAllocatableGeneralCode(i);
      int code1 = config->GetAllocatableGeneralCode(i - 1);
      Register reg0 = Register::from_code(code0);
      Register reg1 = Register::from_code(code1);
      __ Pop(temp0, temp1);
      if (!reg0.is(destination_reg)) {
        __ Cmp(reg0, temp0);
        __ Assert(eq, AbortReason::kRegisterWasClobbered);
      }
      if (!reg1.is(destination_reg)) {
        __ Cmp(reg1, temp1);
        __ Assert(eq, AbortReason::kRegisterWasClobbered);
      }
    }

    // With an odd register count, one register remains; pop it alone and
    // discard the alignment slot into xzr.
    if (allocatable_register_count % 2 != 0) {
      int code = config->GetAllocatableGeneralCode(0);
      Register reg = Register::from_code(code);
      __ Pop(temp0, xzr);
      if (!reg.is(destination_reg)) {
        __ Cmp(reg, temp0);
        __ Assert(eq, AbortReason::kRegisterWasClobbered);
      }
    }
  }

  // The C++ caller expects the result in x0.
  if (!destination_reg.is(x0))
    __ Mov(x0, destination_reg);

  // Restore callee save registers.
  __ PopCalleeSavedRegisters();

  __ Ret();

  CodeDesc desc;
  masm.GetCode(isolate, &desc);
  MakeAssemblerBufferExecutable(buffer, allocated);
  Assembler::FlushICache(buffer, allocated);
  return (reinterpret_cast<ConvertDToIFunc>(
      reinterpret_cast<intptr_t>(buffer)));
}
|
|
|
|
|
|
|
|
#undef __
|
|
|
|
|
|
|
|
|
|
|
|
// Extracts the v8-internal Isolate from the v8::Isolate that owns |context|.
static Isolate* GetIsolateFrom(LocalContext* context) {
  v8::Isolate* external_isolate = (*context)->GetIsolate();
  return reinterpret_cast<Isolate*>(external_isolate);
}
|
|
|
|
|
|
|
|
|
|
|
|
// Invokes the generated conversion code with |from|, either through the
// simulator (simulator builds) or as a direct native call.
int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
                                    double from) {
#ifdef USE_SIMULATOR
  Simulator* simulator = Simulator::current(CcTest::i_isolate());
  return simulator->Call<int32_t>(FUNCTION_ADDR(func), from);
#else
  return func(from);
#endif
}
|
|
|
|
|
|
|
|
|
|
|
|
TEST(ConvertDToI) {
  CcTest::InitializeVM();
  LocalContext context;
  Isolate* isolate = GetIsolateFrom(&context);
  HandleScope scope(isolate);

#if DEBUG
  // Verify that the tests actually work with the C version. In the release
  // code, the compiler optimizes it away because it's all constant, but does
  // it wrong, triggering an assert on gcc.
  RunAllTruncationTests(&ConvertDToICVersion);
#endif

  Register dest_registers[] = {x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11,
                               x12, x13, x14, x15, x18, x19, x20, x21, x22, x23,
                               x24};

  // Run the full truncation test suite once per candidate destination
  // register.
  for (Register dest_reg : dest_registers) {
    RunAllTruncationTests(RunGeneratedCodeCallWrapper,
                          MakeConvertDToIFuncTrampoline(isolate, dest_reg));
  }
}
|
2017-08-31 12:34:55 +00:00
|
|
|
|
|
|
|
} // namespace internal
|
|
|
|
} // namespace v8
|