// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <iostream>  // NOLINT(readability/streams)

#include "src/arm/simulator-arm.h"
#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/disassembler.h"
#include "src/double.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
#include "src/v8.h"

#include "test/cctest/cctest.h"

namespace v8 {
namespace internal {
namespace test_assembler_arm {

using base::RandomNumberGenerator;

// Define these function prototypes to match JSEntryFunction in execution.cc.
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
typedef Object* (*F5)(uint32_t p0, void* p1, void* p2, int p3, int p4);

#define __ assm.

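// Test a minimal stub that adds its first two arguments and returns the sum.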
TEST(0) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);

  __ add(r0, r0, Operand(r1));
  __ mov(pc, Operand(lr));

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
  Handle<Code> code =
      isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
  OFStream os(stdout);
  code->Print(os);
#endif
  F2 f = FUNCTION_CAST<F2>(code->entry());
  int res =
      reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 3, 4, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(7, res);
}

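// Test a loop that sums the integers from 1 to 100 (expected result: 5050).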
TEST(1) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);
  Label L, C;

  __ mov(r1, Operand(r0));
  __ mov(r0, Operand::Zero());
  __ b(&C);

  __ bind(&L);
  __ add(r0, r0, Operand(r1));
  __ sub(r1, r1, Operand(1));

  __ bind(&C);
  __ teq(r1, Operand::Zero());
  __ b(ne, &L);
  __ mov(pc, Operand(lr));

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
  Handle<Code> code =
      isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
  OFStream os(stdout);
  code->Print(os);
#endif
  F1 f = FUNCTION_CAST<F1>(code->entry());
  int res =
      reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 100, 0, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(5050, res);
}

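// Test a factorial loop (10! == 3628800), followed by unexecuted code that
// only exercises relocation info and immediate operand encodings.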
TEST(2) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);
  Label L, C;

  __ mov(r1, Operand(r0));
  __ mov(r0, Operand(1));
  __ b(&C);

  __ bind(&L);
  __ mul(r0, r1, r0);
  __ sub(r1, r1, Operand(1));

  __ bind(&C);
  __ teq(r1, Operand::Zero());
  __ b(ne, &L);
  __ mov(pc, Operand(lr));

  // some relocated stuff here, not executed
  __ RecordComment("dead code, just testing relocations");
  __ mov(r0, Operand(isolate->factory()->true_value()));
  __ RecordComment("dead code, just testing immediate operands");
  __ mov(r0, Operand(-1));
  __ mov(r0, Operand(0xFF000000));
  __ mov(r0, Operand(0xF0F0F0F0));
  __ mov(r0, Operand(0xFFF0FFFF));

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
  Handle<Code> code =
      isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
  OFStream os(stdout);
  code->Print(os);
#endif
  F1 f = FUNCTION_CAST<F1>(code->entry());
  int res =
      reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 10, 0, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(3628800, res);
}

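// Test word, byte and halfword loads and stores (ldr/str, ldrsb/strb,
// ldrsh/strh) against a struct passed by pointer.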
TEST(3) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    int i;
    char c;
    int16_t s;
  } T;
  T t;

  Assembler assm(isolate, NULL, 0);
  Label L, C;

  __ mov(ip, Operand(sp));
  __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
  __ sub(fp, ip, Operand(4));
  __ mov(r4, Operand(r0));
  __ ldr(r0, MemOperand(r4, offsetof(T, i)));
  __ mov(r2, Operand(r0, ASR, 1));
  __ str(r2, MemOperand(r4, offsetof(T, i)));
  __ ldrsb(r2, MemOperand(r4, offsetof(T, c)));
  __ add(r0, r2, Operand(r0));
  __ mov(r2, Operand(r2, LSL, 2));
  __ strb(r2, MemOperand(r4, offsetof(T, c)));
  __ ldrsh(r2, MemOperand(r4, offsetof(T, s)));
  __ add(r0, r2, Operand(r0));
  __ mov(r2, Operand(r2, ASR, 3));
  __ strh(r2, MemOperand(r4, offsetof(T, s)));
  __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
  Handle<Code> code =
      isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
  OFStream os(stdout);
  code->Print(os);
#endif
  F3 f = FUNCTION_CAST<F3>(code->entry());
  t.i = 100000;
  t.c = 10;
  t.s = 1000;
  int res =
      reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(101010, res);
  CHECK_EQ(100000/2, t.i);
  CHECK_EQ(10*4, t.c);
  CHECK_EQ(1000/8, t.s);
}

TEST(4) {
  // Test the VFP floating point instructions.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    double a;
    double b;
    double c;
    double d;
    double e;
    double f;
    double g;
    double h;
    int i;
    double j;
    double m;
    double n;
    float o;
    float p;
    float x;
    float y;
  } T;
  T t;

  // Create a function that accepts &t, and loads, manipulates, and stores
  // the doubles and floats.
  Assembler assm(isolate, NULL, 0);
  Label L, C;

  if (CpuFeatures::IsSupported(VFPv3)) {
    CpuFeatureScope scope(&assm, VFPv3);

    __ mov(ip, Operand(sp));
    __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
    __ sub(fp, ip, Operand(4));

    __ mov(r4, Operand(r0));
    __ vldr(d6, r4, offsetof(T, a));
    __ vldr(d7, r4, offsetof(T, b));
    __ vadd(d5, d6, d7);
    __ vstr(d5, r4, offsetof(T, c));

    __ vmla(d5, d6, d7);
    __ vmls(d5, d5, d6);

    __ vmov(r2, r3, d5);
    __ vmov(d4, r2, r3);
    __ vstr(d4, r4, offsetof(T, b));

    // Load t.x and t.y, switch values, and store back to the struct.
    __ vldr(s0, r4, offsetof(T, x));
    __ vldr(s1, r4, offsetof(T, y));
    __ vmov(s2, s0);
    __ vmov(s0, s1);
    __ vmov(s1, s2);
    __ vstr(s0, r4, offsetof(T, x));
    __ vstr(s1, r4, offsetof(T, y));

    // Move a literal into a register that can be encoded in the instruction.
    __ vmov(d4, Double(1.0));
    __ vstr(d4, r4, offsetof(T, e));

    // Move a literal into a register that requires 64 bits to encode.
    // 0x3ff0000010000000 = 1.000000059604644775390625
    __ vmov(d4, Double(1.000000059604644775390625));
    __ vstr(d4, r4, offsetof(T, d));

    // Convert from floating point to integer.
    __ vmov(d4, Double(2.0));
    __ vcvt_s32_f64(s1, d4);
    __ vstr(s1, r4, offsetof(T, i));

    // Convert from integer to floating point.
    __ mov(lr, Operand(42));
    __ vmov(s1, lr);
    __ vcvt_f64_s32(d4, s1);
    __ vstr(d4, r4, offsetof(T, f));

    // Convert from fixed point to floating point.
    __ mov(lr, Operand(2468));
    __ vmov(s8, lr);
    __ vcvt_f64_s32(d4, 2);
    __ vstr(d4, r4, offsetof(T, j));

    // Test vabs.
    __ vldr(d1, r4, offsetof(T, g));
    __ vabs(d0, d1);
    __ vstr(d0, r4, offsetof(T, g));
    __ vldr(d2, r4, offsetof(T, h));
    __ vabs(d0, d2);
    __ vstr(d0, r4, offsetof(T, h));

    // Test vneg.
    __ vldr(d1, r4, offsetof(T, m));
    __ vneg(d0, d1);
    __ vstr(d0, r4, offsetof(T, m));
    __ vldr(d1, r4, offsetof(T, n));
    __ vneg(d0, d1);
    __ vstr(d0, r4, offsetof(T, n));

    // Test vmov for single-precision immediates.
    __ vmov(s0, Float32(0.25f));
    __ vstr(s0, r4, offsetof(T, o));
    __ vmov(s0, Float32(-16.0f));
    __ vstr(s0, r4, offsetof(T, p));

    __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());

    CodeDesc desc;
    assm.GetCode(isolate, &desc);
    Handle<Code> code =
        isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
    OFStream os(stdout);
    code->Print(os);
#endif
    F3 f = FUNCTION_CAST<F3>(code->entry());
    t.a = 1.5;
    t.b = 2.75;
    t.c = 17.17;
    t.d = 0.0;
    t.e = 0.0;
    t.f = 0.0;
    t.g = -2718.2818;
    t.h = 31415926.5;
    t.i = 0;
    t.j = 0;
    t.m = -2718.2818;
    t.n = 123.456;
    t.x = 4.5;
    t.y = 9.0;
    Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
    USE(dummy);
    CHECK_EQ(-16.0f, t.p);
    CHECK_EQ(0.25f, t.o);
    CHECK_EQ(-123.456, t.n);
    CHECK_EQ(2718.2818, t.m);
    CHECK_EQ(2, t.i);
    CHECK_EQ(2718.2818, t.g);
    CHECK_EQ(31415926.5, t.h);
    CHECK_EQ(617.0, t.j);
    CHECK_EQ(42.0, t.f);
    CHECK_EQ(1.0, t.e);
    CHECK_EQ(1.000000059604644775390625, t.d);
    CHECK_EQ(4.25, t.c);
    CHECK_EQ(-4.1875, t.b);
    CHECK_EQ(1.5, t.a);
    CHECK_EQ(4.5f, t.y);
    CHECK_EQ(9.0f, t.x);
  }
}

TEST(5) {
  // Test the ARMv7 bitfield instructions.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);

  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatureScope scope(&assm, ARMv7);
    // On entry, r0 = 0xAAAAAAAA = 0b10..10101010.
    __ ubfx(r0, r0, 1, 12);  // 0b00..010101010101 = 0x555
    __ sbfx(r0, r0, 0, 5);   // 0b11..111111110101 = -11
    __ bfc(r0, 1, 3);        // 0b11..111111110001 = -15
    __ mov(r1, Operand(7));
    __ bfi(r0, r1, 3, 3);    // 0b11..111111111001 = -7
    __ mov(pc, Operand(lr));

    CodeDesc desc;
    assm.GetCode(isolate, &desc);
    Handle<Code> code =
        isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
    OFStream os(stdout);
    code->Print(os);
#endif
    F1 f = FUNCTION_CAST<F1>(code->entry());
    int res = reinterpret_cast<int>(
        CALL_GENERATED_CODE(isolate, f, 0xAAAAAAAA, 0, 0, 0, 0));
    ::printf("f() = %d\n", res);
    CHECK_EQ(-7, res);
  }
}

TEST(6) {
  // Test saturating instructions.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);

  __ usat(r1, 8, Operand(r0));           // Sat 0xFFFF to 0-255 = 0xFF.
  __ usat(r2, 12, Operand(r0, ASR, 9));  // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
  __ usat(r3, 1, Operand(r0, LSL, 16));  // Sat (0xFFFF<<16) to 0-1 = 0x0.
  __ add(r0, r1, Operand(r2));
  __ add(r0, r0, Operand(r3));
  __ mov(pc, Operand(lr));

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
  Handle<Code> code =
      isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
  OFStream os(stdout);
  code->Print(os);
#endif
  F1 f = FUNCTION_CAST<F1>(code->entry());
  int res = reinterpret_cast<int>(
      CALL_GENERATED_CODE(isolate, f, 0xFFFF, 0, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(382, res);
}

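// Helper: assemble a stub that converts |value| under an explicit FPSCR
// rounding mode, then check both the converted result and whether the VFP
// exception state matches |expected_exception|.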
enum VCVTTypes {
  s32_f64,
  u32_f64
};

static void TestRoundingMode(VCVTTypes types,
                             VFPRoundingMode mode,
                             double value,
                             int expected,
                             bool expected_exception = false) {
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);

  Label wrong_exception;

  __ vmrs(r1);
  // Set custom FPSCR.
  __ bic(r2, r1, Operand(kVFPRoundingModeMask | kVFPExceptionMask));
  __ orr(r2, r2, Operand(mode));
  __ vmsr(r2);

  // Load value, convert, and move back result to r0 if everything went well.
  __ vmov(d1, Double(value));
  switch (types) {
    case s32_f64:
      __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
      break;

    case u32_f64:
      __ vcvt_u32_f64(s0, d1, kFPSCRRounding);
      break;

    default:
      UNREACHABLE();
      break;
  }
  // Check for vfp exceptions
  __ vmrs(r2);
  __ tst(r2, Operand(kVFPExceptionMask));
  // Check that we behaved as expected.
  __ b(&wrong_exception, expected_exception ? eq : ne);
  // There was no exception. Retrieve the result and return.
  __ vmov(r0, s0);
  __ mov(pc, Operand(lr));

  // The exception behaviour is not what we expected.
  // Load a special value and return.
  __ bind(&wrong_exception);
  __ mov(r0, Operand(11223344));
  __ mov(pc, Operand(lr));

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
  Handle<Code> code =
      isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
  OFStream os(stdout);
  code->Print(os);
#endif
  F1 f = FUNCTION_CAST<F1>(code->entry());
  int res =
      reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
  ::printf("res = %d\n", res);
  CHECK_EQ(expected, res);
}

TEST(7) {
  CcTest::InitializeVM();
  // Test vfp rounding modes.

  // s32_f64 (double to integer).

  TestRoundingMode(s32_f64, RN, 0, 0);
  TestRoundingMode(s32_f64, RN, 0.5, 0);
  TestRoundingMode(s32_f64, RN, -0.5, 0);
  TestRoundingMode(s32_f64, RN, 1.5, 2);
  TestRoundingMode(s32_f64, RN, -1.5, -2);
  TestRoundingMode(s32_f64, RN, 123.7, 124);
  TestRoundingMode(s32_f64, RN, -123.7, -124);
  TestRoundingMode(s32_f64, RN, 123456.2, 123456);
  TestRoundingMode(s32_f64, RN, -123456.2, -123456);
  TestRoundingMode(s32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
  TestRoundingMode(s32_f64, RN, (kMaxInt + 0.49), kMaxInt);
  TestRoundingMode(s32_f64, RN, (kMaxInt + 1.0), kMaxInt, true);
  TestRoundingMode(s32_f64, RN, (kMaxInt + 0.5), kMaxInt, true);
  TestRoundingMode(s32_f64, RN, static_cast<double>(kMinInt), kMinInt);
  TestRoundingMode(s32_f64, RN, (kMinInt - 0.5), kMinInt);
  TestRoundingMode(s32_f64, RN, (kMinInt - 1.0), kMinInt, true);
  TestRoundingMode(s32_f64, RN, (kMinInt - 0.51), kMinInt, true);

  TestRoundingMode(s32_f64, RM, 0, 0);
  TestRoundingMode(s32_f64, RM, 0.5, 0);
  TestRoundingMode(s32_f64, RM, -0.5, -1);
  TestRoundingMode(s32_f64, RM, 123.7, 123);
  TestRoundingMode(s32_f64, RM, -123.7, -124);
  TestRoundingMode(s32_f64, RM, 123456.2, 123456);
  TestRoundingMode(s32_f64, RM, -123456.2, -123457);
  TestRoundingMode(s32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
  TestRoundingMode(s32_f64, RM, (kMaxInt + 0.5), kMaxInt);
  TestRoundingMode(s32_f64, RM, (kMaxInt + 1.0), kMaxInt, true);
  TestRoundingMode(s32_f64, RM, static_cast<double>(kMinInt), kMinInt);
  TestRoundingMode(s32_f64, RM, (kMinInt - 0.5), kMinInt, true);
  TestRoundingMode(s32_f64, RM, (kMinInt + 0.5), kMinInt);

  TestRoundingMode(s32_f64, RZ, 0, 0);
  TestRoundingMode(s32_f64, RZ, 0.5, 0);
  TestRoundingMode(s32_f64, RZ, -0.5, 0);
  TestRoundingMode(s32_f64, RZ, 123.7, 123);
  TestRoundingMode(s32_f64, RZ, -123.7, -123);
  TestRoundingMode(s32_f64, RZ, 123456.2, 123456);
  TestRoundingMode(s32_f64, RZ, -123456.2, -123456);
  TestRoundingMode(s32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
  TestRoundingMode(s32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
  TestRoundingMode(s32_f64, RZ, (kMaxInt + 1.0), kMaxInt, true);
  TestRoundingMode(s32_f64, RZ, static_cast<double>(kMinInt), kMinInt);
  TestRoundingMode(s32_f64, RZ, (kMinInt - 0.5), kMinInt);
  TestRoundingMode(s32_f64, RZ, (kMinInt - 1.0), kMinInt, true);


  // u32_f64 (double to integer).

  // Negative values.
  TestRoundingMode(u32_f64, RN, -0.5, 0);
  TestRoundingMode(u32_f64, RN, -123456.7, 0, true);
  TestRoundingMode(u32_f64, RN, static_cast<double>(kMinInt), 0, true);
  TestRoundingMode(u32_f64, RN, kMinInt - 1.0, 0, true);

  TestRoundingMode(u32_f64, RM, -0.5, 0, true);
  TestRoundingMode(u32_f64, RM, -123456.7, 0, true);
  TestRoundingMode(u32_f64, RM, static_cast<double>(kMinInt), 0, true);
  TestRoundingMode(u32_f64, RM, kMinInt - 1.0, 0, true);

  TestRoundingMode(u32_f64, RZ, -0.5, 0);
  TestRoundingMode(u32_f64, RZ, -123456.7, 0, true);
  TestRoundingMode(u32_f64, RZ, static_cast<double>(kMinInt), 0, true);
  TestRoundingMode(u32_f64, RZ, kMinInt - 1.0, 0, true);

  // Positive values.
  // kMaxInt is the maximum *signed* integer: 0x7fffffff.
  static const uint32_t kMaxUInt = 0xffffffffu;
  TestRoundingMode(u32_f64, RZ, 0, 0);
  TestRoundingMode(u32_f64, RZ, 0.5, 0);
  TestRoundingMode(u32_f64, RZ, 123.7, 123);
  TestRoundingMode(u32_f64, RZ, 123456.2, 123456);
  TestRoundingMode(u32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
  TestRoundingMode(u32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
  TestRoundingMode(u32_f64, RZ, (kMaxInt + 1.0),
                   static_cast<uint32_t>(kMaxInt) + 1);
  TestRoundingMode(u32_f64, RZ, (kMaxUInt + 0.5), kMaxUInt);
  TestRoundingMode(u32_f64, RZ, (kMaxUInt + 1.0), kMaxUInt, true);

  TestRoundingMode(u32_f64, RM, 0, 0);
  TestRoundingMode(u32_f64, RM, 0.5, 0);
  TestRoundingMode(u32_f64, RM, 123.7, 123);
  TestRoundingMode(u32_f64, RM, 123456.2, 123456);
  TestRoundingMode(u32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
  TestRoundingMode(u32_f64, RM, (kMaxInt + 0.5), kMaxInt);
  TestRoundingMode(u32_f64, RM, (kMaxInt + 1.0),
                   static_cast<uint32_t>(kMaxInt) + 1);
  TestRoundingMode(u32_f64, RM, (kMaxUInt + 0.5), kMaxUInt);
  TestRoundingMode(u32_f64, RM, (kMaxUInt + 1.0), kMaxUInt, true);

  TestRoundingMode(u32_f64, RN, 0, 0);
  TestRoundingMode(u32_f64, RN, 0.5, 0);
  TestRoundingMode(u32_f64, RN, 1.5, 2);
  TestRoundingMode(u32_f64, RN, 123.7, 124);
  TestRoundingMode(u32_f64, RN, 123456.2, 123456);
  TestRoundingMode(u32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
  TestRoundingMode(u32_f64, RN, (kMaxInt + 0.49), kMaxInt);
  TestRoundingMode(u32_f64, RN, (kMaxInt + 0.5),
                   static_cast<uint32_t>(kMaxInt) + 1);
  TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.49), kMaxUInt);
  TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.5), kMaxUInt, true);
  TestRoundingMode(u32_f64, RN, (kMaxUInt + 1.0), kMaxUInt, true);
}

TEST(8) {
  // Test VFP multi load/store with ia_w.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    double a;
    double b;
    double c;
    double d;
    double e;
    double f;
    double g;
    double h;
  } D;
  D d;

  typedef struct {
    float a;
    float b;
    float c;
    float d;
    float e;
    float f;
    float g;
    float h;
  } F;
  F f;

  // Create a function that uses vldm/vstm to move some double and
  // single precision values around in memory.
  Assembler assm(isolate, NULL, 0);

  __ mov(ip, Operand(sp));
  __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
  __ sub(fp, ip, Operand(4));

  __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(D, a))));
  __ vldm(ia_w, r4, d0, d3);
  __ vldm(ia_w, r4, d4, d7);

  __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(D, a))));
  __ vstm(ia_w, r4, d6, d7);
  __ vstm(ia_w, r4, d0, d5);

  __ add(r4, r1, Operand(static_cast<int32_t>(offsetof(F, a))));
  __ vldm(ia_w, r4, s0, s3);
  __ vldm(ia_w, r4, s4, s7);

  __ add(r4, r1, Operand(static_cast<int32_t>(offsetof(F, a))));
  __ vstm(ia_w, r4, s6, s7);
  __ vstm(ia_w, r4, s0, s5);

  __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
  Handle<Code> code =
      isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
  OFStream os(stdout);
  code->Print(os);
#endif
  F4 fn = FUNCTION_CAST<F4>(code->entry());
  d.a = 1.1;
  d.b = 2.2;
  d.c = 3.3;
  d.d = 4.4;
  d.e = 5.5;
  d.f = 6.6;
  d.g = 7.7;
  d.h = 8.8;

  f.a = 1.0;
  f.b = 2.0;
  f.c = 3.0;
  f.d = 4.0;
  f.e = 5.0;
  f.f = 6.0;
  f.g = 7.0;
  f.h = 8.0;

  Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
  USE(dummy);

  CHECK_EQ(7.7, d.a);
  CHECK_EQ(8.8, d.b);
  CHECK_EQ(1.1, d.c);
  CHECK_EQ(2.2, d.d);
  CHECK_EQ(3.3, d.e);
  CHECK_EQ(4.4, d.f);
  CHECK_EQ(5.5, d.g);
  CHECK_EQ(6.6, d.h);

  CHECK_EQ(7.0f, f.a);
  CHECK_EQ(8.0f, f.b);
  CHECK_EQ(1.0f, f.c);
  CHECK_EQ(2.0f, f.d);
  CHECK_EQ(3.0f, f.e);
  CHECK_EQ(4.0f, f.f);
  CHECK_EQ(5.0f, f.g);
  CHECK_EQ(6.0f, f.h);
}

TEST(9) {
  // Test VFP multi load/store with ia.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    double a;
    double b;
    double c;
    double d;
    double e;
    double f;
    double g;
    double h;
  } D;
  D d;

  typedef struct {
    float a;
    float b;
    float c;
    float d;
    float e;
    float f;
    float g;
    float h;
  } F;
  F f;

  // Create a function that uses vldm/vstm to move some double and
  // single precision values around in memory.
  Assembler assm(isolate, NULL, 0);

  __ mov(ip, Operand(sp));
  __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
  __ sub(fp, ip, Operand(4));

  __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(D, a))));
  __ vldm(ia, r4, d0, d3);
  __ add(r4, r4, Operand(4 * 8));
  __ vldm(ia, r4, d4, d7);

  __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(D, a))));
  __ vstm(ia, r4, d6, d7);
  __ add(r4, r4, Operand(2 * 8));
  __ vstm(ia, r4, d0, d5);

  __ add(r4, r1, Operand(static_cast<int32_t>(offsetof(F, a))));
  __ vldm(ia, r4, s0, s3);
  __ add(r4, r4, Operand(4 * 4));
  __ vldm(ia, r4, s4, s7);

  __ add(r4, r1, Operand(static_cast<int32_t>(offsetof(F, a))));
  __ vstm(ia, r4, s6, s7);
  __ add(r4, r4, Operand(2 * 4));
  __ vstm(ia, r4, s0, s5);

  __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
  Handle<Code> code =
      isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
  OFStream os(stdout);
  code->Print(os);
#endif
  F4 fn = FUNCTION_CAST<F4>(code->entry());
  d.a = 1.1;
  d.b = 2.2;
  d.c = 3.3;
  d.d = 4.4;
  d.e = 5.5;
  d.f = 6.6;
  d.g = 7.7;
  d.h = 8.8;

  f.a = 1.0;
  f.b = 2.0;
  f.c = 3.0;
  f.d = 4.0;
  f.e = 5.0;
  f.f = 6.0;
  f.g = 7.0;
  f.h = 8.0;

  Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
  USE(dummy);

  CHECK_EQ(7.7, d.a);
  CHECK_EQ(8.8, d.b);
  CHECK_EQ(1.1, d.c);
  CHECK_EQ(2.2, d.d);
  CHECK_EQ(3.3, d.e);
  CHECK_EQ(4.4, d.f);
  CHECK_EQ(5.5, d.g);
  CHECK_EQ(6.6, d.h);

  CHECK_EQ(7.0f, f.a);
  CHECK_EQ(8.0f, f.b);
  CHECK_EQ(1.0f, f.c);
  CHECK_EQ(2.0f, f.d);
  CHECK_EQ(3.0f, f.e);
  CHECK_EQ(4.0f, f.f);
  CHECK_EQ(5.0f, f.g);
  CHECK_EQ(6.0f, f.h);
}

TEST(10) {
  // Test VFP multi load/store with db_w.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    double a;
    double b;
    double c;
    double d;
    double e;
    double f;
    double g;
    double h;
  } D;
  D d;

  typedef struct {
    float a;
    float b;
    float c;
    float d;
    float e;
    float f;
    float g;
    float h;
  } F;
  F f;

  // Create a function that uses vldm/vstm to move some double and
  // single precision values around in memory.
  Assembler assm(isolate, NULL, 0);

  __ mov(ip, Operand(sp));
  __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
  __ sub(fp, ip, Operand(4));

  __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(D, h)) + 8));
  __ vldm(db_w, r4, d4, d7);
  __ vldm(db_w, r4, d0, d3);

  __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(D, h)) + 8));
  __ vstm(db_w, r4, d0, d5);
  __ vstm(db_w, r4, d6, d7);

  __ add(r4, r1, Operand(static_cast<int32_t>(offsetof(F, h)) + 4));
  __ vldm(db_w, r4, s4, s7);
  __ vldm(db_w, r4, s0, s3);

  __ add(r4, r1, Operand(static_cast<int32_t>(offsetof(F, h)) + 4));
  __ vstm(db_w, r4, s0, s5);
  __ vstm(db_w, r4, s6, s7);

  __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
  Handle<Code> code =
      isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
  OFStream os(stdout);
  code->Print(os);
#endif
  F4 fn = FUNCTION_CAST<F4>(code->entry());
  d.a = 1.1;
  d.b = 2.2;
  d.c = 3.3;
  d.d = 4.4;
  d.e = 5.5;
  d.f = 6.6;
  d.g = 7.7;
  d.h = 8.8;

  f.a = 1.0;
  f.b = 2.0;
  f.c = 3.0;
  f.d = 4.0;
  f.e = 5.0;
  f.f = 6.0;
  f.g = 7.0;
  f.h = 8.0;

  Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
  USE(dummy);

  CHECK_EQ(7.7, d.a);
  CHECK_EQ(8.8, d.b);
  CHECK_EQ(1.1, d.c);
  CHECK_EQ(2.2, d.d);
  CHECK_EQ(3.3, d.e);
  CHECK_EQ(4.4, d.f);
  CHECK_EQ(5.5, d.g);
  CHECK_EQ(6.6, d.h);

  CHECK_EQ(7.0f, f.a);
  CHECK_EQ(8.0f, f.b);
  CHECK_EQ(1.0f, f.c);
  CHECK_EQ(2.0f, f.d);
  CHECK_EQ(3.0f, f.e);
  CHECK_EQ(4.0f, f.f);
  CHECK_EQ(5.0f, f.g);
  CHECK_EQ(6.0f, f.h);
}

TEST(11) {
  // Test instructions using the carry flag.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    int32_t a;
    int32_t b;
    int32_t c;
    int32_t d;
  } I;
  I i;

  i.a = 0xabcd0001;
  i.b = 0xabcd0000;

  Assembler assm(isolate, NULL, 0);

  // Test HeapObject untagging.
  __ ldr(r1, MemOperand(r0, offsetof(I, a)));
  __ mov(r1, Operand(r1, ASR, 1), SetCC);
  __ adc(r1, r1, Operand(r1), LeaveCC, cs);
  __ str(r1, MemOperand(r0, offsetof(I, a)));

  __ ldr(r2, MemOperand(r0, offsetof(I, b)));
  __ mov(r2, Operand(r2, ASR, 1), SetCC);
  __ adc(r2, r2, Operand(r2), LeaveCC, cs);
  __ str(r2, MemOperand(r0, offsetof(I, b)));

  // Test corner cases.
  __ mov(r1, Operand(0xffffffff));
  __ mov(r2, Operand::Zero());
  __ mov(r3, Operand(r1, ASR, 1), SetCC);  // Set the carry.
  __ adc(r3, r1, Operand(r2));
  __ str(r3, MemOperand(r0, offsetof(I, c)));

  __ mov(r1, Operand(0xffffffff));
  __ mov(r2, Operand::Zero());
  __ mov(r3, Operand(r2, ASR, 1), SetCC);  // Unset the carry.
  __ adc(r3, r1, Operand(r2));
  __ str(r3, MemOperand(r0, offsetof(I, d)));

  __ mov(pc, Operand(lr));

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
  Handle<Code> code =
      isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
  OFStream os(stdout);
  code->Print(os);
#endif
  F3 f = FUNCTION_CAST<F3>(code->entry());
  Object* dummy = CALL_GENERATED_CODE(isolate, f, &i, 0, 0, 0, 0);
  USE(dummy);

  CHECK_EQ(static_cast<int32_t>(0xabcd0001), i.a);
  CHECK_EQ(static_cast<int32_t>(0xabcd0000) >> 1, i.b);
  CHECK_EQ(0x00000000, i.c);
  CHECK_EQ(static_cast<int32_t>(0xffffffff), i.d);
}

TEST(12) {
  // Test chaining of label usages within instructions (issue 1644).
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);
  Label target;
  __ b(eq, &target);
  __ b(ne, &target);
  __ bind(&target);
  __ nop();
}

TEST(13) {
  // Test VFP instructions using registers d16-d31.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  if (!CpuFeatures::IsSupported(VFP32DREGS)) {
    return;
  }

  typedef struct {
    double a;
    double b;
    double c;
    double x;
    double y;
    double z;
    double i;
    double j;
    double k;
    uint32_t low;
    uint32_t high;
  } T;
  T t;

  // Create a function that accepts &t, and loads, manipulates, and stores
  // the doubles and floats.
  Assembler assm(isolate, NULL, 0);
  Label L, C;

  if (CpuFeatures::IsSupported(VFPv3)) {
    CpuFeatureScope scope(&assm, VFPv3);

    __ stm(db_w, sp, r4.bit() | lr.bit());

    // Load a, b, c into d16, d17, d18.
    __ mov(r4, Operand(r0));
    __ vldr(d16, r4, offsetof(T, a));
    __ vldr(d17, r4, offsetof(T, b));
    __ vldr(d18, r4, offsetof(T, c));

    __ vneg(d25, d16);
    __ vadd(d25, d25, d17);
    __ vsub(d25, d25, d18);
    __ vmul(d25, d25, d25);
    __ vdiv(d25, d25, d18);

    __ vmov(d16, d25);
    __ vsqrt(d17, d25);
    __ vneg(d17, d17);
    __ vabs(d17, d17);
    __ vmla(d18, d16, d17);

    // Store d16, d17, d18 into a, b, c.
    __ mov(r4, Operand(r0));
    __ vstr(d16, r4, offsetof(T, a));
    __ vstr(d17, r4, offsetof(T, b));
    __ vstr(d18, r4, offsetof(T, c));

    // Load x, y, z into d29-d31.
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, x))));
    __ vldm(ia_w, r4, d29, d31);

    // Swap d29 and d30 via r registers.
    __ vmov(r1, r2, d29);
    __ vmov(d29, d30);
    __ vmov(d30, r1, r2);

    // Convert to and from integer.
    __ vcvt_s32_f64(s1, d31);
    __ vcvt_f64_u32(d31, s1);

    // Store d29-d31 into x, y, z.
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, x))));
    __ vstm(ia_w, r4, d29, d31);

    // Move constants into d20, d21, d22 and store into i, j, k.
    __ vmov(d20, Double(14.7610017472335499));
    __ vmov(d21, Double(16.0));
    __ mov(r1, Operand(372106121));
    __ mov(r2, Operand(1079146608));
    __ vmov(d22, VmovIndexLo, r1);
    __ vmov(d22, VmovIndexHi, r2);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, i))));
    __ vstm(ia_w, r4, d20, d22);
    // Move d22 into low and high.
    __ vmov(r4, VmovIndexLo, d22);
    __ str(r4, MemOperand(r0, offsetof(T, low)));
    __ vmov(r4, VmovIndexHi, d22);
    __ str(r4, MemOperand(r0, offsetof(T, high)));

    __ ldm(ia_w, sp, r4.bit() | pc.bit());

    CodeDesc desc;
    assm.GetCode(isolate, &desc);
    Handle<Code> code =
        isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
    OFStream os(stdout);
    code->Print(os);
#endif
    F3 f = FUNCTION_CAST<F3>(code->entry());
    t.a = 1.5;
    t.b = 2.75;
    t.c = 17.17;
    t.x = 1.5;
    t.y = 2.75;
    t.z = 17.17;
    Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
    USE(dummy);
    CHECK_EQ(14.7610017472335499, t.a);
    CHECK_EQ(3.84200491244266251, t.b);
    CHECK_EQ(73.8818412254460241, t.c);
    CHECK_EQ(2.75, t.x);
    CHECK_EQ(1.5, t.y);
    CHECK_EQ(17.0, t.z);
    CHECK_EQ(14.7610017472335499, t.i);
    CHECK_EQ(16.0, t.j);
    CHECK_EQ(73.8818412254460241, t.k);
    CHECK_EQ(372106121u, t.low);
    CHECK_EQ(1079146608u, t.high);
  }
}

TEST(14) {
  // Test the VFP Canonicalized Nan mode.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    double left;
    double right;
    double add_result;
    double sub_result;
    double mul_result;
    double div_result;
  } T;
  T t;

  // Create a function that makes the four basic operations.
  Assembler assm(isolate, NULL, 0);

  // Ensure FPSCR state (as JSEntryStub does).
  Label fpscr_done;
  __ vmrs(r1);
  __ tst(r1, Operand(kVFPDefaultNaNModeControlBit));
  __ b(ne, &fpscr_done);
  __ orr(r1, r1, Operand(kVFPDefaultNaNModeControlBit));
  __ vmsr(r1);
  __ bind(&fpscr_done);

  __ vldr(d0, r0, offsetof(T, left));
  __ vldr(d1, r0, offsetof(T, right));
  __ vadd(d2, d0, d1);
  __ vstr(d2, r0, offsetof(T, add_result));
  __ vsub(d2, d0, d1);
  __ vstr(d2, r0, offsetof(T, sub_result));
  __ vmul(d2, d0, d1);
  __ vstr(d2, r0, offsetof(T, mul_result));
  __ vdiv(d2, d0, d1);
  __ vstr(d2, r0, offsetof(T, div_result));

  __ mov(pc, Operand(lr));

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
  Handle<Code> code =
      isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
  OFStream os(stdout);
  code->Print(os);
#endif
  F3 f = FUNCTION_CAST<F3>(code->entry());
  t.left = bit_cast<double>(kHoleNanInt64);
  t.right = 1;
  t.add_result = 0;
  t.sub_result = 0;
  t.mul_result = 0;
  t.div_result = 0;
  Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
  USE(dummy);
  const uint32_t kArmNanUpper32 = 0x7ff80000;
  const uint32_t kArmNanLower32 = 0x00000000;
#ifdef DEBUG
  const uint64_t kArmNanInt64 =
      (static_cast<uint64_t>(kArmNanUpper32) << 32) | kArmNanLower32;
  CHECK(kArmNanInt64 != kHoleNanInt64);
#endif
  // With VFP2 the sign of the canonicalized Nan is undefined. So
  // we remove the sign bit for the upper tests.
  CHECK_EQ(kArmNanUpper32,
           (bit_cast<int64_t>(t.add_result) >> 32) & 0x7fffffff);
  CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.add_result) & 0xffffffffu);
  CHECK_EQ(kArmNanUpper32,
           (bit_cast<int64_t>(t.sub_result) >> 32) & 0x7fffffff);
  CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.sub_result) & 0xffffffffu);
  CHECK_EQ(kArmNanUpper32,
           (bit_cast<int64_t>(t.mul_result) >> 32) & 0x7fffffff);
  CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.mul_result) & 0xffffffffu);
  CHECK_EQ(kArmNanUpper32,
           (bit_cast<int64_t>(t.div_result) >> 32) & 0x7fffffff);
  CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.div_result) & 0xffffffffu);
}

#define CHECK_EQ_SPLAT(field, ex) \
  CHECK_EQ(ex, t.field[0]);       \
  CHECK_EQ(ex, t.field[1]);       \
  CHECK_EQ(ex, t.field[2]);       \
  CHECK_EQ(ex, t.field[3]);

#define CHECK_EQ_32X2(field, ex0, ex1) \
  CHECK_EQ(ex0, t.field[0]);           \
  CHECK_EQ(ex1, t.field[1]);

#define CHECK_EQ_32X4(field, ex0, ex1, ex2, ex3) \
  CHECK_EQ(ex0, t.field[0]);                     \
  CHECK_EQ(ex1, t.field[1]);                     \
  CHECK_EQ(ex2, t.field[2]);                     \
  CHECK_EQ(ex3, t.field[3]);

#define CHECK_ESTIMATE(expected, tolerance, value) \
  CHECK_LT((expected) - (tolerance), value);       \
  CHECK_GT((expected) + (tolerance), value);

#define CHECK_ESTIMATE_SPLAT(field, ex, tol) \
  CHECK_ESTIMATE(ex, tol, t.field[0]);       \
  CHECK_ESTIMATE(ex, tol, t.field[1]);       \
  CHECK_ESTIMATE(ex, tol, t.field[2]);       \
  CHECK_ESTIMATE(ex, tol, t.field[3]);

#define INT32_TO_FLOAT(val) \
  std::round(static_cast<float>(bit_cast<int32_t>(val)))
#define UINT32_TO_FLOAT(val) \
  std::round(static_cast<float>(bit_cast<uint32_t>(val)))

TEST(15) {
|
|
|
|
// Test the Neon instructions.
|
|
|
|
CcTest::InitializeVM();
|
2013-09-19 09:17:13 +00:00
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
2013-07-10 15:32:39 +00:00
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
uint32_t src0;
|
|
|
|
uint32_t src1;
|
|
|
|
uint32_t src2;
|
|
|
|
uint32_t src3;
|
|
|
|
uint32_t src4;
|
|
|
|
uint32_t src5;
|
|
|
|
uint32_t src6;
|
|
|
|
uint32_t src7;
|
|
|
|
uint32_t dst0;
|
|
|
|
uint32_t dst1;
|
|
|
|
uint32_t dst2;
|
|
|
|
uint32_t dst3;
|
|
|
|
uint32_t dst4;
|
|
|
|
uint32_t dst5;
|
|
|
|
uint32_t dst6;
|
|
|
|
uint32_t dst7;
|
|
|
|
uint32_t srcA0;
|
|
|
|
uint32_t srcA1;
|
|
|
|
uint32_t dstA0;
|
|
|
|
uint32_t dstA1;
|
|
|
|
uint32_t dstA2;
|
|
|
|
uint32_t dstA3;
|
2016-12-17 21:56:26 +00:00
|
|
|
uint32_t lane_test[4];
|
2016-12-15 18:15:23 +00:00
|
|
|
uint64_t vmov_to_scalar1, vmov_to_scalar2;
|
|
|
|
uint32_t vmov_from_scalar_s8, vmov_from_scalar_u8;
|
|
|
|
uint32_t vmov_from_scalar_s16, vmov_from_scalar_u16;
|
|
|
|
uint32_t vmov_from_scalar_32;
|
2016-12-17 21:56:26 +00:00
|
|
|
uint32_t vmov[4], vmvn[4];
|
2017-03-27 18:23:42 +00:00
|
|
|
uint32_t vmovl_s8[4], vmovl_u16[4], vmovl_s32[4];
|
|
|
|
uint32_t vqmovn_s8[2], vqmovn_u16[2], vqmovn_s32[2];
|
2016-12-15 18:15:23 +00:00
|
|
|
int32_t vcvt_s32_f32[4];
|
|
|
|
uint32_t vcvt_u32_f32[4];
|
|
|
|
float vcvt_f32_s32[4], vcvt_f32_u32[4];
|
2016-12-17 21:56:26 +00:00
|
|
|
uint32_t vdup8[4], vdup16[4], vdup32[4];
|
|
|
|
float vabsf[4], vnegf[4];
|
|
|
|
uint32_t vabs_s8[4], vabs_s16[4], vabs_s32[4];
|
|
|
|
uint32_t vneg_s8[4], vneg_s16[4], vneg_s32[4];
|
2017-01-12 15:36:29 +00:00
|
|
|
uint32_t veor[4], vand[4], vorr[4];
|
2017-04-24 18:53:16 +00:00
|
|
|
float vdupf[4], vaddf[4], vpaddf[2], vsubf[4], vmulf[4];
|
2017-05-16 20:37:16 +00:00
|
|
|
uint32_t vdupf_16[2], vdupf_8[4];
|
2017-01-16 19:36:25 +00:00
|
|
|
uint32_t vmin_s8[4], vmin_u16[4], vmin_s32[4];
|
|
|
|
uint32_t vmax_s8[4], vmax_u16[4], vmax_s32[4];
|
2017-04-24 18:53:16 +00:00
|
|
|
uint32_t vpadd_i8[2], vpadd_i16[2], vpadd_i32[2];
|
2017-03-02 19:50:33 +00:00
|
|
|
uint32_t vpmin_s8[2], vpmin_u16[2], vpmin_s32[2];
|
|
|
|
uint32_t vpmax_s8[2], vpmax_u16[2], vpmax_s32[2];
|
2016-12-15 18:15:23 +00:00
|
|
|
uint32_t vadd8[4], vadd16[4], vadd32[4];
|
2017-02-01 18:56:28 +00:00
|
|
|
uint32_t vqadd_s8[4], vqadd_u16[4], vqadd_s32[4];
|
2016-12-15 18:15:23 +00:00
|
|
|
uint32_t vsub8[4], vsub16[4], vsub32[4];
|
2017-02-01 18:56:28 +00:00
|
|
|
uint32_t vqsub_u8[4], vqsub_s16[4], vqsub_u32[4];
|
2016-12-17 21:56:26 +00:00
|
|
|
uint32_t vmul8[4], vmul16[4], vmul32[4];
|
    uint32_t vshl8[4], vshl16[4], vshl32[4];
    uint32_t vshr_s8[4], vshr_u16[4], vshr_s32[4];
    uint32_t vsli_64[2], vsri_64[2], vsli_32[2], vsri_32[2];
    uint32_t vceq[4], vceqf[4], vcgef[4], vcgtf[4];
    uint32_t vcge_s8[4], vcge_u16[4], vcge_s32[4];
    uint32_t vcgt_s8[4], vcgt_u16[4], vcgt_s32[4];
    float vrecpe[4], vrecps[4], vrsqrte[4], vrsqrts[4];
    float vminf[4], vmaxf[4];
    uint32_t vtst[4], vbsl[4];
    uint32_t vext[4];
    uint32_t vzip8a[4], vzip8b[4], vzip16a[4], vzip16b[4], vzip32a[4],
        vzip32b[4];
    uint32_t vzipd8a[2], vzipd8b[2], vzipd16a[2], vzipd16b[2];
    uint32_t vuzp8a[4], vuzp8b[4], vuzp16a[4], vuzp16b[4], vuzp32a[4],
        vuzp32b[4];
    uint32_t vuzpd8a[2], vuzpd8b[2], vuzpd16a[2], vuzpd16b[2];
    uint32_t vrev64_32[4], vrev64_16[4], vrev64_8[4];
    uint32_t vrev32_16[4], vrev32_8[4], vrev16_8[4];
    uint32_t vtrn8a[4], vtrn8b[4], vtrn16a[4], vtrn16b[4], vtrn32a[4],
        vtrn32b[4];
    uint32_t vtrnd8a[2], vtrnd8b[2], vtrnd16a[2], vtrnd16b[2], vtrnd32a[2],
        vtrnd32b[2];
    uint32_t vtbl[2], vtbx[2];
  } T;
  T t;

  // Create a function that accepts &t, and loads, manipulates, and stores
  // the doubles, floats, and SIMD values.
  Assembler assm(isolate, NULL, 0);

  if (CpuFeatures::IsSupported(NEON)) {
    CpuFeatureScope scope(&assm, NEON);

    __ stm(db_w, sp, r4.bit() | r5.bit() | lr.bit());
    // Move 32 bytes with neon.
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, src0))));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, dst0))));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r4));

    // Expand 8 bytes into 8 halfwords (16 bits each).
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, srcA0))));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(r4));
    __ vmovl(NeonU8, q0, d0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, dstA0))));
    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(r4));

    // The same expansion, but with different source and destination registers.
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, srcA0))));
    __ vld1(Neon8, NeonListOperand(d1), NeonMemOperand(r4));
    __ vmovl(NeonS8, q1, d1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmovl_s8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vmovl(NeonU16, q2, d3);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmovl_u16))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
    __ vmovl(NeonS32, q3, d4);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmovl_s32))));
    __ vst1(Neon8, NeonListOperand(q3), NeonMemOperand(r4));
    // Narrow what we widened.
    __ vqmovn(NeonU16, d0, q2);
    __ vstr(d0, r0, offsetof(T, vqmovn_u16));
    __ vmov(d1, d0);
    __ vqmovn(NeonS8, d2, q0);
    __ vstr(d2, r0, offsetof(T, vqmovn_s8));
    __ vqmovn(NeonS32, d4, q3);
    __ vstr(d4, r0, offsetof(T, vqmovn_s32));

    // ARM core register to scalar.
    __ mov(r4, Operand(0xfffffff8));
    __ vmov(d0, Double(0.0));
    __ vmov(NeonS8, d0, 1, r4);
    __ vmov(NeonS16, d0, 1, r4);
    __ vmov(NeonS32, d0, 1, r4);
    __ vstr(d0, r0, offsetof(T, vmov_to_scalar1));
    __ vmov(d0, Double(0.0));
    __ vmov(NeonS8, d0, 3, r4);
    __ vmov(NeonS16, d0, 3, r4);
    __ vstr(d0, r0, offsetof(T, vmov_to_scalar2));

    // Scalar to ARM core register.
    __ mov(r4, Operand(0xffffff00));
    __ mov(r5, Operand(0xffffffff));
    __ vmov(d0, r4, r5);
    __ vmov(NeonS8, r4, d0, 1);
    __ str(r4, MemOperand(r0, offsetof(T, vmov_from_scalar_s8)));
    __ vmov(NeonU8, r4, d0, 1);
    __ str(r4, MemOperand(r0, offsetof(T, vmov_from_scalar_u8)));
    __ vmov(NeonS16, r4, d0, 1);
    __ str(r4, MemOperand(r0, offsetof(T, vmov_from_scalar_s16)));
    __ vmov(NeonU16, r4, d0, 1);
    __ str(r4, MemOperand(r0, offsetof(T, vmov_from_scalar_u16)));
    __ vmov(NeonS32, r4, d0, 1);
    __ str(r4, MemOperand(r0, offsetof(T, vmov_from_scalar_32)));

    // vmov for q-registers.
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vmov(q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmov))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vmvn.
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vmvn(q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmvn))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vcvt for q-registers.
    __ vmov(s0, Float32(-1.5f));
    __ vmov(s1, Float32(-1.0f));
    __ vmov(s2, Float32(1.0f));
    __ vmov(s3, Float32(1.5f));
    __ vcvt_s32_f32(q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcvt_s32_f32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vcvt_u32_f32(q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcvt_u32_f32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ mov(r4, Operand(kMinInt));
    __ mov(r5, Operand(kMaxInt));
    __ vmov(d0, r4, r5);
    __ mov(r4, Operand(kMaxUInt32));
    __ mov(r5, Operand(kMinInt + 1));
    __ vmov(d1, r4, r5);  // q0 = [kMinInt, kMaxInt, kMaxUInt32, kMinInt + 1]
    __ vcvt_f32_s32(q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcvt_f32_s32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vcvt_f32_u32(q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcvt_f32_u32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vdup (from register).
    __ mov(r4, Operand(0xa));
    __ vdup(Neon8, q0, r4);
    __ vdup(Neon16, q1, r4);
    __ vdup(Neon32, q2, r4);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vdup8))));
    __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vdup16))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vdup32))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));

    // vdup (from scalar).
    __ vmov(s0, Float32(-1.0f));
    __ vdup(Neon32, q1, d0, 0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vdupf))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vdup(Neon16, d2, d0, 1);
    __ vstr(d2, r0, offsetof(T, vdupf_16));
    __ vdup(Neon8, q1, d0, 3);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vdupf_8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vabs (float).
    __ vmov(s0, Float32(-1.0f));
    __ vmov(s1, Float32(-0.0f));
    __ vmov(s2, Float32(0.0f));
    __ vmov(s3, Float32(1.0f));
    __ vabs(q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vabsf))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vneg (float).
    __ vneg(q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vnegf))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vabs (integer).
    __ mov(r4, Operand(0x7f7f7f7f));
    __ mov(r5, Operand(0x01010101));
    __ vmov(d0, r4, r5);
    __ mov(r4, Operand(0xffffffff));
    __ mov(r5, Operand(0x80808080));
    __ vmov(d1, r4, r5);
    __ vabs(Neon8, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vabs_s8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vabs(Neon16, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vabs_s16))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vabs(Neon32, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vabs_s32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vneg (integer).
    __ vneg(Neon8, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vneg_s8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vneg(Neon16, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vneg_s16))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vneg(Neon32, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vneg_s32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // veor.
    __ mov(r4, Operand(0xaa));
    __ vdup(Neon16, q0, r4);
    __ mov(r4, Operand(0x55));
    __ vdup(Neon16, q1, r4);
    __ veor(q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, veor))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vand.
    __ mov(r4, Operand(0xff));
    __ vdup(Neon16, q0, r4);
    __ mov(r4, Operand(0xfe));
    __ vdup(Neon16, q1, r4);
    __ vand(q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vand))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vorr.
    __ mov(r4, Operand(0xaa));
    __ vdup(Neon16, q0, r4);
    __ mov(r4, Operand(0x55));
    __ vdup(Neon16, q1, r4);
    __ vorr(q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vorr))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vmin (float).
    __ vmov(s4, Float32(2.0f));
    __ vdup(Neon32, q0, d2, 0);
    __ vmov(s4, Float32(1.0f));
    __ vdup(Neon32, q1, d2, 0);
    __ vmin(q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vminf))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vmax (float).
    __ vmov(s4, Float32(2.0f));
    __ vdup(Neon32, q0, d2, 0);
    __ vmov(s4, Float32(1.0f));
    __ vdup(Neon32, q1, d2, 0);
    __ vmax(q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmaxf))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vadd (float).
    __ vmov(s4, Float32(1.0f));
    __ vdup(Neon32, q0, d2, 0);
    __ vdup(Neon32, q1, d2, 0);
    __ vadd(q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vaddf))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vpadd (float).
    __ vmov(s0, Float32(1.0f));
    __ vmov(s1, Float32(2.0f));
    __ vmov(s2, Float32(3.0f));
    __ vmov(s3, Float32(4.0f));
    __ vpadd(d2, d0, d1);
    __ vstr(d2, r0, offsetof(T, vpaddf));
    // vsub (float).
    __ vmov(s4, Float32(2.0f));
    __ vdup(Neon32, q0, d2, 0);
    __ vmov(s4, Float32(1.0f));
    __ vdup(Neon32, q1, d2, 0);
    __ vsub(q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vsubf))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vmul (float).
    __ vmov(s4, Float32(2.0f));
    __ vdup(Neon32, q0, d2, 0);
    __ vdup(Neon32, q1, d2, 0);
    __ vmul(q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmulf))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vrecpe.
    __ vmov(s4, Float32(2.0f));
    __ vdup(Neon32, q0, d2, 0);
    __ vrecpe(q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrecpe))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vrecps.
    __ vmov(s4, Float32(2.0f));
    __ vdup(Neon32, q0, d2, 0);
    __ vmov(s4, Float32(1.5f));
    __ vdup(Neon32, q1, d2, 0);
    __ vrecps(q1, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrecps))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vrsqrte.
    __ vmov(s4, Float32(4.0f));
    __ vdup(Neon32, q0, d2, 0);
    __ vrsqrte(q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrsqrte))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vrsqrts.
    __ vmov(s4, Float32(2.0f));
    __ vdup(Neon32, q0, d2, 0);
    __ vmov(s4, Float32(2.5f));
    __ vdup(Neon32, q1, d2, 0);
    __ vrsqrts(q1, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrsqrts))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vceq (float).
    __ vmov(s4, Float32(1.0f));
    __ vdup(Neon32, q0, d2, 0);
    __ vdup(Neon32, q1, d2, 0);
    __ vceq(q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vceqf))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    // vcge (float).
    __ vmov(s0, Float32(1.0f));
    __ vmov(s1, Float32(-1.0f));
    __ vmov(s2, Float32(-0.0f));
    __ vmov(s3, Float32(0.0f));
    __ vdup(Neon32, q1, d1, 1);
    __ vcge(q2, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcgef))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
    __ vcgt(q2, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcgtf))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));

    // vmin/vmax integer.
    __ mov(r4, Operand(0x03));
    __ vdup(Neon16, q0, r4);
    __ vdup(Neon8, q1, r4);
    __ vmin(NeonS8, q2, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmin_s8))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
    __ vmax(NeonS8, q2, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmax_s8))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
    __ mov(r4, Operand(0xff));
    __ vdup(Neon16, q0, r4);
    __ vdup(Neon8, q1, r4);
    __ vmin(NeonU16, q2, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmin_u16))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
    __ vmax(NeonU16, q2, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmax_u16))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
    __ mov(r4, Operand(0xff));
    __ vdup(Neon32, q0, r4);
    __ vdup(Neon8, q1, r4);
    __ vmin(NeonS32, q2, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmin_s32))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
    __ vmax(NeonS32, q2, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmax_s32))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));

    // vpadd integer.
    __ mov(r4, Operand(0x03));
    __ vdup(Neon16, q0, r4);
    __ vdup(Neon8, q1, r4);
    __ vpadd(Neon8, d0, d0, d2);
    __ vstr(d0, r0, offsetof(T, vpadd_i8));
    __ vpadd(Neon16, d0, d0, d2);
    __ vstr(d0, r0, offsetof(T, vpadd_i16));
    __ vpadd(Neon32, d0, d0, d2);
    __ vstr(d0, r0, offsetof(T, vpadd_i32));

    // vpmin/vpmax integer.
    __ mov(r4, Operand(0x03));
    __ vdup(Neon16, q0, r4);
    __ vdup(Neon8, q1, r4);
    __ vpmin(NeonS8, d4, d0, d2);
    __ vstr(d4, r0, offsetof(T, vpmin_s8));
    __ vpmax(NeonS8, d4, d0, d2);
    __ vstr(d4, r0, offsetof(T, vpmax_s8));
    __ mov(r4, Operand(0xffff));
    __ vdup(Neon32, q0, r4);
    __ vdup(Neon16, q1, r4);
    __ vpmin(NeonU16, d4, d0, d2);
    __ vstr(d4, r0, offsetof(T, vpmin_u16));
    __ vpmax(NeonU16, d4, d0, d2);
    __ vstr(d4, r0, offsetof(T, vpmax_u16));
    __ mov(r4, Operand(0xff));
    __ veor(q0, q0, q0);
    __ vmov(s0, r4);
    __ vdup(Neon8, q1, r4);
    __ vpmin(NeonS32, d4, d0, d2);
    __ vstr(d4, r0, offsetof(T, vpmin_s32));
    __ vpmax(NeonS32, d4, d0, d2);
    __ vstr(d4, r0, offsetof(T, vpmax_s32));

    // vadd (integer).
    __ mov(r4, Operand(0x81));
    __ vdup(Neon8, q0, r4);
    __ mov(r4, Operand(0x82));
    __ vdup(Neon8, q1, r4);
    __ vadd(Neon8, q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vadd8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ mov(r4, Operand(0x8001));
    __ vdup(Neon16, q0, r4);
    __ mov(r4, Operand(0x8002));
    __ vdup(Neon16, q1, r4);
    __ vadd(Neon16, q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vadd16))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ mov(r4, Operand(0x80000001));
    __ vdup(Neon32, q0, r4);
    __ mov(r4, Operand(0x80000002));
    __ vdup(Neon32, q1, r4);
    __ vadd(Neon32, q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vadd32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vqadd.
    __ mov(r4, Operand(0x81));
    __ vdup(Neon8, q0, r4);
    __ mov(r4, Operand(0x82));
    __ vdup(Neon8, q1, r4);
    __ vqadd(NeonS8, q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vqadd_s8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ mov(r4, Operand(0x8000));
    __ vdup(Neon16, q0, r4);
    __ vdup(Neon16, q1, r4);
    __ vqadd(NeonU16, q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vqadd_u16))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ mov(r4, Operand(0x80000001));
    __ vdup(Neon32, q0, r4);
    __ mov(r4, Operand(0x80000002));
    __ vdup(Neon32, q1, r4);
    __ vqadd(NeonS32, q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vqadd_s32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vsub (integer).
    __ mov(r4, Operand(0x01));
    __ vdup(Neon8, q0, r4);
    __ mov(r4, Operand(0x03));
    __ vdup(Neon8, q1, r4);
    __ vsub(Neon8, q1, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vsub8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ mov(r4, Operand(0x0001));
    __ vdup(Neon16, q0, r4);
    __ mov(r4, Operand(0x0003));
    __ vdup(Neon16, q1, r4);
    __ vsub(Neon16, q1, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vsub16))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ mov(r4, Operand(0x00000001));
    __ vdup(Neon32, q0, r4);
    __ mov(r4, Operand(0x00000003));
    __ vdup(Neon32, q1, r4);
    __ vsub(Neon32, q1, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vsub32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vqsub.
    __ mov(r4, Operand(0x7f));
    __ vdup(Neon8, q0, r4);
    __ mov(r4, Operand(0x3f));
    __ vdup(Neon8, q1, r4);
    __ vqsub(NeonU8, q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vqsub_u8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ mov(r4, Operand(0x8000));
    __ vdup(Neon16, q0, r4);
    __ mov(r4, Operand(0x7fff));
    __ vdup(Neon16, q1, r4);
    __ vqsub(NeonS16, q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vqsub_s16))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ mov(r4, Operand(0x80000001));
    __ vdup(Neon32, q0, r4);
    __ mov(r4, Operand(0x80000000));
    __ vdup(Neon32, q1, r4);
    __ vqsub(NeonU32, q1, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vqsub_u32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vmul (integer).
    __ mov(r4, Operand(0x02));
    __ vdup(Neon8, q0, r4);
    __ vmul(Neon8, q1, q0, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmul8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ mov(r4, Operand(0x0002));
    __ vdup(Neon16, q0, r4);
    __ vmul(Neon16, q1, q0, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmul16))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ mov(r4, Operand(0x00000002));
    __ vdup(Neon32, q0, r4);
    __ vmul(Neon32, q1, q0, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmul32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vshl.
    __ mov(r4, Operand(0x55));
    __ vdup(Neon8, q0, r4);
    __ vshl(NeonS8, q1, q0, 1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshl8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vshl(NeonU16, q1, q0, 9);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshl16))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vshl(NeonS32, q1, q0, 17);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshl32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vshr.s, vshr.u.
    __ mov(r4, Operand(0x80));
    __ vdup(Neon8, q0, r4);
    __ vshr(NeonS8, q1, q0, 1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshr_s8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vshr(NeonU16, q1, q0, 9);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshr_u16))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vshr(NeonS32, q1, q0, 17);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshr_s32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vsli, vsri.
    __ mov(r4, Operand(0xffffffff));
    __ mov(r5, Operand(0x1));
    __ vmov(d0, r4, r5);
    __ vmov(d1, r5, r5);
    __ vsli(Neon64, d1, d0, 32);
    __ vstr(d1, r0, offsetof(T, vsli_64));
    __ vmov(d0, r5, r4);
    __ vmov(d1, r5, r5);
    __ vsri(Neon64, d1, d0, 32);
    __ vstr(d1, r0, offsetof(T, vsri_64));
    __ vmov(d0, r4, r5);
    __ vmov(d1, r5, r5);
    __ vsli(Neon32, d1, d0, 16);
    __ vstr(d1, r0, offsetof(T, vsli_32));
    __ vmov(d0, r5, r4);
    __ vmov(d1, r5, r5);
    __ vsri(Neon32, d1, d0, 16);
    __ vstr(d1, r0, offsetof(T, vsri_32));

    // vceq.
    __ mov(r4, Operand(0x03));
    __ vdup(Neon8, q0, r4);
    __ vdup(Neon16, q1, r4);
    __ vceq(Neon8, q1, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vceq))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vcge/vcgt (integer).
    __ mov(r4, Operand(0x03));
    __ vdup(Neon16, q0, r4);
    __ vdup(Neon8, q1, r4);
    __ vcge(NeonS8, q2, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcge_s8))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
    __ vcgt(NeonS8, q2, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcgt_s8))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
    __ mov(r4, Operand(0xff));
    __ vdup(Neon16, q0, r4);
    __ vdup(Neon8, q1, r4);
    __ vcge(NeonU16, q2, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcge_u16))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
    __ vcgt(NeonU16, q2, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcgt_u16))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
    __ mov(r4, Operand(0xff));
    __ vdup(Neon32, q0, r4);
    __ vdup(Neon8, q1, r4);
    __ vcge(NeonS32, q2, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcge_s32))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
    __ vcgt(NeonS32, q2, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcgt_s32))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));

    // vtst.
    __ mov(r4, Operand(0x03));
    __ vdup(Neon8, q0, r4);
    __ mov(r4, Operand(0x02));
    __ vdup(Neon16, q1, r4);
    __ vtst(Neon8, q1, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtst))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vbsl.
    __ mov(r4, Operand(0x00ff));
    __ vdup(Neon16, q0, r4);
    __ mov(r4, Operand(0x01));
    __ vdup(Neon8, q1, r4);
    __ mov(r4, Operand(0x02));
    __ vdup(Neon8, q2, r4);
    __ vbsl(q0, q1, q2);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vbsl))));
    __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));

    // vext.
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vmov(q1, q0);
    __ vext(q2, q0, q1, 3);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vext))));
    __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));

    // vzip (q-register).
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vmov(q1, q0);
    __ vzip(Neon8, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vzip8a))));
    __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vzip8b))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vmov(q1, q0);
    __ vzip(Neon16, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vzip16a))));
    __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vzip16b))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vmov(q1, q0);
    __ vzip(Neon32, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vzip32a))));
    __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vzip32b))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vzip (d-register).
    __ vldr(d2, r0, offsetof(T, lane_test));
    __ vmov(d0, d2);
    __ vmov(d1, d2);
    __ vzip(Neon8, d0, d1);
    __ vstr(d0, r0, offsetof(T, vzipd8a));
    __ vstr(d1, r0, offsetof(T, vzipd8b));
    __ vmov(d0, d2);
    __ vmov(d1, d2);
    __ vzip(Neon16, d0, d1);
    __ vstr(d0, r0, offsetof(T, vzipd16a));
    __ vstr(d1, r0, offsetof(T, vzipd16b));

    // vuzp (q-register).
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vmov(q1, q0);
    __ vuzp(Neon8, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vuzp8a))));
    __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vuzp8b))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vmov(q1, q0);
    __ vuzp(Neon16, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vuzp16a))));
    __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vuzp16b))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vmov(q1, q0);
    __ vuzp(Neon32, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vuzp32a))));
    __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vuzp32b))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vuzp (d-register).
    __ vldr(d2, r0, offsetof(T, lane_test));
    __ vmov(d0, d2);
    __ vmov(d1, d2);
    __ vuzp(Neon8, d0, d1);
    __ vstr(d0, r0, offsetof(T, vuzpd8a));
    __ vstr(d1, r0, offsetof(T, vuzpd8b));
    __ vmov(d0, d2);
    __ vmov(d1, d2);
    __ vuzp(Neon16, d0, d1);
    __ vstr(d0, r0, offsetof(T, vuzpd16a));
    __ vstr(d1, r0, offsetof(T, vuzpd16b));

    // vtrn (q-register).
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vmov(q1, q0);
    __ vtrn(Neon8, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtrn8a))));
    __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtrn8b))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vmov(q1, q0);
    __ vtrn(Neon16, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtrn16a))));
    __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtrn16b))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vmov(q1, q0);
    __ vtrn(Neon32, q0, q1);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtrn32a))));
    __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtrn32b))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vtrn (d-register).
    __ vldr(d2, r0, offsetof(T, lane_test));
    __ vmov(d0, d2);
    __ vmov(d1, d2);
    __ vtrn(Neon8, d0, d1);
    __ vstr(d0, r0, offsetof(T, vtrnd8a));
    __ vstr(d1, r0, offsetof(T, vtrnd8b));
    __ vmov(d0, d2);
    __ vmov(d1, d2);
    __ vtrn(Neon16, d0, d1);
    __ vstr(d0, r0, offsetof(T, vtrnd16a));
    __ vstr(d1, r0, offsetof(T, vtrnd16b));
    __ vmov(d0, d2);
    __ vmov(d1, d2);
    __ vtrn(Neon32, d0, d1);
    __ vstr(d0, r0, offsetof(T, vtrnd32a));
    __ vstr(d1, r0, offsetof(T, vtrnd32b));

    // vrev64/32/16
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
    __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
    __ vrev64(Neon32, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrev64_32))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vrev64(Neon16, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrev64_16))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vrev64(Neon8, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrev64_8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vrev32(Neon16, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrev32_16))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vrev32(Neon8, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrev32_8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
    __ vrev16(Neon8, q1, q0);
    __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrev16_8))));
    __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

    // vtb[l/x].
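    // vtbl fills out-of-range index lanes with zero, while vtbx leaves the
    // corresponding destination lanes unchanged; the 0xff index below
    // exercises that difference.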
    __ mov(r4, Operand(0x06040200));
    __ mov(r5, Operand(0xff050301));
    __ vmov(d2, r4, r5);  // d2 = ff05030106040200
    __ vtbl(d0, NeonListOperand(d2, 1), d2);
    __ vstr(d0, r0, offsetof(T, vtbl));
    __ vtbx(d2, NeonListOperand(d2, 1), d2);
    __ vstr(d2, r0, offsetof(T, vtbx));

    // Restore and return.
    __ ldm(ia_w, sp, r4.bit() | r5.bit() | pc.bit());

    CodeDesc desc;
    assm.GetCode(isolate, &desc);
    Handle<Code> code =
        isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
    OFStream os(stdout);
    code->Print(os);
#endif
    F3 f = FUNCTION_CAST<F3>(code->entry());
    t.src0 = 0x01020304;
    t.src1 = 0x11121314;
    t.src2 = 0x21222324;
    t.src3 = 0x31323334;
    t.src4 = 0x41424344;
    t.src5 = 0x51525354;
    t.src6 = 0x61626364;
    t.src7 = 0x71727374;
    t.dst0 = 0;
    t.dst1 = 0;
    t.dst2 = 0;
    t.dst3 = 0;
    t.dst4 = 0;
    t.dst5 = 0;
    t.dst6 = 0;
    t.dst7 = 0;
    t.srcA0 = 0x41424344;
    t.srcA1 = 0x81828384;
    t.dstA0 = 0;
    t.dstA1 = 0;
    t.dstA2 = 0;
    t.dstA3 = 0;
    t.lane_test[0] = 0x03020100;
    t.lane_test[1] = 0x07060504;
    t.lane_test[2] = 0x0b0a0908;
    t.lane_test[3] = 0x0f0e0d0c;
    Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
    USE(dummy);

    CHECK_EQ(0x01020304u, t.dst0);
    CHECK_EQ(0x11121314u, t.dst1);
    CHECK_EQ(0x21222324u, t.dst2);
    CHECK_EQ(0x31323334u, t.dst3);
    CHECK_EQ(0x41424344u, t.dst4);
    CHECK_EQ(0x51525354u, t.dst5);
    CHECK_EQ(0x61626364u, t.dst6);
    CHECK_EQ(0x71727374u, t.dst7);
    CHECK_EQ(0x00430044u, t.dstA0);
    CHECK_EQ(0x00410042u, t.dstA1);
    CHECK_EQ(0x00830084u, t.dstA2);
    CHECK_EQ(0x00810082u, t.dstA3);

    CHECK_EQ_32X4(vmovl_s8, 0x00430044u, 0x00410042u, 0xff83ff84u, 0xff81ff82u);
    CHECK_EQ_32X4(vmovl_u16, 0xff84u, 0xff83u, 0xff82u, 0xff81u);
    CHECK_EQ_32X4(vmovl_s32, 0xff84u, 0x0u, 0xff83u, 0x0u);
    CHECK_EQ_32X2(vqmovn_u16, 0xff83ff84u, 0xff81ff82u);
    CHECK_EQ_32X2(vqmovn_s8, 0x81828384u, 0x81828384u);
    CHECK_EQ_32X2(vqmovn_s32, 0xff84u, 0xff83u);

    CHECK_EQ(0xfffffff8fff8f800u, t.vmov_to_scalar1);
    CHECK_EQ(0xfff80000f8000000u, t.vmov_to_scalar2);
    CHECK_EQ(0xFFFFFFFFu, t.vmov_from_scalar_s8);
    CHECK_EQ(0xFFu, t.vmov_from_scalar_u8);
    CHECK_EQ(0xFFFFFFFFu, t.vmov_from_scalar_s16);
    CHECK_EQ(0xFFFFu, t.vmov_from_scalar_u16);
    CHECK_EQ(0xFFFFFFFFu, t.vmov_from_scalar_32);

    CHECK_EQ_32X4(vmov, 0x03020100u, 0x07060504u, 0x0b0a0908u, 0x0f0e0d0cu);
    CHECK_EQ_32X4(vmvn, 0xfcfdfeffu, 0xf8f9fafbu, 0xf4f5f6f7u, 0xf0f1f2f3u);

    CHECK_EQ_SPLAT(vdup8, 0x0a0a0a0au);
    CHECK_EQ_SPLAT(vdup16, 0x000a000au);
    CHECK_EQ_SPLAT(vdup32, 0x0000000au);
    CHECK_EQ_SPLAT(vdupf, -1.0);  // bit pattern is 0xbf800000.
    CHECK_EQ_32X2(vdupf_16, 0xbf80bf80u, 0xbf80bf80u);
    CHECK_EQ_SPLAT(vdupf_8, 0xbfbfbfbfu);

    // src: [-1, -1, 1, 1]
    CHECK_EQ_32X4(vcvt_s32_f32, -1, -1, 1, 1);
    CHECK_EQ_32X4(vcvt_u32_f32, 0u, 0u, 1u, 1u);
    // src: [kMinInt, kMaxInt, kMaxUInt32, kMinInt + 1]
    CHECK_EQ_32X4(vcvt_f32_s32, INT32_TO_FLOAT(kMinInt),
                  INT32_TO_FLOAT(kMaxInt), INT32_TO_FLOAT(kMaxUInt32),
                  INT32_TO_FLOAT(kMinInt + 1));
    CHECK_EQ_32X4(vcvt_f32_u32, UINT32_TO_FLOAT(kMinInt),
                  UINT32_TO_FLOAT(kMaxInt), UINT32_TO_FLOAT(kMaxUInt32),
                  UINT32_TO_FLOAT(kMinInt + 1));

    CHECK_EQ_32X4(vabsf, 1.0, 0.0, 0.0, 1.0);
    CHECK_EQ_32X4(vnegf, 1.0, 0.0, -0.0, -1.0);
    // src: [0x7f7f7f7f, 0x01010101, 0xffffffff, 0x80808080]
    CHECK_EQ_32X4(vabs_s8, 0x7f7f7f7fu, 0x01010101u, 0x01010101u, 0x80808080u);
    CHECK_EQ_32X4(vabs_s16, 0x7f7f7f7fu, 0x01010101u, 0x00010001u, 0x7f807f80u);
    CHECK_EQ_32X4(vabs_s32, 0x7f7f7f7fu, 0x01010101u, 0x00000001u, 0x7f7f7f80u);
    CHECK_EQ_32X4(vneg_s8, 0x81818181u, 0xffffffffu, 0x01010101u, 0x80808080u);
    CHECK_EQ_32X4(vneg_s16, 0x80818081u, 0xfefffeffu, 0x00010001u, 0x7f807f80u);
    CHECK_EQ_32X4(vneg_s32, 0x80808081u, 0xfefefeffu, 0x00000001u, 0x7f7f7f80u);

    CHECK_EQ_SPLAT(veor, 0x00ff00ffu);
    CHECK_EQ_SPLAT(vand, 0x00fe00feu);
    CHECK_EQ_SPLAT(vorr, 0x00ff00ffu);
    CHECK_EQ_SPLAT(vaddf, 2.0);
    CHECK_EQ_32X2(vpaddf, 3.0, 7.0);
    CHECK_EQ_SPLAT(vminf, 1.0);
    CHECK_EQ_SPLAT(vmaxf, 2.0);
    CHECK_EQ_SPLAT(vsubf, -1.0);
    CHECK_EQ_SPLAT(vmulf, 4.0);
    CHECK_ESTIMATE_SPLAT(vrecpe, 0.5f, 0.1f);  // 1 / 2
    CHECK_EQ_SPLAT(vrecps, -1.0f);  // 2 - (2 * 1.5)
    CHECK_ESTIMATE_SPLAT(vrsqrte, 0.5f, 0.1f);  // 1 / sqrt(4)
    CHECK_EQ_SPLAT(vrsqrts, -1.0f);  // (3 - (2 * 2.5)) / 2
    CHECK_EQ_SPLAT(vceqf, 0xffffffffu);
    // [0] >= [-1, 1, -0, 0]
    CHECK_EQ_32X4(vcgef, 0u, 0xffffffffu, 0xffffffffu, 0xffffffffu);
    CHECK_EQ_32X4(vcgtf, 0u, 0xffffffffu, 0u, 0u);
    // [0, 3, 0, 3, ...] and [3, 3, 3, 3, ...]
    CHECK_EQ_SPLAT(vmin_s8, 0x00030003u);
    CHECK_EQ_SPLAT(vmax_s8, 0x03030303u);
    // [0x00ff, 0x00ff, ...] and [0xffff, 0xffff, ...]
    CHECK_EQ_SPLAT(vmin_u16, 0x00ff00ffu);
    CHECK_EQ_SPLAT(vmax_u16, 0xffffffffu);
    // [0x000000ff, 0x000000ff, ...] and [0xffffffff, 0xffffffff, ...]
    CHECK_EQ_SPLAT(vmin_s32, 0xffffffffu);
    CHECK_EQ_SPLAT(vmax_s32, 0xffu);
    // [0, 3, 0, 3, ...] and [3, 3, 3, 3, ...]
    CHECK_EQ_32X2(vpadd_i8, 0x03030303u, 0x06060606u);
    CHECK_EQ_32X2(vpadd_i16, 0x0c0c0606u, 0x06060606u);
    CHECK_EQ_32X2(vpadd_i32, 0x12120c0cu, 0x06060606u);
    CHECK_EQ_32X2(vpmin_s8, 0x00000000u, 0x03030303u);
    CHECK_EQ_32X2(vpmax_s8, 0x03030303u, 0x03030303u);
    // [0, ffff, 0, ffff] and [ffff, ffff]
    CHECK_EQ_32X2(vpmin_u16, 0x00000000u, 0xffffffffu);
    CHECK_EQ_32X2(vpmax_u16, 0xffffffffu, 0xffffffffu);
    // [0x000000ff, 0x00000000u] and [0xffffffff, 0xffffffff, ...]
    CHECK_EQ_32X2(vpmin_s32, 0x00u, 0xffffffffu);
    CHECK_EQ_32X2(vpmax_s32, 0xffu, 0xffffffffu);
    CHECK_EQ_SPLAT(vadd8, 0x03030303u);
    CHECK_EQ_SPLAT(vadd16, 0x00030003u);
    CHECK_EQ_SPLAT(vadd32, 0x00000003u);
    CHECK_EQ_SPLAT(vqadd_s8, 0x80808080u);
    CHECK_EQ_SPLAT(vqadd_u16, 0xffffffffu);
    CHECK_EQ_SPLAT(vqadd_s32, 0x80000000u);
    CHECK_EQ_SPLAT(vqsub_u8, 0x00000000u);
    CHECK_EQ_SPLAT(vqsub_s16, 0x7fff7fffu);
    CHECK_EQ_SPLAT(vqsub_u32, 0x00000000u);
    CHECK_EQ_SPLAT(vsub8, 0xfefefefeu);
    CHECK_EQ_SPLAT(vsub16, 0xfffefffeu);
    CHECK_EQ_SPLAT(vsub32, 0xfffffffeu);
    CHECK_EQ_SPLAT(vmul8, 0x04040404u);
    CHECK_EQ_SPLAT(vmul16, 0x00040004u);
    CHECK_EQ_SPLAT(vmul32, 0x00000004u);
    CHECK_EQ_SPLAT(vshl8, 0xaaaaaaaau);
    CHECK_EQ_SPLAT(vshl16, 0xaa00aa00u);
    CHECK_EQ_SPLAT(vshl32, 0xaaaa0000u);
    CHECK_EQ_SPLAT(vshr_s8, 0xc0c0c0c0u);
    CHECK_EQ_SPLAT(vshr_u16, 0x00400040u);
    CHECK_EQ_SPLAT(vshr_s32, 0xffffc040u);
    CHECK_EQ_32X2(vsli_64, 0x01u, 0xffffffffu);
    CHECK_EQ_32X2(vsri_64, 0xffffffffu, 0x01u);
    CHECK_EQ_32X2(vsli_32, 0xffff0001u, 0x00010001u);
    CHECK_EQ_32X2(vsri_32, 0x00000000u, 0x0000ffffu);
    CHECK_EQ_SPLAT(vceq, 0x00ff00ffu);
    // [0, 3, 0, 3, ...] >= [3, 3, 3, 3, ...]
    CHECK_EQ_SPLAT(vcge_s8, 0x00ff00ffu);
    CHECK_EQ_SPLAT(vcgt_s8, 0u);
    // [0x00ff, 0x00ff, ...] >= [0xffff, 0xffff, ...]
    CHECK_EQ_SPLAT(vcge_u16, 0u);
    CHECK_EQ_SPLAT(vcgt_u16, 0u);
    // [0x000000ff, 0x000000ff, ...] >= [0xffffffff, 0xffffffff, ...]
    CHECK_EQ_SPLAT(vcge_s32, 0xffffffffu);
    CHECK_EQ_SPLAT(vcgt_s32, 0xffffffffu);
    CHECK_EQ_SPLAT(vtst, 0x00ff00ffu);
    CHECK_EQ_SPLAT(vbsl, 0x02010201u);

    CHECK_EQ_32X4(vext, 0x06050403u, 0x0a090807u, 0x0e0d0c0bu, 0x0201000fu);

    CHECK_EQ_32X4(vzip8a, 0x01010000u, 0x03030202u, 0x05050404u, 0x07070606u);
    CHECK_EQ_32X4(vzip8b, 0x09090808u, 0x0b0b0a0au, 0x0d0d0c0cu, 0x0f0f0e0eu);
    CHECK_EQ_32X4(vzip16a, 0x01000100u, 0x03020302u, 0x05040504u, 0x07060706u);
    CHECK_EQ_32X4(vzip16b, 0x09080908u, 0x0b0a0b0au, 0x0d0c0d0cu, 0x0f0e0f0eu);
    CHECK_EQ_32X4(vzip32a, 0x03020100u, 0x03020100u, 0x07060504u, 0x07060504u);
    CHECK_EQ_32X4(vzip32b, 0x0b0a0908u, 0x0b0a0908u, 0x0f0e0d0cu, 0x0f0e0d0cu);

    CHECK_EQ_32X2(vzipd8a, 0x01010000u, 0x03030202u);
    CHECK_EQ_32X2(vzipd8b, 0x05050404u, 0x07070606u);
    CHECK_EQ_32X2(vzipd16a, 0x01000100u, 0x03020302u);
    CHECK_EQ_32X2(vzipd16b, 0x05040504u, 0x07060706u);

    CHECK_EQ_32X4(vuzp8a, 0x06040200u, 0x0e0c0a08u, 0x06040200u, 0x0e0c0a08u);
    CHECK_EQ_32X4(vuzp8b, 0x07050301u, 0x0f0d0b09u, 0x07050301u, 0x0f0d0b09u);
    CHECK_EQ_32X4(vuzp16a, 0x05040100u, 0x0d0c0908u, 0x05040100u, 0x0d0c0908u);
    CHECK_EQ_32X4(vuzp16b, 0x07060302u, 0x0f0e0b0au, 0x07060302u, 0x0f0e0b0au);
    CHECK_EQ_32X4(vuzp32a, 0x03020100u, 0x0b0a0908u, 0x03020100u, 0x0b0a0908u);
    CHECK_EQ_32X4(vuzp32b, 0x07060504u, 0x0f0e0d0cu, 0x07060504u, 0x0f0e0d0cu);

    CHECK_EQ_32X2(vuzpd8a, 0x06040200u, 0x06040200u);
    CHECK_EQ_32X2(vuzpd8b, 0x07050301u, 0x07050301u);
    CHECK_EQ_32X2(vuzpd16a, 0x05040100u, 0x05040100u);
    CHECK_EQ_32X2(vuzpd16b, 0x07060302u, 0x07060302u);

    CHECK_EQ_32X4(vtrn8a, 0x02020000u, 0x06060404u, 0x0a0a0808u, 0x0e0e0c0cu);
    CHECK_EQ_32X4(vtrn8b, 0x03030101u, 0x07070505u, 0x0b0b0909u, 0x0f0f0d0du);
    CHECK_EQ_32X4(vtrn16a, 0x01000100u, 0x05040504u, 0x09080908u, 0x0d0c0d0cu);
    CHECK_EQ_32X4(vtrn16b, 0x03020302u, 0x07060706u, 0x0b0a0b0au, 0x0f0e0f0eu);
    CHECK_EQ_32X4(vtrn32a, 0x03020100u, 0x03020100u, 0x0b0a0908u, 0x0b0a0908u);
    CHECK_EQ_32X4(vtrn32b, 0x07060504u, 0x07060504u, 0x0f0e0d0cu, 0x0f0e0d0cu);

    CHECK_EQ_32X2(vtrnd8a, 0x02020000u, 0x06060404u);
    CHECK_EQ_32X2(vtrnd8b, 0x03030101u, 0x07070505u);
    CHECK_EQ_32X2(vtrnd16a, 0x01000100u, 0x05040504u);
    CHECK_EQ_32X2(vtrnd16b, 0x03020302u, 0x07060706u);
    CHECK_EQ_32X2(vtrnd32a, 0x03020100u, 0x03020100u);
    CHECK_EQ_32X2(vtrnd32b, 0x07060504u, 0x07060504u);

    // src: 0 1 2 3 4 5 6 7 8 9 a b c d e f (little endian)
    CHECK_EQ_32X4(vrev64_32, 0x07060504u, 0x03020100u, 0x0f0e0d0cu,
                  0x0b0a0908u);
    CHECK_EQ_32X4(vrev64_16, 0x05040706u, 0x01000302u, 0x0d0c0f0eu,
                  0x09080b0au);
    CHECK_EQ_32X4(vrev64_8, 0x04050607u, 0x00010203u, 0x0c0d0e0fu, 0x08090a0bu);
    CHECK_EQ_32X4(vrev32_16, 0x01000302u, 0x05040706u, 0x09080b0au,
                  0x0d0c0f0eu);
    CHECK_EQ_32X4(vrev32_8, 0x00010203u, 0x04050607u, 0x08090a0bu, 0x0c0d0e0fu);
    CHECK_EQ_32X4(vrev16_8, 0x02030001u, 0x06070405u, 0x0a0b0809u, 0x0e0f0c0du);

    CHECK_EQ(0x05010400u, t.vtbl[0]);
    CHECK_EQ(0x00030602u, t.vtbl[1]);
    CHECK_EQ(0x05010400u, t.vtbx[0]);
    CHECK_EQ(0xff030602u, t.vtbx[1]);
  }
}

TEST(16) {
  // Test the pkh, uxtb, uxtab and uxtb16 instructions.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    uint32_t src0;
    uint32_t src1;
    uint32_t src2;
    uint32_t dst0;
    uint32_t dst1;
    uint32_t dst2;
    uint32_t dst3;
    uint32_t dst4;
  } T;
  T t;

  // Create a function that accepts &t, and loads, manipulates, and stores
  // the doubles and floats.
  Assembler assm(isolate, NULL, 0);

  __ stm(db_w, sp, r4.bit() | lr.bit());

  __ mov(r4, Operand(r0));
  __ ldr(r0, MemOperand(r4, offsetof(T, src0)));
  __ ldr(r1, MemOperand(r4, offsetof(T, src1)));

  __ pkhbt(r2, r0, Operand(r1, LSL, 8));
  __ str(r2, MemOperand(r4, offsetof(T, dst0)));

  __ pkhtb(r2, r0, Operand(r1, ASR, 8));
  __ str(r2, MemOperand(r4, offsetof(T, dst1)));

  __ uxtb16(r2, r0, 8);
  __ str(r2, MemOperand(r4, offsetof(T, dst2)));

  __ uxtb(r2, r0, 8);
  __ str(r2, MemOperand(r4, offsetof(T, dst3)));

  __ ldr(r0, MemOperand(r4, offsetof(T, src2)));
  __ uxtab(r2, r0, r1, 8);
  __ str(r2, MemOperand(r4, offsetof(T, dst4)));

  __ ldm(ia_w, sp, r4.bit() | pc.bit());

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
  Handle<Code> code =
      isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
  OFStream os(stdout);
  code->Print(os);
#endif
  F3 f = FUNCTION_CAST<F3>(code->entry());
  t.src0 = 0x01020304;
  t.src1 = 0x11121314;
  t.src2 = 0x11121300;
  t.dst0 = 0;
  t.dst1 = 0;
  t.dst2 = 0;
  t.dst3 = 0;
  t.dst4 = 0;
  Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
  USE(dummy);
  CHECK_EQ(0x12130304u, t.dst0);
  CHECK_EQ(0x01021213u, t.dst1);
  CHECK_EQ(0x00010003u, t.dst2);
  CHECK_EQ(0x00000003u, t.dst3);
  CHECK_EQ(0x11121313u, t.dst4);
}

TEST(17) {
  // Test generating labels at high addresses.
  // Should not assert.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  // Generate a code segment that will be longer than 2^24 bytes.
  Assembler assm(isolate, NULL, 0);
  for (size_t i = 0; i < 1 << 23; ++i) {  // 2^23
    __ nop();
  }

  Label target;
  __ b(eq, &target);
  __ bind(&target);
  __ nop();
}


#define TEST_SDIV(expected_, dividend_, divisor_)           \
  t.dividend = dividend_;                                    \
  t.divisor = divisor_;                                      \
  t.result = 0;                                              \
  dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);   \
  CHECK_EQ(expected_, t.result);


TEST(sdiv) {
  // Test the sdiv.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  Assembler assm(isolate, NULL, 0);

  struct T {
    int32_t dividend;
    int32_t divisor;
    int32_t result;
  } t;

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(&assm, SUDIV);

    __ mov(r3, Operand(r0));

    __ ldr(r0, MemOperand(r3, offsetof(T, dividend)));
    __ ldr(r1, MemOperand(r3, offsetof(T, divisor)));

    __ sdiv(r2, r0, r1);
    __ str(r2, MemOperand(r3, offsetof(T, result)));

    __ bx(lr);

    CodeDesc desc;
    assm.GetCode(isolate, &desc);
    Handle<Code> code =
        isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
    OFStream os(stdout);
    code->Print(os);
#endif
    F3 f = FUNCTION_CAST<F3>(code->entry());
    Object* dummy;
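    // sdiv (and udiv) yield 0 for division by zero, which the first two
    // cases below check.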
    TEST_SDIV(0, kMinInt, 0);
    TEST_SDIV(0, 1024, 0);
    TEST_SDIV(1073741824, kMinInt, -2);
    TEST_SDIV(kMinInt, kMinInt, -1);
    TEST_SDIV(5, 10, 2);
    TEST_SDIV(3, 10, 3);
    TEST_SDIV(-5, 10, -2);
    TEST_SDIV(-3, 10, -3);
    TEST_SDIV(-5, -10, 2);
    TEST_SDIV(-3, -10, 3);
    TEST_SDIV(5, -10, -2);
    TEST_SDIV(3, -10, -3);
    USE(dummy);
  }
}


#undef TEST_SDIV

#define TEST_UDIV(expected_, dividend_, divisor_)            \
  t.dividend = dividend_;                                    \
  t.divisor = divisor_;                                      \
  t.result = 0;                                              \
  dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);   \
  CHECK_EQ(expected_, t.result);


TEST(udiv) {
  // Test the udiv.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  Assembler assm(isolate, NULL, 0);

  struct T {
    uint32_t dividend;
    uint32_t divisor;
    uint32_t result;
  } t;

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(&assm, SUDIV);

    __ mov(r3, Operand(r0));

    __ ldr(r0, MemOperand(r3, offsetof(T, dividend)));
    __ ldr(r1, MemOperand(r3, offsetof(T, divisor)));

|
|
|
__ udiv(r2, r0, r1);
|
2015-06-17 09:06:44 +00:00
|
|
|
__ str(r2, MemOperand(r3, offsetof(T, result)));
|
2014-10-26 12:49:56 +00:00
|
|
|
|
|
|
|
__ bx(lr);
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2014-10-26 12:49:56 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
OFStream os(stdout);
|
|
|
|
code->Print(os);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
Object* dummy;
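    // udiv treats both operands as unsigned; division by zero yields 0.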
|
2015-01-30 09:29:25 +00:00
|
|
|
TEST_UDIV(0u, 0, 0);
|
|
|
|
TEST_UDIV(0u, 1024, 0);
|
|
|
|
TEST_UDIV(5u, 10, 2);
|
|
|
|
TEST_UDIV(3u, 10, 3);
|
2014-10-26 12:49:56 +00:00
|
|
|
USE(dummy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
#undef TEST_UDIV
|
|
|
|
|
|
|
|
|
2014-10-14 05:03:04 +00:00
|
|
|
TEST(smmla) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* const isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
RandomNumberGenerator* const rng = isolate->random_number_generator();
|
|
|
|
Assembler assm(isolate, nullptr, 0);
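  // smmla stores the high 32 bits of the signed 64-bit product r1 * r2, plus
  // the accumulator r3, back into r1.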
|
|
|
|
__ smmla(r1, r1, r2, r3);
|
|
|
|
__ str(r1, MemOperand(r0));
|
|
|
|
__ bx(lr);
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2014-10-14 05:03:04 +00:00
|
|
|
#ifdef OBJECT_PRINT
|
|
|
|
code->Print(std::cout);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
for (size_t i = 0; i < 128; ++i) {
|
|
|
|
int32_t r, x = rng->NextInt(), y = rng->NextInt(), z = rng->NextInt();
|
2015-11-23 08:09:34 +00:00
|
|
|
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, z, 0);
|
2017-08-31 12:34:55 +00:00
|
|
|
CHECK_EQ(base::bits::SignedMulHighAndAdd32(x, y, z), r);
|
2014-10-14 05:03:04 +00:00
|
|
|
USE(dummy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
TEST(smmul) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* const isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
RandomNumberGenerator* const rng = isolate->random_number_generator();
|
|
|
|
Assembler assm(isolate, nullptr, 0);
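  // smmul stores the high 32 bits of the signed 64-bit product r1 * r2 in r1.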
|
|
|
|
__ smmul(r1, r1, r2);
|
|
|
|
__ str(r1, MemOperand(r0));
|
|
|
|
__ bx(lr);
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2014-10-14 05:03:04 +00:00
|
|
|
#ifdef OBJECT_PRINT
|
|
|
|
code->Print(std::cout);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
for (size_t i = 0; i < 128; ++i) {
|
|
|
|
int32_t r, x = rng->NextInt(), y = rng->NextInt();
|
2015-11-23 08:09:34 +00:00
|
|
|
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
|
2017-08-31 12:34:55 +00:00
|
|
|
CHECK_EQ(base::bits::SignedMulHigh32(x, y), r);
|
2014-10-14 05:03:04 +00:00
|
|
|
USE(dummy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-11-10 05:48:38 +00:00
|
|
|
TEST(sxtb) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* const isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
RandomNumberGenerator* const rng = isolate->random_number_generator();
|
|
|
|
Assembler assm(isolate, nullptr, 0);
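  // sxtb sign-extends the least significant byte of r1 to 32 bits.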
|
|
|
|
__ sxtb(r1, r1);
|
|
|
|
__ str(r1, MemOperand(r0));
|
|
|
|
__ bx(lr);
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2014-11-10 05:48:38 +00:00
|
|
|
#ifdef OBJECT_PRINT
|
|
|
|
code->Print(std::cout);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
for (size_t i = 0; i < 128; ++i) {
|
|
|
|
int32_t r, x = rng->NextInt();
|
2015-11-23 08:09:34 +00:00
|
|
|
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
|
2014-11-10 05:48:38 +00:00
|
|
|
CHECK_EQ(static_cast<int32_t>(static_cast<int8_t>(x)), r);
|
|
|
|
USE(dummy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
TEST(sxtab) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* const isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
RandomNumberGenerator* const rng = isolate->random_number_generator();
|
|
|
|
Assembler assm(isolate, nullptr, 0);
|
|
|
|
__ sxtab(r1, r2, r1);
|
|
|
|
__ str(r1, MemOperand(r0));
|
|
|
|
__ bx(lr);
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2014-11-10 05:48:38 +00:00
|
|
|
#ifdef OBJECT_PRINT
|
|
|
|
code->Print(std::cout);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
for (size_t i = 0; i < 128; ++i) {
|
|
|
|
int32_t r, x = rng->NextInt(), y = rng->NextInt();
|
2015-11-23 08:09:34 +00:00
|
|
|
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
|
2014-11-10 05:48:38 +00:00
|
|
|
CHECK_EQ(static_cast<int32_t>(static_cast<int8_t>(x)) + y, r);
|
|
|
|
USE(dummy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
TEST(sxth) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* const isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
RandomNumberGenerator* const rng = isolate->random_number_generator();
|
|
|
|
Assembler assm(isolate, nullptr, 0);
|
|
|
|
__ sxth(r1, r1);
|
|
|
|
__ str(r1, MemOperand(r0));
|
|
|
|
__ bx(lr);
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2014-11-10 05:48:38 +00:00
|
|
|
#ifdef OBJECT_PRINT
|
|
|
|
code->Print(std::cout);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
for (size_t i = 0; i < 128; ++i) {
|
|
|
|
int32_t r, x = rng->NextInt();
|
2015-11-23 08:09:34 +00:00
|
|
|
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
|
2014-11-10 05:48:38 +00:00
|
|
|
CHECK_EQ(static_cast<int32_t>(static_cast<int16_t>(x)), r);
|
|
|
|
USE(dummy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
TEST(sxtah) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* const isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
RandomNumberGenerator* const rng = isolate->random_number_generator();
|
|
|
|
Assembler assm(isolate, nullptr, 0);
|
|
|
|
__ sxtah(r1, r2, r1);
|
|
|
|
__ str(r1, MemOperand(r0));
|
|
|
|
__ bx(lr);
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2014-11-10 05:48:38 +00:00
|
|
|
#ifdef OBJECT_PRINT
|
|
|
|
code->Print(std::cout);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
for (size_t i = 0; i < 128; ++i) {
|
|
|
|
int32_t r, x = rng->NextInt(), y = rng->NextInt();
|
2015-11-23 08:09:34 +00:00
|
|
|
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
|
2014-11-10 05:48:38 +00:00
|
|
|
CHECK_EQ(static_cast<int32_t>(static_cast<int16_t>(x)) + y, r);
|
|
|
|
USE(dummy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
TEST(uxtb) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* const isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
RandomNumberGenerator* const rng = isolate->random_number_generator();
|
|
|
|
Assembler assm(isolate, nullptr, 0);
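  // uxtb zero-extends the least significant byte of r1 to 32 bits.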
|
|
|
|
__ uxtb(r1, r1);
|
|
|
|
__ str(r1, MemOperand(r0));
|
|
|
|
__ bx(lr);
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2014-11-10 05:48:38 +00:00
|
|
|
#ifdef OBJECT_PRINT
|
|
|
|
code->Print(std::cout);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
for (size_t i = 0; i < 128; ++i) {
|
|
|
|
int32_t r, x = rng->NextInt();
|
2015-11-23 08:09:34 +00:00
|
|
|
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
|
2014-11-10 05:48:38 +00:00
|
|
|
CHECK_EQ(static_cast<int32_t>(static_cast<uint8_t>(x)), r);
|
|
|
|
USE(dummy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
TEST(uxtab) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* const isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
RandomNumberGenerator* const rng = isolate->random_number_generator();
|
|
|
|
Assembler assm(isolate, nullptr, 0);
|
|
|
|
__ uxtab(r1, r2, r1);
|
|
|
|
__ str(r1, MemOperand(r0));
|
|
|
|
__ bx(lr);
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2014-11-10 05:48:38 +00:00
|
|
|
#ifdef OBJECT_PRINT
|
|
|
|
code->Print(std::cout);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
for (size_t i = 0; i < 128; ++i) {
|
|
|
|
int32_t r, x = rng->NextInt(), y = rng->NextInt();
|
2015-11-23 08:09:34 +00:00
|
|
|
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
|
2014-11-10 05:48:38 +00:00
|
|
|
CHECK_EQ(static_cast<int32_t>(static_cast<uint8_t>(x)) + y, r);
|
|
|
|
USE(dummy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
TEST(uxth) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* const isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
RandomNumberGenerator* const rng = isolate->random_number_generator();
|
|
|
|
Assembler assm(isolate, nullptr, 0);
|
|
|
|
__ uxth(r1, r1);
|
|
|
|
__ str(r1, MemOperand(r0));
|
|
|
|
__ bx(lr);
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2014-11-10 05:48:38 +00:00
|
|
|
#ifdef OBJECT_PRINT
|
|
|
|
code->Print(std::cout);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
for (size_t i = 0; i < 128; ++i) {
|
|
|
|
int32_t r, x = rng->NextInt();
|
2015-11-23 08:09:34 +00:00
|
|
|
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
|
2014-11-10 05:48:38 +00:00
|
|
|
CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(x)), r);
|
|
|
|
USE(dummy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
TEST(uxtah) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* const isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
RandomNumberGenerator* const rng = isolate->random_number_generator();
|
|
|
|
Assembler assm(isolate, nullptr, 0);
|
|
|
|
__ uxtah(r1, r2, r1);
|
|
|
|
__ str(r1, MemOperand(r0));
|
|
|
|
__ bx(lr);
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2014-11-10 05:48:38 +00:00
|
|
|
#ifdef OBJECT_PRINT
|
|
|
|
code->Print(std::cout);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
for (size_t i = 0; i < 128; ++i) {
|
|
|
|
int32_t r, x = rng->NextInt(), y = rng->NextInt();
|
2015-11-23 08:09:34 +00:00
|
|
|
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
|
2014-11-10 05:48:38 +00:00
|
|
|
CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(x)) + y, r);
|
|
|
|
USE(dummy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-16 11:14:01 +00:00
|
|
|
#define TEST_RBIT(expected_, input_) \
|
|
|
|
t.input = input_; \
|
|
|
|
t.result = 0; \
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
|
2016-11-11 12:00:34 +00:00
|
|
|
CHECK_EQ(static_cast<uint32_t>(expected_), t.result);
|
2016-02-16 11:14:01 +00:00
|
|
|
|
|
|
|
TEST(rbit) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* const isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
Assembler assm(isolate, nullptr, 0);
|
|
|
|
|
|
|
|
if (CpuFeatures::IsSupported(ARMv7)) {
|
|
|
|
CpuFeatureScope scope(&assm, ARMv7);
|
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
uint32_t input;
|
|
|
|
uint32_t result;
|
|
|
|
} T;
|
|
|
|
T t;
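    // rbit reverses the bit order of a word, e.g. 0x12345678 -> 0x1e6a2c48.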
|
|
|
|
|
|
|
|
__ ldr(r1, MemOperand(r0, offsetof(T, input)));
|
|
|
|
__ rbit(r1, r1);
|
|
|
|
__ str(r1, MemOperand(r0, offsetof(T, result)));
|
|
|
|
__ bx(lr);
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2016-02-16 11:14:01 +00:00
|
|
|
|
|
|
|
#ifdef OBJECT_PRINT
|
|
|
|
code->Print(std::cout);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
Object* dummy = NULL;
|
|
|
|
TEST_RBIT(0xffffffff, 0xffffffff);
|
|
|
|
TEST_RBIT(0x00000000, 0x00000000);
|
|
|
|
TEST_RBIT(0xffff0000, 0x0000ffff);
|
|
|
|
TEST_RBIT(0xff00ff00, 0x00ff00ff);
|
|
|
|
TEST_RBIT(0xf0f0f0f0, 0x0f0f0f0f);
|
|
|
|
TEST_RBIT(0x1e6a2c48, 0x12345678);
|
|
|
|
USE(dummy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-09-12 10:37:42 +00:00
|
|
|
TEST(code_relative_offset) {
|
|
|
|
// Test extracting the offset of a label from the beginning of the code
|
|
|
|
// in a register.
|
|
|
|
CcTest::InitializeVM();
|
2013-09-19 09:17:13 +00:00
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
2013-09-12 10:37:42 +00:00
|
|
|
HandleScope scope(isolate);
|
|
|
|
// Initialize a code object that will contain the code.
|
2017-07-06 09:13:09 +00:00
|
|
|
Handle<HeapObject> code_object(isolate->heap()->undefined_value(), isolate);
|
2013-09-12 10:37:42 +00:00
|
|
|
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
|
|
|
|
Label start, target_away, target_faraway;
|
|
|
|
|
|
|
|
__ stm(db_w, sp, r4.bit() | r5.bit() | lr.bit());
|
|
|
|
|
|
|
|
// r3 is used as the address zero, the test will crash when we load it.
|
|
|
|
__ mov(r3, Operand::Zero());
|
|
|
|
|
|
|
|
// r5 will be a pointer to the start of the code.
|
|
|
|
__ mov(r5, Operand(code_object));
|
|
|
|
__ mov_label_offset(r4, &start);
|
|
|
|
|
|
|
|
__ mov_label_offset(r1, &target_faraway);
|
|
|
|
__ str(r1, MemOperand(sp, kPointerSize, NegPreIndex));
|
|
|
|
|
|
|
|
__ mov_label_offset(r1, &target_away);
|
|
|
|
|
|
|
|
// Jump straight to 'target_away' the first time and use the relative
|
|
|
|
// position the second time. This covers the case when extracting the
|
|
|
|
// position of a label which is linked.
|
|
|
|
__ mov(r2, Operand::Zero());
|
|
|
|
__ bind(&start);
|
|
|
|
__ cmp(r2, Operand::Zero());
|
|
|
|
__ b(eq, &target_away);
|
|
|
|
__ add(pc, r5, r1);
|
|
|
|
// Emit invalid instructions to push the label between 2^8 and 2^16
|
|
|
|
// instructions away. The test will crash if they are reached.
|
|
|
|
for (int i = 0; i < (1 << 10); i++) {
|
|
|
|
__ ldr(r3, MemOperand(r3));
|
|
|
|
}
|
|
|
|
__ bind(&target_away);
|
|
|
|
// This will be hit twice: r0 = r0 + 5 + 5.
|
|
|
|
__ add(r0, r0, Operand(5));
|
|
|
|
|
|
|
|
__ ldr(r1, MemOperand(sp, kPointerSize, PostIndex), ne);
|
|
|
|
__ add(pc, r5, r4, LeaveCC, ne);
|
|
|
|
|
|
|
|
__ mov(r2, Operand(1));
|
|
|
|
__ b(&start);
|
|
|
|
// Emit invalid instructions to push the label between 2^16 and 2^24
|
|
|
|
// instructions away. The test will crash if they are reached.
|
|
|
|
for (int i = 0; i < (1 << 21); i++) {
|
|
|
|
__ ldr(r3, MemOperand(r3));
|
|
|
|
}
|
|
|
|
__ bind(&target_faraway);
|
|
|
|
// r0 = r0 + 5 + 5 + 11
|
|
|
|
__ add(r0, r0, Operand(11));
|
|
|
|
|
|
|
|
__ ldm(ia_w, sp, r4.bit() | r5.bit() | pc.bit());
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, code_object);
|
2013-09-12 10:37:42 +00:00
|
|
|
F1 f = FUNCTION_CAST<F1>(code->entry());
|
2015-11-23 08:09:34 +00:00
|
|
|
int res =
|
|
|
|
reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 21, 0, 0, 0, 0));
|
2013-09-12 10:37:42 +00:00
|
|
|
::printf("f() = %d\n", res);
|
|
|
|
CHECK_EQ(42, res);
|
|
|
|
}
|
|
|
|
|
2016-03-10 11:38:17 +00:00
|
|
|
TEST(msr_mrs) {
|
|
|
|
// Test msr and mrs.
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
|
|
|
|
// Create a helper function:
|
|
|
|
// void TestMsrMrs(uint32_t nzcv,
|
|
|
|
// uint32_t * result_conditionals,
|
|
|
|
// uint32_t * result_mrs);
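  // Writing CPSR_f updates only the N, Z, C and V flag bits; the rest of the
  // CPSR is left unchanged.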
|
|
|
|
__ msr(CPSR_f, Operand(r0));
|
|
|
|
|
|
|
|
// Test that the condition flags have taken effect.
|
|
|
|
__ mov(r3, Operand(0));
|
|
|
|
__ orr(r3, r3, Operand(1 << 31), LeaveCC, mi); // N
|
|
|
|
__ orr(r3, r3, Operand(1 << 30), LeaveCC, eq); // Z
|
|
|
|
__ orr(r3, r3, Operand(1 << 29), LeaveCC, cs); // C
|
|
|
|
__ orr(r3, r3, Operand(1 << 28), LeaveCC, vs); // V
|
|
|
|
__ str(r3, MemOperand(r1));
|
|
|
|
|
|
|
|
// Also check mrs, ignoring everything other than the flags.
|
|
|
|
__ mrs(r3, CPSR);
|
|
|
|
__ and_(r3, r3, Operand(kSpecialCondition));
|
|
|
|
__ str(r3, MemOperand(r2));
|
|
|
|
|
|
|
|
__ bx(lr);
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2016-03-10 11:38:17 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
OFStream os(stdout);
|
|
|
|
code->Print(os);
|
|
|
|
#endif
|
|
|
|
F5 f = FUNCTION_CAST<F5>(code->entry());
|
|
|
|
Object* dummy = nullptr;
|
|
|
|
USE(dummy);
|
|
|
|
|
|
|
|
#define CHECK_MSR_MRS(n, z, c, v) \
|
|
|
|
do { \
|
|
|
|
uint32_t nzcv = (n << 31) | (z << 30) | (c << 29) | (v << 28); \
|
|
|
|
uint32_t result_conditionals = -1; \
|
|
|
|
uint32_t result_mrs = -1; \
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, nzcv, &result_conditionals, \
|
|
|
|
&result_mrs, 0, 0); \
|
|
|
|
CHECK_EQ(nzcv, result_conditionals); \
|
|
|
|
CHECK_EQ(nzcv, result_mrs); \
|
|
|
|
} while (0);
|
|
|
|
|
|
|
|
// N Z C V
|
|
|
|
CHECK_MSR_MRS(0, 0, 0, 0);
|
|
|
|
CHECK_MSR_MRS(0, 0, 0, 1);
|
|
|
|
CHECK_MSR_MRS(0, 0, 1, 0);
|
|
|
|
CHECK_MSR_MRS(0, 0, 1, 1);
|
|
|
|
CHECK_MSR_MRS(0, 1, 0, 0);
|
|
|
|
CHECK_MSR_MRS(0, 1, 0, 1);
|
|
|
|
CHECK_MSR_MRS(0, 1, 1, 0);
|
|
|
|
CHECK_MSR_MRS(0, 1, 1, 1);
|
|
|
|
CHECK_MSR_MRS(1, 0, 0, 0);
|
|
|
|
CHECK_MSR_MRS(1, 0, 0, 1);
|
|
|
|
CHECK_MSR_MRS(1, 0, 1, 0);
|
|
|
|
CHECK_MSR_MRS(1, 0, 1, 1);
|
|
|
|
CHECK_MSR_MRS(1, 1, 0, 0);
|
|
|
|
CHECK_MSR_MRS(1, 1, 0, 1);
|
|
|
|
CHECK_MSR_MRS(1, 1, 1, 0);
|
|
|
|
CHECK_MSR_MRS(1, 1, 1, 1);
|
|
|
|
|
|
|
|
#undef CHECK_MSR_MRS
|
|
|
|
}
|
2014-10-30 11:00:47 +00:00
|
|
|
|
2015-11-25 19:28:42 +00:00
|
|
|
TEST(ARMv8_float32_vrintX) {
|
|
|
|
// Test the vrintX floating point instructions.
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
float input;
|
|
|
|
float ar;
|
|
|
|
float nr;
|
|
|
|
float mr;
|
|
|
|
float pr;
|
|
|
|
float zr;
|
|
|
|
} T;
|
|
|
|
T t;
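  // vrinta rounds to nearest with ties away from zero, vrintn to nearest even,
  // vrintp towards +infinity, vrintm towards -infinity and vrintz towards zero.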
|
|
|
|
|
|
|
|
// Create a function that accepts &t, and loads, manipulates, and stores
|
|
|
|
// the floats.
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
Label L, C;
|
|
|
|
|
|
|
|
|
|
|
|
if (CpuFeatures::IsSupported(ARMv8)) {
|
|
|
|
CpuFeatureScope scope(&assm, ARMv8);
|
|
|
|
|
|
|
|
__ mov(ip, Operand(sp));
|
|
|
|
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
|
|
|
|
|
|
|
|
__ mov(r4, Operand(r0));
|
|
|
|
|
|
|
|
// Test vrinta
|
|
|
|
__ vldr(s6, r4, offsetof(T, input));
|
|
|
|
__ vrinta(s5, s6);
|
|
|
|
__ vstr(s5, r4, offsetof(T, ar));
|
|
|
|
|
|
|
|
// Test vrintn
|
|
|
|
__ vldr(s6, r4, offsetof(T, input));
|
|
|
|
__ vrintn(s5, s6);
|
|
|
|
__ vstr(s5, r4, offsetof(T, nr));
|
|
|
|
|
|
|
|
// Test vrintp
|
|
|
|
__ vldr(s6, r4, offsetof(T, input));
|
|
|
|
__ vrintp(s5, s6);
|
|
|
|
__ vstr(s5, r4, offsetof(T, pr));
|
|
|
|
|
|
|
|
// Test vrintm
|
|
|
|
__ vldr(s6, r4, offsetof(T, input));
|
|
|
|
__ vrintm(s5, s6);
|
|
|
|
__ vstr(s5, r4, offsetof(T, mr));
|
|
|
|
|
|
|
|
// Test vrintz
|
|
|
|
__ vldr(s6, r4, offsetof(T, input));
|
|
|
|
__ vrintz(s5, s6);
|
|
|
|
__ vstr(s5, r4, offsetof(T, zr));
|
|
|
|
|
|
|
|
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2015-11-25 19:28:42 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
OFStream os(stdout);
|
|
|
|
code->Print(os);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
|
|
|
|
Object* dummy = nullptr;
|
|
|
|
USE(dummy);
|
|
|
|
|
|
|
|
#define CHECK_VRINT(input_val, ares, nres, mres, pres, zres) \
|
|
|
|
t.input = input_val; \
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
|
|
|
|
CHECK_EQ(ares, t.ar); \
|
|
|
|
CHECK_EQ(nres, t.nr); \
|
|
|
|
CHECK_EQ(mres, t.mr); \
|
|
|
|
CHECK_EQ(pres, t.pr); \
|
|
|
|
CHECK_EQ(zres, t.zr);
|
|
|
|
|
|
|
|
CHECK_VRINT(-0.5, -1.0, -0.0, -1.0, -0.0, -0.0)
|
|
|
|
CHECK_VRINT(-0.6, -1.0, -1.0, -1.0, -0.0, -0.0)
|
|
|
|
CHECK_VRINT(-1.1, -1.0, -1.0, -2.0, -1.0, -1.0)
|
|
|
|
CHECK_VRINT(0.5, 1.0, 0.0, 0.0, 1.0, 0.0)
|
|
|
|
CHECK_VRINT(0.6, 1.0, 1.0, 0.0, 1.0, 0.0)
|
|
|
|
CHECK_VRINT(1.1, 1.0, 1.0, 1.0, 2.0, 1.0)
|
|
|
|
float inf = std::numeric_limits<float>::infinity();
|
|
|
|
CHECK_VRINT(inf, inf, inf, inf, inf, inf)
|
|
|
|
CHECK_VRINT(-inf, -inf, -inf, -inf, -inf, -inf)
|
|
|
|
CHECK_VRINT(-0.0, -0.0, -0.0, -0.0, -0.0, -0.0)
|
|
|
|
|
|
|
|
// Check NaN propagation.
|
|
|
|
float nan = std::numeric_limits<float>::quiet_NaN();
|
|
|
|
t.input = nan;
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
|
|
|
|
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.ar));
|
|
|
|
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.nr));
|
|
|
|
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.mr));
|
|
|
|
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.pr));
|
|
|
|
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.zr));
|
|
|
|
|
|
|
|
#undef CHECK_VRINT
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-10-30 11:00:47 +00:00
|
|
|
TEST(ARMv8_vrintX) {
|
|
|
|
// Test the vrintX floating point instructions.
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
double input;
|
|
|
|
double ar;
|
|
|
|
double nr;
|
|
|
|
double mr;
|
|
|
|
double pr;
|
|
|
|
double zr;
|
|
|
|
} T;
|
|
|
|
T t;
|
|
|
|
|
|
|
|
// Create a function that accepts &t, and loads, manipulates, and stores
|
|
|
|
// the doubles and floats.
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
Label L, C;
|
|
|
|
|
|
|
|
|
|
|
|
if (CpuFeatures::IsSupported(ARMv8)) {
|
|
|
|
CpuFeatureScope scope(&assm, ARMv8);
|
|
|
|
|
|
|
|
__ mov(ip, Operand(sp));
|
|
|
|
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
|
|
|
|
|
|
|
|
__ mov(r4, Operand(r0));
|
|
|
|
|
|
|
|
// Test vrinta
|
2015-06-17 09:06:44 +00:00
|
|
|
__ vldr(d6, r4, offsetof(T, input));
|
2014-10-30 11:00:47 +00:00
|
|
|
__ vrinta(d5, d6);
|
2015-06-17 09:06:44 +00:00
|
|
|
__ vstr(d5, r4, offsetof(T, ar));
|
2014-10-30 11:00:47 +00:00
|
|
|
|
|
|
|
// Test vrintn
|
2015-06-17 09:06:44 +00:00
|
|
|
__ vldr(d6, r4, offsetof(T, input));
|
2014-10-30 11:00:47 +00:00
|
|
|
__ vrintn(d5, d6);
|
2015-06-17 09:06:44 +00:00
|
|
|
__ vstr(d5, r4, offsetof(T, nr));
|
2014-10-30 11:00:47 +00:00
|
|
|
|
|
|
|
// Test vrintp
|
2015-06-17 09:06:44 +00:00
|
|
|
__ vldr(d6, r4, offsetof(T, input));
|
2014-10-30 11:00:47 +00:00
|
|
|
__ vrintp(d5, d6);
|
2015-06-17 09:06:44 +00:00
|
|
|
__ vstr(d5, r4, offsetof(T, pr));
|
2014-10-30 11:00:47 +00:00
|
|
|
|
|
|
|
// Test vrintm
|
2015-06-17 09:06:44 +00:00
|
|
|
__ vldr(d6, r4, offsetof(T, input));
|
2014-10-30 11:00:47 +00:00
|
|
|
__ vrintm(d5, d6);
|
2015-06-17 09:06:44 +00:00
|
|
|
__ vstr(d5, r4, offsetof(T, mr));
|
2014-10-30 11:00:47 +00:00
|
|
|
|
|
|
|
// Test vrintz
|
2015-06-17 09:06:44 +00:00
|
|
|
__ vldr(d6, r4, offsetof(T, input));
|
2014-10-30 11:00:47 +00:00
|
|
|
__ vrintz(d5, d6);
|
2015-06-17 09:06:44 +00:00
|
|
|
__ vstr(d5, r4, offsetof(T, zr));
|
2014-10-30 11:00:47 +00:00
|
|
|
|
|
|
|
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2014-10-30 11:00:47 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
OFStream os(stdout);
|
|
|
|
code->Print(os);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
|
|
|
|
Object* dummy = nullptr;
|
|
|
|
USE(dummy);
|
|
|
|
|
|
|
|
#define CHECK_VRINT(input_val, ares, nres, mres, pres, zres) \
|
|
|
|
t.input = input_val; \
|
2015-11-23 08:09:34 +00:00
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
|
2014-10-30 11:00:47 +00:00
|
|
|
CHECK_EQ(ares, t.ar); \
|
|
|
|
CHECK_EQ(nres, t.nr); \
|
|
|
|
CHECK_EQ(mres, t.mr); \
|
|
|
|
CHECK_EQ(pres, t.pr); \
|
|
|
|
CHECK_EQ(zres, t.zr);
|
|
|
|
|
|
|
|
CHECK_VRINT(-0.5, -1.0, -0.0, -1.0, -0.0, -0.0)
|
|
|
|
CHECK_VRINT(-0.6, -1.0, -1.0, -1.0, -0.0, -0.0)
|
|
|
|
CHECK_VRINT(-1.1, -1.0, -1.0, -2.0, -1.0, -1.0)
|
|
|
|
CHECK_VRINT(0.5, 1.0, 0.0, 0.0, 1.0, 0.0)
|
|
|
|
CHECK_VRINT(0.6, 1.0, 1.0, 0.0, 1.0, 0.0)
|
|
|
|
CHECK_VRINT(1.1, 1.0, 1.0, 1.0, 2.0, 1.0)
|
|
|
|
double inf = std::numeric_limits<double>::infinity();
|
|
|
|
CHECK_VRINT(inf, inf, inf, inf, inf, inf)
|
|
|
|
CHECK_VRINT(-inf, -inf, -inf, -inf, -inf, -inf)
|
|
|
|
CHECK_VRINT(-0.0, -0.0, -0.0, -0.0, -0.0, -0.0)
|
2015-10-30 14:10:23 +00:00
|
|
|
|
|
|
|
// Check NaN propagation.
|
2014-10-30 11:00:47 +00:00
|
|
|
double nan = std::numeric_limits<double>::quiet_NaN();
|
2015-10-30 14:10:23 +00:00
|
|
|
t.input = nan;
|
2015-11-23 08:09:34 +00:00
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
|
2015-10-30 14:10:23 +00:00
|
|
|
CHECK_EQ(bit_cast<int64_t>(nan), bit_cast<int64_t>(t.ar));
|
|
|
|
CHECK_EQ(bit_cast<int64_t>(nan), bit_cast<int64_t>(t.nr));
|
|
|
|
CHECK_EQ(bit_cast<int64_t>(nan), bit_cast<int64_t>(t.mr));
|
|
|
|
CHECK_EQ(bit_cast<int64_t>(nan), bit_cast<int64_t>(t.pr));
|
|
|
|
CHECK_EQ(bit_cast<int64_t>(nan), bit_cast<int64_t>(t.zr));
|
2014-10-30 11:00:47 +00:00
|
|
|
|
|
|
|
#undef CHECK_VRINT
|
|
|
|
}
|
|
|
|
}
|
2015-07-09 09:33:17 +00:00
|
|
|
|
2016-04-06 10:17:14 +00:00
|
|
|
TEST(ARMv8_vsel) {
|
|
|
|
// Test the vsel floating point instructions.
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
|
|
|
|
// Used to indicate whether a condition passed or failed.
|
|
|
|
static constexpr float kResultPass = 1.0f;
|
|
|
|
static constexpr float kResultFail = -kResultPass;
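  // vsel copies the first input when the condition holds and the second input
  // otherwise, so a passing condition should produce kResultPass.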
|
|
|
|
|
|
|
|
struct ResultsF32 {
|
|
|
|
float vseleq_;
|
|
|
|
float vselge_;
|
|
|
|
float vselgt_;
|
|
|
|
float vselvs_;
|
|
|
|
|
|
|
|
// The following conditions aren't architecturally supported, but the
|
|
|
|
// assembler implements them by swapping the inputs.
|
|
|
|
float vselne_;
|
|
|
|
float vsellt_;
|
|
|
|
float vselle_;
|
|
|
|
float vselvc_;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct ResultsF64 {
|
|
|
|
double vseleq_;
|
|
|
|
double vselge_;
|
|
|
|
double vselgt_;
|
|
|
|
double vselvs_;
|
|
|
|
|
|
|
|
// The following conditions aren't architecturally supported, but the
|
|
|
|
// assembler implements them by swapping the inputs.
|
|
|
|
double vselne_;
|
|
|
|
double vsellt_;
|
|
|
|
double vselle_;
|
|
|
|
double vselvc_;
|
|
|
|
};
|
|
|
|
|
|
|
|
if (CpuFeatures::IsSupported(ARMv8)) {
|
|
|
|
CpuFeatureScope scope(&assm, ARMv8);
|
|
|
|
|
|
|
|
// Create a helper function:
|
|
|
|
// void TestVsel(uint32_t nzcv,
|
|
|
|
// ResultsF32* results_f32,
|
|
|
|
// ResultsF64* results_f64);
|
|
|
|
__ msr(CPSR_f, Operand(r0));
|
|
|
|
|
2017-07-07 11:32:01 +00:00
|
|
|
__ vmov(s1, Float32(kResultPass));
|
|
|
|
__ vmov(s2, Float32(kResultFail));
|
2016-04-06 10:17:14 +00:00
|
|
|
|
|
|
|
__ vsel(eq, s0, s1, s2);
|
|
|
|
__ vstr(s0, r1, offsetof(ResultsF32, vseleq_));
|
|
|
|
__ vsel(ge, s0, s1, s2);
|
|
|
|
__ vstr(s0, r1, offsetof(ResultsF32, vselge_));
|
|
|
|
__ vsel(gt, s0, s1, s2);
|
|
|
|
__ vstr(s0, r1, offsetof(ResultsF32, vselgt_));
|
|
|
|
__ vsel(vs, s0, s1, s2);
|
|
|
|
__ vstr(s0, r1, offsetof(ResultsF32, vselvs_));
|
|
|
|
|
|
|
|
__ vsel(ne, s0, s1, s2);
|
|
|
|
__ vstr(s0, r1, offsetof(ResultsF32, vselne_));
|
|
|
|
__ vsel(lt, s0, s1, s2);
|
|
|
|
__ vstr(s0, r1, offsetof(ResultsF32, vsellt_));
|
|
|
|
__ vsel(le, s0, s1, s2);
|
|
|
|
__ vstr(s0, r1, offsetof(ResultsF32, vselle_));
|
|
|
|
__ vsel(vc, s0, s1, s2);
|
|
|
|
__ vstr(s0, r1, offsetof(ResultsF32, vselvc_));
|
|
|
|
|
2017-07-05 13:48:12 +00:00
|
|
|
__ vmov(d1, Double(kResultPass));
|
|
|
|
__ vmov(d2, Double(kResultFail));
|
2016-04-06 10:17:14 +00:00
|
|
|
|
|
|
|
__ vsel(eq, d0, d1, d2);
|
|
|
|
__ vstr(d0, r2, offsetof(ResultsF64, vseleq_));
|
|
|
|
__ vsel(ge, d0, d1, d2);
|
|
|
|
__ vstr(d0, r2, offsetof(ResultsF64, vselge_));
|
|
|
|
__ vsel(gt, d0, d1, d2);
|
|
|
|
__ vstr(d0, r2, offsetof(ResultsF64, vselgt_));
|
|
|
|
__ vsel(vs, d0, d1, d2);
|
|
|
|
__ vstr(d0, r2, offsetof(ResultsF64, vselvs_));
|
|
|
|
|
|
|
|
__ vsel(ne, d0, d1, d2);
|
|
|
|
__ vstr(d0, r2, offsetof(ResultsF64, vselne_));
|
|
|
|
__ vsel(lt, d0, d1, d2);
|
|
|
|
__ vstr(d0, r2, offsetof(ResultsF64, vsellt_));
|
|
|
|
__ vsel(le, d0, d1, d2);
|
|
|
|
__ vstr(d0, r2, offsetof(ResultsF64, vselle_));
|
|
|
|
__ vsel(vc, d0, d1, d2);
|
|
|
|
__ vstr(d0, r2, offsetof(ResultsF64, vselvc_));
|
|
|
|
|
|
|
|
__ bx(lr);
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2016-04-06 10:17:14 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
OFStream os(stdout);
|
|
|
|
code->Print(os);
|
|
|
|
#endif
|
|
|
|
F5 f = FUNCTION_CAST<F5>(code->entry());
|
|
|
|
Object* dummy = nullptr;
|
|
|
|
USE(dummy);
|
|
|
|
|
|
|
|
STATIC_ASSERT(kResultPass == -kResultFail);
|
|
|
|
#define CHECK_VSEL(n, z, c, v, vseleq, vselge, vselgt, vselvs) \
|
|
|
|
do { \
|
|
|
|
ResultsF32 results_f32; \
|
|
|
|
ResultsF64 results_f64; \
|
|
|
|
uint32_t nzcv = (n << 31) | (z << 30) | (c << 29) | (v << 28); \
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, nzcv, &results_f32, &results_f64, \
|
|
|
|
0, 0); \
|
|
|
|
CHECK_EQ(vseleq, results_f32.vseleq_); \
|
|
|
|
CHECK_EQ(vselge, results_f32.vselge_); \
|
|
|
|
CHECK_EQ(vselgt, results_f32.vselgt_); \
|
|
|
|
CHECK_EQ(vselvs, results_f32.vselvs_); \
|
|
|
|
CHECK_EQ(-vseleq, results_f32.vselne_); \
|
|
|
|
CHECK_EQ(-vselge, results_f32.vsellt_); \
|
|
|
|
CHECK_EQ(-vselgt, results_f32.vselle_); \
|
|
|
|
CHECK_EQ(-vselvs, results_f32.vselvc_); \
|
|
|
|
CHECK_EQ(vseleq, results_f64.vseleq_); \
|
|
|
|
CHECK_EQ(vselge, results_f64.vselge_); \
|
|
|
|
CHECK_EQ(vselgt, results_f64.vselgt_); \
|
|
|
|
CHECK_EQ(vselvs, results_f64.vselvs_); \
|
|
|
|
CHECK_EQ(-vseleq, results_f64.vselne_); \
|
|
|
|
CHECK_EQ(-vselge, results_f64.vsellt_); \
|
|
|
|
CHECK_EQ(-vselgt, results_f64.vselle_); \
|
|
|
|
CHECK_EQ(-vselvs, results_f64.vselvc_); \
|
|
|
|
} while (0);
|
|
|
|
|
|
|
|
// N Z C V vseleq vselge vselgt vselvs
|
|
|
|
CHECK_VSEL(0, 0, 0, 0, kResultFail, kResultPass, kResultPass, kResultFail);
|
|
|
|
CHECK_VSEL(0, 0, 0, 1, kResultFail, kResultFail, kResultFail, kResultPass);
|
|
|
|
CHECK_VSEL(0, 0, 1, 0, kResultFail, kResultPass, kResultPass, kResultFail);
|
|
|
|
CHECK_VSEL(0, 0, 1, 1, kResultFail, kResultFail, kResultFail, kResultPass);
|
|
|
|
CHECK_VSEL(0, 1, 0, 0, kResultPass, kResultPass, kResultFail, kResultFail);
|
|
|
|
CHECK_VSEL(0, 1, 0, 1, kResultPass, kResultFail, kResultFail, kResultPass);
|
|
|
|
CHECK_VSEL(0, 1, 1, 0, kResultPass, kResultPass, kResultFail, kResultFail);
|
|
|
|
CHECK_VSEL(0, 1, 1, 1, kResultPass, kResultFail, kResultFail, kResultPass);
|
|
|
|
CHECK_VSEL(1, 0, 0, 0, kResultFail, kResultFail, kResultFail, kResultFail);
|
|
|
|
CHECK_VSEL(1, 0, 0, 1, kResultFail, kResultPass, kResultPass, kResultPass);
|
|
|
|
CHECK_VSEL(1, 0, 1, 0, kResultFail, kResultFail, kResultFail, kResultFail);
|
|
|
|
CHECK_VSEL(1, 0, 1, 1, kResultFail, kResultPass, kResultPass, kResultPass);
|
|
|
|
CHECK_VSEL(1, 1, 0, 0, kResultPass, kResultFail, kResultFail, kResultFail);
|
|
|
|
CHECK_VSEL(1, 1, 0, 1, kResultPass, kResultPass, kResultFail, kResultPass);
|
|
|
|
CHECK_VSEL(1, 1, 1, 0, kResultPass, kResultFail, kResultFail, kResultFail);
|
|
|
|
CHECK_VSEL(1, 1, 1, 1, kResultPass, kResultPass, kResultFail, kResultPass);
|
|
|
|
|
|
|
|
#undef CHECK_VSEL
|
|
|
|
}
|
|
|
|
}
|
2015-07-09 09:33:17 +00:00
|
|
|
|
2016-09-06 08:46:17 +00:00
|
|
|
TEST(ARMv8_vminmax_f64) {
|
2016-09-06 11:27:41 +00:00
|
|
|
// Test the vminnm and vmaxnm floating point instructions.
|
2016-09-06 08:46:17 +00:00
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
|
|
|
|
struct Inputs {
|
|
|
|
double left_;
|
|
|
|
double right_;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct Results {
|
|
|
|
double vminnm_;
|
|
|
|
double vmaxnm_;
|
|
|
|
};
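  // vminnm/vmaxnm follow IEEE 754-2008 minNum/maxNum: if exactly one operand
  // is a NaN the numeric operand is returned, and -0.0 compares less than 0.0.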
|
|
|
|
|
|
|
|
if (CpuFeatures::IsSupported(ARMv8)) {
|
|
|
|
CpuFeatureScope scope(&assm, ARMv8);
|
|
|
|
|
|
|
|
// Create a helper function:
|
|
|
|
// void TestVminmax(const Inputs* inputs,
|
|
|
|
// Results* results);
|
|
|
|
__ vldr(d1, r0, offsetof(Inputs, left_));
|
|
|
|
__ vldr(d2, r0, offsetof(Inputs, right_));
|
|
|
|
|
|
|
|
__ vminnm(d0, d1, d2);
|
|
|
|
__ vstr(d0, r1, offsetof(Results, vminnm_));
|
|
|
|
__ vmaxnm(d0, d1, d2);
|
|
|
|
__ vstr(d0, r1, offsetof(Results, vmaxnm_));
|
|
|
|
|
|
|
|
__ bx(lr);
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2016-09-06 08:46:17 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
OFStream os(stdout);
|
|
|
|
code->Print(os);
|
|
|
|
#endif
|
|
|
|
F4 f = FUNCTION_CAST<F4>(code->entry());
|
|
|
|
Object* dummy = nullptr;
|
|
|
|
USE(dummy);
|
|
|
|
|
|
|
|
#define CHECK_VMINMAX(left, right, vminnm, vmaxnm) \
|
|
|
|
do { \
|
|
|
|
Inputs inputs = {left, right}; \
|
|
|
|
Results results; \
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
|
|
|
|
/* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
|
|
|
|
CHECK_EQ(bit_cast<uint64_t>(vminnm), bit_cast<uint64_t>(results.vminnm_)); \
|
|
|
|
CHECK_EQ(bit_cast<uint64_t>(vmaxnm), bit_cast<uint64_t>(results.vmaxnm_)); \
|
|
|
|
} while (0);
|
|
|
|
|
|
|
|
double nan_a = bit_cast<double>(UINT64_C(0x7ff8000000000001));
|
|
|
|
double nan_b = bit_cast<double>(UINT64_C(0x7ff8000000000002));
|
|
|
|
|
|
|
|
CHECK_VMINMAX(1.0, -1.0, -1.0, 1.0);
|
|
|
|
CHECK_VMINMAX(-1.0, 1.0, -1.0, 1.0);
|
|
|
|
CHECK_VMINMAX(0.0, -1.0, -1.0, 0.0);
|
|
|
|
CHECK_VMINMAX(-1.0, 0.0, -1.0, 0.0);
|
|
|
|
CHECK_VMINMAX(-0.0, -1.0, -1.0, -0.0);
|
|
|
|
CHECK_VMINMAX(-1.0, -0.0, -1.0, -0.0);
|
|
|
|
CHECK_VMINMAX(0.0, 1.0, 0.0, 1.0);
|
|
|
|
CHECK_VMINMAX(1.0, 0.0, 0.0, 1.0);
|
|
|
|
|
|
|
|
CHECK_VMINMAX(0.0, 0.0, 0.0, 0.0);
|
|
|
|
CHECK_VMINMAX(-0.0, -0.0, -0.0, -0.0);
|
|
|
|
CHECK_VMINMAX(-0.0, 0.0, -0.0, 0.0);
|
|
|
|
CHECK_VMINMAX(0.0, -0.0, -0.0, 0.0);
|
|
|
|
|
|
|
|
CHECK_VMINMAX(0.0, nan_a, 0.0, 0.0);
|
|
|
|
CHECK_VMINMAX(nan_a, 0.0, 0.0, 0.0);
|
|
|
|
CHECK_VMINMAX(nan_a, nan_b, nan_a, nan_a);
|
|
|
|
CHECK_VMINMAX(nan_b, nan_a, nan_b, nan_b);
|
|
|
|
|
|
|
|
#undef CHECK_VMINMAX
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(ARMv8_vminmax_f32) {
|
2016-09-06 11:27:41 +00:00
|
|
|
// Test the vminnm and vmaxnm floating point instructions.
|
2016-09-06 08:46:17 +00:00
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
|
|
|
|
struct Inputs {
|
|
|
|
float left_;
|
|
|
|
float right_;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct Results {
|
|
|
|
float vminnm_;
|
|
|
|
float vmaxnm_;
|
|
|
|
};
|
|
|
|
|
|
|
|
if (CpuFeatures::IsSupported(ARMv8)) {
|
|
|
|
CpuFeatureScope scope(&assm, ARMv8);
|
|
|
|
|
|
|
|
// Create a helper function:
|
|
|
|
// void TestVminmax(const Inputs* inputs,
|
|
|
|
// Results* results);
|
|
|
|
__ vldr(s1, r0, offsetof(Inputs, left_));
|
|
|
|
__ vldr(s2, r0, offsetof(Inputs, right_));
|
|
|
|
|
|
|
|
__ vminnm(s0, s1, s2);
|
|
|
|
__ vstr(s0, r1, offsetof(Results, vminnm_));
|
|
|
|
__ vmaxnm(s0, s1, s2);
|
|
|
|
__ vstr(s0, r1, offsetof(Results, vmaxnm_));
|
|
|
|
|
|
|
|
__ bx(lr);
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2016-09-06 08:46:17 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
OFStream os(stdout);
|
|
|
|
code->Print(os);
|
|
|
|
#endif
|
|
|
|
F4 f = FUNCTION_CAST<F4>(code->entry());
|
|
|
|
Object* dummy = nullptr;
|
|
|
|
USE(dummy);
|
|
|
|
|
|
|
|
#define CHECK_VMINMAX(left, right, vminnm, vmaxnm) \
|
|
|
|
do { \
|
|
|
|
Inputs inputs = {left, right}; \
|
|
|
|
Results results; \
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
|
|
|
|
/* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
|
|
|
|
CHECK_EQ(bit_cast<uint32_t>(vminnm), bit_cast<uint32_t>(results.vminnm_)); \
|
|
|
|
CHECK_EQ(bit_cast<uint32_t>(vmaxnm), bit_cast<uint32_t>(results.vmaxnm_)); \
|
|
|
|
} while (0);
|
|
|
|
|
|
|
|
float nan_a = bit_cast<float>(UINT32_C(0x7fc00001));
|
|
|
|
float nan_b = bit_cast<float>(UINT32_C(0x7fc00002));
|
|
|
|
|
|
|
|
CHECK_VMINMAX(1.0f, -1.0f, -1.0f, 1.0f);
|
|
|
|
CHECK_VMINMAX(-1.0f, 1.0f, -1.0f, 1.0f);
|
|
|
|
CHECK_VMINMAX(0.0f, -1.0f, -1.0f, 0.0f);
|
|
|
|
CHECK_VMINMAX(-1.0f, 0.0f, -1.0f, 0.0f);
|
|
|
|
CHECK_VMINMAX(-0.0f, -1.0f, -1.0f, -0.0f);
|
|
|
|
CHECK_VMINMAX(-1.0f, -0.0f, -1.0f, -0.0f);
|
|
|
|
CHECK_VMINMAX(0.0f, 1.0f, 0.0f, 1.0f);
|
|
|
|
CHECK_VMINMAX(1.0f, 0.0f, 0.0f, 1.0f);
|
|
|
|
|
|
|
|
CHECK_VMINMAX(0.0f, 0.0f, 0.0f, 0.0f);
|
|
|
|
CHECK_VMINMAX(-0.0f, -0.0f, -0.0f, -0.0f);
|
|
|
|
CHECK_VMINMAX(-0.0f, 0.0f, -0.0f, 0.0f);
|
|
|
|
CHECK_VMINMAX(0.0f, -0.0f, -0.0f, 0.0f);
|
|
|
|
|
|
|
|
CHECK_VMINMAX(0.0f, nan_a, 0.0f, 0.0f);
|
|
|
|
CHECK_VMINMAX(nan_a, 0.0f, 0.0f, 0.0f);
|
|
|
|
CHECK_VMINMAX(nan_a, nan_b, nan_a, nan_a);
|
|
|
|
CHECK_VMINMAX(nan_b, nan_a, nan_b, nan_b);
|
|
|
|
|
|
|
|
#undef CHECK_VMINMAX
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-06 11:27:41 +00:00
|
|
|
template <typename T, typename Inputs, typename Results>
|
|
|
|
static F4 GenerateMacroFloatMinMax(MacroAssembler& assm) {
|
|
|
|
T a = T::from_code(0); // d0/s0
|
|
|
|
T b = T::from_code(1); // d1/s1
|
|
|
|
T c = T::from_code(2); // d2/s2
|
|
|
|
|
|
|
|
// Create a helper function:
|
|
|
|
// void TestFloatMinMax(const Inputs* inputs,
|
|
|
|
// Results* results);
|
|
|
|
Label ool_min_abc, ool_min_aab, ool_min_aba;
|
|
|
|
Label ool_max_abc, ool_max_aab, ool_max_aba;
|
|
|
|
|
|
|
|
Label done_min_abc, done_min_aab, done_min_aba;
|
|
|
|
Label done_max_abc, done_max_aab, done_max_aba;
|
|
|
|
|
|
|
|
// a = min(b, c);
|
|
|
|
__ vldr(b, r0, offsetof(Inputs, left_));
|
|
|
|
__ vldr(c, r0, offsetof(Inputs, right_));
|
|
|
|
__ FloatMin(a, b, c, &ool_min_abc);
|
|
|
|
__ bind(&done_min_abc);
|
|
|
|
__ vstr(a, r1, offsetof(Results, min_abc_));
|
|
|
|
|
|
|
|
// a = min(a, b);
|
|
|
|
__ vldr(a, r0, offsetof(Inputs, left_));
|
|
|
|
__ vldr(b, r0, offsetof(Inputs, right_));
|
|
|
|
__ FloatMin(a, a, b, &ool_min_aab);
|
|
|
|
__ bind(&done_min_aab);
|
|
|
|
__ vstr(a, r1, offsetof(Results, min_aab_));
|
|
|
|
|
|
|
|
// a = min(b, a);
|
|
|
|
__ vldr(b, r0, offsetof(Inputs, left_));
|
|
|
|
__ vldr(a, r0, offsetof(Inputs, right_));
|
|
|
|
__ FloatMin(a, b, a, &ool_min_aba);
|
|
|
|
__ bind(&done_min_aba);
|
|
|
|
__ vstr(a, r1, offsetof(Results, min_aba_));
|
|
|
|
|
|
|
|
// a = max(b, c);
|
|
|
|
__ vldr(b, r0, offsetof(Inputs, left_));
|
|
|
|
__ vldr(c, r0, offsetof(Inputs, right_));
|
|
|
|
__ FloatMax(a, b, c, &ool_max_abc);
|
|
|
|
__ bind(&done_max_abc);
|
|
|
|
__ vstr(a, r1, offsetof(Results, max_abc_));
|
|
|
|
|
|
|
|
// a = max(a, b);
|
|
|
|
__ vldr(a, r0, offsetof(Inputs, left_));
|
|
|
|
__ vldr(b, r0, offsetof(Inputs, right_));
|
|
|
|
__ FloatMax(a, a, b, &ool_max_aab);
|
|
|
|
__ bind(&done_max_aab);
|
|
|
|
__ vstr(a, r1, offsetof(Results, max_aab_));
|
|
|
|
|
|
|
|
// a = max(b, a);
|
|
|
|
__ vldr(b, r0, offsetof(Inputs, left_));
|
|
|
|
__ vldr(a, r0, offsetof(Inputs, right_));
|
|
|
|
__ FloatMax(a, b, a, &ool_max_aba);
|
|
|
|
__ bind(&done_max_aba);
|
|
|
|
__ vstr(a, r1, offsetof(Results, max_aba_));
|
|
|
|
|
|
|
|
__ bx(lr);
|
|
|
|
|
|
|
|
// Generate out-of-line cases.
|
|
|
|
__ bind(&ool_min_abc);
|
|
|
|
__ FloatMinOutOfLine(a, b, c);
|
|
|
|
__ b(&done_min_abc);
|
|
|
|
|
|
|
|
__ bind(&ool_min_aab);
|
|
|
|
__ FloatMinOutOfLine(a, a, b);
|
|
|
|
__ b(&done_min_aab);
|
|
|
|
|
|
|
|
__ bind(&ool_min_aba);
|
|
|
|
__ FloatMinOutOfLine(a, b, a);
|
|
|
|
__ b(&done_min_aba);
|
|
|
|
|
|
|
|
__ bind(&ool_max_abc);
|
|
|
|
__ FloatMaxOutOfLine(a, b, c);
|
|
|
|
__ b(&done_max_abc);
|
|
|
|
|
|
|
|
__ bind(&ool_max_aab);
|
|
|
|
__ FloatMaxOutOfLine(a, a, b);
|
|
|
|
__ b(&done_max_aab);
|
|
|
|
|
|
|
|
__ bind(&ool_max_aba);
|
|
|
|
__ FloatMaxOutOfLine(a, b, a);
|
|
|
|
__ b(&done_max_aba);
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(assm.isolate(), &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
assm.isolate()->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2016-09-06 11:27:41 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
OFStream os(stdout);
|
|
|
|
code->Print(os);
|
|
|
|
#endif
|
|
|
|
return FUNCTION_CAST<F4>(code->entry());
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(macro_float_minmax_f64) {
|
|
|
|
// Test the FloatMin and FloatMax macros.
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
MacroAssembler assm(isolate, NULL, 0, CodeObjectRequired::kYes);
|
|
|
|
|
|
|
|
struct Inputs {
|
|
|
|
double left_;
|
|
|
|
double right_;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct Results {
|
|
|
|
// Check all register aliasing possibilities in order to exercise all
|
|
|
|
// code-paths in the macro assembler.
|
|
|
|
double min_abc_;
|
|
|
|
double min_aab_;
|
|
|
|
double min_aba_;
|
|
|
|
double max_abc_;
|
|
|
|
double max_aab_;
|
|
|
|
double max_aba_;
|
|
|
|
};
|
|
|
|
|
|
|
|
F4 f = GenerateMacroFloatMinMax<DwVfpRegister, Inputs, Results>(assm);
|
|
|
|
|
|
|
|
Object* dummy = nullptr;
|
|
|
|
USE(dummy);
|
|
|
|
|
|
|
|
#define CHECK_MINMAX(left, right, min, max) \
|
|
|
|
do { \
|
|
|
|
Inputs inputs = {left, right}; \
|
|
|
|
Results results; \
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
|
|
|
|
/* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
|
|
|
|
CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_abc_)); \
|
|
|
|
CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aab_)); \
|
|
|
|
CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aba_)); \
|
|
|
|
CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_abc_)); \
|
|
|
|
CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aab_)); \
|
|
|
|
CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aba_)); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
double nan_a = bit_cast<double>(UINT64_C(0x7ff8000000000001));
|
|
|
|
double nan_b = bit_cast<double>(UINT64_C(0x7ff8000000000002));
|
|
|
|
|
|
|
|
CHECK_MINMAX(1.0, -1.0, -1.0, 1.0);
|
|
|
|
CHECK_MINMAX(-1.0, 1.0, -1.0, 1.0);
|
|
|
|
CHECK_MINMAX(0.0, -1.0, -1.0, 0.0);
|
|
|
|
CHECK_MINMAX(-1.0, 0.0, -1.0, 0.0);
|
|
|
|
CHECK_MINMAX(-0.0, -1.0, -1.0, -0.0);
|
|
|
|
CHECK_MINMAX(-1.0, -0.0, -1.0, -0.0);
|
|
|
|
CHECK_MINMAX(0.0, 1.0, 0.0, 1.0);
|
|
|
|
CHECK_MINMAX(1.0, 0.0, 0.0, 1.0);
|
|
|
|
|
|
|
|
CHECK_MINMAX(0.0, 0.0, 0.0, 0.0);
|
|
|
|
CHECK_MINMAX(-0.0, -0.0, -0.0, -0.0);
|
|
|
|
CHECK_MINMAX(-0.0, 0.0, -0.0, 0.0);
|
|
|
|
CHECK_MINMAX(0.0, -0.0, -0.0, 0.0);
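  // Unlike vminnm/vmaxnm, the FloatMin/FloatMax macros propagate NaN operands.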
|
|
|
|
|
|
|
|
CHECK_MINMAX(0.0, nan_a, nan_a, nan_a);
|
|
|
|
CHECK_MINMAX(nan_a, 0.0, nan_a, nan_a);
|
|
|
|
CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a);
|
|
|
|
CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b);
|
|
|
|
|
|
|
|
#undef CHECK_MINMAX
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(macro_float_minmax_f32) {
|
|
|
|
// Test the FloatMin and FloatMax macros.
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
MacroAssembler assm(isolate, NULL, 0, CodeObjectRequired::kYes);
|
|
|
|
|
|
|
|
struct Inputs {
|
|
|
|
float left_;
|
|
|
|
float right_;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct Results {
|
|
|
|
// Check all register aliasing possibilities in order to exercise all
|
|
|
|
// code-paths in the macro assembler.
|
|
|
|
float min_abc_;
|
|
|
|
float min_aab_;
|
|
|
|
float min_aba_;
|
|
|
|
float max_abc_;
|
|
|
|
float max_aab_;
|
|
|
|
float max_aba_;
|
|
|
|
};
|
|
|
|
|
|
|
|
F4 f = GenerateMacroFloatMinMax<SwVfpRegister, Inputs, Results>(assm);
|
|
|
|
Object* dummy = nullptr;
|
|
|
|
USE(dummy);
|
|
|
|
|
|
|
|
#define CHECK_MINMAX(left, right, min, max) \
|
|
|
|
do { \
|
|
|
|
Inputs inputs = {left, right}; \
|
|
|
|
Results results; \
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
|
|
|
|
/* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
|
|
|
|
CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_abc_)); \
|
|
|
|
CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aab_)); \
|
|
|
|
CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aba_)); \
|
|
|
|
CHECK_EQ(bit_cast<uint32_t>(max), bit_cast<uint32_t>(results.max_abc_)); \
|
|
|
|
CHECK_EQ(bit_cast<uint32_t>(max), bit_cast<uint32_t>(results.max_aab_)); \
|
|
|
|
CHECK_EQ(bit_cast<uint32_t>(max), bit_cast<uint32_t>(results.max_aba_)); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
float nan_a = bit_cast<float>(UINT32_C(0x7fc00001));
|
|
|
|
float nan_b = bit_cast<float>(UINT32_C(0x7fc00002));
|
|
|
|
|
|
|
|
CHECK_MINMAX(1.0f, -1.0f, -1.0f, 1.0f);
|
|
|
|
CHECK_MINMAX(-1.0f, 1.0f, -1.0f, 1.0f);
|
|
|
|
CHECK_MINMAX(0.0f, -1.0f, -1.0f, 0.0f);
|
|
|
|
CHECK_MINMAX(-1.0f, 0.0f, -1.0f, 0.0f);
|
|
|
|
CHECK_MINMAX(-0.0f, -1.0f, -1.0f, -0.0f);
|
|
|
|
CHECK_MINMAX(-1.0f, -0.0f, -1.0f, -0.0f);
|
|
|
|
CHECK_MINMAX(0.0f, 1.0f, 0.0f, 1.0f);
|
|
|
|
CHECK_MINMAX(1.0f, 0.0f, 0.0f, 1.0f);
|
|
|
|
|
|
|
|
CHECK_MINMAX(0.0f, 0.0f, 0.0f, 0.0f);
|
|
|
|
CHECK_MINMAX(-0.0f, -0.0f, -0.0f, -0.0f);
|
|
|
|
CHECK_MINMAX(-0.0f, 0.0f, -0.0f, 0.0f);
|
|
|
|
CHECK_MINMAX(0.0f, -0.0f, -0.0f, 0.0f);
|
|
|
|
|
|
|
|
CHECK_MINMAX(0.0f, nan_a, nan_a, nan_a);
|
|
|
|
CHECK_MINMAX(nan_a, 0.0f, nan_a, nan_a);
|
|
|
|
CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a);
|
|
|
|
CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b);
|
|
|
|
|
|
|
|
#undef CHECK_MINMAX
|
|
|
|
}
|
|
|
|
|
2016-07-27 13:24:28 +00:00
|
|
|
TEST(unaligned_loads) {
|
|
|
|
// All supported ARM targets allow unaligned accesses.
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
uint32_t ldrh;
|
|
|
|
uint32_t ldrsh;
|
|
|
|
uint32_t ldr;
|
|
|
|
} T;
|
|
|
|
T t;
|
|
|
|
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
__ ldrh(ip, MemOperand(r1, r2));
|
|
|
|
__ str(ip, MemOperand(r0, offsetof(T, ldrh)));
|
|
|
|
__ ldrsh(ip, MemOperand(r1, r2));
|
|
|
|
__ str(ip, MemOperand(r0, offsetof(T, ldrsh)));
|
|
|
|
__ ldr(ip, MemOperand(r1, r2));
|
|
|
|
__ str(ip, MemOperand(r0, offsetof(T, ldr)));
|
|
|
|
__ bx(lr);
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2016-07-27 13:24:28 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
OFStream os(stdout);
|
|
|
|
code->Print(os);
|
|
|
|
#endif
|
|
|
|
F4 f = FUNCTION_CAST<F4>(code->entry());
|
|
|
|
|
|
|
|
Object* dummy = nullptr;
|
|
|
|
USE(dummy);
|
|
|
|
|
|
|
|
#ifndef V8_TARGET_LITTLE_ENDIAN
|
|
|
|
#error This test assumes a little-endian layout.
|
|
|
|
#endif
|
|
|
|
uint64_t data = UINT64_C(0x84838281807f7e7d);
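  // In the little-endian buffer the byte at offset n is 0x7d + n, so each call
  // below reads from the given unaligned offset into that pattern.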
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 0, 0, 0);
|
2016-11-11 12:00:34 +00:00
|
|
|
CHECK_EQ(0x00007e7du, t.ldrh);
|
|
|
|
CHECK_EQ(0x00007e7du, t.ldrsh);
|
|
|
|
CHECK_EQ(0x807f7e7du, t.ldr);
|
2016-07-27 13:24:28 +00:00
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 1, 0, 0);
|
2016-11-11 12:00:34 +00:00
|
|
|
CHECK_EQ(0x00007f7eu, t.ldrh);
|
|
|
|
CHECK_EQ(0x00007f7eu, t.ldrsh);
|
|
|
|
CHECK_EQ(0x81807f7eu, t.ldr);
|
2016-07-27 13:24:28 +00:00
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 2, 0, 0);
|
2016-11-11 12:00:34 +00:00
|
|
|
CHECK_EQ(0x0000807fu, t.ldrh);
|
|
|
|
CHECK_EQ(0xffff807fu, t.ldrsh);
|
|
|
|
CHECK_EQ(0x8281807fu, t.ldr);
|
2016-07-27 13:24:28 +00:00
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 3, 0, 0);
|
2016-11-11 12:00:34 +00:00
|
|
|
CHECK_EQ(0x00008180u, t.ldrh);
|
|
|
|
CHECK_EQ(0xffff8180u, t.ldrsh);
|
|
|
|
CHECK_EQ(0x83828180u, t.ldr);
|
2016-07-27 13:24:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
TEST(unaligned_stores) {
|
|
|
|
// All supported ARM targets allow unaligned accesses.
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
__ strh(r3, MemOperand(r0, r2));
|
|
|
|
__ str(r3, MemOperand(r1, r2));
|
|
|
|
__ bx(lr);
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2016-07-27 13:24:28 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
OFStream os(stdout);
|
|
|
|
code->Print(os);
|
|
|
|
#endif
|
|
|
|
F4 f = FUNCTION_CAST<F4>(code->entry());
|
|
|
|
|
|
|
|
Object* dummy = nullptr;
|
|
|
|
USE(dummy);
|
|
|
|
|
|
|
|
#ifndef V8_TARGET_LITTLE_ENDIAN
|
|
|
|
#error This test assumes a little-endian layout.
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
uint64_t strh = 0;
|
|
|
|
uint64_t str = 0;
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 0, 0xfedcba98, 0);
|
|
|
|
CHECK_EQ(UINT64_C(0x000000000000ba98), strh);
|
|
|
|
CHECK_EQ(UINT64_C(0x00000000fedcba98), str);
|
|
|
|
}
|
|
|
|
{
|
|
|
|
uint64_t strh = 0;
|
|
|
|
uint64_t str = 0;
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 1, 0xfedcba98, 0);
|
|
|
|
CHECK_EQ(UINT64_C(0x0000000000ba9800), strh);
|
|
|
|
CHECK_EQ(UINT64_C(0x000000fedcba9800), str);
|
|
|
|
}
|
|
|
|
{
|
|
|
|
uint64_t strh = 0;
|
|
|
|
uint64_t str = 0;
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 2, 0xfedcba98, 0);
|
|
|
|
CHECK_EQ(UINT64_C(0x00000000ba980000), strh);
|
|
|
|
CHECK_EQ(UINT64_C(0x0000fedcba980000), str);
|
|
|
|
}
|
|
|
|
{
|
|
|
|
uint64_t strh = 0;
|
|
|
|
uint64_t str = 0;
|
|
|
|
dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 3, 0xfedcba98, 0);
|
|
|
|
CHECK_EQ(UINT64_C(0x000000ba98000000), strh);
|
|
|
|
CHECK_EQ(UINT64_C(0x00fedcba98000000), str);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-08 07:11:32 +00:00
|
|
|
TEST(vswp) {
|
2016-11-25 19:36:45 +00:00
|
|
|
if (!CpuFeatures::IsSupported(NEON)) return;
|
|
|
|
|
2016-09-08 07:11:32 +00:00
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
|
|
|
|
typedef struct {
|
2016-11-30 19:44:45 +00:00
|
|
|
uint64_t vswp_d0;
|
|
|
|
uint64_t vswp_d1;
|
|
|
|
uint64_t vswp_d30;
|
|
|
|
uint64_t vswp_d31;
|
|
|
|
uint32_t vswp_q4[4];
|
|
|
|
uint32_t vswp_q5[4];
|
2016-09-08 07:11:32 +00:00
|
|
|
} T;
|
|
|
|
T t;
|
|
|
|
|
2016-11-30 19:44:45 +00:00
|
|
|
__ stm(db_w, sp, r4.bit() | r5.bit() | r6.bit() | r7.bit() | lr.bit());
|
|
|
|
|
|
|
|
uint64_t one = bit_cast<uint64_t>(1.0);
|
|
|
|
__ mov(r5, Operand(one >> 32));
|
|
|
|
__ mov(r4, Operand(one & 0xffffffff));
|
|
|
|
uint64_t minus_one = bit_cast<uint64_t>(-1.0);
|
|
|
|
__ mov(r7, Operand(minus_one >> 32));
|
|
|
|
__ mov(r6, Operand(minus_one & 0xffffffff));
|
|
|
|
|
|
|
|
__ vmov(d0, r4, r5); // d0 = 1.0
|
|
|
|
__ vmov(d1, r6, r7); // d1 = -1.0
|
2016-09-08 07:11:32 +00:00
|
|
|
__ vswp(d0, d1);
|
2016-11-30 19:44:45 +00:00
|
|
|
__ vstr(d0, r0, offsetof(T, vswp_d0));
|
|
|
|
__ vstr(d1, r0, offsetof(T, vswp_d1));
|
2016-09-08 07:11:32 +00:00
|
|
|
|
|
|
|
if (CpuFeatures::IsSupported(VFP32DREGS)) {
|
2016-11-30 19:44:45 +00:00
|
|
|
__ vmov(d30, r4, r5); // d30 = 1.0
|
|
|
|
__ vmov(d31, r6, r7); // d31 = -1.0
|
2016-09-08 07:11:32 +00:00
|
|
|
__ vswp(d30, d31);
|
2016-11-30 19:44:45 +00:00
|
|
|
__ vstr(d30, r0, offsetof(T, vswp_d30));
|
|
|
|
__ vstr(d31, r0, offsetof(T, vswp_d31));
|
2016-09-08 07:11:32 +00:00
|
|
|
}
|
|
|
|
|
2016-11-25 19:36:45 +00:00
|
|
|
// q-register swap.
|
2016-11-30 19:44:45 +00:00
|
|
|
const uint32_t test_1 = 0x01234567;
|
|
|
|
const uint32_t test_2 = 0x89abcdef;
|
|
|
|
__ mov(r4, Operand(test_1));
|
|
|
|
__ mov(r5, Operand(test_2));
|
2017-05-16 20:37:16 +00:00
|
|
|
__ vdup(Neon32, q4, r4);
|
|
|
|
__ vdup(Neon32, q5, r5);
|
2016-11-25 19:36:45 +00:00
|
|
|
__ vswp(q4, q5);
|
2016-11-30 19:44:45 +00:00
|
|
|
__ add(r6, r0, Operand(static_cast<int32_t>(offsetof(T, vswp_q4))));
|
2016-12-15 18:15:23 +00:00
|
|
|
__ vst1(Neon8, NeonListOperand(q4), NeonMemOperand(r6));
|
2016-11-30 19:44:45 +00:00
|
|
|
__ add(r6, r0, Operand(static_cast<int32_t>(offsetof(T, vswp_q5))));
|
2016-12-15 18:15:23 +00:00
|
|
|
__ vst1(Neon8, NeonListOperand(q5), NeonMemOperand(r6));
|
2016-11-25 19:36:45 +00:00
|
|
|
|
2016-11-30 19:44:45 +00:00
|
|
|
__ ldm(ia_w, sp, r4.bit() | r5.bit() | r6.bit() | r7.bit() | pc.bit());
|
2016-09-08 07:11:32 +00:00
|
|
|
__ bx(lr);
|
|
|
|
|
|
|
|
CodeDesc desc;
|
2017-05-31 14:00:11 +00:00
|
|
|
assm.GetCode(isolate, &desc);
|
2017-09-29 14:59:24 +00:00
|
|
|
Handle<Code> code =
|
|
|
|
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
|
2016-09-08 07:11:32 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
OFStream os(stdout);
|
|
|
|
code->Print(os);
|
|
|
|
#endif
|
|
|
|
F3 f = FUNCTION_CAST<F3>(code->entry());
|
|
|
|
Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
|
|
|
|
USE(dummy);
|
2016-11-30 19:44:45 +00:00
|
|
|
CHECK_EQ(minus_one, t.vswp_d0);
|
|
|
|
CHECK_EQ(one, t.vswp_d1);
|
2016-09-08 07:11:32 +00:00
|
|
|
if (CpuFeatures::IsSupported(VFP32DREGS)) {
|
2016-11-30 19:44:45 +00:00
|
|
|
CHECK_EQ(minus_one, t.vswp_d30);
|
|
|
|
CHECK_EQ(one, t.vswp_d31);
|
2016-09-08 07:11:32 +00:00
|
|
|
}
|
2016-11-30 19:44:45 +00:00
|
|
|
CHECK_EQ(t.vswp_q4[0], test_2);
|
|
|
|
CHECK_EQ(t.vswp_q4[1], test_2);
|
|
|
|
CHECK_EQ(t.vswp_q4[2], test_2);
|
|
|
|
CHECK_EQ(t.vswp_q4[3], test_2);
|
|
|
|
CHECK_EQ(t.vswp_q5[0], test_1);
|
|
|
|
CHECK_EQ(t.vswp_q5[1], test_1);
|
|
|
|
CHECK_EQ(t.vswp_q5[2], test_1);
|
|
|
|
CHECK_EQ(t.vswp_q5[3], test_1);
|
2016-09-08 07:11:32 +00:00
|
|
|
}
|
|
|
|
|
2015-07-09 09:33:17 +00:00
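// Regression tests for issue 4292: emit a constant pool load followed by a
// long run of branches to a distant label, so that constant pool emission has
// to cope with the pending branches.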
TEST(regress4292_b) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);
  Label end;
  __ mov(r0, Operand(isolate->factory()->infinity_value()));
  for (int i = 0; i < 1020; ++i) {
    __ b(hi, &end);
  }
  __ bind(&end);
}
|
|
|
|
|
|
|
|
|
|
|
|
TEST(regress4292_bl) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
Label end;
|
|
|
|
__ mov(r0, Operand(isolate->factory()->infinity_value()));
|
|
|
|
for (int i = 0; i < 1020; ++i) {
|
|
|
|
__ bl(hi, &end);
|
|
|
|
}
|
|
|
|
__ bind(&end);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
TEST(regress4292_blx) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
Label end;
|
|
|
|
__ mov(r0, Operand(isolate->factory()->infinity_value()));
|
|
|
|
for (int i = 0; i < 1020; ++i) {
|
|
|
|
__ blx(&end);
|
|
|
|
}
|
|
|
|
__ bind(&end);
|
|
|
|
}
|
|
|
|
|
2015-07-09 10:32:23 +00:00
|
|
|
|
|
|
|
TEST(regress4292_CheckConstPool) {
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
Isolate* isolate = CcTest::i_isolate();
|
|
|
|
HandleScope scope(isolate);
|
|
|
|
|
|
|
|
Assembler assm(isolate, NULL, 0);
|
|
|
|
__ mov(r0, Operand(isolate->factory()->infinity_value()));
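  // BlockConstPoolFor(1019) keeps the constant pool from being emitted for the
  // next 1019 instructions, pushing the entry created by the mov above towards
  // the limit of its pc-relative range; as with the other regress4292 tests,
  // passing simply means assembly completes without tripping an assert.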
  __ BlockConstPoolFor(1019);
  for (int i = 0; i < 1019; ++i) __ nop();
  __ vldr(d0, MemOperand(r0, 0));
}

TEST(use_scratch_register_scope) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);

  // The assembler should have ip as a scratch by default.
  CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());

  {
    UseScratchRegisterScope temps(&assm);
    CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());
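
    // Acquire() hands ip out and removes it from the scratch list; the scope
    // returns it when it is destroyed, which the checks below and after the
    // block verify.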
    Register scratch = temps.Acquire();
    CHECK_EQ(scratch.code(), ip.code());
    CHECK_EQ(*assm.GetScratchRegisterList(), 0);
  }

  CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());
}

// When an immediate does not fit an add instruction, the assembler normally
// uses a temporary register (encoded with movw/movt) to hold the value.
// However, in order to remove a use of r9 in TurboFan's code generator, it
// must also cope with no scratch register being available, i.e. the
// destination and source registers are the same and `ip` cannot be used. In
// that case an add is split into a sequence of additions, for example:
//
//   UseScratchRegisterScope temps(&assm);
//   Register my_scratch = temps.Acquire();  // ip is no longer available.
//   __ add(r0, r0, Operand(0xabcd));        // add r0, r0, #0xcd
//                                           // add r0, r0, #0xab00
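//
// For the 0x12345678 immediate used below, the split presumably proceeds in
// byte-sized chunks from the low end (the exact sequence is illustrative, not
// captured from the assembler's output):
//
//   add r0, r0, #0x78
//   add r0, r0, #0x5600
//   add r0, r0, #0x340000
//   add r0, r0, #0x12000000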
TEST(split_add_immediate) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  {
    Assembler assm(isolate, NULL, 0);
    __ mov(r1, r0);
    // Re-use the destination as a scratch.
    __ add(r0, r1, Operand(0x12345678));
    __ blx(lr);

    CodeDesc desc;
    assm.GetCode(isolate, &desc);
    Handle<Code> code =
        isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
    OFStream os(stdout);
    code->Print(os);
#endif
    F1 f = FUNCTION_CAST<F1>(code->entry());
    uint32_t res =
        reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
    ::printf("f() = 0x%x\n", res);
    CHECK_EQ(0x12345678, res);
  }

  {
    Assembler assm(isolate, NULL, 0);
    // Use ip as a scratch.
    __ add(r0, r0, Operand(0x12345678));
    __ blx(lr);

    CodeDesc desc;
    assm.GetCode(isolate, &desc);
    Handle<Code> code =
        isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
    OFStream os(stdout);
    code->Print(os);
#endif
    F1 f = FUNCTION_CAST<F1>(code->entry());
    uint32_t res =
        reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
    ::printf("f() = 0x%x\n", res);
    CHECK_EQ(0x12345678, res);
  }

  {
    Assembler assm(isolate, NULL, 0);
    UseScratchRegisterScope temps(&assm);
    Register reserved = temps.Acquire();
    USE(reserved);
    // If ip is not available, split the operation into multiple additions.
    __ add(r0, r0, Operand(0x12345678));
    __ blx(lr);

    CodeDesc desc;
    assm.GetCode(isolate, &desc);
    Handle<Code> code =
        isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
    OFStream os(stdout);
    code->Print(os);
#endif
    F1 f = FUNCTION_CAST<F1>(code->entry());
    uint32_t res =
        reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
    ::printf("f() = 0x%x\n", res);
    CHECK_EQ(0x12345678, res);
  }
}

#undef __

}  // namespace test_assembler_arm
}  // namespace internal
}  // namespace v8