/*
 * Copyright 2019 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkColorPriv.h"
#include "include/private/SkColorData.h"
#include "src/core/SkMSAN.h"
#include "src/core/SkVM.h"
#include "tests/Test.h"
#include "tools/Resources.h"
#include "tools/SkVMBuilders.h"

using Fmt = SrcoverBuilder_F32::Fmt;

const char* fmt_name(Fmt fmt) {
    switch (fmt) {
        case Fmt::A8:        return "A8";
        case Fmt::G8:        return "G8";
        case Fmt::RGBA_8888: return "RGBA_8888";
    }
    return "";
}

static void dump(skvm::Builder& builder, SkWStream* o) {
    skvm::Program program = builder.done();
    builder.dump(o);
    o->writeText("\n");
    program.dump(o);
    o->writeText("\n");
}

// TODO: I'd like this to go away and have every test in here run both JIT and interpreter.
template <typename Fn>
static void test_interpreter_only(skiatest::Reporter* r, skvm::Program&& program, Fn&& test) {
    REPORTER_ASSERT(r, !program.hasJIT());
    test((const skvm::Program&) program);
}

template <typename Fn>
static void test_jit_and_interpreter(skiatest::Reporter* r, skvm::Program&& program, Fn&& test) {
    static const bool can_jit = []{
        // This is about the simplest program we can write, setting an int buffer to a constant.
        // If this can't JIT, the platform does not support JITing.
        skvm::Builder b;
        b.store32(b.varying<int>(), b.splat(42));
        skvm::Program p = b.done();
        return p.hasJIT();
    }();

    if (can_jit) {
        REPORTER_ASSERT(r, program.hasJIT());
        test((const skvm::Program&) program);
        program.dropJIT();
    }
    test_interpreter_only(r, std::move(program), std::move(test));
}
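
// A sketch of how the helper above is meant to be used: the lambda runs
// against the JIT code first where the platform supports it, then again
// against the interpreter once dropJIT() has discarded the JIT code.
//
//     skvm::Builder b;
//     ... describe a program with b ...
//     test_jit_and_interpreter(r, b.done(), [&](const skvm::Program& program) {
//         ... program.eval(...) and assert on the results ...
//     });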

DEF_TEST(SkVM, r) {
    SkDynamicMemoryWStream buf;

    // Write all combinations of SrcoverBuilder_F32.
    for (int s = 0; s < 3; s++)
    for (int d = 0; d < 3; d++) {
        auto srcFmt = (Fmt)s,
             dstFmt = (Fmt)d;
        SrcoverBuilder_F32 builder{srcFmt, dstFmt};

        buf.writeText(fmt_name(srcFmt));
        buf.writeText(" over ");
        buf.writeText(fmt_name(dstFmt));
        buf.writeText("\n");
        dump(builder, &buf);
    }

    // Write the I32 Srcovers also.
    {
        SrcoverBuilder_I32_Naive builder;
        buf.writeText("I32 (Naive) 8888 over 8888\n");
        dump(builder, &buf);
    }
    {
        SrcoverBuilder_I32 builder;
        buf.writeText("I32 8888 over 8888\n");
        dump(builder, &buf);
    }
    {
        SrcoverBuilder_I32_SWAR builder;
        buf.writeText("I32 (SWAR) 8888 over 8888\n");
        dump(builder, &buf);
    }

    {
        skvm::Builder b;
        skvm::Arg arg = b.varying<int>();

        // x and y can both be hoisted,
        // and x can die at y, while y must live for the loop.
        skvm::I32 x = b.splat(1),
                  y = b.add(x, b.splat(2));
        b.store32(arg, b.mul(b.load32(arg), y));

        skvm::Program program = b.done();
        REPORTER_ASSERT(r, program.nregs() == 2);

        std::vector<skvm::Builder::Instruction> insts = b.program();
        REPORTER_ASSERT(r, insts.size() == 6);
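        // For reference, an inferred reading of those six instructions
        // (value ids on the left), which the assertions below inspect:
        //   0: splat 1            hoistable, dies at 2, not used in loop
        //   1: splat 2            hoistable, dies at 2, not used in loop
        //   2: add 0,1     (y)    hoistable, dies at 4, used in loop
        //   3: load32 arg
        //   4: mul 3,2
        //   5: store32 arg, 4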
        REPORTER_ASSERT(r,  insts[0].can_hoist && insts[0].death == 2 && !insts[0].used_in_loop);
        REPORTER_ASSERT(r,  insts[1].can_hoist && insts[1].death == 2 && !insts[1].used_in_loop);
        REPORTER_ASSERT(r,  insts[2].can_hoist && insts[2].death == 4 &&  insts[2].used_in_loop);
        REPORTER_ASSERT(r, !insts[3].can_hoist);
        REPORTER_ASSERT(r, !insts[4].can_hoist);
        REPORTER_ASSERT(r, !insts[5].can_hoist);

        dump(b, &buf);

        test_jit_and_interpreter(r, std::move(program), [&](const skvm::Program& program) {
            int arg[] = {0,1,2,3,4,5,6,7,8,9};

            program.eval(SK_ARRAY_COUNT(arg), arg);

            for (int i = 0; i < (int)SK_ARRAY_COUNT(arg); i++) {
                REPORTER_ASSERT(r, arg[i] == i*3);
            }
        });
    }

    {
        // Demonstrate the value of program reordering.
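        // (Issuing each value just before its first use, rather than in the
        // order written here, trims several registers off this program; the
        // reordered version should need just 6, one of them holding the 0xff
        // mask used by extract.)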
        skvm::Builder b;
        skvm::Arg sp = b.varying<int>(),
                  dp = b.varying<int>();

        skvm::I32 byte = b.splat(0xff);

        skvm::I32 src = b.load32(sp),
                  sr  = b.extract(src,  0, byte),
                  sg  = b.extract(src,  8, byte),
                  sb  = b.extract(src, 16, byte),
                  sa  = b.extract(src, 24, byte);

        skvm::I32 dst = b.load32(dp),
                  dr  = b.extract(dst,  0, byte),
                  dg  = b.extract(dst,  8, byte),
                  db  = b.extract(dst, 16, byte),
                  da  = b.extract(dst, 24, byte);

        skvm::I32 R = b.add(sr, dr),
                  G = b.add(sg, dg),
                  B = b.add(sb, db),
                  A = b.add(sa, da);

        skvm::I32 rg   = b.pack(R,  G,  8),
                  ba   = b.pack(B,  A,  8),
                  rgba = b.pack(rg, ba, 16);

        b.store32(dp, rgba);

        dump(b, &buf);
    }

#if defined(SK_CPU_X86)
    sk_sp<SkData> blob = buf.detachAsData();
    {
        sk_sp<SkData> expected = GetResourceAsData("SkVMTest.expected");
        REPORTER_ASSERT(r, expected, "Couldn't load SkVMTest.expected.");
        if (expected) {
            if (blob->size() != expected->size()
                    || 0 != memcmp(blob->data(), expected->data(), blob->size())) {

                // %.*s wants an int width, so cast the size_t sizes.
                ERRORF(r, "SkVMTest expected\n%.*s\nbut got\n%.*s\n",
                       (int)expected->size(), expected->data(),
                       (int)blob->size(),     blob->data());
            }

            SkFILEWStream out(GetResourcePath("SkVMTest.expected").c_str());
            if (out.isValid()) {
                out.write(blob->data(), blob->size());
            }
        }
    }
#endif

    auto test_8888 = [&](skvm::Program&& program) {
        uint32_t src[9];
        uint32_t dst[SK_ARRAY_COUNT(src)];

        test_jit_and_interpreter(r, std::move(program), [&](const skvm::Program& program) {
            for (int i = 0; i < (int)SK_ARRAY_COUNT(src); i++) {
                src[i] = 0xbb007733;
                dst[i] = 0xffaaccee;
            }

            SkPMColor expected = SkPMSrcOver(src[0], dst[0]);  // 0xff2dad73

            program.eval((int)SK_ARRAY_COUNT(src), src, dst);

            // dst is probably 0xff2dad72.
            for (auto got : dst) {
                auto want = expected;
                for (int i = 0; i < 4; i++) {
                    uint8_t d = got  & 0xff,
                            w = want & 0xff;
                    if (abs(d-w) >= 2) {
                        SkDebugf("d %02x, w %02x\n", d,w);
                    }
                    REPORTER_ASSERT(r, abs(d-w) < 2);
                    got  >>= 8;
                    want >>= 8;
                }
            }
        });
    };

    test_8888(SrcoverBuilder_F32{Fmt::RGBA_8888, Fmt::RGBA_8888}.done("srcover_f32"));
    test_8888(SrcoverBuilder_I32_Naive{}.done("srcover_i32_naive"));
    test_8888(SrcoverBuilder_I32{}.done("srcover_i32"));
    test_8888(SrcoverBuilder_I32_SWAR{}.done("srcover_i32_SWAR"));

    test_jit_and_interpreter(r, SrcoverBuilder_F32{Fmt::RGBA_8888, Fmt::G8}.done(),
                             [&](const skvm::Program& program) {
        uint32_t src[9];
        uint8_t  dst[SK_ARRAY_COUNT(src)];

        for (int i = 0; i < (int)SK_ARRAY_COUNT(src); i++) {
            src[i] = 0xbb007733;
            dst[i] = 0x42;
        }

        SkPMColor over = SkPMSrcOver(SkPackARGB32(0xbb, 0x33, 0x77, 0x00),
                                     0xff424242);

        uint8_t want = SkComputeLuminance(SkGetPackedR32(over),
                                          SkGetPackedG32(over),
                                          SkGetPackedB32(over));
        program.eval((int)SK_ARRAY_COUNT(src), src, dst);

        for (auto got : dst) {
            REPORTER_ASSERT(r, abs(got-want) < 3);
        }
    });

    test_jit_and_interpreter(r, SrcoverBuilder_F32{Fmt::A8, Fmt::A8}.done(),
                             [&](const skvm::Program& program) {
        uint8_t src[256],
                dst[256];
        for (int i = 0; i < 256; i++) {
            src[i] = 255 - i;
            dst[i] = i;
        }

        program.eval(256, src, dst);

        for (int i = 0; i < 256; i++) {
            uint8_t want = SkGetPackedA32(SkPMSrcOver(SkPackARGB32(src[i], 0,0,0),
                                                      SkPackARGB32(     i, 0,0,0)));
            REPORTER_ASSERT(r, abs(dst[i]-want) < 2);
        }
    });
}

DEF_TEST(SkVM_Pointless, r) {
    // Let's build a program with no memory arguments.
    // It should all be pegged as dead code, but we should be able to "run" it.
    skvm::Builder b;
    {
        b.add(b.splat(5.0f),
              b.splat(4.0f));
    }

    test_jit_and_interpreter(r, b.done(), [&](const skvm::Program& program) {
        for (int N = 0; N < 64; N++) {
            program.eval(N);
        }
    });

    for (const skvm::Builder::Instruction& inst : b.program()) {
        REPORTER_ASSERT(r, inst.death == 0 && inst.can_hoist == true);
    }
}

DEF_TEST(SkVM_LoopCounts, r) {
    // Make sure we cover all the exact N we want.

    // buf[i] += 1
    skvm::Builder b;
    skvm::Arg arg = b.varying<int>();
    b.store32(arg,
              b.add(b.splat(1),
                    b.load32(arg)));

    test_jit_and_interpreter(r, b.done(), [&](const skvm::Program& program) {
        int buf[64];
        for (int N = 0; N <= (int)SK_ARRAY_COUNT(buf); N++) {
            for (int i = 0; i < (int)SK_ARRAY_COUNT(buf); i++) {
                buf[i] = i;
            }
            program.eval(N, buf);

            for (int i = 0; i < N; i++) {
                REPORTER_ASSERT(r, buf[i] == i+1);
            }
            for (int i = N; i < (int)SK_ARRAY_COUNT(buf); i++) {
                REPORTER_ASSERT(r, buf[i] == i);
            }
        }
    });
}

DEF_TEST(SkVM_gathers, r) {
    skvm::Builder b;
    {
        skvm::Arg uniforms = b.uniform(),
                  buf32    = b.varying<int>(),
                  buf16    = b.varying<uint16_t>(),
                  buf8     = b.varying<uint8_t>();

        skvm::I32 x = b.load32(buf32);

        b.store32(buf32, b.gather32(uniforms,0, b.bit_and(x, b.splat( 7))));
        b.store16(buf16, b.gather16(uniforms,0, b.bit_and(x, b.splat(15))));
        b.store8 (buf8 , b.gather8 (uniforms,0, b.bit_and(x, b.splat(31))));
    }

    test_interpreter_only(r, b.done(), [&](const skvm::Program& program) {
        const int img[] = {12,34,56,78, 90,98,76,54};
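
        // (A note on the expectations below: gather32 indexes img as int32s,
        // gather16 as its uint16 halves, and gather8 as bytes, so on a
        // little-endian layout the narrower gathers mostly read the zero
        // high pieces of these small ints.)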

        constexpr int N = 20;
        int      buf32[N];
        uint16_t buf16[N];
        uint8_t  buf8 [N];

        for (int i = 0; i < 20; i++) {
            buf32[i] = i;
        }

        struct Uniforms {
            const int* img;
        } uniforms{img};

        program.eval(N, &uniforms, buf32, buf16, buf8);
        int i = 0;
        REPORTER_ASSERT(r, buf32[i] == 12 && buf16[i] == 12 && buf8[i] == 12); i++;
        REPORTER_ASSERT(r, buf32[i] == 34 && buf16[i] ==  0 && buf8[i] ==  0); i++;
        REPORTER_ASSERT(r, buf32[i] == 56 && buf16[i] == 34 && buf8[i] ==  0); i++;
        REPORTER_ASSERT(r, buf32[i] == 78 && buf16[i] ==  0 && buf8[i] ==  0); i++;
        REPORTER_ASSERT(r, buf32[i] == 90 && buf16[i] == 56 && buf8[i] == 34); i++;
        REPORTER_ASSERT(r, buf32[i] == 98 && buf16[i] ==  0 && buf8[i] ==  0); i++;
        REPORTER_ASSERT(r, buf32[i] == 76 && buf16[i] == 78 && buf8[i] ==  0); i++;
        REPORTER_ASSERT(r, buf32[i] == 54 && buf16[i] ==  0 && buf8[i] ==  0); i++;

        REPORTER_ASSERT(r, buf32[i] == 12 && buf16[i] == 90 && buf8[i] == 56); i++;
        REPORTER_ASSERT(r, buf32[i] == 34 && buf16[i] ==  0 && buf8[i] ==  0); i++;
        REPORTER_ASSERT(r, buf32[i] == 56 && buf16[i] == 98 && buf8[i] ==  0); i++;
        REPORTER_ASSERT(r, buf32[i] == 78 && buf16[i] ==  0 && buf8[i] ==  0); i++;
        REPORTER_ASSERT(r, buf32[i] == 90 && buf16[i] == 76 && buf8[i] == 78); i++;
        REPORTER_ASSERT(r, buf32[i] == 98 && buf16[i] ==  0 && buf8[i] ==  0); i++;
        REPORTER_ASSERT(r, buf32[i] == 76 && buf16[i] == 54 && buf8[i] ==  0); i++;
        REPORTER_ASSERT(r, buf32[i] == 54 && buf16[i] ==  0 && buf8[i] ==  0); i++;

        REPORTER_ASSERT(r, buf32[i] == 12 && buf16[i] == 12 && buf8[i] == 90); i++;
        REPORTER_ASSERT(r, buf32[i] == 34 && buf16[i] ==  0 && buf8[i] ==  0); i++;
        REPORTER_ASSERT(r, buf32[i] == 56 && buf16[i] == 34 && buf8[i] ==  0); i++;
        REPORTER_ASSERT(r, buf32[i] == 78 && buf16[i] ==  0 && buf8[i] ==  0); i++;
    });
}

DEF_TEST(SkVM_bitops, r) {
    skvm::Builder b;
    {
        skvm::Arg ptr = b.varying<int>();

        skvm::I32 x = b.load32(ptr);

        x = b.bit_and  (x, b.splat(0xf1));  // 0x40
        x = b.bit_or   (x, b.splat(0x80));  // 0xc0
        x = b.bit_xor  (x, b.splat(0xfe));  // 0x3e
        x = b.bit_clear(x, b.splat(0x30));  // 0x0e

        x = b.shl(x, 28);  // 0xe000'0000
        x = b.sra(x, 28);  // 0xffff'fffe
        x = b.shr(x,  1);  // 0x7fff'ffff

        b.store32(ptr, x);
    }

    test_jit_and_interpreter(r, b.done(), [&](const skvm::Program& program) {
        int x = 0x42;
        program.eval(1, &x);
        REPORTER_ASSERT(r, x == 0x7fff'ffff);
    });
}

DEF_TEST(SkVM_f32, r) {
    skvm::Builder b;
    {
        skvm::Arg arg = b.varying<float>();

        skvm::F32 x = b.bit_cast(b.load32(arg)),
                  y = b.add(x,x),   // y = 2x
                  z = b.sub(y,x),   // z = 2x-x = x
                  w = b.div(z,x);   // w = x/x = 1
        b.store32(arg, b.bit_cast(w));
    }

    test_jit_and_interpreter(r, b.done(), [&](const skvm::Program& program) {
        float buf[] = { 1,2,3,4,5,6,7,8,9 };
        program.eval(SK_ARRAY_COUNT(buf), buf);
        for (float v : buf) {
            REPORTER_ASSERT(r, v == 1.0f);
        }
    });
}

DEF_TEST(SkVM_cmp_i32, r) {
    skvm::Builder b;
    {
        skvm::I32 x = b.load32(b.varying<int>());

        auto to_bit = [&](int shift, skvm::I32 mask) {
            return b.shl(b.bit_and(mask, b.splat(0x1)), shift);
        };
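
        // (Each comparison below should produce an all-ones mask where true;
        // to_bit() keeps just its low bit and shifts it to a distinct
        // position so all six results can be packed into one word.)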
        skvm::I32 m = b.splat(0);
        m = b.bit_or(m, to_bit(0, b. eq(x, b.splat(0))));
        m = b.bit_or(m, to_bit(1, b.neq(x, b.splat(1))));
        m = b.bit_or(m, to_bit(2, b. lt(x, b.splat(2))));
        m = b.bit_or(m, to_bit(3, b.lte(x, b.splat(3))));
        m = b.bit_or(m, to_bit(4, b. gt(x, b.splat(4))));
        m = b.bit_or(m, to_bit(5, b.gte(x, b.splat(5))));

        b.store32(b.varying<int>(), m);
    }

    test_interpreter_only(r, b.done(), [&](const skvm::Program& program) {
        int in[] = { 0,1,2,3,4,5,6,7,8,9 };
        int out[SK_ARRAY_COUNT(in)];

        program.eval(SK_ARRAY_COUNT(in), in, out);

        REPORTER_ASSERT(r, out[0] == 0b001111);
        REPORTER_ASSERT(r, out[1] == 0b001100);
        REPORTER_ASSERT(r, out[2] == 0b001010);
        REPORTER_ASSERT(r, out[3] == 0b001010);
        REPORTER_ASSERT(r, out[4] == 0b000010);
        for (int i = 5; i < (int)SK_ARRAY_COUNT(out); i++) {
            REPORTER_ASSERT(r, out[i] == 0b110010);
        }
    });
}

DEF_TEST(SkVM_cmp_f32, r) {
    skvm::Builder b;
    {
        skvm::F32 x = b.bit_cast(b.load32(b.varying<float>()));

        auto to_bit = [&](int shift, skvm::I32 mask) {
            return b.shl(b.bit_and(mask, b.splat(0x1)), shift);
        };

        skvm::I32 m = b.splat(0);
        m = b.bit_or(m, to_bit(0, b. eq(x, b.splat(0.0f))));
        m = b.bit_or(m, to_bit(1, b.neq(x, b.splat(1.0f))));
        m = b.bit_or(m, to_bit(2, b. lt(x, b.splat(2.0f))));
        m = b.bit_or(m, to_bit(3, b.lte(x, b.splat(3.0f))));
        m = b.bit_or(m, to_bit(4, b. gt(x, b.splat(4.0f))));
        m = b.bit_or(m, to_bit(5, b.gte(x, b.splat(5.0f))));

        b.store32(b.varying<int>(), m);
    }

    test_jit_and_interpreter(r, b.done(), [&](const skvm::Program& program) {
        float in[] = { 0,1,2,3,4,5,6,7,8,9 };
        int   out[SK_ARRAY_COUNT(in)];

        program.eval(SK_ARRAY_COUNT(in), in, out);

        REPORTER_ASSERT(r, out[0] == 0b001111);
        REPORTER_ASSERT(r, out[1] == 0b001100);
        REPORTER_ASSERT(r, out[2] == 0b001010);
        REPORTER_ASSERT(r, out[3] == 0b001010);
        REPORTER_ASSERT(r, out[4] == 0b000010);
        for (int i = 5; i < (int)SK_ARRAY_COUNT(out); i++) {
            REPORTER_ASSERT(r, out[i] == 0b110010);
        }
    });
}

DEF_TEST(SkVM_i16x2, r) {
    skvm::Builder b;
    {
        skvm::Arg buf = b.varying<int>();

        skvm::I32 x = b.load32(buf),
                  y = b.add_16x2(x,x),   // y = 2x
                  z = b.mul_16x2(x,y),   // z = 2x^2
                  w = b.sub_16x2(z,x),   // w = x(2x-1)
                  v = b.shl_16x2(w,7),   // These shifts will be a no-op
                  u = b.sra_16x2(v,7);   // for all but x=12 and x=13.
        b.store32(buf, u);
    }

    test_interpreter_only(r, b.done(), [&](const skvm::Program& program) {
        uint16_t buf[] = { 0,1,2,3,4,5,6,7,8,9,10,11,12,13 };

        program.eval(SK_ARRAY_COUNT(buf)/2, buf);
        for (int i = 0; i < 12; i++) {
            REPORTER_ASSERT(r, buf[i] == i*(2*i-1));
        }
        REPORTER_ASSERT(r, buf[12] == 0xff14);  // 12*23 = 0x114
        REPORTER_ASSERT(r, buf[13] == 0xff45);  // 13*25 = 0x145
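        // (Why the 0xff..: shl_16x2 then sra_16x2 by 7 sign-extends each
        // 16-bit lane, so lanes below 0x100 round-trip unchanged while
        // 0x114 and 0x145 come back with their top bits smeared, as
        // 0xff14 and 0xff45.)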
    });
}

DEF_TEST(SkVM_cmp_i16, r) {
    skvm::Builder b;
    {
        skvm::Arg buf = b.varying<int>();
        skvm::I32 x = b.load32(buf);

        auto to_bit = [&](int shift, skvm::I32 mask) {
            return b.shl_16x2(b.bit_and(mask, b.splat(0x0001'0001)), shift);
        };

        skvm::I32 m = b.splat(0);
        m = b.bit_or(m, to_bit(0, b. eq_16x2(x, b.splat(0x0000'0000))));
        m = b.bit_or(m, to_bit(1, b.neq_16x2(x, b.splat(0x0001'0001))));
        m = b.bit_or(m, to_bit(2, b. lt_16x2(x, b.splat(0x0002'0002))));
        m = b.bit_or(m, to_bit(3, b.lte_16x2(x, b.splat(0x0003'0003))));
        m = b.bit_or(m, to_bit(4, b. gt_16x2(x, b.splat(0x0004'0004))));
        m = b.bit_or(m, to_bit(5, b.gte_16x2(x, b.splat(0x0005'0005))));

        b.store32(buf, m);
    }

    test_interpreter_only(r, b.done(), [&](const skvm::Program& program) {
        int16_t buf[] = { 0,1, 2,3, 4,5, 6,7, 8,9 };

        program.eval(SK_ARRAY_COUNT(buf)/2, buf);

        REPORTER_ASSERT(r, buf[0] == 0b001111);
        REPORTER_ASSERT(r, buf[1] == 0b001100);
        REPORTER_ASSERT(r, buf[2] == 0b001010);
        REPORTER_ASSERT(r, buf[3] == 0b001010);
        REPORTER_ASSERT(r, buf[4] == 0b000010);
        for (int i = 5; i < (int)SK_ARRAY_COUNT(buf); i++) {
            REPORTER_ASSERT(r, buf[i] == 0b110010);
        }
    });
}

DEF_TEST(SkVM_mad, r) {
    // This program is designed to exercise the tricky corners of instruction
    // and register selection for Op::mad_f32.

    skvm::Builder b;
    {
        skvm::Arg arg = b.varying<int>();

        skvm::F32 x = b.to_f32(b.load32(arg)),
                  y = b.mad(x,x,x),   // x is needed in the future, so r[x] != r[y].
                  z = b.mad(y,y,x),   // y is needed in the future, but r[z] = r[x] is ok.
                  w = b.mad(z,z,y),   // w can alias z but not y.
                  v = b.mad(w,y,w);   // Got to stop somewhere.
        b.store32(arg, b.trunc(v));
    }

    test_jit_and_interpreter(r, b.done(), [&](const skvm::Program& program) {
        int x = 2;
        program.eval(1, &x);
        // x = 2
        // y = 2*2 + 2 = 6
        // z = 6*6 + 2 = 38
        // w = 38*38 + 6 = 1450
        // v = 1450*6 + 1450 = 10150
        REPORTER_ASSERT(r, x == 10150);
    });
}

DEF_TEST(SkVM_madder, r) {
    skvm::Builder b;
    {
        skvm::Arg arg = b.varying<float>();

        skvm::F32 x = b.bit_cast(b.load32(arg)),
                  y = b.mad(x,x,x),   // x is needed in the future, so r[x] != r[y].
                  z = b.mad(y,x,y),   // r[x] can be reused after this instruction, but not r[y].
                  w = b.mad(y,y,z);
        b.store32(arg, b.bit_cast(w));
    }

    test_jit_and_interpreter(r, b.done(), [&](const skvm::Program& program) {
        float x = 2.0f;
        // y = 2*2 + 2 = 6
        // z = 6*2 + 6 = 18
        // w = 6*6 + 18 = 54
        program.eval(1, &x);
        REPORTER_ASSERT(r, x == 54.0f);
    });
}

DEF_TEST(SkVM_floor, r) {
    skvm::Builder b;
    {
        skvm::Arg arg = b.varying<float>();
        b.store32(arg, b.bit_cast(b.floor(b.bit_cast(b.load32(arg)))));
    }

#if defined(SK_CPU_X86)
    test_jit_and_interpreter
#else
    test_interpreter_only
#endif
    (r, b.done(), [&](const skvm::Program& program) {
        float buf[]  = { -2.0f, -1.5f, -1.0f, 0.0f, 1.0f, 1.5f, 2.0f };
        float want[] = { -2.0f, -2.0f, -1.0f, 0.0f, 1.0f, 1.0f, 2.0f };
        program.eval(SK_ARRAY_COUNT(buf), buf);
        for (int i = 0; i < (int)SK_ARRAY_COUNT(buf); i++) {
            REPORTER_ASSERT(r, buf[i] == want[i]);
        }
    });
}

DEF_TEST(SkVM_hoist, r) {
    // This program uses enough constants that it will fail to JIT if we hoist them.
    // The JIT will try again without hoisting, and that'll just need 2 registers.
    skvm::Builder b;
    {
        skvm::Arg arg = b.varying<int>();
        skvm::I32 x = b.load32(arg);
        for (int i = 0; i < 32; i++) {
            x = b.add(x, b.splat(i));
        }
        b.store32(arg, x);
    }

    test_jit_and_interpreter(r, b.done(), [&](const skvm::Program& program) {
        int x = 4;
        program.eval(1, &x);
        // x += 0 + 1 + 2 + 3 + ... + 30 + 31
        // x += 496
        REPORTER_ASSERT(r, x == 500);
    });
}

DEF_TEST(SkVM_select, r) {
    skvm::Builder b;
    {
        skvm::Arg buf = b.varying<int>();

        skvm::I32 x = b.load32(buf);

        x = b.select( b.gt(x, b.splat(4)), x, b.splat(42) );

        b.store32(buf, x);
    }

    test_jit_and_interpreter(r, b.done(), [&](const skvm::Program& program) {
        int buf[] = { 0,1,2,3,4,5,6,7,8 };
        program.eval(SK_ARRAY_COUNT(buf), buf);
        for (int i = 0; i < (int)SK_ARRAY_COUNT(buf); i++) {
            REPORTER_ASSERT(r, buf[i] == (i > 4 ? i : 42));
        }
    });
}

DEF_TEST(SkVM_NewOps, r) {
    // Exercise a somewhat arbitrary set of new ops.
    skvm::Builder b;
    {
        skvm::Arg buf      = b.varying<int16_t>(),
                  uniforms = b.uniform();

        skvm::I32 x = b.load16(buf);

        const size_t kPtr = sizeof(const int*);
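
        // (The offsets below follow the anonymous uniform struct defined in
        // the test body: the img pointer fills the first kPtr bytes, then
        // add at kPtr+0, mul at kPtr+4, sub at kPtr+6, and limit at kPtr+8.)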
        x = b.add(x, b.uniform32(uniforms, kPtr+0));
        x = b.mul(x, b.uniform8 (uniforms, kPtr+4));
        x = b.sub(x, b.uniform16(uniforms, kPtr+6));

        skvm::I32 limit = b.uniform32(uniforms, kPtr+8);
        x = b.select(b.lt(x, b.splat(0)), b.splat(0), x);
        x = b.select(b.gt(x, limit     ), limit     , x);

        x = b.gather8(uniforms,0, x);

        b.store16(buf, x);
    }

    if ((false)) {
        SkDynamicMemoryWStream buf;
        dump(b, &buf);
        sk_sp<SkData> blob = buf.detachAsData();
        SkDebugf("%.*s\n", (int)blob->size(), blob->data());
    }

    test_interpreter_only(r, b.done(), [&](const skvm::Program& program) {
        const int N = 31;
        int16_t buf[N];
        for (int i = 0; i < N; i++) {
            buf[i] = i;
        }

        const int M = 16;
        uint8_t img[M];
        for (int i = 0; i < M; i++) {
            img[i] = i*i;
        }

        struct {
            const uint8_t* img;
            int      add   = 5;
            uint8_t  mul   = 3;
            uint16_t sub   = 18;
            int      limit = M-1;
        } uniforms{img};

        program.eval(N, buf, &uniforms);

        for (int i = 0; i < N; i++) {
            // Our first math calculates x = (i+5)*3 - 18, a.k.a. 3*(i-1).
            int x = 3*(i-1);

            // Then that's pinned to the limits of img.
            if (i < 2) { x =  0; }  // Notice i == 1 hits x == 0 exactly...
            if (i > 5) { x = 15; }  // ...and i == 6 hits x == 15 exactly.
            REPORTER_ASSERT(r, buf[i] == img[x]);
        }
    });
}

DEF_TEST(SkVM_MSAN, r) {
    // This little memset32() program should be able to JIT, but if we run that
    // JIT code in an MSAN build, it won't see the writes initialize buf.  So
    // this tests that we're using the interpreter instead.
    skvm::Builder b;
    b.store32(b.varying<int>(), b.splat(42));

    test_jit_and_interpreter(r, b.done(), [&](const skvm::Program& program) {
        constexpr int K = 17;
        int buf[K];   // Intentionally uninitialized.
        program.eval(K, buf);
        sk_msan_assert_initialized(buf, buf+K);
        for (int x : buf) {
            REPORTER_ASSERT(r, x == 42);
        }
    });
}

DEF_TEST(SkVM_assert, r) {
    skvm::Builder b;
    b.assert_true(b.lt(b.load32(b.varying<int>()),
                       b.splat(42)));

    test_jit_and_interpreter(r, b.done(), [&](const skvm::Program& program) {
        int buf[] = { 0,1,2,3,4,5,6,7,8,9 };
        program.eval(SK_ARRAY_COUNT(buf), buf);
    });
}

template <typename Fn>
static void test_asm(skiatest::Reporter* r, Fn&& fn, std::initializer_list<uint8_t> expected) {
    uint8_t buf[4096];
    skvm::Assembler a{buf};
    fn(a);

    REPORTER_ASSERT(r, a.size() == expected.size());

    auto got = (const uint8_t*)buf,
        want = expected.begin();
    for (int i = 0; i < (int)std::min(a.size(), expected.size()); i++) {
        REPORTER_ASSERT(r, got[i] == want[i],
                        "byte %d was %02x, want %02x", i, got[i], want[i]);
    }
}

DEF_TEST(SkVM_Assembler, r) {
    // The easiest way to generate test cases is
    //
    //   echo '...some asm...' | llvm-mc -show-encoding -x86-asm-syntax=intel
    //
    // The -x86-asm-syntax=intel bit is optional, controlling the
    // input syntax only; the output will always be AT&T  op x,y,dst style.
    // Our APIs read more like Intel op dst,x,y as op(dst,x,y), so I find
    // that a bit easier to use here, despite maybe favoring AT&T overall.
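    //
    // For example (output abbreviated; the encoding bytes are what we check):
    //
    //   $ echo 'vpaddd ymm0, ymm1, ymm2' | llvm-mc -show-encoding -x86-asm-syntax=intel
    //       vpaddd %ymm2, %ymm1, %ymm0   # encoding: [0xc5,0xf5,0xfe,0xc2]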

    using A = skvm::Assembler;

    // Our exit strategy from AVX code.
    test_asm(r, [&](A& a) {
        a.int3();
        a.vzeroupper();
        a.ret();
    },{
        0xcc,
        0xc5, 0xf8, 0x77,
        0xc3,
    });

    // Align should pad with zero.
    test_asm(r, [&](A& a) {
        a.ret();
        a.align(4);
    },{
        0xc3,
        0x00, 0x00, 0x00,
    });

    test_asm(r, [&](A& a) {
        a.add(A::rax, 8);       // Always good to test rax.
        a.sub(A::rax, 32);

        a.add(A::rdi, 12);      // Last 0x48 REX
        a.sub(A::rdi, 8);

        a.add(A::r8 , 7);       // First 0x49 REX
        a.sub(A::r8 , 4);

        a.add(A::rsi, 128);     // Requires 4 byte immediate.
        a.sub(A::r8 , 1000000);
    },{
        0x48, 0x83, 0b11'000'000, 0x08,
        0x48, 0x83, 0b11'101'000, 0x20,

        0x48, 0x83, 0b11'000'111, 0x0c,
        0x48, 0x83, 0b11'101'111, 0x08,

        0x49, 0x83, 0b11'000'000, 0x07,
        0x49, 0x83, 0b11'101'000, 0x04,

        0x48, 0x81, 0b11'000'110, 0x80, 0x00, 0x00, 0x00,
        0x49, 0x81, 0b11'101'000, 0x40, 0x42, 0x0f, 0x00,
    });

    test_asm(r, [&](A& a) {
        a.vpaddd (A::ymm0, A::ymm1, A::ymm2);  // Low registers and 0x0f map -> 2-byte VEX.
        a.vpaddd (A::ymm8, A::ymm1, A::ymm2);  // A high dst register is ok -> 2-byte VEX.
        a.vpaddd (A::ymm0, A::ymm8, A::ymm2);  // A high first argument register -> 2-byte VEX.
        a.vpaddd (A::ymm0, A::ymm1, A::ymm8);  // A high second argument -> 3-byte VEX.
        a.vpmulld(A::ymm0, A::ymm1, A::ymm2);  // Using non-0x0f map instruction -> 3-byte VEX.
        a.vpsubd (A::ymm0, A::ymm1, A::ymm2);  // Test vpsubd to ensure argument order is right.
    },{
        /*    VEX    */  /*op*/ /*modRM*/
        0xc5,       0xf5, 0xfe, 0xc2,
        0xc5,       0x75, 0xfe, 0xc2,
        0xc5,       0xbd, 0xfe, 0xc2,
        0xc4, 0xc1, 0x75, 0xfe, 0xc0,
        0xc4, 0xe2, 0x75, 0x40, 0xc2,
        0xc5,       0xf5, 0xfa, 0xc2,
    });

    test_asm(r, [&](A& a) {
        a.vpcmpeqd (A::ymm0, A::ymm1, A::ymm2);
        a.vpcmpgtd (A::ymm0, A::ymm1, A::ymm2);
        a.vcmpeqps (A::ymm0, A::ymm1, A::ymm2);
        a.vcmpltps (A::ymm0, A::ymm1, A::ymm2);
        a.vcmpleps (A::ymm0, A::ymm1, A::ymm2);
        a.vcmpneqps(A::ymm0, A::ymm1, A::ymm2);
    },{
        0xc5,0xf5,0x76,0xc2,
        0xc5,0xf5,0x66,0xc2,
        0xc5,0xf4,0xc2,0xc2,0x00,
        0xc5,0xf4,0xc2,0xc2,0x01,
        0xc5,0xf4,0xc2,0xc2,0x02,
        0xc5,0xf4,0xc2,0xc2,0x04,
    });

    test_asm(r, [&](A& a) {
        a.vminps(A::ymm0, A::ymm1, A::ymm2);
        a.vmaxps(A::ymm0, A::ymm1, A::ymm2);
    },{
        0xc5,0xf4,0x5d,0xc2,
        0xc5,0xf4,0x5f,0xc2,
    });

    test_asm(r, [&](A& a) {
        a.vpblendvb(A::ymm0, A::ymm1, A::ymm2, A::ymm3);
    },{
        0xc4,0xe3,0x75, 0x4c, 0xc2, 0x30,
    });

    test_asm(r, [&](A& a) {
        a.vpsrld(A::ymm15, A::ymm2, 8);
        a.vpsrld(A::ymm0 , A::ymm8, 5);
    },{
        0xc5,     0x85, 0x72,0xd2, 0x08,
        0xc4,0xc1,0x7d, 0x72,0xd0, 0x05,
    });

    test_asm(r, [&](A& a) {
        a.vpermq(A::ymm1, A::ymm2, 5);
    },{
        0xc4,0xe3,0xfd, 0x00,0xca, 0x05,
    });

    test_asm(r, [&](A& a) {
        a.vroundps(A::ymm1, A::ymm2, A::NEAREST);
        a.vroundps(A::ymm1, A::ymm2, A::FLOOR);
        a.vroundps(A::ymm1, A::ymm2, A::CEIL);
        a.vroundps(A::ymm1, A::ymm2, A::TRUNC);
    },{
        0xc4,0xe3,0x7d,0x08,0xca,0x00,
        0xc4,0xe3,0x7d,0x08,0xca,0x01,
        0xc4,0xe3,0x7d,0x08,0xca,0x02,
        0xc4,0xe3,0x7d,0x08,0xca,0x03,
    });

    test_asm(r, [&](A& a) {
        A::Label l = a.here();
        a.byte(1);
        a.byte(2);
        a.byte(3);
        a.byte(4);
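
        // (The label points at the four data bytes above; x86 addresses it
        // RIP-relative, so each displacement in the expected bytes below is
        // the label minus the end of the instruction referring to it: -13
        // for the first 9-byte vbroadcastss, -22 for the next, and so on.)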
        a.vbroadcastss(A::ymm0 , &l);
        a.vbroadcastss(A::ymm1 , &l);
        a.vbroadcastss(A::ymm8 , &l);
        a.vbroadcastss(A::ymm15, &l);

        a.vpshufb(A::ymm4, A::ymm3, &l);
        a.vpaddd (A::ymm4, A::ymm3, &l);
        a.vpsubd (A::ymm4, A::ymm3, &l);

        a.vptest(A::ymm4, &l);

        a.vmulps (A::ymm4, A::ymm3, &l);
    },{
        0x01, 0x02, 0x03, 0x4,

        /*     VEX     */ /*op*/ /* ModRM  */  /*     offset     */
        0xc4, 0xe2, 0x7d, 0x18, 0b00'000'101, 0xf3,0xff,0xff,0xff,   // 0xfffffff3 == -13
        0xc4, 0xe2, 0x7d, 0x18, 0b00'001'101, 0xea,0xff,0xff,0xff,   // 0xffffffea == -22
        0xc4, 0x62, 0x7d, 0x18, 0b00'000'101, 0xe1,0xff,0xff,0xff,   // 0xffffffe1 == -31
        0xc4, 0x62, 0x7d, 0x18, 0b00'111'101, 0xd8,0xff,0xff,0xff,   // 0xffffffd8 == -40

        0xc4, 0xe2, 0x65, 0x00, 0b00'100'101, 0xcf,0xff,0xff,0xff,   // 0xffffffcf == -49

        0xc5, 0xe5,       0xfe, 0b00'100'101, 0xc7,0xff,0xff,0xff,   // 0xffffffc7 == -57
        0xc5, 0xe5,       0xfa, 0b00'100'101, 0xbf,0xff,0xff,0xff,   // 0xffffffbf == -65

sketch out structure for ops with immediates
Lots of x86 instructions can take their right-hand-side argument from
memory directly rather than from a register.  We can use this to avoid
the need to allocate a register for many constants.

The strategy in this CL is the simplest of several I've been stewing
over.  There are some trade-offs, particularly on ARM; this naive ARM
implementation means we'll load & op every time, even though the load
part of the operation can logically be hoisted.  From here on I'm going
to briefly enumerate a few other approaches that allow the optimization
on x86 and still allow the immediate splats to hoist on ARM.

1) don't do it on ARM
A very simple approach is to simply not perform this optimization on
ARM.  ARM has more vector registers than x86, so register pressure is
lower there.  We're going to end up with splatted constants in
registers anyway, so maybe just let that happen the normal way instead
of some roundabout complicated hack like I'll talk about in 2).  The
only downside in my mind is that this approach would make high-level
program descriptions platform-dependent, which isn't so bad, but it's
been nice to be able to compare and diff debug dumps.

2) split Op::splat up
The next less-simple approach could fix this by splitting splats into
two Ops internally: an inner Op::immediate that guarantees at least
that the constant is in memory and is compatible with immediate-aware
ops like mul_f32_imm, and an outer Op::constant that depends on that
Op::immediate and further guarantees the constant has been broadcast
into a register to be compatible with non-immediate-aware ops like
div_f32.  When building a program, immediate-aware ops would peek for
Op::constants as they do today for Op::splats, but instead of embedding
the immediate themselves, they'd replace their dependency with the
inner Op::immediate.
On x86 these new Ops would work just as advertised, with Op::immediate
a runtime no-op and Op::constant the usual vbroadcastss.  On ARM
Op::immediate needs to go all the way and splat out a register to make
the constant compatible with immediate-aware ops, and the Op::constant
becomes a no-op instead.  All this comes together to let the
Op::immediate splat hoist up out of the loop while still feeding
Op::mul_f32_imm and co.  It's a rather complicated approach to solving
this issue, but I might want to explore it just to see how bad it is.

3) do it inside the x86 JIT
The conceptually best approach is to find a way to do this peepholing
only inside the JIT and only on x86, avoiding the need for new
Op::mul_f32_imm and co.  ARM and the interpreter don't benefit from
this peephole, so the x86 JIT is the logical owner of this
optimization.  Finding a clean way to do this without too much
disruption is the least-baked idea I've got here, though I think the
most desirable long-term.
Cq-Include-Trybots: skia.primary:Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-SK_USE_SKVM_BLITTER,Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All-SK_USE_SKVM_BLITTER
Change-Id: Ie9c6336ed08b6fbeb89acf920a48a319f74f3643
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/254217
Commit-Queue: Mike Klein <mtklein@google.com>
Reviewed-by: Herb Derby <herb@google.com>

        0xc4, 0xe2, 0x7d, 0x17, 0b00'100'101, 0xb6,0xff,0xff,0xff, // 0xffffffb6 == -74

        0xc5, 0xe4, 0x59, 0b00'100'101, 0xae,0xff,0xff,0xff, // 0xffffffae == -82
});
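
    // A sketch (ours, not the Skia implementation) of the Op::index lowering
    // described in the commit message above: with an embedded iota constant
    // {0,1,2,...}, each lane's index comes from one vpsubd against the
    // splatted loop counter; the vpaddd form it replaced added {0,-1,-2,...}
    // instead.  Here n stands in for that loop counter.
    auto sketch_index_lowering = [](int n, int dst[8]) {
        const int iota[8] = {0,1,2,3,4,5,6,7};   // embedded data, element-aligned
        for (int i = 0; i < 8; i++) {
            dst[i] = n - iota[i];                // vpsubd splat(n), iota
        }
    };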

    test_asm(r, [&](A& a) {
        a.vbroadcastss(A::ymm0, A::rdi, 0);
        a.vbroadcastss(A::ymm13, A::r14, 7);
        a.vbroadcastss(A::ymm8, A::rdx, -12);
        a.vbroadcastss(A::ymm8, A::rdx, 400);

        a.vbroadcastss(A::ymm8, A::xmm0);
        a.vbroadcastss(A::ymm0, A::xmm13);
    },{
        /* VEX */ /*op*/ /*ModRM*/ /*offset*/
        0xc4,0xe2,0x7d, 0x18, 0b00'000'111,
        0xc4,0x42,0x7d, 0x18, 0b01'101'110, 0x07,
        0xc4,0x62,0x7d, 0x18, 0b01'000'010, 0xf4,
        0xc4,0x62,0x7d, 0x18, 0b10'000'010, 0x90,0x01,0x00,0x00,

        0xc4,0x62,0x7d, 0x18, 0b11'000'000,
        0xc4,0xc2,0x7d, 0x18, 0b11'000'101,
});
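
    // A hedged sketch (ours) of how the ModRM mod bits above follow from the
    // displacement: none, an 8-bit displacement, or a 32-bit one.  (Special
    // cases like rbp/r13 bases are ignored here.)
    auto sketch_modrm_mod = [](int offset) -> int {
        if (offset == 0)                     { return 0b00; }  // e.g. [rdi] + 0
        if (-128 <= offset && offset <= 127) { return 0b01; }  // disp8:  +7, -12
        return 0b10;                                           // disp32: +400
    };
    // mod == 0b11 instead marks a register operand, as in the last two
    // vbroadcastss cases above.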

    test_asm(r, [&](A& a) {
        A::Label l = a.here();
        a.jne(&l);
        a.jne(&l);
        a.je (&l);
        a.jmp(&l);
        a.jl (&l);
        a.jc (&l);

        a.cmp(A::rdx, 0);
        a.cmp(A::rax, 12);
        a.cmp(A::r14, 2000000000);
    },{
        0x0f,0x85, 0xfa,0xff,0xff,0xff, // near jne -6 bytes
        0x0f,0x85, 0xf4,0xff,0xff,0xff, // near jne -12 bytes
        0x0f,0x84, 0xee,0xff,0xff,0xff, // near je -18 bytes
        0xe9, 0xe9,0xff,0xff,0xff, // near jmp -23 bytes
        0x0f,0x8c, 0xe3,0xff,0xff,0xff, // near jl -29 bytes
        0x0f,0x82, 0xdd,0xff,0xff,0xff, // near jc -35 bytes

        0x48,0x83,0xfa,0x00,
        0x48,0x83,0xf8,0x0c,
        0x49,0x81,0xfe,0x00,0x94,0x35,0x77,
});
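
    // Where those displacements come from, as a sketch (ours): a near jump's
    // rel32 counts from the end of the instruction, so the first jne back to
    // a label at its own start is 0 - 6 == -6 (2 opcode bytes plus 4
    // displacement bytes), the next lands at -12, and so on.
    auto sketch_rel32 = [](int label, int jump_start, int jump_size) {
        return label - (jump_start + jump_size);
    };
    // The cmp rows likewise show two immediate encodings: the sign-extended
    // imm8 form (opcode 0x83) for 0 and 12, and the imm32 form (opcode 0x81)
    // for 2000000000.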

    test_asm(r, [&](A& a) {
        a.vmovups(A::ymm5, A::rsi);
        a.vmovups(A::rsi, A::ymm5);

        a.vmovups(A::rsi, A::xmm5);

        a.vpmovzxwd(A::ymm4, A::rsi);
        a.vpmovzxbd(A::ymm4, A::rsi);

        a.vmovq(A::rdx, A::xmm15);
    },{
        /* VEX */ /*Op*/ /* ModRM */
        0xc5, 0xfc, 0x10, 0b00'101'110,
        0xc5, 0xfc, 0x11, 0b00'101'110,

        0xc5, 0xf8, 0x11, 0b00'101'110,

        0xc4,0xe2,0x7d, 0x33, 0b00'100'110,
        0xc4,0xe2,0x7d, 0x31, 0b00'100'110,

        0xc5, 0x79, 0xd6, 0b00'111'010,
});
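
    // What the zero-extending loads above compute, as a scalar sketch (ours):
    // vpmovzxbd widens unsigned bytes to 32-bit lanes; vpmovzxwd does the
    // same from 16-bit halves.
    auto sketch_vpmovzxbd = [](const unsigned char* src, int dst[8]) {
        for (int i = 0; i < 8; i++) {
            dst[i] = src[i];   // zero-extend u8 -> i32
        }
    };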

    test_asm(r, [&](A& a) {
        a.movzbl(A::rax, A::rsi, 0);   // Low registers for src and dst.
        a.movzbl(A::rax, A::r8,  0);   // High src register.
        a.movzbl(A::r8 , A::rsi, 0);   // High dst register.
        a.movzbl(A::r8 , A::rsi, 12);
        a.movzbl(A::r8 , A::rsi, 400);

        a.vmovd(A::rax, A::xmm0);
        a.vmovd(A::rax, A::xmm8);
        a.vmovd(A::r8 , A::xmm0);

        a.vmovd(A::xmm0, A::rax);
        a.vmovd(A::xmm8, A::rax);
        a.vmovd(A::xmm0, A::r8);

        a.vmovd_direct(A::rax, A::xmm0);
        a.vmovd_direct(A::rax, A::xmm8);
        a.vmovd_direct(A::r8 , A::xmm0);

        a.vmovd_direct(A::xmm0, A::rax);
        a.vmovd_direct(A::xmm8, A::rax);
        a.vmovd_direct(A::xmm0, A::r8);

        a.movb(A::rdx, A::rax);
        a.movb(A::rdx, A::r8);
        a.movb(A::r8 , A::rax);
    },{
        0x0f,0xb6,0x06,
        0x41,0x0f,0xb6,0x00,
        0x44,0x0f,0xb6,0x06,

        0x44,0x0f,0xb6,0x46, 12,
        0x44,0x0f,0xb6,0x86, 0x90,0x01,0x00,0x00,

        0xc5,0xf9,0x7e,0x00,
        0xc5,0x79,0x7e,0x00,
        0xc4,0xc1,0x79,0x7e,0x00,

        0xc5,0xf9,0x6e,0x00,
        0xc5,0x79,0x6e,0x00,
        0xc4,0xc1,0x79,0x6e,0x00,

        0xc5,0xf9,0x7e,0xc0,
        0xc5,0x79,0x7e,0xc0,
        0xc4,0xc1,0x79,0x7e,0xc0,

        0xc5,0xf9,0x6e,0xc0,
        0xc5,0x79,0x6e,0xc0,
        0xc4,0xc1,0x79,0x6e,0xc0,

        0x88, 0x02,
        0x44, 0x88, 0x02,
        0x41, 0x88, 0x00,
});
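
    // A hedged sketch (ours) of the REX prefixes in the movzbl rows above:
    // registers 8-15 need a 0x40-based prefix byte, so a high src register
    // adds 0x41 (REX.B) and a high dst register adds 0x44 (REX.R), while the
    // all-low case needs no prefix and stays 3 bytes.
    auto sketch_rex = [](bool w, bool r, bool x, bool b) {
        return 0x40 | (w << 3) | (r << 2) | (x << 1) | (b << 0);
    };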

    test_asm(r, [&](A& a) {
        a.vpinsrw(A::xmm1, A::xmm8, A::rsi, 4);
        a.vpinsrw(A::xmm8, A::xmm1, A::r8, 12);

        a.vpinsrb(A::xmm1, A::xmm8, A::rsi, 4);
        a.vpinsrb(A::xmm8, A::xmm1, A::r8, 12);

        a.vpextrw(A::rsi, A::xmm8, 7);
        a.vpextrw(A::r8 , A::xmm1, 15);

        a.vpextrb(A::rsi, A::xmm8, 7);
        a.vpextrb(A::r8 , A::xmm1, 15);
    },{
        0xc5,0xb9, 0xc4, 0x0e, 4,
        0xc4,0x41,0x71, 0xc4, 0x00, 12,

        0xc4,0xe3,0x39, 0x20, 0x0e, 4,
        0xc4,0x43,0x71, 0x20, 0x00, 12,

        0xc4,0x63,0x79, 0x15, 0x06, 7,
        0xc4,0xc3,0x79, 0x15, 0x08, 15,

        0xc4,0x63,0x79, 0x14, 0x06, 7,
        0xc4,0xc3,0x79, 0x14, 0x08, 15,
    });

    test_asm(r, [&](A& a) {
        a.vpandn(A::ymm3, A::ymm12, A::ymm2);
    },{
        0xc5, 0x9d, 0xdf, 0xda,
});
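
    // vpandn's operand order is easy to misread; as a scalar sketch (ours),
    // it computes (~first) & second, inverting the first source.
    auto sketch_vpandn = [](int a, int b) {
        return ~a & b;
    };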

    test_asm(r, [&](A& a) {
        a.vmovdqa   (A::ymm3, A::ymm2);
        a.vcvttps2dq(A::ymm3, A::ymm2);
        a.vcvtdq2ps (A::ymm3, A::ymm2);
        a.vcvtps2dq (A::ymm3, A::ymm2);
    },{
        0xc5,0xfd,0x6f,0xda,
        0xc5,0xfe,0x5b,0xda,
        0xc5,0xfc,0x5b,0xda,
        0xc5,0xfd,0x5b,0xda,
});
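
    // The two float->int conversions above differ only in rounding; a scalar
    // sketch (ours, assuming <cmath> for lrintf): vcvttps2dq truncates toward
    // zero, while vcvtps2dq rounds with the current mode, nearest-even by
    // default.
    auto sketch_cvt = [](float v, int* truncated, int* nearest) {
        *truncated = (int)v;           // vcvttps2dq: -1.7f -> -1
        *nearest   = (int)lrintf(v);   // vcvtps2dq:  -1.7f -> -2
    };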

    // echo "fmul v4.4s, v3.4s, v1.4s" | llvm-mc -show-encoding -arch arm64
    test_asm(r, [&](A& a) {
        a.and16b(A::v4, A::v3, A::v1);
        a.orr16b(A::v4, A::v3, A::v1);
        a.eor16b(A::v4, A::v3, A::v1);
        a.bic16b(A::v4, A::v3, A::v1);
        a.bsl16b(A::v4, A::v3, A::v1);
        a.not16b(A::v4, A::v3);

        a.add4s(A::v4, A::v3, A::v1);
        a.sub4s(A::v4, A::v3, A::v1);
        a.mul4s(A::v4, A::v3, A::v1);

        a.cmeq4s(A::v4, A::v3, A::v1);
        a.cmgt4s(A::v4, A::v3, A::v1);

        a.sub8h(A::v4, A::v3, A::v1);
        a.mul8h(A::v4, A::v3, A::v1);

        a.fadd4s(A::v4, A::v3, A::v1);
        a.fsub4s(A::v4, A::v3, A::v1);
        a.fmul4s(A::v4, A::v3, A::v1);
        a.fdiv4s(A::v4, A::v3, A::v1);
        a.fmin4s(A::v4, A::v3, A::v1);
        a.fmax4s(A::v4, A::v3, A::v1);

        a.fmla4s(A::v4, A::v3, A::v1);

        a.fcmeq4s(A::v4, A::v3, A::v1);
        a.fcmgt4s(A::v4, A::v3, A::v1);
        a.fcmge4s(A::v4, A::v3, A::v1);
    },{
        0x64,0x1c,0x21,0x4e,
        0x64,0x1c,0xa1,0x4e,
        0x64,0x1c,0x21,0x6e,
        0x64,0x1c,0x61,0x4e,
        0x64,0x1c,0x61,0x6e,
        0x64,0x58,0x20,0x6e,

        0x64,0x84,0xa1,0x4e,
        0x64,0x84,0xa1,0x6e,
        0x64,0x9c,0xa1,0x4e,

        0x64,0x8c,0xa1,0x6e,
        0x64,0x34,0xa1,0x4e,

        0x64,0x84,0x61,0x6e,
        0x64,0x9c,0x61,0x4e,

        0x64,0xd4,0x21,0x4e,
        0x64,0xd4,0xa1,0x4e,
        0x64,0xdc,0x21,0x6e,
        0x64,0xfc,0x21,0x6e,
        0x64,0xf4,0xa1,0x4e,
        0x64,0xf4,0x21,0x4e,

        0x64,0xcc,0x21,0x4e,

        0x64,0xe4,0x21,0x4e,
        0x64,0xe4,0xa1,0x6e,
        0x64,0xe4,0x21,0x6e,
    });

    test_asm(r, [&](A& a) {
        a.shl4s(A::v4, A::v3,  0);
        a.shl4s(A::v4, A::v3,  1);
        a.shl4s(A::v4, A::v3,  8);
        a.shl4s(A::v4, A::v3, 16);
        a.shl4s(A::v4, A::v3, 31);

        a.sshr4s(A::v4, A::v3,  1);
        a.sshr4s(A::v4, A::v3,  8);
        a.sshr4s(A::v4, A::v3, 31);

        a.ushr4s(A::v4, A::v3,  1);
        a.ushr4s(A::v4, A::v3,  8);
        a.ushr4s(A::v4, A::v3, 31);

        a.ushr8h(A::v4, A::v3,  1);
        a.ushr8h(A::v4, A::v3,  8);
        a.ushr8h(A::v4, A::v3, 15);
    },{
        0x64,0x54,0x20,0x4f,
        0x64,0x54,0x21,0x4f,
        0x64,0x54,0x28,0x4f,
        0x64,0x54,0x30,0x4f,
        0x64,0x54,0x3f,0x4f,

        0x64,0x04,0x3f,0x4f,
        0x64,0x04,0x38,0x4f,
        0x64,0x04,0x21,0x4f,

        0x64,0x04,0x3f,0x6f,
        0x64,0x04,0x38,0x6f,
        0x64,0x04,0x21,0x6f,

        0x64,0x04,0x1f,0x6f,
        0x64,0x04,0x18,0x6f,
        0x64,0x04,0x11,0x6f,
});
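
    // A hedged sketch (ours) of the shift-immediate fields visible above: for
    // .4s lanes, NEON packs the amount into immh:immb as 32+shift for left
    // shifts and 64-shift for right shifts, e.g. shl #8 -> 0x28 == 40 and
    // ushr #8 -> 0x38 == 56 in the third bytes above.
    auto sketch_shift_imm_4s = [](bool left, int shift) {
        return left ? 32 + shift
                    : 64 - shift;
    };
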
add sli.4s, use it in pack sometimes
We have pack(x,y,imm) = x | (y<<imm), assuming (x & (y<<imm)) == 0.
If we can destroy x, sli (shift-left-insert) lets us implement that
as x |= y << imm.  This happens quite often, so you'll see sequences
of pack that used to look like this

    shl  v4.4s,  v2.4s,  #8
    orr  v1.16b, v4.16b, v1.16b
    shl  v2.4s,  v0.4s,  #8
    orr  v0.16b, v2.16b, v3.16b
    shl  v2.4s,  v0.4s,  #16
    orr  v0.16b, v2.16b, v1.16b

now look like this

    sli  v1.4s, v2.4s, #8
    sli  v3.4s, v0.4s, #8
    sli  v1.4s, v3.4s, #16

We can do this thanks to the new simultaneous register assignment
and instruction selection I added.  We used to never hit this case.
Change-Id: I75fa3defc1afd38779b3993887ca302a0885c5b1
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/228611
Reviewed-by: Mike Klein <mtklein@google.com>
Commit-Queue: Mike Klein <mtklein@google.com>

    test_asm(r, [&](A& a) {
        a.sli4s(A::v4, A::v3,  0);
        a.sli4s(A::v4, A::v3,  1);
        a.sli4s(A::v4, A::v3,  8);
        a.sli4s(A::v4, A::v3, 16);
        a.sli4s(A::v4, A::v3, 31);
    },{
        0x64,0x54,0x20,0x6f,
        0x64,0x54,0x21,0x6f,
        0x64,0x54,0x28,0x6f,
        0x64,0x54,0x30,0x6f,
        0x64,0x54,0x3f,0x6f,
});
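
    // The pack identity from the commit message above, as a scalar sketch
    // (ours): when x and y<<imm don't overlap, shift-left-insert is just a
    // destructive or-with-shift.
    auto sketch_pack = [](unsigned x, unsigned y, int imm) {
        return x | (y << imm);   // sli x, y, #imm == x |= y << imm, given (x & (y<<imm)) == 0
    };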

    test_asm(r, [&](A& a) {
        a.scvtf4s (A::v4, A::v3);   // signed int -> float
        a.fcvtzs4s(A::v4, A::v3);   // float -> signed int, round toward zero
        a.fcvtns4s(A::v4, A::v3);   // float -> signed int, round to nearest
    },{
        0x64,0xd8,0x21,0x4e,
        0x64,0xb8,0xa1,0x4e,
        0x64,0xa8,0x21,0x4e,
    });

    test_asm(r, [&](A& a) {
        a.brk(0);
        a.brk(65535);

        a.ret(A::x30);   // Conventional ret using link register.
        a.ret(A::x13);   // Can really return using any register if we like.

        a.add(A::x2, A::x2,  4);
        a.add(A::x3, A::x2, 32);

        a.sub(A::x2, A::x2,  4);
        a.sub(A::x3, A::x2, 32);

        a.subs(A::x2, A::x2,  4);
        a.subs(A::x3, A::x2, 32);

        a.subs(A::xzr, A::x2, 4);   // These are actually the same instruction!
        a.cmp(A::x2, 4);

        A::Label l = a.here();
        a.bne(&l);
        a.bne(&l);
        a.blt(&l);
        a.b(&l);
        a.cbnz(A::x2, &l);
        a.cbz(A::x2, &l);
    },{
        0x00,0x00,0x20,0xd4,
        0xe0,0xff,0x3f,0xd4,

        0xc0,0x03,0x5f,0xd6,
        0xa0,0x01,0x5f,0xd6,

        0x42,0x10,0x00,0x91,
        0x43,0x80,0x00,0x91,

        0x42,0x10,0x00,0xd1,
        0x43,0x80,0x00,0xd1,

        0x42,0x10,0x00,0xf1,
        0x43,0x80,0x00,0xf1,

        0x5f,0x10,0x00,0xf1,
        0x5f,0x10,0x00,0xf1,

        0x01,0x00,0x00,0x54, // b.ne #0
        0xe1,0xff,0xff,0x54, // b.ne #-4
        0xcb,0xff,0xff,0x54, // b.lt #-8
        0xae,0xff,0xff,0x54, // b.al #-12
        0x82,0xff,0xff,0xb5, // cbnz x2, #-16
        0x62,0xff,0xff,0xb4, // cbz x2, #-20
});
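
    // A hedged sketch (ours) of those conditional-branch words: b.cond packs
    // a 19-bit word offset (bytes/4) into bits [23:5] above a 4-bit condition
    // code, so b.ne #-4 is 0x54000000 | (-1 & 0x7ffff)<<5 | 0b0001 == 0x54ffffe1.
    auto sketch_bcond = [](int offset_bytes, unsigned cond) {
        return 0x54000000u | (((unsigned)(offset_bytes/4) & 0x7ffff) << 5) | cond;
    };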

    // Can we cbz() to a not-yet-defined label?
    test_asm(r, [&](A& a) {
        A::Label l;
        a.cbz(A::x2, &l);
        a.add(A::x3, A::x2, 32);
        a.label(&l);
        a.ret(A::x30);
    },{
        0x42,0x00,0x00,0xb4, // cbz x2, #8
        0x43,0x80,0x00,0x91, // add x3, x2, #32
        0xc0,0x03,0x5f,0xd6, // ret
    });

    // If we start a label as a backward label,
    // can we redefine it to be a future label?
    // (Not sure this is useful... just want to test it works.)
    test_asm(r, [&](A& a) {
        A::Label l1 = a.here();
        a.add(A::x3, A::x2, 32);
        a.cbz(A::x2, &l1);        // This will jump backward... nothing sneaky.

        A::Label l2 = a.here();   // Start off the same...
        a.add(A::x3, A::x2, 32);
        a.cbz(A::x2, &l2);        // Looks like this will go backward...
        a.add(A::x2, A::x2, 4);
        a.add(A::x3, A::x2, 32);
        a.label(&l2);             // But no... actually forward!  What a switcheroo!
    },{
        0x43,0x80,0x00,0x91, // add x3, x2, #32
        0xe2,0xff,0xff,0xb4, // cbz x2, #-4

        0x43,0x80,0x00,0x91, // add x3, x2, #32
        0x62,0x00,0x00,0xb4, // cbz x2, #12
        0x42,0x10,0x00,0x91, // add x2, x2, #4
        0x43,0x80,0x00,0x91, // add x3, x2, #32
    });

    // Loading from a label on ARM.
    test_asm(r, [&](A& a) {
        A::Label fore,aft;
        a.label(&fore);
        a.word (0x01234567);

        a.ldrq(A::v1, &fore);
        a.ldrq(A::v2, &aft);

        a.label(&aft);
        a.word (0x76543210);
    },{
        0x67,0x45,0x23,0x01,
        0xe1,0xff,0xff,0x9c, // ldr q1, #-4
        0x22,0x00,0x00,0x9c, // ldr q2, #4
        0x10,0x32,0x54,0x76,
    });

    test_asm(r, [&](A& a) {
        a.ldrq(A::v0, A::x8);
        a.strq(A::v0, A::x8);
    },{
        0x00,0x01,0xc0,0x3d,
        0x00,0x01,0x80,0x3d,
    });

finish up arm64 ops
Some small refactoring to common up redundant opcode building.
Oddly, I think I've got better codegen than what Clang would do here.
Clang doesn't generate uxtl-based code to unpack 8-bit to 32-bit,
instead preferring to load each byte one at a time and insert them one
at a time.
Me:

    ldr  s0, [x0]
    uxtl v0.8h, v0.8b
    uxtl v0.4s, v0.8h

Clang:

    ldrb w8,  [x0]
    ldrb w9,  [x0, #1]
    ldrb w10, [x0, #2]
    ldrb w11, [x0, #3]
    fmov s0, w8
    mov  v0.s[1], w9
    mov  v0.s[2], w10
    mov  v0.s[3], w11

Change-Id: I0fdf5c6cdcde6a4eb9290936284fd3ffcb2159f6
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/224821
Reviewed-by: Herb Derby <herb@google.com>
Commit-Queue: Mike Klein <mtklein@google.com>

    test_asm(r, [&](A& a) {
        a.xtns2h(A::v0, A::v0);
        a.xtnh2b(A::v0, A::v0);
        a.strs  (A::v0, A::x0);

        a.ldrs   (A::v0, A::x0);
        a.uxtlb2h(A::v0, A::v0);
        a.uxtlh2s(A::v0, A::v0);

        a.uminv4s(A::v3, A::v4);
        a.fmovs  (A::x3, A::v4);   // fmov w3,s4
    },{
        0x00,0x28,0x61,0x0e,
        0x00,0x28,0x21,0x0e,
        0x00,0x00,0x00,0xbd,

        0x00,0x00,0x40,0xbd,
        0x00,0xa4,0x08,0x2f,
        0x00,0xa4,0x10,0x2f,

        0x83,0xa8,0xb1,0x6e,
        0x83,0x00,0x26,0x1e,
});
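
    // The ldrs + uxtl + uxtl sequence from the commit message above, as a
    // scalar sketch (ours): four bytes widen to 16-bit lanes, then to 32-bit.
    auto sketch_uxtl_unpack = [](const unsigned char src[4], int dst[4]) {
        unsigned short halves[4];
        for (int i = 0; i < 4; i++) { halves[i] = src[i];    }  // uxtl v0.8h, v0.8b
        for (int i = 0; i < 4; i++) { dst[i]    = halves[i]; }  // uxtl v0.4s, v0.8h
    };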

    test_asm(r, [&](A& a) {
        a.ldrb(A::v0, A::x8);
        a.strb(A::v0, A::x8);
    },{
        0x00,0x01,0x40,0x3d,
        0x00,0x01,0x00,0x3d,
    });

    test_asm(r, [&](A& a) {
        a.tbl(A::v0, A::v1, A::v2);
    },{
        0x20,0x00,0x02,0x4e,
});
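
    // tbl is a full byte shuffle; a scalar sketch (ours) of the one-register
    // form used above: each output byte picks from the 16-byte table, and
    // out-of-range indices produce zero.
    auto sketch_tbl = [](const unsigned char tbl[16], const unsigned char idx[16],
                         unsigned char dst[16]) {
        for (int i = 0; i < 16; i++) {
            dst[i] = idx[i] < 16 ? tbl[idx[i]] : 0;
        }
    };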
|
2019-06-18 17:16:06 +00:00
|
|
|
}
|