/*
 * Copyright 2019 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkColorSpace.h"
#include "include/gpu/GrDirectContext.h"
#include "src/core/SkBlendModePriv.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrOpsTypes.h"
#include "src/gpu/ganesh/GrProxyProvider.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/ops/FillRectOp.h"
#include "src/gpu/ganesh/ops/TextureOp.h"
#include "src/gpu/ganesh/v1/SurfaceDrawContext_v1.h"
#include "tests/Test.h"
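
// Makes a 128x128, exact-fit, RGBA_8888 SurfaceDrawContext to host the draws under test.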
static std::unique_ptr<skgpu::v1::SurfaceDrawContext> new_SDC(GrRecordingContext* rContext) {
    return skgpu::v1::SurfaceDrawContext::Make(
            rContext, GrColorType::kRGBA_8888, nullptr, SkBackingFit::kExact, {128, 128},
            SkSurfaceProps());
}
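
// Makes a 128x128 renderable proxy in the context's default RGBA_8888 backend format.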
static sk_sp<GrSurfaceProxy> create_proxy(GrRecordingContext* rContext) {
    static constexpr SkISize kDimensions = {128, 128};

    const GrBackendFormat format = rContext->priv().caps()->getDefaultBackendFormat(
                                                                        GrColorType::kRGBA_8888,
                                                                        GrRenderable::kYes);
    return rContext->priv().proxyProvider()->createProxy(
            format, kDimensions, GrRenderable::kYes, 1, GrMipmapped::kNo, SkBackingFit::kExact,
            SkBudgeted::kNo, GrProtected::kNo, GrInternalSurfaceFlags::kNone);
}
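
// A PerQuadAAFunc supplies the AA flags for the i-th quad; BulkRectTest is the signature shared
// by the FillRectOp and TextureOp test bodies so run_test can drive either one.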
typedef GrQuadAAFlags (*PerQuadAAFunc)(int i);

typedef void (*BulkRectTest)(skiatest::Reporter*,
                             GrDirectContext*,
                             PerQuadAAFunc,
                             GrAAType overallAA,
                             SkBlendMode,
                             bool addOneByOne,
                             bool allUniqueProxies,
                             int requestedTotNumQuads,
                             int expectedNumOps);

//-------------------------------------------------------------------------------------------------
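// Adds a set of fill rects in bulk via FillRectOp::AddFillRectOps, then verifies both the number
// of op chains created and the total quad count across those chains.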
static void fillrectop_creation_test(skiatest::Reporter* reporter, GrDirectContext* dContext,
                                     PerQuadAAFunc perQuadAA, GrAAType overallAA,
                                     SkBlendMode blendMode, bool addOneByOne,
                                     bool allUniqueProxies,
                                     int requestedTotNumQuads, int expectedNumOps) {
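
    // The one-at-a-time and unique-proxy modes are only exercised by the TextureOp variant.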
    if (addOneByOne || allUniqueProxies) {
        return;
    }

    std::unique_ptr<skgpu::v1::SurfaceDrawContext> sdc = new_SDC(dContext);

    auto quads = new GrQuadSetEntry[requestedTotNumQuads];

    for (int i = 0; i < requestedTotNumQuads; ++i) {
        quads[i].fRect = SkRect::MakeWH(100.5f, 100.5f); // prevent the int non-AA optimization
        quads[i].fColor = SK_PMColor4fWHITE;
        quads[i].fLocalMatrix = SkMatrix::I();
        quads[i].fAAFlags = perQuadAA(i);
    }
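
    // All quads share a single paint; the blend mode is applied through the paint's XP factory.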
    GrPaint paint;
    paint.setXPFactory(SkBlendMode_AsXPFactory(blendMode));

    skgpu::v1::FillRectOp::AddFillRectOps(sdc.get(), nullptr, dContext, std::move(paint), overallAA,
                                          SkMatrix::I(), quads, requestedTotNumQuads);
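
    // Examine the ops task that was just populated: each chain should be a single FillRectOp.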
    auto opsTask = sdc->testingOnly_PeekLastOpsTask();
    int actualNumOps = opsTask->numOpChains();

    int actualTotNumQuads = 0;

    for (int i = 0; i < actualNumOps; ++i) {
        const GrOp* tmp = opsTask->getChain(i);
        REPORTER_ASSERT(reporter, tmp->classID() == skgpu::v1::FillRectOp::ClassID());
        REPORTER_ASSERT(reporter, tmp->isChainTail());
        actualTotNumQuads += ((GrDrawOp*) tmp)->numQuads();
    }

    REPORTER_ASSERT(reporter, expectedNumOps == actualNumOps);
    REPORTER_ASSERT(reporter, requestedTotNumQuads == actualTotNumQuads);

    dContext->flushAndSubmit();

    delete[] quads;
}

//-------------------------------------------------------------------------------------------------
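// The TextureOp analogue of the above: quads are added either one at a time via TextureOp::Make
// (exercising op chaining) or in bulk via TextureOp::AddTextureSetOps (exercising op merging).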
static void textureop_creation_test(skiatest::Reporter* reporter, GrDirectContext* dContext,
                                    PerQuadAAFunc perQuadAA, GrAAType overallAA,
                                    SkBlendMode blendMode, bool addOneByOne,
                                    bool allUniqueProxies,
                                    int requestedTotNumQuads, int expectedNumOps) {

    std::unique_ptr<skgpu::v1::SurfaceDrawContext> sdc = new_SDC(dContext);

    GrSurfaceProxyView proxyViewA, proxyViewB;

    if (!allUniqueProxies) {
        sk_sp<GrSurfaceProxy> proxyA = create_proxy(dContext);
        sk_sp<GrSurfaceProxy> proxyB = create_proxy(dContext);
        proxyViewA = GrSurfaceProxyView(std::move(proxyA),
                                        kTopLeft_GrSurfaceOrigin,
                                        skgpu::Swizzle::RGBA());
        proxyViewB = GrSurfaceProxyView(std::move(proxyB),
                                        kTopLeft_GrSurfaceOrigin,
                                        skgpu::Swizzle::RGBA());
    }

    auto set = new GrTextureSetEntry[requestedTotNumQuads];

    for (int i = 0; i < requestedTotNumQuads; ++i) {
        if (!allUniqueProxies) {
            // Alternate between two proxies to prevent op merging if the batch API was forced to
            // submit one op at a time (to work, this does require that all fDstRects overlap).
            set[i].fProxyView = i % 2 == 0 ? proxyViewA : proxyViewB;
        } else {
            // Each op gets its own proxy to force chaining only
            sk_sp<GrSurfaceProxy> proxyA = create_proxy(dContext);
            set[i].fProxyView = GrSurfaceProxyView(std::move(proxyA),
                                                   kTopLeft_GrSurfaceOrigin,
                                                   skgpu::Swizzle::RGBA());
        }

        set[i].fSrcAlphaType = kPremul_SkAlphaType;
        set[i].fSrcRect = SkRect::MakeWH(100.0f, 100.0f);
        set[i].fDstRect = SkRect::MakeWH(100.5f, 100.5f); // prevent the int non-AA optimization
        set[i].fDstClipQuad = nullptr;
        set[i].fPreViewMatrix = nullptr;
        set[i].fColor = {1.f, 1.f, 1.f, 1.f};
        set[i].fAAFlags = perQuadAA(i);
    }
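
    // Submit the quads, either individually or through the bulk set API.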
    if (addOneByOne) {
        for (int i = 0; i < requestedTotNumQuads; ++i) {
            DrawQuad quad;

            quad.fDevice = GrQuad::MakeFromRect(set[i].fDstRect, SkMatrix::I());
            quad.fLocal = GrQuad(set[i].fSrcRect);
            quad.fEdgeFlags = set[i].fAAFlags;

            GrOp::Owner op = skgpu::v1::TextureOp::Make(dContext,
                                                        set[i].fProxyView,
                                                        set[i].fSrcAlphaType,
                                                        nullptr,
                                                        GrSamplerState::Filter::kNearest,
                                                        GrSamplerState::MipmapMode::kNone,
                                                        set[i].fColor,
                                                        skgpu::v1::TextureOp::Saturate::kYes,
                                                        blendMode,
                                                        overallAA,
                                                        &quad,
                                                        nullptr);
            sdc->addDrawOp(nullptr, std::move(op));
        }
    } else {
        skgpu::v1::TextureOp::AddTextureSetOps(sdc.get(),
                                               nullptr,
                                               dContext,
                                               set,
                                               requestedTotNumQuads,
                                               requestedTotNumQuads, // We alternate so proxyCnt == cnt
                                               GrSamplerState::Filter::kNearest,
                                               GrSamplerState::MipmapMode::kNone,
                                               skgpu::v1::TextureOp::Saturate::kYes,
                                               blendMode,
                                               overallAA,
                                               SkCanvas::kStrict_SrcRectConstraint,
                                               SkMatrix::I(),
                                               nullptr);
    }

    auto opsTask = sdc->testingOnly_PeekLastOpsTask();
    int actualNumOps = opsTask->numOpChains();

    int actualTotNumQuads = 0;

    if (blendMode != SkBlendMode::kSrcOver ||
        !dContext->priv().caps()->dynamicStateArrayGeometryProcessorTextureSupport()) {
        // In either of these two cases, TextureOp creates one op per quad instead. Since
        // each entry alternates proxies but overlaps geometrically, this will prevent the ops
        // from being merged back into fewer ops.
        expectedNumOps = requestedTotNumQuads;
    }
    uint32_t expectedOpID = blendMode == SkBlendMode::kSrcOver ? skgpu::v1::TextureOp::ClassID()
                                                               : skgpu::v1::FillRectOp::ClassID();
    for (int i = 0; i < actualNumOps; ++i) {
        const GrOp* tmp = opsTask->getChain(i);
        REPORTER_ASSERT(reporter, allUniqueProxies || tmp->isChainTail());
        while (tmp) {
            REPORTER_ASSERT(reporter, tmp->classID() == expectedOpID);
            actualTotNumQuads += ((GrDrawOp*) tmp)->numQuads();
            tmp = tmp->nextInChain();
        }
    }

    REPORTER_ASSERT(reporter, expectedNumOps == actualNumOps);
    REPORTER_ASSERT(reporter, requestedTotNumQuads == actualTotNumQuads);

    dContext->flushAndSubmit();

    delete[] set;
}

//-------------------------------------------------------------------------------------------------
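// Drives one of the two test bodies above through a series of per-quad AA patterns with known
// expected op counts.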
static void run_test(GrDirectContext* dContext, skiatest::Reporter* reporter, BulkRectTest test) {

    // This is the simple case where there is no AA at all. We expect 2 non-AA clumps of quads.
    {
        auto noAA = [](int i) -> GrQuadAAFlags {
            return GrQuadAAFlags::kNone;
        };

        static const int kNumExpectedOps = 2;

        test(reporter, dContext, noAA, GrAAType::kNone, SkBlendMode::kSrcOver,
             false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), kNumExpectedOps);
    }

    // This is the same as the above case except the overall AA is kCoverage. However, since
    // the per-quad AA is still none, all the quads should be downgraded to non-AA.
    {
        auto noAA = [](int i) -> GrQuadAAFlags {
            return GrQuadAAFlags::kNone;
        };

        static const int kNumExpectedOps = 2;

        test(reporter, dContext, noAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
             false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), kNumExpectedOps);
    }

    // This case has an overall AA of kCoverage but the per-quad AA alternates.
    // We should end up with several AA-sized clumps.
    {
        auto alternateAA = [](int i) -> GrQuadAAFlags {
            return (i % 2) ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
        };

        int numExpectedOps = 2*GrResourceProvider::MaxNumNonAAQuads() /
                             GrResourceProvider::MaxNumAAQuads();

        test(reporter, dContext, alternateAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
             false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), numExpectedOps);
    }

    // In this case we have a run of MaxNumAAQuads non-AA quads and then AA quads. This
    // exercises the case where we have a clump of quads that can't be upgraded to AA because of
    // its size. We expect one clump of non-AA quads followed by one clump of AA quads.
    {
        auto runOfNonAA = [](int i) -> GrQuadAAFlags {
            return (i < GrResourceProvider::MaxNumAAQuads()) ? GrQuadAAFlags::kNone
                                                             : GrQuadAAFlags::kAll;
        };

        static const int kNumExpectedOps = 2;

        test(reporter, dContext, runOfNonAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
             false, false, 2*GrResourceProvider::MaxNumAAQuads(), kNumExpectedOps);
    }

    // In this case we use a blend mode other than src-over, which hits the FillRectOp fallback
    // code path for TextureOp. We pass in the expected results assuming batching succeeds:
    // fillrectop_creation_test batches on all blend modes, while textureop_creation_test is
    // responsible for revising its expectations for the fallback.
    {
        auto fixedAA = [](int i) -> GrQuadAAFlags {
            return GrQuadAAFlags::kAll;
        };

        static const int kNumExpectedOps = 2;

        test(reporter, dContext, fixedAA, GrAAType::kCoverage, SkBlendMode::kSrcATop,
             false, false, 2*GrResourceProvider::MaxNumAAQuads(), kNumExpectedOps);
    }

    // This repros crbug.com/1108475, where we create 1024 non-AA texture ops w/ one coverage-AA
    // texture op in the middle. Because each op has its own texture, all the texture ops
    // get chained together so the quad count can exceed the AA maximum.
    {
        auto onlyOneAA = [](int i) -> GrQuadAAFlags {
            return i == 256 ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
        };

        static const int kNumExpectedOps = 3;

        test(reporter, dContext, onlyOneAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
             true, true, 1024, kNumExpectedOps);
    }

    // This repros a problem related to crbug.com/1108475. In this case, the bulk creation
    // method had no way to break up the set of texture ops at the AA quad limit.
    {
        auto onlyOneAA = [](int i) -> GrQuadAAFlags {
            return i == 256 ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
        };

        static const int kNumExpectedOps = 2;

        test(reporter, dContext, onlyOneAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
             false, true, 1024, kNumExpectedOps);
    }
}
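
// Register both variants with the GPU test harness.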
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(BulkFillRectTest, reporter, ctxInfo) {
    run_test(ctxInfo.directContext(), reporter, fillrectop_creation_test);
}

DEF_GPUTEST_FOR_RENDERING_CONTEXTS(BulkTextureRectTest, reporter, ctxInfo) {
    run_test(ctxInfo.directContext(), reporter, textureop_creation_test);
}