Reland "Add option to GrGpuBuffer::updateData to *not* discard non-updated area."

This is a reland of commit a9b52ca52a

Original change's description:
> Add option to GrGpuBuffer::updateData to *not* discard non-updated area.
>
> Also support minimal map region in Dawn implementation when onUpdateBuffer uses mapping.
>
> Bug: skia:13427
> Change-Id: I5c8a2872b520e04f8a55085c86430e635da2f43f
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/553585
> Reviewed-by: Greg Daniel <egdaniel@google.com>
> Commit-Queue: Brian Salomon <bsalomon@google.com>

Bug: skia:13427
Change-Id: I04e8a4cba5dd3d77e457a4df2d8c20397c2ae760
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/554997
Commit-Queue: Brian Salomon <bsalomon@google.com>
Reviewed-by: Jim Van Verth <jvanverth@google.com>
This commit is contained in:
Brian Salomon 2022-06-29 16:31:55 -04:00 committed by SkCQ
parent cc01efda73
commit b8c26f8bfe
22 changed files with 250 additions and 112 deletions

View File

@ -219,10 +219,10 @@ void* GrBufferAllocPool::makeSpace(size_t size,
// We could honor the space request by a partial update of the current
// VB (if there is room). But we don't currently use draw calls to GL that
// allow the driver to know that previously issued draws won't read from
// the part of the buffer we update. Also, the GL buffer implementation
// may be cheating on the actual buffer size by shrinking the buffer on
// updateData() if the amount of data passed is less than the full buffer
// size.
// the part of the buffer we update. Also, when this was written the GL
// buffer implementation was cheating on the actual buffer size by shrinking
// the buffer in updateData() if the amount of data passed was less than
// the full buffer size. This is old code and both concerns may be obsolete.
if (!this->createBlock(size)) {
return nullptr;
@ -402,7 +402,7 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
return;
}
}
buffer->updateData(fBufferPtr, /*offset=*/0, flushSize);
buffer->updateData(fBufferPtr, /*offset=*/0, flushSize, /*preserve=*/false);
VALIDATE(true);
}

View File

@ -225,6 +225,12 @@ public:
return fTransferFromBufferToBufferAlignment;
}
// Alignment requirement for offset and size passed to GrGpuBuffer::updateData when the
// preserve param is true.
size_t bufferUpdateDataPreserveAlignment() const {
return fBufferUpdateDataPreserveAlignment;
}
virtual bool isFormatSRGB(const GrBackendFormat&) const = 0;
bool isFormatCompressed(const GrBackendFormat& format) const;
@ -608,6 +614,7 @@ protected:
uint32_t fMaxPushConstantsSize = 0;
size_t fTransferBufferRowBytesAlignment = 1;
size_t fTransferFromBufferToBufferAlignment = 1;
size_t fBufferUpdateDataPreserveAlignment = 1;
GrDriverBugWorkarounds fDriverBugWorkarounds;

View File

@ -39,7 +39,7 @@ void GrGpuBuffer::unmap() {
bool GrGpuBuffer::isMapped() const { return SkToBool(fMapPtr); }
bool GrGpuBuffer::updateData(const void* src, size_t offset, size_t size) {
bool GrGpuBuffer::updateData(const void* src, size_t offset, size_t size, bool preserve) {
SkASSERT(!this->isMapped());
SkASSERT(size > 0 && offset + size <= fSizeInBytes);
SkASSERT(src);
@ -48,11 +48,18 @@ bool GrGpuBuffer::updateData(const void* src, size_t offset, size_t size) {
return false;
}
if (preserve) {
size_t a = this->getGpu()->caps()->bufferUpdateDataPreserveAlignment();
if (SkAlignTo(offset, a) != offset || SkAlignTo(size, a) != size) {
return false;
}
}
if (this->intendedType() == GrGpuBufferType::kXferGpuToCpu) {
return false;
}
return this->onUpdateData(src, offset, size);
return this->onUpdateData(src, offset, size, preserve);
}
void GrGpuBuffer::ComputeScratchKeyForDynamicBuffer(size_t size,

View File

@ -66,8 +66,10 @@ public:
* Updates the buffer data.
*
* The size of the buffer will be preserved. The src data will be
* placed at offset and any remaining content before/after the
* range [offset, offset+size) becomes undefined.
* placed at offset. If preserve is false then any remaining content
* before/after the range [offset, offset+size) becomes undefined.
* Preserving updates will fail if the size and offset are not aligned
* to GrCaps::bufferUpdateDataPreserveAlignment().
*
* The buffer must not be mapped.
*
@ -78,7 +80,7 @@ public:
*
* @return returns true if the update succeeds, false otherwise.
*/
bool updateData(const void* src, size_t offset, size_t size);
bool updateData(const void* src, size_t offset, size_t size, bool preserve);
GrGpuBufferType intendedType() const { return fIntendedType; }
@ -111,7 +113,7 @@ private:
virtual void onMap(MapType) = 0;
virtual void onUnmap(MapType) = 0;
virtual bool onUpdateData(const void* src, size_t offset, size_t size) = 0;
virtual bool onUpdateData(const void* src, size_t offset, size_t size, bool preserve) = 0;
size_t onGpuMemorySize() const override { return fSizeInBytes; }
void onSetLabel() override{}

View File

@ -489,7 +489,7 @@ sk_sp<const GrGpuBuffer> GrResourceProvider::findOrMakeStaticBuffer(
if (buffer->isMapped()) {
buffer->unmap();
} else {
buffer->updateData(stagingBuffer, /*offset=*/0, size);
buffer->updateData(stagingBuffer, /*offset=*/0, size, /*preserve=*/false);
}
return std::move(buffer);
@ -522,7 +522,7 @@ sk_sp<const GrGpuBuffer> GrResourceProvider::createPatternedIndexBuffer(
}
}
if (temp.get()) {
if (!buffer->updateData(data, /*offset=*/0, bufferSize)) {
if (!buffer->updateData(data, /*offset=*/0, bufferSize, /*preserve=*/false)) {
return nullptr;
}
} else {
@ -617,6 +617,21 @@ sk_sp<GrGpuBuffer> GrResourceProvider::createBuffer(size_t size,
return buffer;
}
sk_sp<GrGpuBuffer> GrResourceProvider::createBuffer(const void* data,
size_t size,
GrGpuBufferType type,
GrAccessPattern pattern) {
SkASSERT(data);
auto buffer = this->createBuffer(size, type, pattern);
if (!buffer) {
return nullptr;
}
if (!buffer->updateData(data, /*offset=*/0, size, /*preserve=*/false)) {
return nullptr;
}
return buffer;
}
static int num_stencil_samples(const GrRenderTarget* rt, bool useMSAASurface, const GrCaps& caps) {
int numSamples = rt->numSamples();
if (numSamples == 1 && useMSAASurface) { // Are we using dynamic msaa?

View File

@ -298,11 +298,7 @@ public:
sk_sp<GrGpuBuffer> createBuffer(const void* data,
size_t size,
GrGpuBufferType type,
GrAccessPattern pattern) {
SkASSERT(data);
auto buffer = this->createBuffer(size, type, pattern);
return buffer && buffer->updateData(data, /*offset=*/ 0, size) ? buffer : nullptr;
}
GrAccessPattern pattern);
/**
* If passed in render target already has a stencil buffer on the specified surface, return

View File

@ -148,7 +148,7 @@ void GrD3DBuffer::onUnmap(MapType type) {
this->internalUnmap(type, 0, this->size());
}
bool GrD3DBuffer::onUpdateData(const void* src, size_t offset, size_t size) {
bool GrD3DBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool /*preserve*/) {
if (!fD3DResource) {
return false;
}

View File

@ -42,7 +42,7 @@ private:
void onMap(MapType) override;
void onUnmap(MapType) override;
bool onUpdateData(const void* src, size_t offset, size_t size) override;
bool onUpdateData(const void* src, size_t offset, size_t size, bool preserve) override;
void* internalMap(MapType, size_t offset, size_t size);
void internalUnmap(MapType, size_t offset, size_t size);

View File

@ -94,38 +94,50 @@ GrDawnBuffer::GrDawnBuffer(GrDawnGpu* gpu,
this->registerWithCache(SkBudgeted::kYes);
}
void GrDawnBuffer::onMap(MapType type) {
void* GrDawnBuffer::internalMap(MapType type, size_t offset, size_t size) {
if (fUnmapped) {
SkASSERT(fMappable != Mappable::kNot);
if (!this->blockingMap()) {
void* ptr = this->blockingMap(offset, size);
if (!ptr) {
SkDebugf("GrDawnBuffer: failed to map buffer\n");
return;
return nullptr;
}
fUnmapped = false;
return SkTAddOffset<void>(ptr, offset);
}
if (fMappable == Mappable::kNot) {
// Dawn requires that the offset and size be 4 byte aligned. If the offset is not
// then we logically align the staging slice with the previous aligned value, adjust
// the pointer into the slice that we return. We'll do the same adjustment when issuing the
// transfer in internalUnmap so that the data winds up at the right offset.
size_t r = offset & 0x3;
size += r;
SkASSERT(type == MapType::kWriteDiscard);
GrStagingBufferManager::Slice slice =
this->getDawnGpu()->stagingBufferManager()->allocateStagingBufferSlice(
this->size(), /*requiredAlignment=*/4);
size, /*requiredAlignment=*/4);
fStagingBuffer = static_cast<GrDawnBuffer*>(slice.fBuffer)->get();
fStagingOffset = slice.fOffset;
fMapPtr = slice.fOffsetMapPtr;
} else {
return SkTAddOffset<void>(slice.fOffsetMapPtr, r);
}
// We always create these buffers mapped, or if they've been used on the gpu before we use the
// async map callback to know when it is safe to reuse them. Thus by the time we get here
// the buffer should always be mapped.
SkASSERT(this->isMapped());
}
return SkTAddOffset<void>(fMapPtr, offset);
}
void GrDawnBuffer::onUnmap(MapType type) {
void GrDawnBuffer::internalUnmap(MapType type, size_t offset, size_t size) {
if (fMappable == Mappable::kNot) {
SkASSERT(type == MapType::kWriteDiscard);
size_t actualSize = SkAlign4(this->size());
// See comment in internalMap() about this adjustment.
size_t r = offset & 0x3;
offset -= r;
size = SkAlign4(size + r);
this->getDawnGpu()->getCopyEncoder().CopyBufferToBuffer(fStagingBuffer, fStagingOffset,
fBuffer, 0, actualSize);
fBuffer, offset, size);
} else {
fBuffer.Unmap();
fUnmapped = true;
@ -146,14 +158,25 @@ void GrDawnBuffer::onRelease() {
this->GrGpuBuffer::onRelease();
}
bool GrDawnBuffer::onUpdateData(const void *src, size_t offset, size_t size) {
this->map();
if (!this->isMapped()) {
void GrDawnBuffer::onMap(MapType type) {
fMapPtr = this->internalMap(type, 0, this->size());
}
void GrDawnBuffer::onUnmap(MapType type) {
this->internalUnmap(type, 0, this->size());
}
bool GrDawnBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool /*preserve*/) {
// Note that this subclass's impl of kWriteDiscard never actually discards.
void* ptr = this->internalMap(MapType::kWriteDiscard, offset, size);
if (!ptr) {
return false;
}
memcpy(SkTAddOffset<void>(fMapPtr, offset), src, size);
this->unmap();
memcpy(ptr, src, size);
this->internalUnmap(MapType::kWriteDiscard, offset, size);
return true;
}
@ -203,12 +226,49 @@ void GrDawnBuffer::mapAsyncDone(WGPUBufferMapAsyncStatus status) {
callback(this->isMapped());
}
bool GrDawnBuffer::blockingMap() {
void* GrDawnBuffer::blockingMap(size_t offset, size_t size) {
SkASSERT(fMappable != Mappable::kNot);
GrDawnAsyncWait wait(this->getDawnGpu()->device());
this->mapAsync([&wait](bool) { wait.signal(); });
wait.busyWait();
struct Context {
GrDawnBuffer* buffer;
void* result;
GrDawnAsyncWait wait;
};
return this->isMapped();
Context context{this, nullptr, GrDawnAsyncWait{this->getDawnGpu()->device()}};
// The offset must be a multiple of 8. If not, back it up to the previous 8 byte multiple
// and compensate by extending the size. In either case size must be a multiple of 4.
SkASSERT(SkIsAlign4(offset));
size_t r = offset & 0x7;
offset -= r;
size = SkAlign4(size + r);
fBuffer.MapAsync(
(fMappable == Mappable::kReadOnly) ? wgpu::MapMode::Read : wgpu::MapMode::Write,
offset,
size,
[](WGPUBufferMapAsyncStatus status, void* userData) {
auto* context = static_cast<Context*>(userData);
if (status != WGPUBufferMapAsyncStatus_Success) {
context->result = nullptr;
context->wait.signal();
return;
}
auto* wgpuBuffer = &context->buffer->fBuffer;
if (context->buffer->fMappable == Mappable::kReadOnly) {
context->result = const_cast<void*>(wgpuBuffer->GetConstMappedRange());
} else {
context->result = wgpuBuffer->GetMappedRange();
}
if (context->result) {
context->buffer->fUnmapped = false;
}
context->wait.signal();
},
&context);
context.wait.busyWait();
return context.result ? SkTAddOffset<void>(context.result, r) : nullptr;
}

View File

@ -74,7 +74,7 @@ public:
void onMap(MapType) override;
void onUnmap(MapType) override;
void onRelease() override;
bool onUpdateData(const void* src, size_t offset, size_t size) override;
bool onUpdateData(const void* src, size_t offset, size_t size, bool preserve) override;
GrDawnGpu* getDawnGpu() const;
wgpu::Buffer get() const { return fBuffer; }
@ -116,6 +116,9 @@ private:
wgpu::Buffer buffer,
void* mapPtr);
void* internalMap(MapType type, size_t offset, size_t size);
void internalUnmap(MapType type, size_t offset, size_t size);
// Called to handle the asynchronous mapAsync callback.
void mapAsyncDone(WGPUBufferMapAsyncStatus status);
@ -129,10 +132,10 @@ private:
//
// This procedure is used to cover the case where a buffer that is not managed by a
// GrStagingBufferManager (and thus not asynchronously mapped by the owning GrDawnGpu) is
// unmapped and needs to get re-mapped for use.
// unmapped and needs to get re-mapped for use (e.g. in onUpdateData()).
//
// Returns false if the buffer fails to map.
bool blockingMap();
// Returns nullptr if the buffer fails to map.
void* blockingMap(size_t offset, size_t size);
wgpu::Buffer fBuffer;
Mappable fMappable = Mappable::kNot;

View File

@ -36,6 +36,8 @@ GrDawnCaps::GrDawnCaps(const GrContextOptions& contextOptions) : INHERITED(conte
// We haven't yet implemented GrGpu::transferFromBufferToBuffer for Dawn but GrDawnBuffer uses
// transfers to implement buffer mapping and updates and transfers must be 4 byte aligned.
fTransferFromBufferToBufferAlignment = 4;
// Buffer updates are sometimes implemented through transfers in GrDawnBuffer.
fBufferUpdateDataPreserveAlignment = 4;
this->finishInitialization(contextOptions);
}

View File

@ -243,15 +243,17 @@ void GrGLBuffer::onUnmap(MapType) {
fMapPtr = nullptr;
}
bool GrGLBuffer::onUpdateData(const void* src, size_t offset, size_t size) {
bool GrGLBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool preserve) {
SkASSERT(fBufferID);
// bindbuffer handles dirty context
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
if (!preserve) {
GrGLenum error = invalidate_buffer(this->glGpu(), target, fUsage, fBufferID, this->size());
if (error != GR_GL_NO_ERROR) {
return false;
}
}
GL_CALL(BufferSubData(target, offset, size, src));
return true;
}

View File

@ -49,7 +49,7 @@ private:
void onMap(MapType) override;
void onUnmap(MapType) override;
bool onUpdateData(const void* src, size_t offset, size_t size) override;
bool onUpdateData(const void* src, size_t offset, size_t size, bool preserve) override;
void onSetLabel() override;

View File

@ -3151,7 +3151,7 @@ bool GrGLGpu::createCopyProgram(GrTexture* srcTex) {
sizeof(vdata),
GrGpuBufferType::kVertex,
kStatic_GrAccessPattern);
fCopyProgramArrayBuffer->updateData(vdata, /*offset=*/0, sizeof(vdata));
fCopyProgramArrayBuffer->updateData(vdata, /*offset=*/0, sizeof(vdata), /*preserve=*/false);
}
if (!fCopyProgramArrayBuffer) {
return false;
@ -3589,7 +3589,10 @@ bool GrGLGpu::onRegenerateMipMapLevels(GrTexture* texture) {
sizeof(vdata),
GrGpuBufferType::kVertex,
kStatic_GrAccessPattern);
fMipmapProgramArrayBuffer->updateData(vdata, /*offset=*/0, sizeof(vdata));
fMipmapProgramArrayBuffer->updateData(vdata, /*offset=*/0,
sizeof(vdata),
/*preserve=*/false);
}
if (!fMipmapProgramArrayBuffer) {
return false;

View File

@ -28,7 +28,9 @@ private:
}
}
void onUnmap(MapType) override { sk_free(fMapPtr); }
bool onUpdateData(const void* src, size_t offset, size_t size) override { return true; }
bool onUpdateData(const void* src, size_t offset, size_t size, bool preserve) override {
return true;
}
using INHERITED = GrGpuBuffer;
};

View File

@ -42,7 +42,7 @@ private:
void onMap(MapType) override;
void onUnmap(MapType) override;
bool onUpdateData(const void* src, size_t offset, size_t size) override;
bool onUpdateData(const void* src, size_t offset, size_t size, bool preserve) override;
void internalMap();
void internalUnmap(size_t writtenOffset, size_t writtenSize);

View File

@ -83,7 +83,7 @@ GrMtlBuffer::~GrMtlBuffer() {
SkASSERT(!fMapPtr);
}
bool GrMtlBuffer::onUpdateData(const void *src, size_t offset, size_t size) {
bool GrMtlBuffer::onUpdateData(const void *src, size_t offset, size_t size, bool preserve) {
if (fIsDynamic) {
this->internalMap();
if (!fMapPtr) {
@ -99,6 +99,7 @@ bool GrMtlBuffer::onUpdateData(const void *src, size_t offset, size_t size) {
// after the region to be updated.
size_t transferAlignment = this->getGpu()->caps()->transferFromBufferToBufferAlignment();
size_t r = offset%transferAlignment;
SkASSERT(!preserve || r == 0); // We can't push extra bytes when preserving.
offset -= r;
size_t transferSize = SkAlignTo(size + r, transferAlignment);

View File

@ -343,6 +343,8 @@ void GrMtlCaps::initGrCaps(id<MTLDevice> device) {
// https://developer.apple.com/documentation/metal/mtlblitcommandencoder/1400767-copyfrombuffer
if (this->isMac()) {
fTransferFromBufferToBufferAlignment = 4;
// Buffer updates are sometimes implemented through transfers in GrMtlBuffer.
fBufferUpdateDataPreserveAlignment = 4;
}
// Init sample counts. All devices support 1 (i.e. 0 in skia).

View File

@ -134,7 +134,10 @@ public:
if (fCanMapVB) {
fVertexBuffer->unmap();
} else {
fVertexBuffer->updateData(fVertices, /*offset=*/0, actualCount*fLockStride);
fVertexBuffer->updateData(fVertices,
/*offset=*/0,
/*size=*/actualCount*fLockStride,
/*preserve=*/false);
sk_free(fVertices);
}

View File

@ -283,7 +283,7 @@ void GrVkBuffer::onUnmap(MapType type) {
this->vkUnmap(0, type == MapType::kWriteDiscard ? this->size() : 0);
}
bool GrVkBuffer::onUpdateData(const void* src, size_t offset, size_t size) {
bool GrVkBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool /*preserve*/) {
if (this->isVkMappable()) {
// We won't be reading the mapped memory so pass an empty range.
this->vkMap(0, 0);

View File

@ -52,7 +52,7 @@ private:
void onMap(MapType) override;
void onUnmap(MapType) override;
bool onUpdateData(const void* src, size_t offset, size_t size) override;
bool onUpdateData(const void* src, size_t offset, size_t size, bool preserve) override;
void vkRelease();

View File

@ -320,13 +320,20 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrGpuBufferUpdateDataTest, reporter, ctxInfo)
auto pm = GrPixmap::Allocate(sdc->imageInfo().makeColorType(GrColorType::kRGBA_F32));
for (bool piecewise : {false, true}) {
size_t alignment = piecewise ? gpu->caps()->bufferUpdateDataPreserveAlignment() : 1;
for (size_t offset : {size_t{0}, 4*sizeof(SkPoint), size_t{1}, size_t{27}}) {
// For non-discarding updates we may not be able to actually put the data at an
// arbitrary offset.
if (alignment > 1) {
offset = SkAlignTo(offset, alignment);
}
for (auto accessPattern : {kStatic_GrAccessPattern,
// kStream_GrAccessPattern, GrVkGpu asserts VBs aren't kStream
// kStream_GrAccessPattern, GrVkGpu asserts on this for VBs.
kDynamic_GrAccessPattern}) {
// Go direct to GrGpu to avoid caching/size adjustments at GrResourceProvider level.
// We add an extra size(SkPoint) to ensure that everything fits when we align the first
// point's location in the vb below.
// We add an extra size(SkPoint) to ensure that everything fits when we align the
// first point's location in the vb below.
auto vb = gpu->createBuffer(sizeof(kUnitQuad) + offset + sizeof(SkPoint),
GrGpuBufferType::kVertex,
accessPattern);
@ -337,21 +344,42 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrGpuBufferUpdateDataTest, reporter, ctxInfo)
const void* src = kUnitQuad;
size_t updateSize = sizeof(kUnitQuad);
// The vertices in the VB must be aligned to the size of a vertex (because our draw call
// takes a base vertex index rather than a byte offset). So if we want our upload to
// begin at a non-aligned byte we shift the data in the src buffer so that it falls at a
// vertex alignment in the vb.
// The vertices in the VB must be aligned to the size of a vertex (because our draw
// call takes a base vertex index rather than a byte offset). So if we want our
// upload to begin at a non-aligned byte we shift the data in the src buffer so that
// it falls at a vertex alignment in the vb.
std::unique_ptr<char[]> tempSrc;
size_t baseVertex = offset/sizeof(SkPoint);
if (size_t r = offset%sizeof(SkPoint); r != 0) {
size_t pad = sizeof(SkPoint) - r;
updateSize += pad;
if (alignment > 1) {
updateSize = SkAlignTo(updateSize, alignment);
}
++baseVertex;
tempSrc.reset(new char[updateSize]);
std::memcpy(tempSrc.get() + pad, kUnitQuad, sizeof(kUnitQuad));
src = tempSrc.get();
}
if (!vb->updateData(src, offset, updateSize)) {
if (piecewise) {
// This is the minimum size we can transfer at once.
size_t pieceSize = alignment;
// Upload each piece from a buffer where the byte before and after the uploaded
// bytes are not the same values as we want adjacent to the piece in the buffer.
// Thus, if updateData() transfers extra bytes around the source we should get a
// bad buffer.
auto piece = std::make_unique<unsigned char[]>(pieceSize + 2);
piece[0] = piece[pieceSize + 1] = 0xFF;
for (size_t o = 0; o < updateSize; o += pieceSize) {
memcpy(&piece[1], SkTAddOffset<const void>(src, o), pieceSize);
if (!vb->updateData(&piece[1], offset + o, pieceSize, /*preserve=*/true)) {
ERRORF(reporter, "GrGpuBuffer::updateData returned false.");
return;
}
}
} else if (!vb->updateData(src, offset, updateSize, /*preserve=*/false)) {
ERRORF(reporter, "GrGpuBuffer::updateData returned false.");
return;
}
@ -362,8 +390,11 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrGpuBufferUpdateDataTest, reporter, ctxInfo)
sdc->clear(kRed);
sdc->addDrawOp(nullptr,
TestVertexOp::Make(dc, vb, baseVertex, std::size(kUnitQuad), kBounds));
sdc->addDrawOp(nullptr, TestVertexOp::Make(dc,
vb,
baseVertex,
std::size(kUnitQuad),
kBounds));
auto color = static_cast<SkPMColor4f*>(pm.addr());
*color = kRed.premul();
@ -374,7 +405,9 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrGpuBufferUpdateDataTest, reporter, ctxInfo)
static constexpr SkPMColor4f kGreen{0, 1, 0, 1};
REPORTER_ASSERT(reporter, *color == kGreen, "offset: %zu", offset);
REPORTER_ASSERT(reporter, *color == kGreen, "piecewise: %d, offset: %zu",
piecewise, offset);
}
}
}
}