Release deferred buffers in GrOpsRenderPass::end()
Moves the sk_sps for deferred buffers out of the individual subclasses and into GrOpsRenderPass, and makes sure to release these refs during end(). This ensures the buffers can actually become purgeable after the render pass completes.

Bug: skia:10048
Change-Id: I06838e45b85297b17277f625b313b6e07eda124f
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/278256
Reviewed-by: Jim Van Verth <jvanverth@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
This commit is contained in:
parent
71903bd48d
commit
2c3e169183
@ -20,6 +20,25 @@
|
||||
#include "src/gpu/GrSimpleMesh.h"
|
||||
#include "src/gpu/GrTexturePriv.h"
|
||||
|
||||
void GrOpsRenderPass::begin() {
|
||||
fDrawPipelineStatus = DrawPipelineStatus::kNotConfigured;
|
||||
#ifdef SK_DEBUG
|
||||
fScissorStatus = DynamicStateStatus::kDisabled;
|
||||
fTextureBindingStatus = DynamicStateStatus::kDisabled;
|
||||
fHasIndexBuffer = false;
|
||||
fInstanceBufferStatus = DynamicStateStatus::kDisabled;
|
||||
fVertexBufferStatus = DynamicStateStatus::kDisabled;
|
||||
#endif
|
||||
this->onBegin();
|
||||
}
|
||||
|
||||
void GrOpsRenderPass::end() {
|
||||
this->onEnd();
|
||||
fActiveIndexBuffer.reset();
|
||||
fActiveInstanceBuffer.reset();
|
||||
fActiveVertexBuffer.reset();
|
||||
}
|
||||
|
||||
void GrOpsRenderPass::clear(const GrFixedClip& clip, const SkPMColor4f& color) {
|
||||
SkASSERT(fRenderTarget);
|
||||
// A clear at this level will always be a true clear, so make sure clears were not supposed to
|
||||
|
@ -45,9 +45,9 @@ public:
|
||||
GrStoreOp fStoreOp;
|
||||
};
|
||||
|
||||
virtual void begin() = 0;
|
||||
void begin();
|
||||
// Signals the end of recording to the GrOpsRenderPass and that it can now be submitted.
|
||||
virtual void end() = 0;
|
||||
void end();
|
||||
|
||||
// Updates the internal pipeline state for drawing with the provided GrProgramInfo. Enters an
|
||||
// internal "bad" state if the pipeline could not be set.
|
||||
@ -128,12 +128,20 @@ protected:
|
||||
GrSurfaceOrigin fOrigin;
|
||||
GrRenderTarget* fRenderTarget;
|
||||
|
||||
// Backends may defer binding of certain buffers if their draw API requires a buffer, or if
|
||||
// their bind methods don't support base values.
|
||||
sk_sp<const GrBuffer> fActiveIndexBuffer;
|
||||
sk_sp<const GrBuffer> fActiveVertexBuffer;
|
||||
sk_sp<const GrBuffer> fActiveInstanceBuffer;
|
||||
|
||||
private:
|
||||
virtual GrGpu* gpu() = 0;
|
||||
|
||||
bool prepareToDraw();
|
||||
|
||||
// overridden by backend-specific derived class to perform the rendering command.
|
||||
virtual void onBegin() {}
|
||||
virtual void onEnd() {}
|
||||
virtual bool onBindPipeline(const GrProgramInfo&, const SkRect& drawBounds) = 0;
|
||||
virtual void onSetScissorRect(const SkIRect&) = 0;
|
||||
virtual bool onBindTextures(const GrPrimitiveProcessor&,
|
||||
@ -168,11 +176,11 @@ private:
|
||||
kConfigured
|
||||
};
|
||||
|
||||
DynamicStateStatus fScissorStatus = DynamicStateStatus::kDisabled;
|
||||
DynamicStateStatus fTextureBindingStatus = DynamicStateStatus::kDisabled;
|
||||
bool fHasIndexBuffer = false;
|
||||
DynamicStateStatus fInstanceBufferStatus = DynamicStateStatus::kDisabled;
|
||||
DynamicStateStatus fVertexBufferStatus = DynamicStateStatus::kDisabled;
|
||||
DynamicStateStatus fScissorStatus;
|
||||
DynamicStateStatus fTextureBindingStatus;
|
||||
bool fHasIndexBuffer;
|
||||
DynamicStateStatus fInstanceBufferStatus;
|
||||
DynamicStateStatus fVertexBufferStatus;
|
||||
#endif
|
||||
|
||||
typedef GrOpsRenderPass INHERITED;
|
||||
|
@ -21,9 +21,6 @@ public:
|
||||
|
||||
~GrD3DOpsRenderPass() override;
|
||||
|
||||
void begin() override {}
|
||||
void end() override {}
|
||||
|
||||
void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override {}
|
||||
|
||||
void onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler>) override {}
|
||||
|
@ -90,10 +90,6 @@ GrDawnOpsRenderPass::~GrDawnOpsRenderPass() {
|
||||
|
||||
GrGpu* GrDawnOpsRenderPass::gpu() { return fGpu; }
|
||||
|
||||
void GrDawnOpsRenderPass::end() {
|
||||
fPassEncoder.EndPass();
|
||||
}
|
||||
|
||||
void GrDawnOpsRenderPass::submit() {
|
||||
fGpu->appendCommandBuffer(fEncoder.Finish());
|
||||
}
|
||||
@ -136,6 +132,10 @@ void GrDawnOpsRenderPass::applyState(GrDawnProgram* program, const GrProgramInfo
|
||||
}
|
||||
}
|
||||
|
||||
void GrDawnOpsRenderPass::onEnd() {
|
||||
fPassEncoder.EndPass();
|
||||
}
|
||||
|
||||
bool GrDawnOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo,
|
||||
const SkRect& drawBounds) {
|
||||
fCurrentProgram = fGpu->getOrCreateRenderPipeline(fRenderTarget, programInfo);
|
||||
|
@ -25,9 +25,6 @@ public:
|
||||
|
||||
~GrDawnOpsRenderPass() override;
|
||||
|
||||
void begin() override { }
|
||||
void end() override;
|
||||
|
||||
wgpu::RenderPassEncoder beginRenderPass(wgpu::LoadOp colorOp, wgpu::LoadOp stencilOp);
|
||||
|
||||
void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override;
|
||||
@ -39,6 +36,7 @@ private:
|
||||
|
||||
void applyState(GrDawnProgram*, const GrProgramInfo& programInfo);
|
||||
|
||||
void onEnd() override;
|
||||
bool onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) override;
|
||||
void onSetScissorRect(const SkIRect&) override;
|
||||
bool onBindTextures(const GrPrimitiveProcessor&, const GrSurfaceProxy* const primProcTextures[],
|
||||
|
@ -25,6 +25,15 @@ void GrGLOpsRenderPass::set(GrRenderTarget* rt, const SkIRect& contentBounds,
|
||||
fStencilLoadAndStoreInfo = stencilInfo;
|
||||
}
|
||||
|
||||
void GrGLOpsRenderPass::onBegin() {
|
||||
fGpu->beginCommandBuffer(fRenderTarget, fContentBounds, fOrigin, fColorLoadAndStoreInfo,
|
||||
fStencilLoadAndStoreInfo);
|
||||
}
|
||||
|
||||
void GrGLOpsRenderPass::onEnd() {
|
||||
fGpu->endCommandBuffer(fRenderTarget, fColorLoadAndStoreInfo, fStencilLoadAndStoreInfo);
|
||||
}
|
||||
|
||||
bool GrGLOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo,
|
||||
const SkRect& drawBounds) {
|
||||
fPrimitiveType = programInfo.primitiveType();
|
||||
@ -69,8 +78,8 @@ void GrGLOpsRenderPass::onBindBuffers(const GrBuffer* indexBuffer, const GrBuffe
|
||||
|
||||
// We defer binding of instance and vertex buffers because GL does not (always) support base
|
||||
// instance and/or base vertex.
|
||||
fDeferredInstanceBuffer = sk_ref_sp(instanceBuffer);
|
||||
fDeferredVertexBuffer = sk_ref_sp(vertexBuffer);
|
||||
fActiveInstanceBuffer = sk_ref_sp(instanceBuffer);
|
||||
fActiveVertexBuffer = sk_ref_sp(vertexBuffer);
|
||||
}
|
||||
|
||||
void GrGLOpsRenderPass::setupGeometry(const GrBuffer* vertexBuffer, int baseVertex,
|
||||
@ -111,18 +120,18 @@ void GrGLOpsRenderPass::setupGeometry(const GrBuffer* vertexBuffer, int baseVert
|
||||
|
||||
void GrGLOpsRenderPass::onDraw(int vertexCount, int baseVertex) {
|
||||
if (fGpu->glCaps().drawArraysBaseVertexIsBroken()) {
|
||||
this->setupGeometry(fDeferredVertexBuffer.get(), baseVertex, nullptr, 0);
|
||||
this->setupGeometry(fActiveVertexBuffer.get(), baseVertex, nullptr, 0);
|
||||
fGpu->drawArrays(fPrimitiveType, 0, vertexCount);
|
||||
return;
|
||||
}
|
||||
|
||||
this->setupGeometry(fDeferredVertexBuffer.get(), 0, nullptr, 0);
|
||||
this->setupGeometry(fActiveVertexBuffer.get(), 0, nullptr, 0);
|
||||
fGpu->drawArrays(fPrimitiveType, baseVertex, vertexCount);
|
||||
}
|
||||
|
||||
void GrGLOpsRenderPass::onDrawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue,
|
||||
uint16_t maxIndexValue, int baseVertex) {
|
||||
this->setupGeometry(fDeferredVertexBuffer.get(), baseVertex, nullptr, 0);
|
||||
this->setupGeometry(fActiveVertexBuffer.get(), baseVertex, nullptr, 0);
|
||||
if (fGpu->glCaps().drawRangeElementsSupport()) {
|
||||
fGpu->drawRangeElements(fPrimitiveType, minIndexValue, maxIndexValue, indexCount,
|
||||
GR_GL_UNSIGNED_SHORT, fIndexPointer + baseIndex);
|
||||
@ -136,7 +145,7 @@ void GrGLOpsRenderPass::onDrawInstanced(int instanceCount, int baseInstance, int
|
||||
int baseVertex) {
|
||||
int maxInstances = fGpu->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount);
|
||||
for (int i = 0; i < instanceCount; i += maxInstances) {
|
||||
this->setupGeometry(fDeferredVertexBuffer.get(), 0, fDeferredInstanceBuffer.get(),
|
||||
this->setupGeometry(fActiveVertexBuffer.get(), 0, fActiveInstanceBuffer.get(),
|
||||
baseInstance + i);
|
||||
fGpu->drawArraysInstanced(fPrimitiveType, baseVertex, vertexCount,
|
||||
std::min(instanceCount - i, maxInstances));
|
||||
@ -147,7 +156,7 @@ void GrGLOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, in
|
||||
int baseInstance, int baseVertex) {
|
||||
int maxInstances = fGpu->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount);
|
||||
for (int i = 0; i < instanceCount; i += maxInstances) {
|
||||
this->setupGeometry(fDeferredVertexBuffer.get(), baseVertex, fDeferredInstanceBuffer.get(),
|
||||
this->setupGeometry(fActiveVertexBuffer.get(), baseVertex, fActiveInstanceBuffer.get(),
|
||||
baseInstance + i);
|
||||
fGpu->drawElementsInstanced(fPrimitiveType, indexCount, GR_GL_UNSIGNED_SHORT,
|
||||
fIndexPointer + baseIndex,
|
||||
|
@ -26,15 +26,6 @@ class GrGLOpsRenderPass : public GrOpsRenderPass {
|
||||
public:
|
||||
GrGLOpsRenderPass(GrGLGpu* gpu) : fGpu(gpu) {}
|
||||
|
||||
void begin() override {
|
||||
fGpu->beginCommandBuffer(fRenderTarget, fContentBounds, fOrigin, fColorLoadAndStoreInfo,
|
||||
fStencilLoadAndStoreInfo);
|
||||
}
|
||||
|
||||
void end() override {
|
||||
fGpu->endCommandBuffer(fRenderTarget, fColorLoadAndStoreInfo, fStencilLoadAndStoreInfo);
|
||||
}
|
||||
|
||||
void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override {
|
||||
state->doUpload(upload);
|
||||
}
|
||||
@ -52,6 +43,8 @@ private:
|
||||
void setupGeometry(const GrBuffer* vertexBuffer, int baseVertex, const GrBuffer* instanceBuffer,
|
||||
int baseInstance);
|
||||
|
||||
void onBegin() override;
|
||||
void onEnd() override;
|
||||
bool onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) override;
|
||||
void onSetScissorRect(const SkIRect& scissor) override;
|
||||
bool onBindTextures(const GrPrimitiveProcessor&, const GrSurfaceProxy* const primProcTextures[],
|
||||
@ -81,11 +74,6 @@ private:
|
||||
// the indices, or nullptr if they reside physically in GPU memory.
|
||||
const uint16_t* fIndexPointer;
|
||||
|
||||
// We may defer binding of instance and vertex buffers because GL does not always support a base
|
||||
// instance and/or vertex.
|
||||
sk_sp<const GrBuffer> fDeferredInstanceBuffer;
|
||||
sk_sp<const GrBuffer> fDeferredVertexBuffer;
|
||||
|
||||
typedef GrOpsRenderPass INHERITED;
|
||||
};
|
||||
|
||||
|
@ -24,16 +24,15 @@ public:
|
||||
|
||||
GrGpu* gpu() override { return fGpu; }
|
||||
void inlineUpload(GrOpFlushState*, GrDeferredTextureUploadFn&) override {}
|
||||
void begin() override {
|
||||
if (GrLoadOp::kClear == fColorLoadOp) {
|
||||
this->markRenderTargetDirty();
|
||||
}
|
||||
}
|
||||
void end() override {}
|
||||
|
||||
int numDraws() const { return fNumDraws; }
|
||||
|
||||
private:
|
||||
void onBegin() override {
|
||||
if (GrLoadOp::kClear == fColorLoadOp) {
|
||||
this->markRenderTargetDirty();
|
||||
}
|
||||
}
|
||||
bool onBindPipeline(const GrProgramInfo&, const SkRect&) override { return true; }
|
||||
void onSetScissorRect(const SkIRect&) override {}
|
||||
bool onBindTextures(const GrPrimitiveProcessor&, const GrSurfaceProxy* const primProcTextures[],
|
||||
|
@ -27,9 +27,6 @@ public:
|
||||
|
||||
~GrMtlOpsRenderPass() override;
|
||||
|
||||
void begin() override {}
|
||||
void end() override {}
|
||||
|
||||
void initRenderState(id<MTLRenderCommandEncoder>);
|
||||
|
||||
void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override;
|
||||
@ -59,7 +56,7 @@ private:
|
||||
void setupRenderPass(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
|
||||
const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo);
|
||||
|
||||
void setVertexBuffer(id<MTLRenderCommandEncoder>, const GrMtlBuffer*, size_t offset,
|
||||
void setVertexBuffer(id<MTLRenderCommandEncoder>, const GrBuffer*, size_t offset,
|
||||
size_t inputBufferIndex);
|
||||
void resetBufferBindings();
|
||||
void precreateCmdEncoder();
|
||||
@ -71,12 +68,6 @@ private:
|
||||
MTLPrimitiveType fActivePrimitiveType;
|
||||
MTLRenderPassDescriptor* fRenderPassDesc;
|
||||
SkRect fBounds;
|
||||
|
||||
// The index buffer in metal is an argument to the draw call, rather than a stateful binding.
|
||||
sk_sp<const GrMtlBuffer> fIndexBuffer;
|
||||
|
||||
// We defer binding of the vertex buffer because Metal doesn't have baseVertex for drawIndexed.
|
||||
sk_sp<const GrMtlBuffer> fDeferredVertexBuffer;
|
||||
size_t fCurrentVertexStride;
|
||||
|
||||
static constexpr size_t kNumBindings = GrMtlUniformHandler::kLastUniformBinding + 3;
|
||||
|
@ -254,27 +254,25 @@ void GrMtlOpsRenderPass::onBindBuffers(const GrBuffer* indexBuffer, const GrBuff
|
||||
if (vertexBuffer) {
|
||||
SkASSERT(!vertexBuffer->isCpuBuffer());
|
||||
SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());
|
||||
fDeferredVertexBuffer = sk_ref_sp(static_cast<const GrMtlBuffer*>(vertexBuffer));
|
||||
fActiveVertexBuffer = sk_ref_sp(static_cast<const GrMtlBuffer*>(vertexBuffer));
|
||||
++inputBufferIndex;
|
||||
}
|
||||
if (instanceBuffer) {
|
||||
SkASSERT(!instanceBuffer->isCpuBuffer());
|
||||
SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());
|
||||
|
||||
const GrMtlBuffer* grMtlBuffer = static_cast<const GrMtlBuffer*>(instanceBuffer);
|
||||
this->setVertexBuffer(fActiveRenderCmdEncoder, grMtlBuffer, 0, inputBufferIndex++);
|
||||
this->setVertexBuffer(fActiveRenderCmdEncoder, instanceBuffer, 0, inputBufferIndex++);
|
||||
}
|
||||
if (indexBuffer) {
|
||||
SkASSERT(!indexBuffer->isCpuBuffer());
|
||||
SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());
|
||||
fIndexBuffer = sk_ref_sp(static_cast<const GrMtlBuffer*>(indexBuffer));
|
||||
fActiveIndexBuffer = sk_ref_sp(static_cast<const GrMtlBuffer*>(indexBuffer));
|
||||
}
|
||||
}
|
||||
|
||||
void GrMtlOpsRenderPass::onDraw(int vertexCount, int baseVertex) {
|
||||
SkASSERT(fActivePipelineState);
|
||||
SkASSERT(nil != fActiveRenderCmdEncoder);
|
||||
this->setVertexBuffer(fActiveRenderCmdEncoder, fDeferredVertexBuffer.get(), 0, 0);
|
||||
this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
|
||||
|
||||
[fActiveRenderCmdEncoder drawPrimitives:fActivePrimitiveType
|
||||
vertexStart:baseVertex
|
||||
@ -285,15 +283,16 @@ void GrMtlOpsRenderPass::onDrawIndexed(int indexCount, int baseIndex, uint16_t m
|
||||
uint16_t maxIndexValue, int baseVertex) {
|
||||
SkASSERT(fActivePipelineState);
|
||||
SkASSERT(nil != fActiveRenderCmdEncoder);
|
||||
SkASSERT(fIndexBuffer);
|
||||
this->setVertexBuffer(fActiveRenderCmdEncoder, fDeferredVertexBuffer.get(),
|
||||
SkASSERT(fActiveIndexBuffer);
|
||||
this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(),
|
||||
fCurrentVertexStride * baseVertex, 0);
|
||||
|
||||
size_t indexOffset = fIndexBuffer->offset() + sizeof(uint16_t) * baseIndex;
|
||||
auto mtlIndexBufer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
|
||||
size_t indexOffset = mtlIndexBufer->offset() + sizeof(uint16_t) * baseIndex;
|
||||
[fActiveRenderCmdEncoder drawIndexedPrimitives:fActivePrimitiveType
|
||||
indexCount:indexCount
|
||||
indexType:MTLIndexTypeUInt16
|
||||
indexBuffer:fIndexBuffer->mtlBuffer()
|
||||
indexBuffer:mtlIndexBufer->mtlBuffer()
|
||||
indexBufferOffset:indexOffset];
|
||||
fGpu->stats()->incNumDraws();
|
||||
}
|
||||
@ -302,7 +301,7 @@ void GrMtlOpsRenderPass::onDrawInstanced(int instanceCount, int baseInstance, in
|
||||
int baseVertex) {
|
||||
SkASSERT(fActivePipelineState);
|
||||
SkASSERT(nil != fActiveRenderCmdEncoder);
|
||||
this->setVertexBuffer(fActiveRenderCmdEncoder, fDeferredVertexBuffer.get(), 0, 0);
|
||||
this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
|
||||
|
||||
if (@available(macOS 10.11, iOS 9.0, *)) {
|
||||
[fActiveRenderCmdEncoder drawPrimitives:fActivePrimitiveType
|
||||
@ -319,15 +318,16 @@ void GrMtlOpsRenderPass::onDrawIndexedInstanced(
|
||||
int indexCount, int baseIndex, int instanceCount, int baseInstance, int baseVertex) {
|
||||
SkASSERT(fActivePipelineState);
|
||||
SkASSERT(nil != fActiveRenderCmdEncoder);
|
||||
SkASSERT(fIndexBuffer);
|
||||
this->setVertexBuffer(fActiveRenderCmdEncoder, fDeferredVertexBuffer.get(), 0, 0);
|
||||
SkASSERT(fActiveIndexBuffer);
|
||||
this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
|
||||
|
||||
size_t indexOffset = fIndexBuffer->offset() + sizeof(uint16_t) * baseIndex;
|
||||
auto mtlIndexBufer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
|
||||
size_t indexOffset = mtlIndexBufer->offset() + sizeof(uint16_t) * baseIndex;
|
||||
if (@available(macOS 10.11, iOS 9.0, *)) {
|
||||
[fActiveRenderCmdEncoder drawIndexedPrimitives:fActivePrimitiveType
|
||||
indexCount:indexCount
|
||||
indexType:MTLIndexTypeUInt16
|
||||
indexBuffer:fIndexBuffer->mtlBuffer()
|
||||
indexBuffer:mtlIndexBufer->mtlBuffer()
|
||||
indexBufferOffset:indexOffset
|
||||
instanceCount:instanceCount
|
||||
baseVertex:baseVertex
|
||||
@ -339,17 +339,18 @@ void GrMtlOpsRenderPass::onDrawIndexedInstanced(
|
||||
}
|
||||
|
||||
void GrMtlOpsRenderPass::setVertexBuffer(id<MTLRenderCommandEncoder> encoder,
|
||||
const GrMtlBuffer* buffer,
|
||||
const GrBuffer* buffer,
|
||||
size_t vertexOffset,
|
||||
size_t inputBufferIndex) {
|
||||
constexpr static int kFirstBufferBindingIdx = GrMtlUniformHandler::kLastUniformBinding + 1;
|
||||
int index = inputBufferIndex + kFirstBufferBindingIdx;
|
||||
SkASSERT(index < 4);
|
||||
id<MTLBuffer> mtlVertexBuffer = buffer->mtlBuffer();
|
||||
auto mtlBuffer = static_cast<const GrMtlBuffer*>(buffer);
|
||||
id<MTLBuffer> mtlVertexBuffer = mtlBuffer->mtlBuffer();
|
||||
SkASSERT(mtlVertexBuffer);
|
||||
// Apple recommends using setVertexBufferOffset: when changing the offset
|
||||
// for a currently bound vertex buffer, rather than setVertexBuffer:
|
||||
size_t offset = buffer->offset() + vertexOffset;
|
||||
size_t offset = mtlBuffer->offset() + vertexOffset;
|
||||
if (fBufferBindings[index].fBuffer != mtlVertexBuffer) {
|
||||
[encoder setVertexBuffer: mtlVertexBuffer
|
||||
offset: offset
|
||||
|
@ -171,12 +171,6 @@ GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
|
||||
return fGpu->currentCommandBuffer();
|
||||
}
|
||||
|
||||
void GrVkOpsRenderPass::end() {
|
||||
if (fCurrentSecondaryCommandBuffer) {
|
||||
fCurrentSecondaryCommandBuffer->end(fGpu);
|
||||
}
|
||||
}
|
||||
|
||||
void GrVkOpsRenderPass::submit() {
|
||||
if (!fRenderTarget) {
|
||||
return;
|
||||
@ -442,6 +436,12 @@ void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUpl
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void GrVkOpsRenderPass::onEnd() {
|
||||
if (fCurrentSecondaryCommandBuffer) {
|
||||
fCurrentSecondaryCommandBuffer->end(fGpu);
|
||||
}
|
||||
}
|
||||
|
||||
bool GrVkOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
|
||||
if (!fCurrentRenderPass) {
|
||||
SkASSERT(fGpu->isDeviceLost());
|
||||
|
@ -28,9 +28,6 @@ public:
|
||||
|
||||
~GrVkOpsRenderPass() override;
|
||||
|
||||
void begin() override { }
|
||||
void end() override;
|
||||
|
||||
void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override;
|
||||
|
||||
void onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler>) override;
|
||||
@ -62,6 +59,8 @@ private:
|
||||
|
||||
GrVkCommandBuffer* currentCommandBuffer();
|
||||
|
||||
void onEnd() override;
|
||||
|
||||
bool onBindPipeline(const GrProgramInfo&, const SkRect& drawBounds) override;
|
||||
void onSetScissorRect(const SkIRect&) override;
|
||||
bool onBindTextures(const GrPrimitiveProcessor&, const GrSurfaceProxy* const primProcTextures[],
|
||||
|
Loading…
Reference in New Issue
Block a user