Remove discard from GrRenderTarget & force it to always go through a RenderTargetContext

This is a bit sloppy in that it ignores some instances where discards were being issued before.

The creation of the temp RTContext in the RenderTarget's discard method was causing an extra split in the opLists.

This is split out of: https://skia-review.googlesource.com/c/10284/ (Omnibus: Remove GrSurface-derived classes from ops)

Change-Id: Ic366d303280635763b0fae238c4df37c04fb8503
Reviewed-on: https://skia-review.googlesource.com/11125
Commit-Queue: Robert Phillips <robertphillips@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
This commit is contained in:
Robert Phillips 2017-04-11 12:54:57 -04:00 committed by Skia Commit-Bot
parent fafe135349
commit 1119dc366e
16 changed files with 137 additions and 128 deletions

View File

@ -201,17 +201,16 @@ protected:
private:
static void DrawClippedBitmap(SkCanvas* canvas, const SkBitmap& bitmap, const SkPaint& paint,
int x, int y) {
int x, int y) {
canvas->save();
canvas->translate(SkIntToScalar(x), SkIntToScalar(y));
canvas->clipRect(SkRect::MakeWH(
SkIntToScalar(bitmap.width()), SkIntToScalar(bitmap.height())));
canvas->clipRect(SkRect::MakeIWH(bitmap.width(), bitmap.height()));
canvas->drawBitmap(bitmap, 0, 0, &paint);
canvas->restore();
}
static void DrawClippedPaint(SkCanvas* canvas, const SkRect& rect, const SkPaint& paint,
int x, int y) {
int x, int y) {
canvas->save();
canvas->translate(SkIntToScalar(x), SkIntToScalar(y));
canvas->clipRect(rect);

View File

@ -86,12 +86,6 @@ public:
*/
const SkIRect& getResolveRect() const { return fResolveRect; }
/**
* Provide a performance hint that the render target's contents are allowed
* to become undefined.
*/
void discard();
// a MSAA RT may require explicit resolving , it may auto-resolve (e.g. FBO
// 0 in GL), or be unresolvable because the client didn't give us the
// resolve destination.

View File

@ -267,7 +267,7 @@ static bool valid_unpremul_config(GrPixelConfig config) {
return GrPixelConfigIs8888Unorm(config) || kRGBA_half_GrPixelConfig == config;
}
bool GrContextPriv::writeSurfacePixels(GrSurfaceProxy* srcProxy, SkColorSpace* dstColorSpace,
bool GrContextPriv::writeSurfacePixels(GrSurfaceProxy* dstProxy, SkColorSpace* dstColorSpace,
int left, int top, int width, int height,
GrPixelConfig srcConfig, SkColorSpace* srcColorSpace,
const void* buffer, size_t rowBytes,
@ -276,11 +276,11 @@ bool GrContextPriv::writeSurfacePixels(GrSurfaceProxy* srcProxy, SkColorSpace* d
ASSERT_SINGLE_OWNER_PRIV
RETURN_FALSE_IF_ABANDONED_PRIV
ASSERT_OWNED_PROXY_PRIV(srcProxy);
SkASSERT(srcProxy);
ASSERT_OWNED_PROXY_PRIV(dstProxy);
SkASSERT(dstProxy);
GR_AUDIT_TRAIL_AUTO_FRAME(&fContext->fAuditTrail, "GrContextPriv::writeSurfacePixels");
GrSurface* surface = srcProxy->instantiate(fContext->resourceProvider());
GrSurface* surface = dstProxy->instantiate(fContext->resourceProvider());
if (!surface) {
return false;
}
@ -847,6 +847,8 @@ sk_sp<GrRenderTargetContext> GrContext::makeRenderTargetContext(SkBackingFit fit
return nullptr;
}
renderTargetContext->discard();
return renderTargetContext;
}
@ -873,9 +875,18 @@ sk_sp<GrRenderTargetContext> GrContext::makeDeferredRenderTargetContext(
return nullptr;
}
return fDrawingManager->makeRenderTargetContext(std::move(rtp),
std::move(colorSpace),
surfaceProps);
sk_sp<GrRenderTargetContext> renderTargetContext(
fDrawingManager->makeRenderTargetContext(std::move(rtp),
std::move(colorSpace),
surfaceProps));
if (!renderTargetContext) {
return nullptr;
}
renderTargetContext->discard();
return renderTargetContext;
}
bool GrContext::abandoned() const {

View File

@ -136,7 +136,7 @@ public:
/**
* Writes a rectangle of pixels to a surface.
* @param surface the surface to write to.
* @param dst the surface to write to.
* @param dstColorSpace color space of the surface
* @param left left edge of the rectangle to write (inclusive)
* @param top top edge of the rectangle to write (inclusive)
@ -151,7 +151,7 @@ public:
* @return true if the write succeeded, false if not. The write can fail because of an
* unsupported combination of surface and src configs.
*/
bool writeSurfacePixels(GrSurfaceProxy* src, SkColorSpace* dstColorSpace,
bool writeSurfacePixels(GrSurfaceProxy* dst, SkColorSpace* dstColorSpace,
int left, int top, int width, int height,
GrPixelConfig config, SkColorSpace* srcColorSpace, const void* buffer,
size_t rowBytes,

View File

@ -188,14 +188,6 @@ GrTexture* GrGpu::createTexture(const GrSurfaceDesc& origDesc, SkBudgeted budget
fStats.incTextureUploads();
}
}
// This is a current work around to get discards into newly created textures. Once we are in
// MDB world, we should remove this code a rely on the draw target having specified load
// operations.
if (isRT && texels.empty()) {
GrRenderTarget* rt = tex->asRenderTarget();
SkASSERT(rt);
rt->discard();
}
}
return tex;
}

View File

@ -38,9 +38,18 @@ sk_sp<GrRenderTargetContext> GrPreFlushResourceProvider::makeRenderTargetContext
fDrawingMgr->fOptionsForOpLists));
proxy->setLastOpList(opList.get());
return fDrawingMgr->makeRenderTargetContext(std::move(proxy),
std::move(colorSpace),
props);
sk_sp<GrRenderTargetContext> renderTargetContext(
fDrawingMgr->makeRenderTargetContext(std::move(proxy),
std::move(colorSpace),
props));
if (!renderTargetContext) {
return nullptr;
}
renderTargetContext->discard();
return renderTargetContext;
}
// TODO: we only need this entry point as long as we have to pre-allocate the atlas.
@ -58,8 +67,17 @@ sk_sp<GrRenderTargetContext> GrPreFlushResourceProvider::makeRenderTargetContext
fDrawingMgr->fOptionsForOpLists));
proxy->setLastOpList(opList.get());
return fDrawingMgr->makeRenderTargetContext(std::move(proxy),
std::move(colorSpace),
props);
sk_sp<GrRenderTargetContext> renderTargetContext(
fDrawingMgr->makeRenderTargetContext(std::move(proxy),
std::move(colorSpace),
props));
if (!renderTargetContext) {
return nullptr;
}
renderTargetContext->discard();
return renderTargetContext;
}

View File

@ -28,22 +28,6 @@ GrRenderTarget::GrRenderTarget(GrGpu* gpu, const GrSurfaceDesc& desc, Flags flag
fResolveRect.setLargestInverted();
}
void GrRenderTarget::discard() {
// go through context so that all necessary flushing occurs
GrContext* context = this->getContext();
if (!context) {
return;
}
sk_sp<GrRenderTargetContext> renderTargetContext(
context->contextPriv().makeWrappedRenderTargetContext(sk_ref_sp(this), nullptr));
if (!renderTargetContext) {
return;
}
renderTargetContext->discard();
}
void GrRenderTarget::flagAsNeedingResolve(const SkIRect* rect) {
if (kCanResolve_ResolveType == getResolveType()) {
if (rect) {

View File

@ -192,13 +192,6 @@ void GrRenderTargetContext::discard() {
AutoCheckFlush acf(this->drawingManager());
// TODO: This needs to be fixed up since it ends the deferral of the GrRenderTarget.
sk_sp<GrRenderTarget> rt(
sk_ref_sp(fRenderTargetProxy->instantiate(fContext->resourceProvider())));
if (!rt) {
return;
}
this->getOpList()->discard(this);
}

View File

@ -14,7 +14,6 @@
#include "GrRenderTargetContext.h"
#include "GrResourceProvider.h"
#include "ops/GrClearOp.h"
#include "ops/GrClearStencilClipOp.h"
#include "ops/GrCopySurfaceOp.h"
#include "ops/GrDiscardOp.h"
#include "instanced/InstancedRendering.h"
@ -229,8 +228,11 @@ void GrRenderTargetOpList::discard(GrRenderTargetContext* renderTargetContext) {
// Currently this just inserts a discard op. However, once in MDB this can remove all the
// previously recorded ops and change the load op to discard.
if (this->caps()->discardRenderTargetSupport()) {
this->recordOp(GrDiscardOp::Make(renderTargetContext->accessRenderTarget()),
renderTargetContext);
std::unique_ptr<GrOp> op(GrDiscardOp::Make(renderTargetContext));
if (!op) {
return;
}
this->recordOp(std::move(op), renderTargetContext);
}
}
@ -301,12 +303,12 @@ GrOp* GrRenderTargetOpList::recordOp(std::unique_ptr<GrOp> op,
// 3) find a 'blocker'
GR_AUDIT_TRAIL_ADD_OP(fAuditTrail, op.get(), renderTarget->uniqueID(),
renderTargetContext->asRenderTargetProxy()->uniqueID());
GrOP_INFO("Recording (%s, B%u)\n"
"\tBounds LRTB (%f, %f, %f, %f)\n",
GrOP_INFO("Recording (%s, opID: %u)\n"
"\tBounds: [L: %f T: %f R: %f B: %f]\n",
op->name(),
op->uniqueID(),
op->bounds().fLeft, op->bounds().fRight,
op->bounds().fTop, op->bounds().fBottom);
op->bounds().fLeft, op->bounds().fTop,
op->bounds().fRight, op->bounds().fBottom);
GrOP_INFO(SkTabString(op->dumpInfo(), 1).c_str());
GrOP_INFO("\tClipped Bounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", op->bounds().fLeft,
op->bounds().fTop, op->bounds().fRight, op->bounds().fBottom);
@ -319,12 +321,13 @@ GrOp* GrRenderTargetOpList::recordOp(std::unique_ptr<GrOp> op,
const RecordedOp& candidate = fRecordedOps.fromBack(i);
// We cannot continue to search backwards if the render target changes
if (candidate.fRenderTarget.get() != renderTarget) {
GrOP_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n", candidate.fOp->name(),
GrOP_INFO("\t\tBreaking because of (%s, opID: %u) Rendertarget mismatch\n",
candidate.fOp->name(),
candidate.fOp->uniqueID());
break;
}
if (this->combineIfPossible(candidate, op.get(), clip, dstTexture)) {
GrOP_INFO("\t\tCombining with (%s, B%u)\n", candidate.fOp->name(),
GrOP_INFO("\t\tCombining with (%s, opID: %u)\n", candidate.fOp->name(),
candidate.fOp->uniqueID());
GrOP_INFO("\t\t\tCombined op info:\n");
GrOP_INFO(SkTabString(candidate.fOp->dumpInfo(), 4).c_str());
@ -333,7 +336,7 @@ GrOp* GrRenderTargetOpList::recordOp(std::unique_ptr<GrOp> op,
}
// Stop going backwards if we would cause a painter's order violation.
if (!can_reorder(fRecordedOps.fromBack(i).fOp->bounds(), op->bounds())) {
GrOP_INFO("\t\tIntersects with (%s, B%u)\n", candidate.fOp->name(),
GrOP_INFO("\t\tIntersects with (%s, opID: %u)\n", candidate.fOp->name(),
candidate.fOp->uniqueID());
break;
}

View File

@ -119,9 +119,13 @@ private:
friend class GrRenderTargetContextPriv; // for stencil clip state. TODO: this is invasive
struct RecordedOp {
RecordedOp(std::unique_ptr<GrOp> op, GrRenderTarget* rt, const GrAppliedClip* appliedClip,
RecordedOp(std::unique_ptr<GrOp> op,
GrRenderTarget* rt,
const GrAppliedClip* appliedClip,
const DstTexture* dstTexture)
: fOp(std::move(op)), fRenderTarget(rt), fAppliedClip(appliedClip) {
: fOp(std::move(op))
, fRenderTarget(rt)
, fAppliedClip(appliedClip) {
if (dstTexture) {
fDstTexture = *dstTexture;
}

View File

@ -58,12 +58,17 @@ sk_sp<GrTextureProxy> GrResourceProvider::createMipMappedTexture(
SkDestinationSurfaceColorMode mipColorMode) {
ASSERT_SINGLE_OWNER
if (!mipLevelCount) {
if (texels) {
return nullptr;
}
return GrSurfaceProxy::MakeDeferred(this, desc, budgeted, nullptr, 0);
}
if (this->isAbandoned()) {
return nullptr;
}
if (mipLevelCount && !texels) {
return nullptr;
}
for (int i = 0; i < mipLevelCount; ++i) {
if (!texels[i].fPixels) {
return nullptr;
@ -82,8 +87,8 @@ sk_sp<GrTextureProxy> GrResourceProvider::createMipMappedTexture(
sk_sp<GrTexture> tex(this->refScratchTexture(desc, flags));
if (tex) {
sk_sp<GrTextureProxy> proxy = GrSurfaceProxy::MakeWrapped(tex);
if (!mipLevelCount ||
fGpu->getContext()->contextPriv().writeSurfacePixels(
if (fGpu->getContext()->contextPriv().writeSurfacePixels(
proxy.get(), nullptr, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
nullptr, texels[0].fPixels, texels[0].fRowBytes)) {
if (SkBudgeted::kNo == budgeted) {
@ -143,12 +148,13 @@ GrTexture* GrResourceProvider::createApproxTexture(const GrSurfaceDesc& desc, ui
if (this->isAbandoned()) {
return nullptr;
}
// Currently we don't recycle compressed textures as scratch.
if (GrPixelConfigIsCompressed(desc.fConfig)) {
return nullptr;
} else {
return this->refScratchTexture(desc, flags);
}
return this->refScratchTexture(desc, flags);
}
GrTexture* GrResourceProvider::refScratchTexture(const GrSurfaceDesc& inDesc,
@ -182,10 +188,6 @@ GrTexture* GrResourceProvider::refScratchTexture(const GrSurfaceDesc& inDesc,
scratchFlags);
if (resource) {
GrSurface* surface = static_cast<GrSurface*>(resource);
GrRenderTarget* rt = surface->asRenderTarget();
if (rt && fGpu->caps()->discardRenderTargetSupport()) {
rt->discard();
}
return surface->asTexture();
}
}

View File

@ -55,14 +55,17 @@ public:
const char* name() const override { return "Clear"; }
SkString dumpInfo() const override {
SkString string("Scissor [");
SkString string;
string.appendf("rtID: %d proxyID: %d Scissor [",
fRenderTarget.get()->uniqueID().asUInt(),
fProxyUniqueID.asUInt());
if (fClip.scissorEnabled()) {
const SkIRect& r = fClip.scissorRect();
string.appendf("L: %d, T: %d, R: %d, B: %d", r.fLeft, r.fTop, r.fRight, r.fBottom);
} else {
string.append("disabled");
}
string.appendf("], Color: 0x%08x, RT: %d", fColor, fProxyUniqueID.asUInt());
string.appendf("], Color: 0x%08x ", fColor);
string.append(INHERITED::dumpInfo());
return string;
}

View File

@ -16,23 +16,39 @@
class GrDiscardOp final : public GrOp {
public:
DEFINE_OP_CLASS_ID
static std::unique_ptr<GrOp> Make(GrRenderTarget* rt) {
return std::unique_ptr<GrOp>(new GrDiscardOp(rt));
// MDB TODO: replace the renderTargetContext with just the renderTargetProxy.
// For now, we need the renderTargetContext for its accessRenderTarget powers.
static std::unique_ptr<GrOp> Make(GrRenderTargetContext* rtc) {
// MDB TODO: remove this. In this hybrid state we need to be sure the RT is instantiable
// so it can carry the IO refs. In the future we will just get the proxy and
// it carry the IO refs.
if (!rtc->accessRenderTarget()) {
return nullptr;
}
return std::unique_ptr<GrOp>(new GrDiscardOp(rtc));
}
const char* name() const override { return "Discard"; }
SkString dumpInfo() const override {
SkString string;
string.printf("RT: %d", fRenderTarget.get()->uniqueID().asUInt());
string.printf("rtID: %d proxyID: %d ", fRenderTarget.get()->uniqueID().asUInt(),
fProxyUniqueID.asUInt());
string.append(INHERITED::dumpInfo());
return string;
}
private:
GrDiscardOp(GrRenderTarget* rt) : INHERITED(ClassID()), fRenderTarget(rt) {
this->setBounds(SkRect::MakeIWH(rt->width(), rt->height()), HasAABloat::kNo,
GrDiscardOp(GrRenderTargetContext* rtc)
: INHERITED(ClassID())
, fProxyUniqueID(rtc->asSurfaceProxy()->uniqueID()) {
this->setBounds(SkRect::MakeIWH(rtc->width(), rtc->height()), HasAABloat::kNo,
IsZeroArea::kNo);
fRenderTarget.reset(rtc->accessRenderTarget());
}
bool onCombineIfPossible(GrOp* that, const GrCaps& caps) override {
@ -42,9 +58,12 @@ private:
void onPrepare(GrOpFlushState*) override {}
void onExecute(GrOpFlushState* state) override {
// MDB TODO: instantiate the renderTarget from the proxy in here
state->commandBuffer()->discard(fRenderTarget.get());
}
// MDB TODO: remove this. When the renderTargetProxy carries the refs this will be redundant.
GrSurfaceProxy::UniqueID fProxyUniqueID;
GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
typedef GrOp INHERITED;

View File

@ -857,6 +857,7 @@ sk_sp<SkImage> SkImage::MakeTextureFromMipMap(GrContext* ctx, const SkImageInfo&
const GrMipLevel* texels, int mipLevelCount,
SkBudgeted budgeted,
SkDestinationSurfaceColorMode colorMode) {
SkASSERT(mipLevelCount >= 1);
if (!ctx) {
return nullptr;
}

View File

@ -86,20 +86,12 @@ static sk_sp<GrTextureProxy> make_wrapped(GrContext* context) {
sk_sp<GrTexture> tex(context->resourceProvider()->createTexture(desc, SkBudgeted::kNo));
sk_sp<GrTextureProxy> proxy = GrSurfaceProxy::MakeWrapped(std::move(tex));
// Flush the IOWrite from the initial discard or it will confuse the later ref count checks
context->contextPriv().flushSurfaceWrites(proxy.get());
return proxy;
return GrSurfaceProxy::MakeWrapped(std::move(tex));
}
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ProxyRefTest, reporter, ctxInfo) {
GrResourceProvider* provider = ctxInfo.grContext()->resourceProvider();
const GrCaps& caps = *ctxInfo.grContext()->caps();
// Currently the op itself takes a pending write and the render target op list does as well.
static const int kWritesForDiscard = 2;
for (auto make : { make_deferred, make_wrapped }) {
// A single write
{
@ -107,18 +99,15 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ProxyRefTest, reporter, ctxInfo) {
GrPendingIOResource<GrSurfaceProxy, kWrite_GrIOType> fWrite(proxy.get());
check_refs(reporter, proxy.get(), 1, 1, 0, 1);
static const int kExpectedReads = 0;
static const int kExpectedWrites = 1;
// In the deferred case, the discard op created on instantiation adds an
// extra ref and write
bool proxyGetsDiscardRef = !proxy->isWrapped_ForTesting() &&
caps.discardRenderTargetSupport();
int expectedWrites = 1 + (proxyGetsDiscardRef ? kWritesForDiscard : 0);
check_refs(reporter, proxy.get(), 1, 1, kExpectedReads, kExpectedWrites);
proxy->instantiate(provider);
// In the deferred case, this checks that the refs transfered to the GrSurface
check_refs(reporter, proxy.get(), 1, 1, 0, expectedWrites);
check_refs(reporter, proxy.get(), 1, 1, kExpectedReads, kExpectedWrites);
}
// A single read
@ -127,18 +116,15 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ProxyRefTest, reporter, ctxInfo) {
GrPendingIOResource<GrSurfaceProxy, kRead_GrIOType> fRead(proxy.get());
check_refs(reporter, proxy.get(), 1, 1, 1, 0);
static const int kExpectedReads = 1;
static const int kExpectedWrites = 0;
// In the deferred case, the discard op created on instantiation adds an
// extra ref and write
bool proxyGetsDiscardRef = !proxy->isWrapped_ForTesting() &&
caps.discardRenderTargetSupport();
int expectedWrites = proxyGetsDiscardRef ? kWritesForDiscard : 0;
check_refs(reporter, proxy.get(), 1, 1, kExpectedReads, kExpectedWrites);
proxy->instantiate(provider);
// In the deferred case, this checks that the refs transfered to the GrSurface
check_refs(reporter, proxy.get(), 1, 1, 1, expectedWrites);
check_refs(reporter, proxy.get(), 1, 1, kExpectedReads, kExpectedWrites);
}
// A single read/write pair
@ -147,18 +133,15 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ProxyRefTest, reporter, ctxInfo) {
GrPendingIOResource<GrSurfaceProxy, kRW_GrIOType> fRW(proxy.get());
check_refs(reporter, proxy.get(), 1, 1, 1, 1);
static const int kExpectedReads = 1;
static const int kExpectedWrites = 1;
// In the deferred case, the discard op created on instantiation adds an
// extra ref and write
bool proxyGetsDiscardRef = !proxy->isWrapped_ForTesting() &&
caps.discardRenderTargetSupport();
int expectedWrites = 1 + (proxyGetsDiscardRef ? kWritesForDiscard : 0);
check_refs(reporter, proxy.get(), 1, 1, kExpectedReads, kExpectedWrites);
proxy->instantiate(provider);
// In the deferred case, this checks that the refs transferred to the GrSurface
check_refs(reporter, proxy.get(), 1, 1, 1, expectedWrites);
check_refs(reporter, proxy.get(), 1, 1, kExpectedReads, kExpectedWrites);
}
// Multiple normal refs
@ -167,16 +150,15 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ProxyRefTest, reporter, ctxInfo) {
proxy->ref();
proxy->ref();
check_refs(reporter, proxy.get(), 3, 3, 0, 0);
static const int kExpectedReads = 0;
static const int kExpectedWrites = 0;
bool proxyGetsDiscardRef = !proxy->isWrapped_ForTesting() &&
caps.discardRenderTargetSupport();
int expectedWrites = proxyGetsDiscardRef ? kWritesForDiscard : 0;
check_refs(reporter, proxy.get(), 3, 3,kExpectedReads, kExpectedWrites);
proxy->instantiate(provider);
// In the deferred case, this checks that the refs transferred to the GrSurface
check_refs(reporter, proxy.get(), 3, 3, 0, expectedWrites);
check_refs(reporter, proxy.get(), 3, 3, kExpectedReads, kExpectedWrites);
proxy->unref();
proxy->unref();
@ -189,22 +171,20 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ProxyRefTest, reporter, ctxInfo) {
GrPendingIOResource<GrSurfaceProxy, kWrite_GrIOType> fWrite(proxy.get());
check_refs(reporter, proxy.get(), 2, 2, 0, 1);
static const int kExpectedWrites = 1;
bool proxyGetsDiscardRef = !proxy->isWrapped_ForTesting() &&
caps.discardRenderTargetSupport();
int expectedWrites = 1 + (proxyGetsDiscardRef ? kWritesForDiscard : 0);
check_refs(reporter, proxy.get(), 2, 2, 0, kExpectedWrites);
proxy->instantiate(provider);
// In the deferred case, this checks that the refs transfered to the GrSurface
check_refs(reporter, proxy.get(), 2, 2, 0, expectedWrites);
check_refs(reporter, proxy.get(), 2, 2, 0, kExpectedWrites);
proxy->unref();
check_refs(reporter, proxy.get(), 1, 1, 0, expectedWrites);
check_refs(reporter, proxy.get(), 1, 1, 0, kExpectedWrites);
GrPendingIOResource<GrSurfaceProxy, kRead_GrIOType> fRead(proxy.get());
check_refs(reporter, proxy.get(), 1, 1, 1, expectedWrites);
check_refs(reporter, proxy.get(), 1, 1, 1, kExpectedWrites);
}
}
}

View File

@ -9,6 +9,11 @@
#include "Test.h"
// MDB TODO: With the move of the discard calls to the RenderTargetContext, deferred RTCs are being
// instantiated early. This test can be re-enabled once discards do not force an instantiation
// (i.e., when renderTargetProxies carry the op IORefs)
#if 0
#if SK_SUPPORT_GPU
#include "GrTextureProxy.h"
#include "GrRenderTargetContext.h"
@ -85,3 +90,4 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(RenderTargetContextTest, reporter, ctxInfo) {
// GrRenderTargetContext
}
#endif
#endif