Add customData capability to the thread-safe proxy cache and begin using it
for cached SW-generated blur masks.

This is needed to begin mixing and matching HW & SW-generated blur masks
since they have different draw-rects. It will also be useful if/when we add
support for triangulated paths to the thread-safe cache.

Bug: 1108408
Change-Id: I085ad1127dc2deb98b35d704b06e50b27c72fd1c
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/322657
Reviewed-by: Adlai Holler <adlai@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
This commit is contained in:
parent 8354e9b8f6
commit 6e17ffe829
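For orientation before the diff: GrUniqueKey gains refCustomData(), and the thread-safe cache gains findWithData/addWithData/findOrAddWithData, which return the cached view together with the custom data of the key that owns the entry. Below is a hedged usage sketch modeled on the test helper in this change; make_blur_key and fetch_or_install are illustrative names (not part of this CL), the int payload stands in for a real blob such as a packed draw-rect, and Skia-internal headers are omitted, so this is not a standalone compiling example.

// Sketch only: relies on the Skia-internal types touched by this change
// (GrUniqueKey, GrSurfaceProxyView, GrThreadSafeUniquelyKeyedProxyViewCache).
static void make_blur_key(GrUniqueKey* key, int wh, int payload) {
    static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();

    GrUniqueKey::Builder builder(key, kDomain, 1);
    builder[0] = wh;
    builder.finish();

    // Side-band data that rides along with the key (e.g. a draw-rect mapping)
    key->setCustomData(SkData::MakeWithCopy(&payload, sizeof(payload)));
}

static GrSurfaceProxyView fetch_or_install(GrThreadSafeUniquelyKeyedProxyViewCache* cache,
                                           const GrSurfaceProxyView& newView,
                                           int wh, int payload) {
    GrUniqueKey key;
    make_blur_key(&key, wh, payload);

    // The *WithData entry points return the view together with the customData of
    // the key that actually owns the cache entry (another thread may have won).
    auto [view, data] = cache->findWithData(key);
    if (!view) {
        std::tie(view, data) = cache->addWithData(key, newView);
    }

    if (data) {
        const int* stored = static_cast<const int*>(data->data());
        SkASSERT(*stored == payload || view != newView);  // a racing writer may have stored its own
    }
    return view;
}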
@@ -266,6 +266,7 @@ public:
     void setCustomData(sk_sp<SkData> data) { fData = std::move(data); }
     SkData* getCustomData() const { return fData.get(); }
+    sk_sp<SkData> refCustomData() const { return fData; }

     const char* tag() const { return fTag; }

@@ -69,48 +69,83 @@ static void mask_release_proc(void* addr, void* /*context*/) {
     SkMask::FreeImage(addr);
 }

+#ifdef SK_DEBUG
+// Brute force computation of the destination bounds of a SW filtered mask
+static SkIRect sw_calc_draw_rect(const SkMatrix& viewMatrix,
+                                 const GrStyledShape& shape,
+                                 const SkMaskFilter* filter,
+                                 const SkIRect& clipBounds) {
+    SkRect devBounds = shape.bounds();
+    viewMatrix.mapRect(&devBounds);
+
+    SkMask srcM, dstM;
+    if (!SkDraw::ComputeMaskBounds(devBounds, &clipBounds, filter, &viewMatrix, &srcM.fBounds)) {
+        return {};
+    }
+
+    srcM.fFormat = SkMask::kA8_Format;
+
+    if (!as_MFB(filter)->filterMask(&dstM, srcM, viewMatrix, nullptr)) {
+        return {};
+    }
+
+    return dstM.fBounds;
+}
+#endif
+
+// This stores the mapping from an unclipped, integerized, device-space, shape bounds to
+// the filtered mask's draw rect.
+struct DrawRectData {
+    SkIVector fOffset;
+    SkISize   fSize;
+};
+
+static sk_sp<SkData> create_data(const SkIRect& drawRect, const SkIRect& origDevBounds) {
+
+    DrawRectData drawRectData { {drawRect.fLeft - origDevBounds.fLeft,
+                                 drawRect.fTop - origDevBounds.fTop},
+                                drawRect.size() };
+
+    return SkData::MakeWithCopy(&drawRectData, sizeof(drawRectData));
+}
+
+static SkIRect extract_draw_rect_from_data(SkData* data, const SkIRect& origDevBounds) {
+    auto drawRectData = static_cast<const DrawRectData*>(data->data());
+
+    return SkIRect::MakeXYWH(origDevBounds.fLeft + drawRectData->fOffset.fX,
+                             origDevBounds.fTop + drawRectData->fOffset.fY,
+                             drawRectData->fSize.fWidth,
+                             drawRectData->fSize.fHeight);
+}
+
 static GrSurfaceProxyView sw_create_filtered_mask(GrRecordingContext* rContext,
                                                   const SkMatrix& viewMatrix,
                                                   const GrStyledShape& shape,
                                                   const SkMaskFilter* filter,
+                                                  const SkIRect& unclippedDevShapeBounds,
                                                   const SkIRect& clipBounds,
                                                   SkIRect* drawRect,
-                                                  const GrUniqueKey& key) {
+                                                  GrUniqueKey* key) {
     SkASSERT(filter);
     SkASSERT(!shape.style().applies());

     auto threadSafeViewCache = rContext->priv().threadSafeViewCache();

     GrSurfaceProxyView filteredMaskView;
+    sk_sp<SkData> data;

-    if (key.isValid()) {
-        filteredMaskView = threadSafeViewCache->find(key);
+    if (key->isValid()) {
+        std::tie(filteredMaskView, data) = threadSafeViewCache->findWithData(*key);
     }

     if (filteredMaskView) {
+        SkASSERT(data);
         SkASSERT(kMaskOrigin == filteredMaskView.origin());

-        SkRect devBounds = shape.bounds();
-        viewMatrix.mapRect(&devBounds);
+        *drawRect = extract_draw_rect_from_data(data.get(), unclippedDevShapeBounds);

-        // Here we need to recompute the destination bounds in order to draw the mask correctly
-        SkMask srcM, dstM;
-        if (!SkDraw::ComputeMaskBounds(devBounds, &clipBounds, filter, &viewMatrix,
-                                       &srcM.fBounds)) {
-            return {};
-        }
-
-        srcM.fFormat = SkMask::kA8_Format;
-
-        if (!as_MFB(filter)->filterMask(&dstM, srcM, viewMatrix, nullptr)) {
-            return {};
-        }
-
-        // Unfortunately, we cannot double check that the computed bounds (i.e., dstM.fBounds)
-        // match the stored bounds of the mask bc the proxy may have been recreated and,
-        // when it is recreated, it just gets the bounds of the underlying GrTexture (which
-        // might be a loose fit).
-        *drawRect = dstM.fBounds;
+        SkDEBUGCODE(auto oldDrawRect = sw_calc_draw_rect(viewMatrix, shape, filter, clipBounds));
+        SkASSERT(*drawRect == oldDrawRect);
     } else {
         SkStrokeRec::InitStyle fillOrHairline = shape.style().isSimpleHairline()
                                                         ? SkStrokeRec::kHairline_InitStyle
@@ -162,8 +197,12 @@ static GrSurfaceProxyView sw_create_filtered_mask(GrRecordingContext* rContext,

         *drawRect = dstM.fBounds;

-        if (key.isValid()) {
-            filteredMaskView = threadSafeViewCache->add(key, filteredMaskView);
+        if (key->isValid()) {
+            key->setCustomData(create_data(*drawRect, unclippedDevShapeBounds));
+            std::tie(filteredMaskView, data) = threadSafeViewCache->addWithData(*key,
+                                                                                filteredMaskView);
+            // If we got a different view back from 'addWithData' it could have a different drawRect
+            *drawRect = extract_draw_rect_from_data(data.get(), unclippedDevShapeBounds);
         }
     }

@@ -256,7 +295,7 @@ static bool get_shape_and_clip_bounds(GrRenderTargetContext* renderTargetContext
 }

 // The key and clip-bounds are computed together because the caching decision can impact the
-// clip-bound.
+// clip-bound - since we only cache un-clipped masks the clip can be removed entirely.
 // A 'false' return value indicates that the shape is known to be clipped away.
 static bool compute_key_and_clip_bounds(GrUniqueKey* maskKey,
                                         SkIRect* boundsForClip,
@@ -312,7 +351,8 @@ static bool compute_key_and_clip_bounds(GrUniqueKey* maskKey,
         SkScalar ky = viewMatrix.get(SkMatrix::kMSkewY);
         SkScalar tx = viewMatrix.get(SkMatrix::kMTransX);
         SkScalar ty = viewMatrix.get(SkMatrix::kMTransY);
-        // Allow 8 bits each in x and y of subpixel positioning.
+        // Allow 8 bits each in x and y of subpixel positioning. But, note that we're allowing
+        // reuse for integer translations.
         SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
         SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;

@@ -349,7 +389,7 @@ static GrSurfaceProxyView hw_create_filtered_mask(GrRecordingContext* rContext,
                                                   const SkIRect& unclippedDevShapeBounds,
                                                   const SkIRect& clipBounds,
                                                   SkIRect* maskRect,
-                                                  const GrUniqueKey& key) {
+                                                  GrUniqueKey* key) {
     GrSurfaceProxyView filteredMaskView;

     if (filter->canFilterMaskGPU(shape,
@@ -365,8 +405,8 @@ static GrSurfaceProxyView hw_create_filtered_mask(GrRecordingContext* rContext,
         GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();

         // TODO: this path should also use the thread-safe proxy-view cache!
-        if (key.isValid()) {
-            filteredMaskView = find_filtered_mask(proxyProvider, key);
+        if (key->isValid()) {
+            filteredMaskView = find_filtered_mask(proxyProvider, *key);
         }

         if (!filteredMaskView) {
@@ -383,9 +423,12 @@ static GrSurfaceProxyView hw_create_filtered_mask(GrRecordingContext* rContext,
                                               maskRTC->colorInfo().alphaType(),
                                               viewMatrix,
                                               *maskRect);
-                if (filteredMaskView && key.isValid()) {
+                if (filteredMaskView && key->isValid()) {
                     SkASSERT(filteredMaskView.asTextureProxy());
-                    proxyProvider->assignUniqueKeyToProxy(key, filteredMaskView.asTextureProxy());
+
+                    // This customData isn't being used yet
+                    key->setCustomData(create_data(*maskRect, unclippedDevShapeBounds));
+                    proxyProvider->assignUniqueKeyToProxy(*key, filteredMaskView.asTextureProxy());
                 }
             }
         }
@@ -460,7 +503,7 @@ static void draw_shape_with_mask_filter(GrRecordingContext* rContext,
             filteredMaskView = hw_create_filtered_mask(rContext, renderTargetContext,
                                                        viewMatrix, *shape, maskFilter,
                                                        unclippedDevShapeBounds, boundsForClip,
                                                        &maskRect, maskKey);
-                                                       &maskRect, maskKey);
+                                                       &maskRect, &maskKey);
            if (filteredMaskView) {
                if (draw_mask(renderTargetContext, clip, viewMatrix, maskRect, std::move(paint),
                              std::move(filteredMaskView))) {
@@ -474,8 +517,8 @@ static void draw_shape_with_mask_filter(GrRecordingContext* rContext,
        // Either HW mask rendering failed or we're in a DDL recording thread
        filteredMaskView = sw_create_filtered_mask(rContext,
                                                   viewMatrix, *shape, maskFilter,
-                                                  boundsForClip,
-                                                  &maskRect, maskKey);
+                                                  unclippedDevShapeBounds, boundsForClip,
+                                                  &maskRect, &maskKey);
        if (filteredMaskView) {
            if (draw_mask(renderTargetContext, clip, viewMatrix, maskRect, std::move(paint),
                          std::move(filteredMaskView))) {
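A note on the DrawRectData mapping above: because the mask key deliberately tolerates integer translations, the absolute draw rect cannot be cached directly; only its offset from the unclipped, integerized, device-space shape bounds (plus its size) is stable across reuses. The following minimal, self-contained sketch shows that round trip with plain structs standing in for SkIRect/SkIVector/SkISize; all names and numbers here are illustrative, not Skia API.

// Stand-in for the create_data / extract_draw_rect_from_data round trip.
#include <cassert>

struct Rect { int left, top, width, height; };           // stand-in for SkIRect
struct DrawRectData { int offX, offY, width, height; };  // offset from dev bounds + size

// Store the draw rect relative to the unclipped, device-space shape bounds.
static DrawRectData pack(const Rect& drawRect, const Rect& origDevBounds) {
    return { drawRect.left - origDevBounds.left,
             drawRect.top  - origDevBounds.top,
             drawRect.width, drawRect.height };
}

// Recover the absolute draw rect from the stored offset/size.
static Rect unpack(const DrawRectData& d, const Rect& origDevBounds) {
    return { origDevBounds.left + d.offX, origDevBounds.top + d.offY,
             d.width, d.height };
}

int main() {
    Rect devBounds = { 100, 200, 32, 32 };  // unclipped, integerized shape bounds
    Rect drawRect  = {  90, 190, 52, 52 };  // e.g. a blurred mask outset by 10px per side

    DrawRectData stored = pack(drawRect, devBounds);  // { -10, -10, 52, 52 }
    Rect roundTrip      = unpack(stored, devBounds);

    assert(roundTrip.left == drawRect.left && roundTrip.top == drawRect.top);
    assert(roundTrip.width == drawRect.width && roundTrip.height == drawRect.height);
    return 0;
}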
@@ -92,9 +92,8 @@ void GrThreadSafeUniquelyKeyedProxyViewCache::dropUniqueRefsOlderThan(
     }
 }

-GrSurfaceProxyView GrThreadSafeUniquelyKeyedProxyViewCache::find(const GrUniqueKey& key) {
-    SkAutoSpinlock lock{fSpinLock};
-
+std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeUniquelyKeyedProxyViewCache::internalFind(
+        const GrUniqueKey& key) {
     Entry* tmp = fUniquelyKeyedProxyViewMap.find(key);
     if (tmp) {
         SkASSERT(fUniquelyKeyedProxyViewList.isInList(tmp));
@@ -102,12 +101,27 @@ GrSurfaceProxyView GrThreadSafeUniquelyKeyedProxyViewCache::find(const GrUniqueK
         tmp->fLastAccess = GrStdSteadyClock::now();
         fUniquelyKeyedProxyViewList.remove(tmp);
         fUniquelyKeyedProxyViewList.addToHead(tmp);
-        return tmp->fView;
+        return { tmp->fView, tmp->fKey.refCustomData() };
     }

     return {};
 }

+GrSurfaceProxyView GrThreadSafeUniquelyKeyedProxyViewCache::find(const GrUniqueKey& key) {
+    SkAutoSpinlock lock{fSpinLock};
+
+    GrSurfaceProxyView view;
+    std::tie(view, std::ignore) = this->internalFind(key);
+    return view;
+}
+
+std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeUniquelyKeyedProxyViewCache::findWithData(
+        const GrUniqueKey& key) {
+    SkAutoSpinlock lock{fSpinLock};
+
+    return this->internalFind(key);
+}
+
 GrThreadSafeUniquelyKeyedProxyViewCache::Entry*
 GrThreadSafeUniquelyKeyedProxyViewCache::getEntry(const GrUniqueKey& key,
                                                   const GrSurfaceProxyView& view) {
@@ -141,7 +155,7 @@ void GrThreadSafeUniquelyKeyedProxyViewCache::recycleEntry(Entry* dead) {
     fFreeEntryList = dead;
 }

-GrSurfaceProxyView GrThreadSafeUniquelyKeyedProxyViewCache::internalAdd(
+std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeUniquelyKeyedProxyViewCache::internalAdd(
                                                                 const GrUniqueKey& key,
                                                                 const GrSurfaceProxyView& view) {
     Entry* tmp = fUniquelyKeyedProxyViewMap.find(key);
@@ -151,13 +165,23 @@ GrSurfaceProxyView GrThreadSafeUniquelyKeyedProxyViewCache::internalAdd(
         SkASSERT(fUniquelyKeyedProxyViewMap.find(key));
     }

-    return tmp->fView;
+    return { tmp->fView, tmp->fKey.refCustomData() };
 }

 GrSurfaceProxyView GrThreadSafeUniquelyKeyedProxyViewCache::add(const GrUniqueKey& key,
                                                                 const GrSurfaceProxyView& view) {
     SkAutoSpinlock lock{fSpinLock};

+    GrSurfaceProxyView newView;
+    std::tie(newView, std::ignore) = this->internalAdd(key, view);
+    return newView;
+}
+
+std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeUniquelyKeyedProxyViewCache::addWithData(
+                                                                const GrUniqueKey& key,
+                                                                const GrSurfaceProxyView& view) {
+    SkAutoSpinlock lock{fSpinLock};
+
     return this->internalAdd(key, view);
 }

@@ -165,14 +189,24 @@ GrSurfaceProxyView GrThreadSafeUniquelyKeyedProxyViewCache::findOrAdd(const GrUn
                                                                       const GrSurfaceProxyView& v) {
     SkAutoSpinlock lock{fSpinLock};

-    Entry* tmp = fUniquelyKeyedProxyViewMap.find(key);
-    if (tmp) {
-        SkASSERT(fUniquelyKeyedProxyViewList.isInList(tmp));
-        // make the sought out entry the MRU
-        tmp->fLastAccess = GrStdSteadyClock::now();
-        fUniquelyKeyedProxyViewList.remove(tmp);
-        fUniquelyKeyedProxyViewList.addToHead(tmp);
-        return tmp->fView;
+    GrSurfaceProxyView view;
+    std::tie(view, std::ignore) = this->internalFind(key);
+    if (view) {
+        return view;
     }

+    std::tie(view, std::ignore) = this->internalAdd(key, v);
+    return view;
+}
+
+std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeUniquelyKeyedProxyViewCache::findOrAddWithData(
+                                                                        const GrUniqueKey& key,
+                                                                        const GrSurfaceProxyView& v) {
+    SkAutoSpinlock lock{fSpinLock};
+
+    auto [view, data] = this->internalFind(key);
+    if (view) {
+        return { std::move(view), std::move(data) };
+    }
+
     return this->internalAdd(key, v);
@@ -81,11 +81,17 @@ public:
     void dropUniqueRefsOlderThan(GrStdSteadyClock::time_point purgeTime) SK_EXCLUDES(fSpinLock);

     GrSurfaceProxyView find(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);
+    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> findWithData(
+            const GrUniqueKey&) SK_EXCLUDES(fSpinLock);

     GrSurfaceProxyView add(const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
+    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> addWithData(
+            const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);

     GrSurfaceProxyView findOrAdd(const GrUniqueKey&,
                                  const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
+    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> findOrAddWithData(
+            const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);

     void remove(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);

@@ -108,8 +114,10 @@ private:
     Entry* getEntry(const GrUniqueKey&, const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);
     void recycleEntry(Entry*) SK_REQUIRES(fSpinLock);

-    GrSurfaceProxyView internalAdd(const GrUniqueKey&,
-                                   const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);
+    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> internalFind(
+            const GrUniqueKey&) SK_REQUIRES(fSpinLock);
+    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> internalAdd(
+            const GrUniqueKey&, const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);

     mutable SkSpinlock fSpinLock;

@@ -21,6 +21,7 @@

 static constexpr int kImageWH = 32;
 static constexpr auto kImageOrigin = kBottomLeft_GrSurfaceOrigin;
+static constexpr int kNoID = -1;

 static SkImageInfo default_ii(int wh) {
     return SkImageInfo::Make(wh, wh, kRGBA_8888_SkColorType, kPremul_SkAlphaType);
@@ -39,11 +40,15 @@ static std::unique_ptr<GrRenderTargetContext> new_RTC(GrRecordingContext* rConte
                                          SkBudgeted::kYes);
 }

-static void create_key(GrUniqueKey* key, int wh) {
+static void create_key(GrUniqueKey* key, int wh, int id) {
     static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
     GrUniqueKey::Builder builder(key, kDomain, 1);
     builder[0] = wh;
     builder.finish();
+
+    if (id != kNoID) {
+        key->setCustomData(SkData::MakeWithCopy(&id, sizeof(id)));
+    }
 };

 static SkBitmap create_bitmap(int wh) {
@@ -134,12 +139,13 @@ public:
     // Add a draw on 'canvas' that will introduce a ref on the 'wh' view
     void accessCachedView(SkCanvas* canvas,
                           int wh,
+                          int id = kNoID,
                           bool failLookup = false,
                           bool failFillingIn = false) {
         GrRecordingContext* rContext = canvas->recordingContext();

         auto view = AccessCachedView(rContext, this->threadSafeViewCache(),
-                                     wh, failLookup, failFillingIn, &fStats);
+                                     wh, failLookup, failFillingIn, id, &fStats);
         SkASSERT(view);

         auto rtc = canvas->internal_private_accessTopLayerRenderTargetContext();
@@ -162,7 +168,7 @@ public:

     // Besides checking that the number of refs and cache hits and misses are as expected, this
     // method also validates that the unique key doesn't appear in any of the other caches.
-    bool checkView(SkCanvas* canvas, int wh, int hits, int misses, int numRefs) {
+    bool checkView(SkCanvas* canvas, int wh, int hits, int misses, int numRefs, int expectedID) {
         if (fStats.fCacheHits != hits || fStats.fCacheMisses != misses) {
             SkDebugf("Hits E: %d A: %d --- Misses E: %d A: %d\n",
                      hits, fStats.fCacheHits, misses, fStats.fCacheMisses);
@@ -170,15 +176,30 @@ public:
         }

         GrUniqueKey key;
-        create_key(&key, wh);
+        create_key(&key, wh, kNoID);

         auto threadSafeViewCache = this->threadSafeViewCache();

-        GrSurfaceProxyView view = threadSafeViewCache->find(key);
+        auto [view, data] = threadSafeViewCache->findWithData(key);
         if (!view.proxy()) {
             return false;
         }

+        if (expectedID < 0) {
+            if (data) {
+                return false;
+            }
+        } else {
+            if (!data) {
+                return false;
+            }
+
+            const int* cachedID = static_cast<const int*>(data->data());
+            if (*cachedID != expectedID) {
+                return false;
+            }
+        }
+
         if (!view.proxy()->refCntGreaterThan(numRefs+1) ||   // +1 for 'view's ref
             view.proxy()->refCntGreaterThan(numRefs+2)) {
             return false;
@@ -278,7 +299,7 @@ private:
     static GrSurfaceProxyView AccessCachedView(GrRecordingContext*,
                                                GrThreadSafeUniquelyKeyedProxyViewCache*,
                                                int wh,
-                                               bool failLookup, bool failFillingIn,
+                                               bool failLookup, bool failFillingIn, int id,
                                                Stats*);
     static GrSurfaceProxyView CreateViewOnCpu(GrRecordingContext*, int wh, Stats*);
     static std::tuple<GrSurfaceProxyView, sk_sp<Trampoline>> CreateLazyView(GrDirectContext*,
@@ -381,20 +402,25 @@ GrSurfaceProxyView TestHelper::AccessCachedView(
                                     GrRecordingContext* rContext,
                                     GrThreadSafeUniquelyKeyedProxyViewCache* threadSafeViewCache,
                                     int wh,
-                                    bool failLookup, bool failFillingIn,
+                                    bool failLookup, bool failFillingIn, int id,
                                     Stats* stats) {
     GrUniqueKey key;
-    create_key(&key, wh);
+    create_key(&key, wh, id);

     if (GrDirectContext* dContext = rContext->asDirectContext()) {
         // The gpu thread gets priority over the recording threads. If the gpu thread is first,
         // it crams a lazy proxy into the cache and then fills it in later.
         auto [lazyView, trampoline] = CreateLazyView(dContext, wh, stats);

-        GrSurfaceProxyView view = threadSafeViewCache->findOrAdd(key, lazyView);
+        auto [view, data] = threadSafeViewCache->findOrAddWithData(key, lazyView);
         if (view != lazyView) {
             ++stats->fCacheHits;
             return view;
+        } else if (id != kNoID) {
+            // Make sure, in this case, that the customData stuck
+            SkASSERT(data);
+            SkDEBUGCODE(const int* cachedID = static_cast<const int*>(data->data());)
+            SkASSERT(*cachedID == id);
         }

         ++stats->fCacheMisses;
@@ -427,7 +453,14 @@ GrSurfaceProxyView TestHelper::AccessCachedView(
         view = CreateViewOnCpu(rContext, wh, stats);
         SkASSERT(view);

-        return threadSafeViewCache->add(key, view);
+        auto [newView, data] = threadSafeViewCache->addWithData(key, view);
+        if (view == newView && id != kNoID) {
+            // Make sure, in this case, that the customData stuck
+            SkASSERT(data);
+            SkDEBUGCODE(const int* cachedID = static_cast<const int*>(data->data());)
+            SkASSERT(*cachedID == id);
+        }
+        return newView;
     }
 }

@@ -435,13 +468,13 @@ GrSurfaceProxyView TestHelper::AccessCachedView(
 DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache1, reporter, ctxInfo) {
     TestHelper helper(ctxInfo.directContext());

-    helper.accessCachedView(helper.ddlCanvas1(), kImageWH);
+    helper.accessCachedView(helper.ddlCanvas1(), kImageWH, 1);
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas1(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, /*id*/ 1));

-    helper.accessCachedView(helper.ddlCanvas2(), kImageWH);
+    helper.accessCachedView(helper.ddlCanvas2(), kImageWH, 2);
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas2(), kImageWH,
-                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2));
+                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2, /*id*/ 1));

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
     REPORTER_ASSERT(reporter, helper.stats()->fNumLazyCreations == 0);
@@ -456,17 +489,17 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache1, reporter, ctxInfo) {
 DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache2, reporter, ctxInfo) {
     TestHelper helper(ctxInfo.directContext());

-    helper.accessCachedView(helper.liveCanvas(), kImageWH);
+    helper.accessCachedView(helper.liveCanvas(), kImageWH, 1);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, /*id*/ 1));

-    helper.accessCachedView(helper.ddlCanvas1(), kImageWH);
+    helper.accessCachedView(helper.ddlCanvas1(), kImageWH, 2);
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas1(), kImageWH,
-                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2));
+                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2, /*id*/ 1));

-    helper.accessCachedView(helper.ddlCanvas2(), kImageWH);
+    helper.accessCachedView(helper.ddlCanvas2(), kImageWH, 3);
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas2(), kImageWH,
-                                               /*hits*/ 2, /*misses*/ 1, /*refs*/ 3));
+                                               /*hits*/ 2, /*misses*/ 1, /*refs*/ 3, /*id*/ 1));

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
     REPORTER_ASSERT(reporter, helper.stats()->fNumLazyCreations == 1);
@@ -482,13 +515,13 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache2, reporter, ctxInfo) {
 DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache3, reporter, ctxInfo) {
     TestHelper helper(ctxInfo.directContext());

-    helper.accessCachedView(helper.ddlCanvas1(), kImageWH);
+    helper.accessCachedView(helper.ddlCanvas1(), kImageWH, 1);
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas1(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, /*id*/ 1));

-    helper.accessCachedView(helper.liveCanvas(), kImageWH);
+    helper.accessCachedView(helper.liveCanvas(), kImageWH, 2);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2));
+                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2, /*id*/ 1));

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
     REPORTER_ASSERT(reporter, helper.stats()->fNumLazyCreations == 1);
@@ -503,14 +536,14 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache3, reporter, ctxInfo) {
 DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache4, reporter, ctxInfo) {
     TestHelper helper(ctxInfo.directContext());

-    helper.accessCachedView(helper.ddlCanvas1(), kImageWH);
+    helper.accessCachedView(helper.ddlCanvas1(), kImageWH, 1);
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas1(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, /*id*/ 1));

     static const bool kFailLookup = true;
-    helper.accessCachedView(helper.ddlCanvas2(), kImageWH, kFailLookup);
+    helper.accessCachedView(helper.ddlCanvas2(), kImageWH, 2, kFailLookup);
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas2(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 2));
+                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 2, /*id*/ 1));

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
     REPORTER_ASSERT(reporter, helper.stats()->fNumLazyCreations == 0);
@@ -526,9 +559,9 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache4, reporter, ctxInfo) {
 DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache4_5, reporter, ctxInfo) {
     TestHelper helper(ctxInfo.directContext());

-    helper.accessCachedView(helper.liveCanvas(), kImageWH);
+    helper.accessCachedView(helper.liveCanvas(), kImageWH, 1);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, /*id*/ 1));

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
     REPORTER_ASSERT(reporter, helper.stats()->fNumLazyCreations == 1);
@@ -536,9 +569,9 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache4_5, reporter, ctxInfo)
     REPORTER_ASSERT(reporter, helper.stats()->fNumSWCreations == 0);

     static const bool kFailLookup = true;
-    helper.accessCachedView(helper.ddlCanvas1(), kImageWH, kFailLookup);
+    helper.accessCachedView(helper.ddlCanvas1(), kImageWH, 2, kFailLookup);
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas1(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 2));
+                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 2, /*id*/ 1));

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
     REPORTER_ASSERT(reporter, helper.stats()->fNumLazyCreations == 1);
@@ -557,9 +590,9 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache4_75, reporter, ctxInfo)
     TestHelper helper(dContext);

     static const bool kFailFillingIn = true;
-    helper.accessCachedView(helper.liveCanvas(), kImageWH, false, kFailFillingIn);
+    helper.accessCachedView(helper.liveCanvas(), kImageWH, kNoID, false, kFailFillingIn);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, kNoID));

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
     REPORTER_ASSERT(reporter, helper.stats()->fNumLazyCreations == 1);
@@ -570,7 +603,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache4_75, reporter, ctxInfo)
     dContext->submit(true);

     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 0));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 0, kNoID));

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
     REPORTER_ASSERT(reporter, helper.stats()->fNumLazyCreations == 1);
@@ -578,20 +611,29 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache4_75, reporter, ctxInfo)
     REPORTER_ASSERT(reporter, helper.stats()->fNumSWCreations == 0);
 }

-// Case 5: ensure that expanding the map works
+// Case 5: ensure that expanding the map works (esp. wrt custom data)
 DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache5, reporter, ctxInfo) {
     TestHelper helper(ctxInfo.directContext());

     auto threadSafeViewCache = helper.threadSafeViewCache();

     int size = 16;
-    helper.accessCachedView(helper.ddlCanvas1(), size);
+    helper.accessCachedView(helper.ddlCanvas1(), size, /*id*/ size);

     size_t initialSize = threadSafeViewCache->approxBytesUsedForHash();

     while (initialSize == threadSafeViewCache->approxBytesUsedForHash()) {
         size *= 2;
-        helper.accessCachedView(helper.ddlCanvas1(), size);
+        helper.accessCachedView(helper.ddlCanvas1(), size, /*id*/ size);
     }
+
+    for (int i = 16; i <= size; i *= 2) {
+        REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas1(),
+                                                   /*wh*/ i,
+                                                   /*hits*/ 0,
+                                                   /*misses*/ threadSafeViewCache->numEntries(),
+                                                   /*refs*/ 1,
+                                                   /*id*/ i));
+    }
 }

@@ -603,28 +645,28 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache6, reporter, ctxInfo) {
     helper.accessCachedView(helper.ddlCanvas1(), kImageWH);
     sk_sp<SkDeferredDisplayList> ddl1 = helper.snap1();
     REPORTER_ASSERT(reporter, helper.checkView(nullptr, kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, kNoID));

     helper.accessCachedView(helper.ddlCanvas2(), kImageWH);
     sk_sp<SkDeferredDisplayList> ddl2 = helper.snap2();
     REPORTER_ASSERT(reporter, helper.checkView(nullptr, kImageWH,
-                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2));
+                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2, kNoID));

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);

     ddl1 = nullptr;
     REPORTER_ASSERT(reporter, helper.checkView(nullptr, kImageWH,
-                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 1, kNoID));

     ddl2 = nullptr;
     REPORTER_ASSERT(reporter, helper.checkView(nullptr, kImageWH,
-                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 0));
+                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 0, kNoID));

     // The cache still has its ref
     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);

     REPORTER_ASSERT(reporter, helper.checkView(nullptr, kImageWH,
-                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 0));
+                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 0, kNoID));
 }

 // Case 7: Check that invoking dropAllRefs and dropUniqueRefs directly works as expected; i.e.,
@@ -635,12 +677,12 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache7, reporter, ctxInfo) {
     helper.accessCachedView(helper.ddlCanvas1(), kImageWH);
     sk_sp<SkDeferredDisplayList> ddl1 = helper.snap1();
     REPORTER_ASSERT(reporter, helper.checkView(nullptr, kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, kNoID));

     helper.accessCachedView(helper.ddlCanvas2(), 2*kImageWH);
     sk_sp<SkDeferredDisplayList> ddl2 = helper.snap2();
     REPORTER_ASSERT(reporter, helper.checkView(nullptr, 2*kImageWH,
-                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 1, kNoID));

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 2);

@@ -652,7 +694,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache7, reporter, ctxInfo) {
     helper.threadSafeViewCache()->dropUniqueRefs(nullptr);
     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
     REPORTER_ASSERT(reporter, helper.checkView(nullptr, 2*kImageWH,
-                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 1, kNoID));

     helper.threadSafeViewCache()->dropAllRefs();
     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 0);
@@ -668,16 +710,16 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache8, reporter, ctxInfo) {

     helper.accessCachedView(helper.liveCanvas(), kImageWH);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, kNoID));

     helper.accessCachedView(helper.ddlCanvas1(), kImageWH);
     sk_sp<SkDeferredDisplayList> ddl1 = helper.snap1();
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas1(), kImageWH,
-                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2));
+                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2, kNoID));

     helper.accessCachedView(helper.ddlCanvas2(), kImageWH);
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas2(), kImageWH,
-                                               /*hits*/ 2, /*misses*/ 1, /*refs*/ 3));
+                                               /*hits*/ 2, /*misses*/ 1, /*refs*/ 3, kNoID));

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
     REPORTER_ASSERT(reporter, helper.stats()->fNumLazyCreations == 1);
@@ -702,16 +744,16 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache9, reporter, ctxInfo) {

     helper.accessCachedView(helper.liveCanvas(), kImageWH);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, kNoID));

     helper.accessCachedView(helper.ddlCanvas1(), kImageWH);
     sk_sp<SkDeferredDisplayList> ddl1 = helper.snap1();
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas1(), kImageWH,
-                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2));
+                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2, kNoID));

     helper.accessCachedView(helper.ddlCanvas2(), kImageWH);
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas2(), kImageWH,
-                                               /*hits*/ 2, /*misses*/ 1, /*refs*/ 3));
+                                               /*hits*/ 2, /*misses*/ 1, /*refs*/ 3, kNoID));

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
     REPORTER_ASSERT(reporter, helper.stats()->fNumLazyCreations == 1);
@@ -744,21 +786,21 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache10, reporter, ctxInfo) {

     helper.accessCachedView(helper.liveCanvas(), kImageWH);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, kNoID));

     helper.accessCachedView(helper.ddlCanvas1(), kImageWH);
     sk_sp<SkDeferredDisplayList> ddl1 = helper.snap1();
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas1(), kImageWH,
-                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2));
+                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2, kNoID));

     helper.accessCachedView(helper.liveCanvas(), 2*kImageWH);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), 2*kImageWH,
-                                               /*hits*/ 1, /*misses*/ 2, /*refs*/ 1));
+                                               /*hits*/ 1, /*misses*/ 2, /*refs*/ 1, kNoID));

     helper.accessCachedView(helper.ddlCanvas2(), 2*kImageWH);
     sk_sp<SkDeferredDisplayList> ddl2 = helper.snap2();
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas2(), 2*kImageWH,
-                                               /*hits*/ 2, /*misses*/ 2, /*refs*/ 2));
+                                               /*hits*/ 2, /*misses*/ 2, /*refs*/ 2, kNoID));

     dContext->flush();
     dContext->submit(true);
@@ -771,9 +813,9 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache10, reporter, ctxInfo) {

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 2);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 2, /*misses*/ 2, /*refs*/ 0));
+                                               /*hits*/ 2, /*misses*/ 2, /*refs*/ 0, kNoID));
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), 2*kImageWH,
-                                               /*hits*/ 2, /*misses*/ 2, /*refs*/ 0));
+                                               /*hits*/ 2, /*misses*/ 2, /*refs*/ 0, kNoID));

     // Regardless of which image is MRU, this should force the other out
     size_t desiredBytes = helper.gpuSize(2*kImageWH) + helper.gpuSize(kImageWH)/2;
@@ -790,7 +832,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache10, reporter, ctxInfo) {
     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);

     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), 2*kImageWH,
-                                               /*hits*/ 2, /*misses*/ 2, /*refs*/ 0));
+                                               /*hits*/ 2, /*misses*/ 2, /*refs*/ 0, kNoID));
 }

 // Case 11: This checks that scratch-only variant of GrContext::purgeUnlockedResources works as
@@ -803,20 +845,20 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache11, reporter, ctxInfo) {

     helper.accessCachedView(helper.liveCanvas(), kImageWH);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, kNoID));

     helper.accessCachedView(helper.liveCanvas(), 2*kImageWH);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), 2*kImageWH,
-                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 1, kNoID));

     dContext->flush();
     dContext->submit(true);

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 2);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 0));
+                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 0, kNoID));
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), 2*kImageWH,
-                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 0));
+                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 0, kNoID));

     // This shouldn't remove anything from the cache
     dContext->purgeUnlockedResources(/* scratchResourcesOnly */ true);
@@ -838,24 +880,24 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache12, reporter, ctxInfo) {

     helper.accessCachedView(helper.liveCanvas(), kImageWH);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, kNoID));
     helper.accessCachedView(helper.ddlCanvas1(), kImageWH);
     sk_sp<SkDeferredDisplayList> ddl1 = helper.snap1();
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas1(), kImageWH,
-                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2));
+                                               /*hits*/ 1, /*misses*/ 1, /*refs*/ 2, kNoID));

     helper.accessCachedView(helper.liveCanvas(), 2*kImageWH);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), 2*kImageWH,
-                                               /*hits*/ 1, /*misses*/ 2, /*refs*/ 1));
+                                               /*hits*/ 1, /*misses*/ 2, /*refs*/ 1, kNoID));

     dContext->flush();
     dContext->submit(true);

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 2);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), kImageWH,
-                                               /*hits*/ 1, /*misses*/ 2, /*refs*/ 1));
+                                               /*hits*/ 1, /*misses*/ 2, /*refs*/ 1, kNoID));
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), 2*kImageWH,
-                                               /*hits*/ 1, /*misses*/ 2, /*refs*/ 0));
+                                               /*hits*/ 1, /*misses*/ 2, /*refs*/ 0, kNoID));

     dContext->setResourceCacheLimit(0);

@@ -878,7 +920,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache13, reporter, ctxInfo) {
     helper.accessCachedView(helper.ddlCanvas1(), kImageWH);

     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas1(), kImageWH,
-                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 1, /*refs*/ 1, kNoID));
     sk_sp<SkDeferredDisplayList> ddl1 = helper.snap1();

     std::this_thread::sleep_for(std::chrono::milliseconds(5));
@@ -887,7 +929,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache13, reporter, ctxInfo) {

     helper.accessCachedView(helper.ddlCanvas2(), 2*kImageWH);
     REPORTER_ASSERT(reporter, helper.checkView(helper.ddlCanvas2(), 2*kImageWH,
-                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 1));
+                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 1, kNoID));
     sk_sp<SkDeferredDisplayList> ddl2 = helper.snap2();

     ddl1 = nullptr;
@@ -902,5 +944,5 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeViewCache13, reporter, ctxInfo) {

     REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
     REPORTER_ASSERT(reporter, helper.checkView(helper.liveCanvas(), 2*kImageWH,
-                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 0));
+                                               /*hits*/ 0, /*misses*/ 2, /*refs*/ 0, kNoID));
 }