move the SkPackedGlyphID out of SkGlyphDigest

Move the packed glyph ID out of the digest to make room for the glyph
dimensions. Use SkTHashMap instead of SkTHashTable for the digest
lookup, and add a Hash function struct to SkPackedGlyphID to serve as
the map's hasher.

Bug: skia:13192

Change-Id: I1b59b47cb292402bf8b8e892b3b37edad9ecb9ea
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/529132
Reviewed-by: Ben Wagner <bungeman@google.com>
Commit-Queue: Herb Derby <herb@google.com>
Herb Derby 2022-04-11 15:29:22 -06:00 committed by SkCQ
parent e853b8d2b5
commit 24a1344800
5 changed files with 21 additions and 25 deletions
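In short, the per-strike tracking switches from a set-style SkTHashTable, which keyed entries on the uint32_t packed-ID value stored inside each digest, to an SkTHashMap keyed by the SkPackedGlyphID itself and hashed with the new SkPackedGlyphID::Hash functor (which forwards to packedID.hash()). A rough sketch of the resulting lookup pattern, using names from the diffs below (the surrounding glyph types and declarations are elided, so this is illustrative rather than compilable on its own):

    // The map now owns the key; SkGlyphDigest no longer stores the packed glyph ID.
    SkTHashMap<SkPackedGlyphID, SkGlyphDigest, SkPackedGlyphID::Hash> fSentGlyphs;

    // Lookups and insertions pass the SkPackedGlyphID directly instead of packedID.value().
    SkGlyphDigest* digest = fSentGlyphs.find(packedID);
    if (digest == nullptr) {
        SkGlyphDigest newDigest{0, *glyph};
        digest = fSentGlyphs.set(packedID, newDigest);  // set() takes the key explicitly
    }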

View File

@@ -291,7 +291,7 @@ private:
     LowerRangeBitVector fSentLowGlyphIDs;
     // The masks and paths that currently reside in the GPU process.
-    SkTHashTable<SkGlyphDigest, uint32_t, SkGlyphDigest> fSentGlyphs;
+    SkTHashMap<SkPackedGlyphID, SkGlyphDigest, SkPackedGlyphID::Hash> fSentGlyphs;
     SkTHashTable<PathSummary, SkPackedGlyphID, PathSummaryTraits> fSentPaths;
     SkTHashTable<DrawableSummary, SkGlyphID, DrawableSummaryTraits> fSentDrawables;
@@ -438,7 +438,7 @@ void RemoteStrike::commonMaskLoop(
         SkDrawableGlyphBuffer* accepted, SkSourceGlyphBuffer* rejected, Rejector&& reject) {
     accepted->forEachInput(
             [&](size_t i, SkPackedGlyphID packedID, SkPoint position) {
-                SkGlyphDigest* digest = fSentGlyphs.find(packedID.value());
+                SkGlyphDigest* digest = fSentGlyphs.find(packedID);
                 if (digest == nullptr) {
                     // Put the new SkGlyph in the glyphs to send.
                     this->ensureScalerContext();
@@ -446,7 +446,7 @@ void RemoteStrike::commonMaskLoop(
                     SkGlyph* glyph = &fMasksToSend.back();
                     SkGlyphDigest newDigest{0, *glyph};
-                    digest = fSentGlyphs.set(newDigest);
+                    digest = fSentGlyphs.set(packedID, newDigest);
                 }
                 // Reject things that are too big.
@@ -462,14 +462,14 @@ void RemoteStrike::prepareForMaskDrawing(
         SkPackedGlyphID packedID = variant.packedID();
         if (fSentLowGlyphIDs.test(packedID)) {
             #ifdef SK_DEBUG
-            SkGlyphDigest* digest = fSentGlyphs.find(packedID.value());
+            SkGlyphDigest* digest = fSentGlyphs.find(packedID);
             SkASSERT(digest != nullptr);
             SkASSERT(digest->canDrawAsMask() && digest->canDrawAsSDFT());
             #endif
             continue;
         }
-        SkGlyphDigest* digest = fSentGlyphs.find(packedID.value());
+        SkGlyphDigest* digest = fSentGlyphs.find(packedID);
         if (digest == nullptr) {
             // Put the new SkGlyph in the glyphs to send.
@@ -479,7 +479,7 @@ void RemoteStrike::prepareForMaskDrawing(
             SkGlyphDigest newDigest{0, *glyph};
-            digest = fSentGlyphs.set(newDigest);
+            digest = fSentGlyphs.set(packedID, newDigest);
             if (digest->canDrawAsMask() && digest->canDrawAsSDFT()) {
                 fSentLowGlyphIDs.setIfLower(packedID);

View File

@@ -397,8 +397,7 @@ void SkGlyph::ensureIntercepts(const SkScalar* bounds, SkScalar scale, SkScalar
 }
 SkGlyphDigest::SkGlyphDigest(size_t index, const SkGlyph& glyph)
-        : fPackedGlyphID{glyph.getPackedID().value()}
-        , fIndex{SkTo<uint32_t>(index)}
+        : fIndex{SkTo<uint32_t>(index)}
         , fIsEmpty(glyph.isEmpty())
         , fIsColor(glyph.isColor())
         , fCanDrawAsMask{SkStrikeForGPU::CanDrawAsMask(glyph)}

View File

@@ -46,12 +46,18 @@ struct SkPackedGlyphID {
         kFixedPointSubPixelPosBits = kFixedPointBinaryPointPos - kSubPixelPosLen,
     };
-    inline static constexpr SkScalar kSubpixelRound =
+    inline static const constexpr SkScalar kSubpixelRound =
             1.f / (1u << (SkPackedGlyphID::kSubPixelPosLen + 1));
-    inline static constexpr SkIPoint kXYFieldMask{kSubPixelPosMask << kSubPixelX,
+    inline static const constexpr SkIPoint kXYFieldMask{kSubPixelPosMask << kSubPixelX,
                                                   kSubPixelPosMask << kSubPixelY};
+    struct Hash {
+        uint32_t operator() (SkPackedGlyphID packedID) const {
+            return packedID.hash();
+        }
+    };
     constexpr explicit SkPackedGlyphID(SkGlyphID glyphID)
             : fID{(uint32_t)glyphID << kGlyphID} { }
@@ -256,20 +262,10 @@ public:
     bool isColor() const {return fIsColor; }
     bool canDrawAsMask() const {return fCanDrawAsMask;}
     bool canDrawAsSDFT() const {return fCanDrawAsSDFT;}
-    uint32_t packedGlyphID() const {return fPackedGlyphID;}
     uint16_t maxDimension() const {return fMaxDimension; }
-    // Support mapping from SkPackedGlyphID stored in the digest.
-    static uint32_t GetKey(SkGlyphDigest digest) {
-        return digest.packedGlyphID();
-    }
-    static uint32_t Hash(uint32_t packedGlyphID) {
-        return SkGoodHash()(packedGlyphID);
-    }
 private:
     static_assert(SkPackedGlyphID::kEndData == 20);
-    uint64_t fPackedGlyphID : SkPackedGlyphID::kEndData;
     uint64_t fIndex : SkPackedGlyphID::kEndData;
     uint64_t fIsEmpty : 1;
     uint64_t fIsColor : 1;

View File

@@ -41,7 +41,7 @@ std::tuple<SkGlyph*, size_t> SkScalerCache::glyph(SkPackedGlyphID packedGlyphID)
 }
 std::tuple<SkGlyphDigest, size_t> SkScalerCache::digest(SkPackedGlyphID packedGlyphID) {
-    SkGlyphDigest* digest = fDigestForPackedGlyphID.find(packedGlyphID.value());
+    SkGlyphDigest* digest = fDigestForPackedGlyphID.find(packedGlyphID);
     if (digest != nullptr) {
         return {*digest, 0};
@@ -54,7 +54,7 @@ std::tuple<SkGlyphDigest, size_t> SkScalerCache::digest(SkPackedGl
 SkGlyphDigest SkScalerCache::addGlyph(SkGlyph* glyph) {
     size_t index = fGlyphForIndex.size();
     SkGlyphDigest digest = SkGlyphDigest{index, *glyph};
-    fDigestForPackedGlyphID.set(digest);
+    fDigestForPackedGlyphID.set(glyph->getPackedID(), digest);
     fGlyphForIndex.push_back(glyph);
     return digest;
 }
@@ -137,7 +137,7 @@ std::tuple<SkGlyph*, size_t> SkScalerCache::mergeGlyphAndImage(
         SkPackedGlyphID toID, const SkGlyph& from) {
     SkAutoMutexExclusive lock{fMu};
     // TODO(herb): remove finding the glyph when setting the metrics and image are separated
-    SkGlyphDigest* digest = fDigestForPackedGlyphID.find(toID.value());
+    SkGlyphDigest* digest = fDigestForPackedGlyphID.find(toID);
     if (digest != nullptr) {
         SkGlyph* to = fGlyphForIndex[digest->index()];
         size_t delta = 0;

View File

@@ -134,7 +134,8 @@ private:
     // SkGlyphDigest's fIndex field stores the index. This pointer provides an unchanging
     // reference to the SkGlyph as long as the strike is alive, and fGlyphForIndex
     // provides a dense index for glyphs.
-    SkTHashTable<SkGlyphDigest, uint32_t, SkGlyphDigest> fDigestForPackedGlyphID SK_GUARDED_BY(fMu);
+    SkTHashMap<SkPackedGlyphID, SkGlyphDigest, SkPackedGlyphID::Hash>
+            fDigestForPackedGlyphID SK_GUARDED_BY(fMu);
     std::vector<SkGlyph*> fGlyphForIndex SK_GUARDED_BY(fMu);
     // so we don't grow our arrays a lot