Implement Analytic AA for General Paths (with Guard against Chrome)

I've set up a SK_SUPPORT_LEGACY_AAA flag to guard against Chromium layout tests. I also set that flag in this CL so, theoretically, this CL won't trigger any GM changes. I'll use this to verify my guard, then remove that flag and actually enable concave AAA in a future CL.

When enabled, for most simple concave paths (e.g., rectangle stroke, rrect stroke, sawtooth, stars...), the Analytic AA achieves 1.3x-2x speedup, and they look much prettier. And they probably are the majority in our use cases by number. But they probably are not the majority by time cost; a single complicated path may cost 10x-100x more time to render than a rectangle stroke... For those complicated paths, we fall back to supersampling by default as we're likely to be 1.1-1.2x slower and the quality improvement is not visually significant. However, one can use gSkForceAnalyticAA to disable that fallback.

BUG=skia:

Change-Id: If9549a3acc4a187cfaf7eb51890c148da3083d31
Reviewed-on: https://skia-review.googlesource.com/6091
Reviewed-by: Mike Reed <reed@google.com>
Commit-Queue: Yuqian Li <liyuqian@google.com>
This commit is contained in:
Yuqian Li 2017-01-11 10:58:31 -05:00 committed by Skia Commit-Bot
parent 108f55ed5d
commit 89a0e72287
12 changed files with 759 additions and 158 deletions

View File

@ -1198,6 +1198,10 @@ int nanobench_main() {
gSkUseAnalyticAA = FLAGS_analyticAA;
if (FLAGS_forceAnalyticAA) {
gSkForceAnalyticAA = true;
}
int runs = 0;
BenchmarkStream benchStream;
while (Benchmark* b = benchStream.next()) {

View File

@ -1289,6 +1289,10 @@ int dm_main() {
gSkUseAnalyticAA = FLAGS_analyticAA;
if (FLAGS_forceAnalyticAA) {
gSkForceAnalyticAA = true;
}
if (FLAGS_verbose) {
gVLog = stderr;
} else if (!FLAGS_writePath.isEmpty()) {

View File

@ -1822,7 +1822,12 @@ bool SampleWindow::onHandleChar(SkUnichar uni) {
}
break;
case 'A':
gSkUseAnalyticAA = !gSkUseAnalyticAA.load();
if (gSkUseAnalyticAA.load() && !gSkForceAnalyticAA.load()) {
gSkForceAnalyticAA = true;
} else {
gSkUseAnalyticAA = !gSkUseAnalyticAA.load();
gSkForceAnalyticAA = false;
}
this->inval(nullptr);
this->updateTitle();
break;
@ -2176,7 +2181,11 @@ void SampleWindow::updateTitle() {
title.prepend(gDeviceTypePrefix[fDeviceType]);
if (gSkUseAnalyticAA) {
title.prepend("<AAA> ");
if (gSkForceAnalyticAA) {
title.prepend("<FAAA> ");
} else {
title.prepend("<AAA> ");
}
}
if (fTilingMode != kNo_Tiling) {
title.prependf("<T: %s> ", gTilingInfo[fTilingMode].label);

View File

@ -48,34 +48,22 @@ bool SkAnalyticEdge::updateLine(SkFixed x0, SkFixed y0, SkFixed x1, SkFixed y1,
return true;
}
void SkAnalyticEdge::chopLineWithClip(const SkIRect& clip) {
int top = SkFixedFloorToInt(fUpperY);
SkASSERT(top < clip.fBottom);
// clip the line to the clip top
if (top < clip.fTop) {
SkASSERT(SkFixedCeilToInt(fLowerY) > clip.fTop);
SkFixed newY = SkIntToFixed(clip.fTop);
this->goY(newY);
fUpperY = newY;
}
}
bool SkAnalyticQuadraticEdge::setQuadratic(const SkPoint pts[3]) {
if (!fQEdge.setQuadraticWithoutUpdate(pts, 2)) {
fRiteE = nullptr;
if (!fQEdge.setQuadraticWithoutUpdate(pts, kDefaultAccuracy)) {
return false;
}
fQEdge.fQx >>= 2;
fQEdge.fQy >>= 2;
fQEdge.fQDx >>= 2;
fQEdge.fQDy >>= 2;
fQEdge.fQDDx >>= 2;
fQEdge.fQDDy >>= 2;
fQEdge.fQLastX >>= 2;
fQEdge.fQLastY >>= 2;
fQEdge.fQy = snapY(fQEdge.fQy);
fQEdge.fQLastY = snapY(fQEdge.fQLastY);
fQEdge.fQx >>= kDefaultAccuracy;
fQEdge.fQy >>= kDefaultAccuracy;
fQEdge.fQDx >>= kDefaultAccuracy;
fQEdge.fQDy >>= kDefaultAccuracy;
fQEdge.fQDDx >>= kDefaultAccuracy;
fQEdge.fQDDy >>= kDefaultAccuracy;
fQEdge.fQLastX >>= kDefaultAccuracy;
fQEdge.fQLastY >>= kDefaultAccuracy;
fQEdge.fQy = SnapY(fQEdge.fQy);
fQEdge.fQLastY = SnapY(fQEdge.fQLastY);
fWinding = fQEdge.fWinding;
fCurveCount = fQEdge.fCurveCount;
@ -105,15 +93,28 @@ bool SkAnalyticQuadraticEdge::updateQuadratic() {
{
newx = oldx + (dx >> shift);
newy = oldy + (dy >> shift);
SkFDot6 diffY = (newy - fSnappedY) >> 10;
slope = diffY ? QuickSkFDot6Div((newx - fSnappedX) >> 10, diffY) : SK_MaxS32;
if (SkAbs32(dy >> shift) >= SK_Fixed1 * 2) { // only snap when dy is large enough
SkFDot6 diffY = SkFixedToFDot6(newy - fSnappedY);
slope = diffY ? QuickSkFDot6Div(SkFixedToFDot6(newx - fSnappedX), diffY)
: SK_MaxS32;
newSnappedY = SkTMin<SkFixed>(fQEdge.fQLastY, SkFixedRoundToFixed(newy));
newSnappedX = newx - SkFixedMul(slope, newy - newSnappedY);
} else {
newSnappedY = SkTMin(fQEdge.fQLastY, snapY(newy));
newSnappedY = SkTMin(fQEdge.fQLastY, SnapY(newy));
newSnappedX = newx;
#ifdef SK_SUPPORT_LEGACY_AAA
SkFDot6 diffY = SkFixedToFDot6(newy - fSnappedY);
#else
SkFDot6 diffY = SkFixedToFDot6(newSnappedY - fSnappedY);
#endif
slope = diffY ? QuickSkFDot6Div(SkFixedToFDot6(newx - fSnappedX), diffY)
: SK_MaxS32;
}
#ifndef SK_SUPPORT_LEGACY_AAA
SkASSERT(slope == SK_MaxS32 ||
SkAbs32(fSnappedX + SkFixedMul(slope, newSnappedY - fSnappedY) - newSnappedX)
< SK_FixedHalf);
#endif
dx += fQEdge.fQDDx;
dy += fQEdge.fQDDy;
}
@ -146,28 +147,32 @@ bool SkAnalyticQuadraticEdge::updateQuadratic() {
}
bool SkAnalyticCubicEdge::setCubic(const SkPoint pts[4]) {
if (!fCEdge.setCubicWithoutUpdate(pts, 2)) {
fRiteE = nullptr;
if (!fCEdge.setCubicWithoutUpdate(pts, kDefaultAccuracy)) {
return false;
}
fCEdge.fCx >>= 2;
fCEdge.fCy >>= 2;
fCEdge.fCDx >>= 2;
fCEdge.fCDy >>= 2;
fCEdge.fCDDx >>= 2;
fCEdge.fCDDy >>= 2;
fCEdge.fCDDDx >>= 2;
fCEdge.fCDDDy >>= 2;
fCEdge.fCLastX >>= 2;
fCEdge.fCLastY >>= 2;
fCEdge.fCy = snapY(fCEdge.fCy);
fCEdge.fCLastY = snapY(fCEdge.fCLastY);
fCEdge.fCx >>= kDefaultAccuracy;
fCEdge.fCy >>= kDefaultAccuracy;
fCEdge.fCDx >>= kDefaultAccuracy;
fCEdge.fCDy >>= kDefaultAccuracy;
fCEdge.fCDDx >>= kDefaultAccuracy;
fCEdge.fCDDy >>= kDefaultAccuracy;
fCEdge.fCDDDx >>= kDefaultAccuracy;
fCEdge.fCDDDy >>= kDefaultAccuracy;
fCEdge.fCLastX >>= kDefaultAccuracy;
fCEdge.fCLastY >>= kDefaultAccuracy;
fCEdge.fCy = SnapY(fCEdge.fCy);
fCEdge.fCLastY = SnapY(fCEdge.fCLastY);
fWinding = fCEdge.fWinding;
fCurveCount = fCEdge.fCurveCount;
fCurveShift = fCEdge.fCurveShift;
fCubicDShift = fCEdge.fCubicDShift;
fSnappedY = fCEdge.fCy;
return this->updateCubic();
}
@ -203,17 +208,24 @@ bool SkAnalyticCubicEdge::updateCubic() {
newy = oldy;
}
SkFixed snappedOldY = SkAnalyticEdge::snapY(oldy);
SkFixed snappedNewY = SkAnalyticEdge::snapY(newy);
SkFixed slope = SkFixedToFDot6(snappedNewY - snappedOldY) == 0
SkFixed newSnappedY = SnapY(newy);
// we want to SkASSERT(snappedNewY <= fCEdge.fCLastY), but our finite fixedpoint
// doesn't always achieve that, so we have to explicitly pin it here.
if (fCEdge.fCLastY < newSnappedY) {
newSnappedY = fCEdge.fCLastY;
count = 0;
}
SkFixed slope = SkFixedToFDot6(newSnappedY - fSnappedY) == 0
? SK_MaxS32
: SkFDot6Div(SkFixedToFDot6(newx - oldx),
SkFixedToFDot6(snappedNewY - snappedOldY));
SkFixedToFDot6(newSnappedY - fSnappedY));
success = this->updateLine(oldx, snappedOldY, newx, snappedNewY, slope);
success = this->updateLine(oldx, fSnappedY, newx, newSnappedY, slope);
oldx = newx;
oldy = newy;
fSnappedY = newSnappedY;
} while (count < 0 && !success);
fCEdge.fCx = newx;

View File

@ -21,6 +21,10 @@ struct SkAnalyticEdge {
SkAnalyticEdge* fNext;
SkAnalyticEdge* fPrev;
// During aaa_walk_edges, if this edge is a left edge,
// then fRiteE is its corresponding right edge. Otherwise it's nullptr.
SkAnalyticEdge* fRiteE;
SkFixed fX;
SkFixed fDX;
SkFixed fUpperX; // The x value when y = fUpperY
@ -30,6 +34,10 @@ struct SkAnalyticEdge {
SkFixed fDY; // abs(1/fDX); may be SK_MaxS32 when fDX is close to 0.
// fDY is only used for blitting trapezoids.
SkFixed fSavedX; // For deferred blitting
SkFixed fSavedY; // For deferred blitting
SkFixed fSavedDY; // For deferred blitting
int8_t fCurveCount; // only used by kQuad(+) and kCubic(-)
uint8_t fCurveShift; // appled to all Dx/DDx/DDDx except for fCubicDShift exception
uint8_t fCubicDShift; // applied to fCDx and fCDy only in cubic
@ -37,7 +45,8 @@ struct SkAnalyticEdge {
static const int kDefaultAccuracy = 2; // default accuracy for snapping
static inline SkFixed snapY(SkFixed y, int accuracy = kDefaultAccuracy) {
// Round a 16.16 fixed-point y to the nearest multiple of (SK_Fixed1 >> accuracy);
// with kDefaultAccuracy == 2 this snaps y to quarter-pixel boundaries.
static inline SkFixed SnapY(SkFixed y) {
const int accuracy = kDefaultAccuracy;
// This approach is safer than left shift, round, then right shift
return ((unsigned)y + (SK_Fixed1 >> (accuracy + 1))) >> (16 - accuracy) << (16 - accuracy);
}
@ -55,15 +64,22 @@ struct SkAnalyticEdge {
}
}
inline bool setLine(const SkPoint& p0, const SkPoint& p1, const SkIRect* clip = nullptr);
inline bool updateLine(SkFixed ax, SkFixed ay, SkFixed bx, SkFixed by, SkFixed slope);
void chopLineWithClip(const SkIRect& clip);
inline bool intersectsClip(const SkIRect& clip) const {
SkASSERT(SkFixedFloorToInt(fUpperY) < clip.fBottom);
return SkFixedCeilToInt(fLowerY) > clip.fTop;
// Advance the edge down by one snapped sub-pixel step: unless fDX == 0, y must
// be exactly (SK_Fixed1 >> yShift) below the current fY, and fX moves by the
// matching fraction of the slope.
inline void goY(SkFixed y, int yShift) {
SkASSERT(yShift >= 0 && yShift <= kDefaultAccuracy);
SkASSERT(fDX == 0 || y - fY == SK_Fixed1 >> yShift);
fY = y;
fX += fDX >> yShift;
}
// Record the current position and dY into fSavedX/fSavedY/fSavedDY so a
// partially covered row can be blitted later (deferred blitting).
inline void saveXY(SkFixed x, SkFixed y, SkFixed dY) {
fSavedX = x;
fSavedY = y;
fSavedDY = dY;
}
inline bool setLine(const SkPoint& p0, const SkPoint& p1);
inline bool updateLine(SkFixed ax, SkFixed ay, SkFixed bx, SkFixed by, SkFixed slope);
#ifdef SK_DEBUG
void dump() const {
SkDebugf("edge: upperY:%d lowerY:%d y:%g x:%g dx:%g w:%d\n",
@ -90,36 +106,47 @@ struct SkAnalyticQuadraticEdge : public SkAnalyticEdge {
bool setQuadratic(const SkPoint pts[3]);
bool updateQuadratic();
// Re-anchor the quadratic's snapped position at the current (fX, fY), after
// asserting the accumulated drift is under one fixed-point unit.
inline void keepContinuous() {
// We use fX as the starting x to ensure the continuity.
// Without it, we may break the sorted edge list.
SkASSERT(SkAbs32(fX - SkFixedMul(fY - fSnappedY, fDX) - fSnappedX) < SK_Fixed1);
SkASSERT(SkAbs32(fY - fSnappedY) < SK_Fixed1); // This may differ due to smooth jump
fSnappedX = fX;
fSnappedY = fY;
}
};
struct SkAnalyticCubicEdge : public SkAnalyticEdge {
SkCubicEdge fCEdge;
SkFixed fSnappedY; // to make sure that y is increasing with smooth jump and snapping
bool setCubic(const SkPoint pts[4]);
bool updateCubic();
// Cubic counterpart of SkAnalyticQuadraticEdge::keepContinuous: re-anchor the
// curve's x at the current fX and remember fY as the snapped y, after asserting
// the drift is under one fixed-point unit.
inline void keepContinuous() {
SkASSERT(SkAbs32(fX - SkFixedMul(fDX, fY - SnapY(fCEdge.fCy)) - fCEdge.fCx) < SK_Fixed1);
fCEdge.fCx = fX;
fSnappedY = fY;
}
};
bool SkAnalyticEdge::setLine(const SkPoint& p0, const SkPoint& p1, const SkIRect* clip) {
// We must set X/Y using the same way (times 4, to FDot6, then to Fixed) as Quads/Cubics.
bool SkAnalyticEdge::setLine(const SkPoint& p0, const SkPoint& p1) {
fRiteE = nullptr;
// We must set X/Y using the same way (e.g., times 4, to FDot6, then to Fixed) as Quads/Cubics.
// Otherwise the order of the edge might be wrong due to precision limit.
SkFixed x0 = SkFDot6ToFixed(SkScalarToFDot6(p0.fX * 4)) >> 2;
SkFixed y0 = snapY(SkFDot6ToFixed(SkScalarToFDot6(p0.fY * 4)) >> 2);
SkFixed x1 = SkFDot6ToFixed(SkScalarToFDot6(p1.fX * 4)) >> 2;
SkFixed y1 = snapY(SkFDot6ToFixed(SkScalarToFDot6(p1.fY * 4)) >> 2);
const int accuracy = kDefaultAccuracy;
const int multiplier = (1 << kDefaultAccuracy);
SkFixed x0 = SkFDot6ToFixed(SkScalarToFDot6(p0.fX * multiplier)) >> accuracy;
SkFixed y0 = SnapY(SkFDot6ToFixed(SkScalarToFDot6(p0.fY * multiplier)) >> accuracy);
SkFixed x1 = SkFDot6ToFixed(SkScalarToFDot6(p1.fX * multiplier)) >> accuracy;
SkFixed y1 = SnapY(SkFDot6ToFixed(SkScalarToFDot6(p1.fY * multiplier)) >> accuracy);
// are we a zero-height line?
if (y0 == y1) {
return false;
}
int top = SkFixedFloorToInt(y0);
int bot = SkFixedCeilToInt(y1);
// are we completely above or below the clip?
if (clip && (top >= clip->fBottom || bot <= clip->fTop)) {
return false;
}
int winding = 1;
if (y0 > y1) {
@ -128,7 +155,15 @@ bool SkAnalyticEdge::setLine(const SkPoint& p0, const SkPoint& p1, const SkIRect
winding = -1;
}
#ifdef SK_SUPPORT_LEGACY_AAA
SkFixed slope = SkFixedDiv(x1 - x0, y1 - y0);
#else
SkFDot6 dy = SkFixedToFDot6(y1 - y0);
SkFDot6 dx = SkFixedToFDot6(x1 - x0);
SkFixed slope = dy ? QuickSkFDot6Div(dx, dy) : SK_MaxS32;
SkASSERT(dx == 0 || slope != 0);
SkFixed absSlope = SkAbs32(slope);
#endif
fX = x0;
fDX = slope;
@ -136,14 +171,17 @@ bool SkAnalyticEdge::setLine(const SkPoint& p0, const SkPoint& p1, const SkIRect
fY = y0;
fUpperY = y0;
fLowerY = y1;
#ifdef SK_SUPPORT_LEGACY_AAA
fDY = x1 != x0 ? SkAbs32(SkFixedDiv(y1 - y0, x1 - x0)) : SK_MaxS32;
#else
fDY = dx == 0 ? SK_MaxS32 : absSlope < kInverseTableSize
? QuickFDot6Inverse::Lookup(absSlope)
: SkAbs32(QuickSkFDot6Div(dy, dx));
#endif
fCurveCount = 0;
fWinding = SkToS8(winding);
fCurveShift = 0;
if (clip) {
this->chopLineWithClip(*clip);
}
return true;
}

View File

@ -16,6 +16,8 @@
std::atomic<bool> gSkUseAnalyticAA{true};
#endif
std::atomic<bool> gSkForceAnalyticAA{false};
static inline void blitrect(SkBlitter* blitter, const SkIRect& r) {
blitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
}

View File

@ -23,7 +23,11 @@ class SkPath;
*/
typedef SkIRect SkXRect;
// Use this to check that we successfully guard the change against Chromium layout tests
#define SK_SUPPORT_LEGACY_AAA
extern std::atomic<bool> gSkUseAnalyticAA;
extern std::atomic<bool> gSkForceAnalyticAA;
class AdditiveBlitter;

View File

@ -34,10 +34,6 @@ void sk_fill_path(const SkPath& path, const SkIRect& clipRect,
SkBlitter* blitter, int start_y, int stop_y, int shiftEdgesUp,
bool pathContainedInClip);
void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, AdditiveBlitter*,
int start_y, int stop_y, bool pathContainedInClip, bool isUsingMask,
bool forceRLE);
// blit the rects above and below avoid, clipped to clip
void sk_blit_above(SkBlitter*, const SkIRect& avoid, const SkRegion& clip);
void sk_blit_below(SkBlitter*, const SkIRect& avoid, const SkRegion& clip);

View File

@ -84,9 +84,13 @@ number of scan lines in our algorithm is only about 3 + H while the
///////////////////////////////////////////////////////////////////////////////
static inline void addAlpha(SkAlpha& alpha, SkAlpha delta) {
SkASSERT(alpha + (int)delta <= 256);
alpha = SkAlphaRuns::CatchOverflow(alpha + (int)delta);
// Accumulate delta into *alpha. The caller guarantees the sum stays <= 256;
// CatchOverflow pins the 256 case back into the valid alpha range.
static inline void addAlpha(SkAlpha* alpha, SkAlpha delta) {
SkASSERT(*alpha + (int)delta <= 256);
*alpha = SkAlphaRuns::CatchOverflow(*alpha + (int)delta);
}
// Saturating variant of addAlpha: clamp at 0xFF instead of asserting. Used for
// concave paths, where coverage can legitimately accumulate past 0xFF.
static inline void safelyAddAlpha(SkAlpha* alpha, SkAlpha delta) {
*alpha = SkTMin(0xFF, *alpha + (int)delta);
}
class AdditiveBlitter : public SkBlitter {
@ -147,7 +151,7 @@ public:
void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
// Allowing following methods are used to blit rectangles during aaa_walk_convex_edges
// Since there aren't many rectangles, we can still break the slow speed of virtual functions.
// Since there aren't many rectangles, we can still bear the slow speed of virtual functions.
void blitAntiH(int x, int y, const SkAlpha alpha) override;
void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
void blitV(int x, int y, int height, SkAlpha alpha) override;
@ -227,14 +231,14 @@ void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int
void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
SkASSERT(x >= fMask.fBounds.fLeft -1);
addAlpha(this->getRow(y)[x], alpha);
addAlpha(&this->getRow(y)[x], alpha);
}
void MaskAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
SkASSERT(x >= fMask.fBounds.fLeft -1);
uint8_t* row = this->getRow(y);
for (int i=0; i<width; i++) {
addAlpha(row[x + i], alpha);
for (int i = 0; i < width; ++i) {
addAlpha(&row[x + i], alpha);
}
}
@ -246,7 +250,7 @@ void MaskAdditiveBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
// This must be called as if this is a real blitter.
// So we directly set alpha rather than adding it.
uint8_t* row = this->getRow(y);
for (int i=0; i<height; i++) {
for (int i = 0; i < height; ++i) {
row[x] = alpha;
row += fMask.fRowBytes;
}
@ -257,7 +261,7 @@ void MaskAdditiveBlitter::blitRect(int x, int y, int width, int height) {
// This must be called as if this is a real blitter.
// So we directly set alpha rather than adding it.
uint8_t* row = this->getRow(y);
for (int i=0; i<height; i++) {
for (int i = 0; i < height; ++i) {
memset(row + x, 0xFF, width);
row += fMask.fRowBytes;
}
@ -290,7 +294,7 @@ public:
}
}
private:
protected:
SkBlitter* fRealBlitter;
/// Current y coordinate
@ -313,7 +317,7 @@ private:
int fOffsetX;
inline bool check(int x, int width) {
inline bool check(int x, int width) const {
#ifdef SK_DEBUG
if (x < 0 || x + width > fWidth) {
// SkDebugf("Ignore x = %d, width = %d\n", x, width);
@ -431,8 +435,8 @@ void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[],
}
fRuns.fRuns[x + i] = 1;
}
for (int i=0; i<len; i++) {
addAlpha(fRuns.fAlpha[x + i], antialias[i]);
for (int i = 0; i < len; ++i) {
addAlpha(&fRuns.fAlpha[x + i], antialias[i]);
}
}
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
@ -463,12 +467,85 @@ void RunBasedAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha a
int RunBasedAdditiveBlitter::getWidth() { return fWidth; }
// This exists specifically for concave path filling.
// In those cases, we can easily accumulate alpha greater than 0xFF.
// It overrides the three blitAntiH variants to use saturating (clamped) alpha
// adds instead of the assert-on-overflow adds in RunBasedAdditiveBlitter.
class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter {
public:
SafeRLEAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
bool isInverse) : RunBasedAdditiveBlitter(realBlitter, ir, clip, isInverse) {}
void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
void blitAntiH(int x, int y, const SkAlpha alpha) override;
void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
};
// Accumulate a row of antialias values at (x, y), clipped to the blitter's
// [fLeft, fLeft + fWidth) bounds, clamping each resulting alpha at 0xFF.
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
checkY(y);
x -= fLeft;
// Clip the left side of the span against the blitter bounds.
if (x < 0) {
len += x;
antialias -= x;
x = 0;
}
len = SkTMin(len, fWidth - x);
SkASSERT(check(x, len));
if (x < fOffsetX) {
fOffsetX = 0;
}
fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
// Split every run covering [x, x + len) into unit-length runs so each
// pixel's alpha can be accumulated independently.
for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
fRuns.fRuns[x + i + j] = 1;
fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
}
fRuns.fRuns[x + i] = 1;
}
for (int i = 0; i < len; ++i) {
safelyAddAlpha(&fRuns.fAlpha[x + i], antialias[i]);
}
}
// Accumulate a single alpha at (x, y), saturating at 0xFF.
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
checkY(y);
x -= fLeft;
if (x < fOffsetX) {
fOffsetX = 0;
}
if (check(x, 1)) {
// Break the run
fOffsetX = fRuns.add(x, 0, 1, 0, 0, fOffsetX);
safelyAddAlpha(&fRuns.fAlpha[x], alpha);
}
}
// Accumulate a constant alpha over [x, x + width), saturating at 0xFF.
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
checkY(y);
x -= fLeft;
if (x < fOffsetX) {
fOffsetX = 0;
}
if (check(x, width)) {
// Break the run
fOffsetX = fRuns.add(x, 0, width, 0, 0, fOffsetX);
// Step run-by-run; each run shares a single alpha slot at its start.
for(int i = x; i < x + width; i += fRuns.fRuns[i]) {
safelyAddAlpha(&fRuns.fAlpha[i], alpha);
}
}
}
///////////////////////////////////////////////////////////////////////////////
// Return the alpha of a trapezoid whose height is 1
static inline SkAlpha trapezoidToAlpha(SkFixed l1, SkFixed l2) {
SkASSERT(l1 >= 0 && l2 >= 0);
return ((l1 + l2) >> 9);
return (l1 + l2) >> 9;
}
// The alpha of right-triangle (a, a*b), in 16 bits
@ -524,7 +601,7 @@ static inline void computeAlphaAboveLine(SkAlpha* alphas, SkFixed l, SkFixed r,
SkFixed firstH = SkFixedMul(first, dY); // vertical edge of the left-most triangle
alphas[0] = SkFixedMul(first, firstH) >> 9; // triangle alpha
SkFixed alpha16 = firstH + (dY >> 1); // rectangle plus triangle
for (int i = 1; i < R - 1; i++) {
for (int i = 1; i < R - 1; ++i) {
alphas[i] = alpha16 >> 8;
alpha16 += dY;
}
@ -557,17 +634,19 @@ static inline void computeAlphaBelowLine(
}
// Note that if fullAlpha != 0xFF, we'll multiply alpha by fullAlpha
static inline void blit_single_alpha(AdditiveBlitter* blitter, int y, int x,
static SK_ALWAYS_INLINE void blit_single_alpha(AdditiveBlitter* blitter, int y, int x,
SkAlpha alpha, SkAlpha fullAlpha, SkAlpha* maskRow,
bool isUsingMask) {
bool isUsingMask, bool noRealBlitter, bool needSafeCheck) {
if (isUsingMask) {
if (fullAlpha == 0xFF) {
if (fullAlpha == 0xFF && !noRealBlitter) { // noRealBlitter is needed for concave paths
maskRow[x] = alpha;
} else if (needSafeCheck) {
safelyAddAlpha(&maskRow[x], getPartialAlpha(alpha, fullAlpha));
} else {
addAlpha(maskRow[x], getPartialAlpha(alpha, fullAlpha));
addAlpha(&maskRow[x], getPartialAlpha(alpha, fullAlpha));
}
} else {
if (fullAlpha == 0xFF) {
if (fullAlpha == 0xFF && !noRealBlitter) {
blitter->getRealBlitter()->blitV(x, y, 1, alpha);
} else {
blitter->blitAntiH(x, y, getPartialAlpha(alpha, fullAlpha));
@ -575,14 +654,19 @@ static inline void blit_single_alpha(AdditiveBlitter* blitter, int y, int x,
}
}
static inline void blit_two_alphas(AdditiveBlitter* blitter, int y, int x,
static SK_ALWAYS_INLINE void blit_two_alphas(AdditiveBlitter* blitter, int y, int x,
SkAlpha a1, SkAlpha a2, SkAlpha fullAlpha, SkAlpha* maskRow,
bool isUsingMask) {
bool isUsingMask, bool noRealBlitter, bool needSafeCheck) {
if (isUsingMask) {
addAlpha(maskRow[x], a1);
addAlpha(maskRow[x + 1], a2);
if (needSafeCheck) {
safelyAddAlpha(&maskRow[x], a1);
safelyAddAlpha(&maskRow[x + 1], a2);
} else {
addAlpha(&maskRow[x], a1);
addAlpha(&maskRow[x + 1], a2);
}
} else {
if (fullAlpha == 0xFF) {
if (fullAlpha == 0xFF && !noRealBlitter) {
blitter->getRealBlitter()->blitAntiH2(x, y, a1, a2);
} else {
blitter->blitAntiH(x, y, a1);
@ -593,13 +677,18 @@ static inline void blit_two_alphas(AdditiveBlitter* blitter, int y, int x,
// It's important that this is inline. Otherwise it'll be much slower.
static SK_ALWAYS_INLINE void blit_full_alpha(AdditiveBlitter* blitter, int y, int x, int len,
SkAlpha fullAlpha, SkAlpha* maskRow, bool isUsingMask) {
SkAlpha fullAlpha, SkAlpha* maskRow, bool isUsingMask,
bool noRealBlitter, bool needSafeCheck) {
if (isUsingMask) {
for (int i=0; i<len; i++) {
addAlpha(maskRow[x + i], fullAlpha);
for (int i = 0; i < len; ++i) {
if (needSafeCheck) {
safelyAddAlpha(&maskRow[x + i], fullAlpha);
} else {
addAlpha(&maskRow[x + i], fullAlpha);
}
}
} else {
if (fullAlpha == 0xFF) {
if (fullAlpha == 0xFF && !noRealBlitter) {
blitter->getRealBlitter()->blitH(x, y, len);
} else {
blitter->blitAntiH(x, y, len, fullAlpha);
@ -610,13 +699,14 @@ static SK_ALWAYS_INLINE void blit_full_alpha(AdditiveBlitter* blitter, int y, in
static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
SkFixed ul, SkFixed ur, SkFixed ll, SkFixed lr,
SkFixed lDY, SkFixed rDY, SkAlpha fullAlpha, SkAlpha* maskRow,
bool isUsingMask) {
bool isUsingMask, bool noRealBlitter, bool needSafeCheck) {
int L = SkFixedFloorToInt(ul), R = SkFixedCeilToInt(lr);
int len = R - L;
if (len == 1) {
SkAlpha alpha = trapezoidToAlpha(ur - ul, lr - ll);
blit_single_alpha(blitter, y, L, alpha, fullAlpha, maskRow, isUsingMask);
blit_single_alpha(blitter, y, L, alpha, fullAlpha, maskRow, isUsingMask, noRealBlitter,
needSafeCheck);
return;
}
@ -637,7 +727,7 @@ static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
SkAlpha* tempAlphas = alphas + len + 1;
int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);
for (int i = 0; i < len; i++) {
for (int i = 0; i < len; ++i) {
runs[i] = 1;
alphas[i] = fullAlpha;
}
@ -655,7 +745,7 @@ static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
} else {
computeAlphaBelowLine(tempAlphas + uL - L, ul - (uL << 16), ll - (uL << 16),
lDY, fullAlpha);
for (int i = uL; i < lL; i++) {
for (int i = uL; i < lL; ++i) {
if (alphas[i - L] > tempAlphas[i - L]) {
alphas[i - L] -= tempAlphas[i - L];
} else {
@ -676,7 +766,7 @@ static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
} else {
computeAlphaAboveLine(tempAlphas + uR - L, ur - (uR << 16), lr - (uR << 16),
rDY, fullAlpha);
for (int i = uR; i < lR; i++) {
for (int i = uR; i < lR; ++i) {
if (alphas[i - L] > tempAlphas[i - L]) {
alphas[i - L] -= tempAlphas[i - L];
} else {
@ -686,11 +776,16 @@ static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
}
if (isUsingMask) {
for (int i=0; i<len; i++) {
addAlpha(maskRow[L + i], alphas[i]);
for (int i = 0; i < len; ++i) {
if (needSafeCheck) {
safelyAddAlpha(&maskRow[L + i], alphas[i]);
} else {
addAlpha(&maskRow[L + i], alphas[i]);
}
}
} else {
if (fullAlpha == 0xFF) { // Real blitter is faster than RunBasedAdditiveBlitter
if (fullAlpha == 0xFF && !noRealBlitter) {
// Real blitter is faster than RunBasedAdditiveBlitter
blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
} else {
blitter->blitAntiH(L, y, alphas, len);
@ -702,10 +797,11 @@ static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
}
}
static inline void blit_trapezoid_row(AdditiveBlitter* blitter, int y,
static SK_ALWAYS_INLINE void blit_trapezoid_row(AdditiveBlitter* blitter, int y,
SkFixed ul, SkFixed ur, SkFixed ll, SkFixed lr,
SkFixed lDY, SkFixed rDY, SkAlpha fullAlpha,
SkAlpha* maskRow, bool isUsingMask) {
SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter = false,
bool needSafeCheck = false) {
SkASSERT(lDY >= 0 && rDY >= 0); // We should only send in the absolte value
if (ul > ur) {
@ -743,16 +839,19 @@ static inline void blit_trapezoid_row(AdditiveBlitter* blitter, int y,
int len = SkFixedCeilToInt(joinLeft - ul);
if (len == 1) {
SkAlpha alpha = trapezoidToAlpha(joinLeft - ul, joinLeft - ll);
blit_single_alpha(blitter, y, ul >> 16, alpha, fullAlpha, maskRow, isUsingMask);
blit_single_alpha(blitter, y, ul >> 16, alpha, fullAlpha, maskRow, isUsingMask,
noRealBlitter, needSafeCheck);
} else if (len == 2) {
SkFixed first = joinLeft - SK_Fixed1 - ul;
SkFixed second = ll - ul - first;
SkAlpha a1 = partialTriangleToAlpha(first, lDY);
SkAlpha a2 = fullAlpha - partialTriangleToAlpha(second, lDY);
blit_two_alphas(blitter, y, ul >> 16, a1, a2, fullAlpha, maskRow, isUsingMask);
blit_two_alphas(blitter, y, ul >> 16, a1, a2, fullAlpha, maskRow, isUsingMask,
noRealBlitter, needSafeCheck);
} else {
blit_aaa_trapezoid_row(blitter, y, ul, joinLeft, ll, joinLeft, lDY, SK_MaxS32,
fullAlpha, maskRow, isUsingMask);
fullAlpha, maskRow, isUsingMask, noRealBlitter,
needSafeCheck);
}
}
// SkAAClip requires that we blit from left to right.
@ -760,29 +859,30 @@ static inline void blit_trapezoid_row(AdditiveBlitter* blitter, int y,
if (joinLeft < joinRite) {
blit_full_alpha(blitter, y, SkFixedFloorToInt(joinLeft),
SkFixedFloorToInt(joinRite - joinLeft),
fullAlpha, maskRow, isUsingMask);
fullAlpha, maskRow, isUsingMask, noRealBlitter, needSafeCheck);
}
if (lr > joinRite) {
int len = SkFixedCeilToInt(lr - joinRite);
if (len == 1) {
SkAlpha alpha = trapezoidToAlpha(ur - joinRite, lr - joinRite);
blit_single_alpha(blitter, y, joinRite >> 16, alpha, fullAlpha, maskRow,
isUsingMask);
isUsingMask, noRealBlitter, needSafeCheck);
} else if (len == 2) {
SkFixed first = joinRite + SK_Fixed1 - ur;
SkFixed second = lr - ur - first;
SkAlpha a1 = fullAlpha - partialTriangleToAlpha(first, rDY);
SkAlpha a2 = partialTriangleToAlpha(second, rDY);
blit_two_alphas(blitter, y, joinRite >> 16, a1, a2, fullAlpha, maskRow,
isUsingMask);
isUsingMask, noRealBlitter, needSafeCheck);
} else {
blit_aaa_trapezoid_row(blitter, y, joinRite, ur, joinRite, lr, SK_MaxS32, rDY,
fullAlpha, maskRow, isUsingMask);
fullAlpha, maskRow, isUsingMask, noRealBlitter,
needSafeCheck);
}
}
} else {
blit_aaa_trapezoid_row(blitter, y, ul, ur, ll, lr, lDY, rDY, fullAlpha, maskRow,
isUsingMask);
isUsingMask, noRealBlitter, needSafeCheck);
}
}
@ -809,7 +909,7 @@ static SkAnalyticEdge* sort_edges(SkAnalyticEdge* list[], int count, SkAnalyticE
SkTQSort(list, list + count - 1);
// now make the edges linked in sorted order
for (int i = 1; i < count; i++) {
for (int i = 1; i < count; ++i) {
list[i - 1]->fNext = list[i];
list[i]->fPrev = list[i - 1];
}
@ -899,9 +999,9 @@ static inline bool isSmoothEnough(SkAnalyticEdge* leftE, SkAnalyticEdge* riteE,
return isSmoothEnough(leftE, currE, stop_y) && isSmoothEnough(riteE, nextCurrE, stop_y);
}
static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead, AdditiveBlitter* blitter,
int start_y, int stop_y, SkFixed leftBound, SkFixed riteBound,
bool isUsingMask) {
static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead,
AdditiveBlitter* blitter, int start_y, int stop_y, SkFixed leftBound, SkFixed riteBound,
bool isUsingMask) {
validate_sort((SkAnalyticEdge*)prevHead->fNext);
SkAnalyticEdge* leftE = (SkAnalyticEdge*) prevHead->fNext;
@ -1130,13 +1230,407 @@ END_WALK:
#endif
}
void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, AdditiveBlitter* blitter,
int start_y, int stop_y, bool pathContainedInClip, bool isUsingMask,
bool forceRLE) { // forceRLE implies that SkAAClip is calling us
SkASSERT(blitter);
///////////////////////////////////////////////////////////////////////////////
// we only implemented the convex shapes yet
SkASSERT(!path.isInverseFillType() && path.isConvex());
// Unlink edge from its doubly-linked edge list.
static inline void remove_edge(SkAnalyticEdge* edge) {
edge->fPrev->fNext = edge->fNext;
edge->fNext->fPrev = edge->fPrev;
}
// Link edge into the list immediately after afterMe.
static inline void insert_edge_after(SkAnalyticEdge* edge, SkAnalyticEdge* afterMe) {
edge->fPrev = afterMe;
edge->fNext = afterMe->fNext;
afterMe->fNext->fPrev = edge;
afterMe->fNext = edge;
}
// Move edge backwards in the list until every predecessor has fX <= edge->fX,
// restoring x-sorted order after edge's fX changed.
static void backward_insert_edge_based_on_x(SkAnalyticEdge* edge) {
SkFixed x = edge->fX;
SkAnalyticEdge* prev = edge->fPrev;
// The prev->fPrev test stops the walk at the list head
// (presumably a sentinel with fPrev == nullptr -- TODO confirm).
while (prev->fPrev && prev->fX > x) {
prev = prev->fPrev;
}
// Only relink if the edge actually needs to move.
if (prev->fNext != edge) {
remove_edge(edge);
insert_edge_after(edge, prev);
}
}
// Walk backwards from prev to find the node after which an edge with the given
// x should be inserted to keep the list x-sorted.
static SkAnalyticEdge* backward_insert_start(SkAnalyticEdge* prev, SkFixed x) {
while (prev->fPrev && prev->fX > x) {
prev = prev->fPrev;
}
return prev;
}
// Lower *nextNextY to y when y lies strictly between nextY and the current
// *nextNextY, i.e. track the closest upcoming y event after nextY.
static inline void updateNextNextY(SkFixed y, SkFixed nextY, SkFixed* nextNextY) {
*nextNextY = y > nextY && y < *nextNextY ? y : *nextNextY;
}
// If edge and its predecessor will cross within one step (the predecessor's
// projected x, fX + fDX, exceeds this edge's), set *nextNextY to one snapped
// sub-pixel step past nextY so the crossing is re-sorted promptly.
static inline void checkIntersection(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY)
{
// edge->fPrev->fPrev skips the head sentinel.
if (edge->fPrev->fPrev && edge->fPrev->fX + edge->fPrev->fDX > edge->fX + edge->fDX) {
*nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy);
}
}
// Merge newly-started edges (the chain beginning at newEdge whose fUpperY <= y)
// into the x-sorted active list, updating *nextNextY with each edge's upcoming
// y events along the way.
static void insert_new_edges(SkAnalyticEdge* newEdge, SkFixed y, SkFixed* nextNextY) {
if (newEdge->fUpperY > y) {
// Not active yet; just remember when it will start.
updateNextNextY(newEdge->fUpperY, y, nextNextY);
return;
}
SkAnalyticEdge* prev = newEdge->fPrev;
if (prev->fX <= newEdge->fX) {
// Already in x-order; walk the chain doing only the y bookkeeping.
while (newEdge->fUpperY <= y) {
checkIntersection(newEdge, y, nextNextY);
updateNextNextY(newEdge->fLowerY, y, nextNextY);
newEdge = newEdge->fNext;
}
updateNextNextY(newEdge->fUpperY, y, nextNextY);
return;
}
// find first x pos to insert
SkAnalyticEdge* start = backward_insert_start(prev, newEdge->fX);
// insert the lot, fixing up the links as we go
do {
SkAnalyticEdge* next = newEdge->fNext;
// Scan forward from start for the first node whose successor has fX >= ours.
do {
if (start->fNext == newEdge) {
goto nextEdge; // already in the right place; skip the relink
}
SkAnalyticEdge* after = start->fNext;
if (after->fX >= newEdge->fX) {
break;
}
SkASSERT(start != after);
start = after;
} while (true);
remove_edge(newEdge);
insert_edge_after(newEdge, start);
nextEdge:
checkIntersection(newEdge, y, nextNextY);
updateNextNextY(newEdge->fLowerY, y, nextNextY);
start = newEdge;
newEdge = next;
} while (newEdge->fUpperY <= y);
updateNextNextY(newEdge->fUpperY, y, nextNextY);
}
// Debug-only: verify every active edge (fUpperY <= y) has consistent prev/next
// links, a non-inverted y span, and non-decreasing fX order. No-op in release.
static void validate_edges_for_y(const SkAnalyticEdge* edge, SkFixed y) {
#ifdef SK_DEBUG
while (edge->fUpperY <= y) {
SkASSERT(edge->fPrev && edge->fNext);
SkASSERT(edge->fPrev->fNext == edge);
SkASSERT(edge->fNext->fPrev == edge);
SkASSERT(edge->fUpperY <= edge->fLowerY);
// fPrev->fPrev == nullptr means fPrev is the head sentinel, which is exempt.
SkASSERT(edge->fPrev->fPrev == nullptr || edge->fPrev->fX <= edge->fX);
edge = edge->fNext;
}
#endif
}
// Return true if prev->fX, next->fX are too close in the current pixel row.
static inline bool edges_too_close(SkAnalyticEdge* prev, SkAnalyticEdge* next, SkFixed lowerY) {
// Note that even if the following test failed, the edges might still be very close to each
// other at some point within the current pixel row because of prev->fDX and next->fDX.
// However, to handle that case, we have to sacrifice more performance.
// I think the current quality is good enough (mainly by looking at Nebraska-StateSeal.svg)
// so I'll ignore fDX for performance tradeoff.
return next && prev && next->fUpperY < lowerY && prev->fX >= next->fX - SkAbs32(next->fDX);
// The following is more accurate but also slower.
// return (prev && prev->fPrev && next && next->fNext != nullptr && next->fUpperY < lowerY &&
//         prev->fX + SkAbs32(prev->fDX) >= next->fX - SkAbs32(next->fDX));
}
// This function exists for the case where the previous rite edge is removed because
// its fLowerY <= nextY
static inline bool edges_too_close(int prevRite, SkFixed ul, SkFixed ll) {
return prevRite > SkFixedFloorToInt(ul) || prevRite > SkFixedFloorToInt(ll);
}
// Flushes the trapezoid that leftE previously deferred (via saveXY) against its
// paired right edge, covering leftE->fSavedY down to lowerY, then dissolves the
// left/right pairing.
static inline void blit_saved_trapezoid(SkAnalyticEdge* leftE, SkFixed lowerY,
        SkFixed lowerLeft, SkFixed lowerRite,
        AdditiveBlitter* blitter, SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter,
        SkFixed leftClip, SkFixed rightClip) {
    SkAnalyticEdge* riteE = leftE->fRiteE;
    SkASSERT(riteE);
    SkASSERT(riteE->fNext == nullptr || leftE->fSavedY == riteE->fSavedY);
    SkASSERT(SkFixedFloorToInt(lowerY - 1) == SkFixedFloorToInt(leftE->fSavedY));

    const int y = SkFixedFloorToInt(leftE->fSavedY);
    const SkFixed rowTop = SkIntToFixed(y);
    // Computed as the difference of two f2a values (instead of
    // f2a(lowerY - leftE->fSavedY)) to eliminate cumulative error: when one
    // pixel row contains many fractional scan lines, the single-subtraction
    // form would accumulate rounding error while this form won't.
    const SkAlpha fullAlpha = f2a(lowerY - rowTop) - f2a(leftE->fSavedY - rowTop);

    const bool suppressRealBlitter = noRealBlitter ||
            (fullAlpha == 0xFF && (edges_too_close(leftE->fPrev, leftE, lowerY)
                    || edges_too_close(riteE, riteE->fNext, lowerY)));

    // fSavedDY is used (rather than fDY) because a quad or cubic edge may have
    // been updated since the trapezoid was deferred.
    blit_trapezoid_row(blitter, y,
            SkTMax(leftE->fSavedX, leftClip), SkTMin(riteE->fSavedX, rightClip),
            SkTMax(lowerLeft, leftClip), SkTMin(lowerRite, rightClip),
            leftE->fSavedDY, riteE->fSavedDY, fullAlpha, maskRow, isUsingMask,
            suppressRealBlitter,
            true);

    leftE->fRiteE = nullptr;
}
// Advances the (leftE, riteE) span from y to nextY under the deferred-blitting
// scheme: a trapezoid is saved on leftE (via saveXY/fRiteE) and only flushed
// when the pairing changes, an edge ends, or nextY lands on an integer
// scanline. This lets several fractional scan lines within one pixel row be
// merged into a single blit.
static inline void deferred_blit(SkAnalyticEdge* leftE, SkAnalyticEdge* riteE,
        SkFixed left, SkFixed leftDY, // don't save leftE->fX/fDY as they may have been updated
        SkFixed y, SkFixed nextY, bool isIntegralNextY, bool leftEnds, bool riteEnds,
        AdditiveBlitter* blitter, SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter,
        SkFixed leftClip, SkFixed rightClip, int yShift) {
    if (leftE->fRiteE && leftE->fRiteE != riteE) {
        // leftE's right edge changed. Blit the saved trapezoid.
        SkASSERT(leftE->fRiteE->fNext == nullptr || leftE->fRiteE->fY == y);
        blit_saved_trapezoid(leftE, y, left, leftE->fRiteE->fX,
                blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
    }
    if (!leftE->fRiteE) {
        // Nothing is deferred on leftE yet (or it was just flushed above):
        // save and defer blitting the trapezoid starting at y.
        SkASSERT(riteE->fRiteE == nullptr);
        SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
        SkASSERT(riteE->fNext == nullptr || riteE->fY == y);
        leftE->saveXY(left, y, leftDY);
        riteE->saveXY(riteE->fX, y, riteE->fDY);
        leftE->fRiteE = riteE;
    }
    SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
    // leftE was already advanced by the caller; advance the right edge to match.
    riteE->goY(nextY, yShift);
    // Always blit when edges end or nextY is integral
    if (isIntegralNextY || leftEnds || riteEnds) {
        blit_saved_trapezoid(leftE, nextY, leftE->fX, riteE->fX,
                blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
    }
}
// Scan converter for general (possibly concave or inverse-filled) paths.
// Walks the sorted, doubly-linked edge list between the prevHead/nextTail
// sentinels from start_y to stop_y, advancing by at most one pixel row per
// iteration (and by quarter/half rows when edges start, end, or cross), and
// blits one trapezoid per in-interval span per fractional scan line.
//
// prevHead/nextTail: sentinel edges bracketing the sorted active list.
// fillType:          winding vs. even-odd, possibly inverse.
// leftClip/rightClip: horizontal clip bounds in SkFixed.
// isUsingMask:       blitter is a MaskAdditiveBlitter (rows fetched directly).
// forceRLE:          SkAAClip is driving us; suppress real-blitter shortcuts.
// useDeferred:       merge fractional scan lines via deferred_blit.
// skipIntersect:     skip edge-intersection checks (many points already give
//                    enough fractional scan lines).
static void aaa_walk_edges(SkAnalyticEdge* prevHead, SkAnalyticEdge* nextTail,
        SkPath::FillType fillType, AdditiveBlitter* blitter, int start_y, int stop_y,
        SkFixed leftClip, SkFixed rightClip, bool isUsingMask, bool forceRLE, bool useDeferred,
        bool skipIntersect) {
    prevHead->fX = prevHead->fUpperX = leftClip;
    nextTail->fX = nextTail->fUpperX = rightClip;
    // Start at the first edge's top, but never above the requested start_y.
    SkFixed y = SkTMax(prevHead->fNext->fUpperY, SkIntToFixed(start_y));
    SkFixed nextNextY = SK_MaxS32;

    {
        // Advance every edge that starts at or above y down to y, and find the
        // next y at which the active edge set changes.
        SkAnalyticEdge* edge;
        for(edge = prevHead->fNext; edge->fUpperY <= y; edge = edge->fNext) {
            edge->goY(y);
            updateNextNextY(edge->fLowerY, y, &nextNextY);
        }
        updateNextNextY(edge->fUpperY, y, &nextNextY);
    }

    // returns 1 for evenodd, -1 for winding, regardless of inverse-ness
    int windingMask = (fillType & 1) ? 1 : -1;
    bool isInverse = SkPath::IsInverseFillType(fillType);

    // For inverse fills, the region above the first edge is fully covered:
    // blit the whole rows above y, plus the partial row containing y.
    if (isInverse && SkIntToFixed(start_y) != y) {
        int width = SkFixedFloorToInt(rightClip - leftClip);
        if (SkFixedFloorToInt(y) != start_y) {
            blitter->getRealBlitter()->blitRect(SkFixedFloorToInt(leftClip), start_y,
                    width, SkFixedFloorToInt(y) - start_y);
            start_y = SkFixedFloorToInt(y);
        }
        SkAlpha* maskRow = isUsingMask ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(start_y)
                                       : nullptr;
        blit_full_alpha(blitter, start_y, SkFixedFloorToInt(leftClip), width,
                f2a(y - SkIntToFixed(start_y)), maskRow, isUsingMask, false, false);
    }

    while (true) {
        int w = 0;                      // accumulated winding at the sweep position
        bool in_interval = isInverse;   // inverse fills start "inside" at the left clip
        SkFixed prevX = prevHead->fX;
        // Next stop: either the next edge event or the next integer scanline.
        SkFixed nextY = SkTMin(nextNextY, SkFixedCeilToFixed(y + 1));
        bool isIntegralNextY = (nextY & (SK_Fixed1 - 1)) == 0;
        SkAnalyticEdge* currE = prevHead->fNext;
        SkAnalyticEdge* leftE = prevHead;
        SkFixed left = leftClip;
        SkFixed leftDY = 0;
        bool leftEnds = false;
        int prevRite = SkFixedFloorToInt(leftClip);

        nextNextY = SK_MaxS32;

        // Snap the step to a quarter/half/full scanline so goY can use a shift.
        SkASSERT((nextY & ((SK_Fixed1 >> 2) - 1)) == 0);
        int yShift = 0;
        if ((nextY - y) & (SK_Fixed1 >> 2)) {
            yShift = 2;
            nextY = y + (SK_Fixed1 >> 2);
        } else if ((nextY - y) & (SK_Fixed1 >> 1)) {
            yShift = 1;
            SkASSERT(nextY == y + (SK_Fixed1 >> 1));
        }
        SkAlpha fullAlpha = f2a(nextY - y);

        // If we're using mask blitter, we advance the mask row in this function
        // to save some "if" condition checks.
        SkAlpha* maskRow = nullptr;
        if (isUsingMask) {
            maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(SkFixedFloorToInt(y));
        }

        SkASSERT(currE->fPrev == prevHead);
        validate_edges_for_y(currE, y);

        // Even if next - y == SK_Fixed1, we can still break the left-to-right order requirement
        // of the SKAAClip: |\| (two trapezoids with overlapping middle wedges)
        bool noRealBlitter = forceRLE; // forceRLE && (nextY - y != SK_Fixed1);

        // Sweep left to right over every edge active at y.
        while (currE->fUpperY <= y) {
            SkASSERT(currE->fLowerY >= nextY);
            SkASSERT(currE->fY == y);

            w += currE->fWinding;
            bool prev_in_interval = in_interval;
            in_interval = !(w & windingMask) == isInverse;

            bool isLeft = in_interval && !prev_in_interval;
            bool isRite = !in_interval && prev_in_interval;
            bool currEnds = currE->fLowerY == nextY;

            if (useDeferred) {
                if (currE->fRiteE && !isLeft) {
                    // currE is a left edge previously, but now it's not.
                    // Blit the trapezoid between fSavedY and y.
                    SkASSERT(currE->fRiteE->fY == y);
                    blit_saved_trapezoid(currE, y, currE->fX, currE->fRiteE->fX,
                            blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
                }
                if (leftE->fRiteE == currE && !isRite) {
                    // currE is a right edge previously, but now it's not.
                    // Moreover, its corresponding leftE doesn't change (otherwise we'll handle it
                    // in the previous if clause). Hence we blit the trapezoid.
                    blit_saved_trapezoid(leftE, y, left, currE->fX,
                            blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
                }
            }

            if (isRite) {
                // currE closes the current in-interval span: emit its trapezoid.
                if (useDeferred) {
                    deferred_blit(leftE, currE, left, leftDY, y, nextY, isIntegralNextY,
                            leftEnds, currEnds, blitter, maskRow, isUsingMask, noRealBlitter,
                            leftClip, rightClip, yShift);
                } else {
                    SkFixed rite = currE->fX;
                    currE->goY(nextY, yShift);
                    // Clamp each side toward the clip it is drifting past.
                    if (leftE->fDX < 0) {
                        left = SkTMax(leftClip, left);
                        leftE->fX = SkTMax(leftClip, leftE->fX);
                    } else {
                        left = SkTMin(rightClip, left);
                        leftE->fX = SkTMin(rightClip, leftE->fX);
                    }
                    if (currE->fDX < 0) {
                        rite = SkTMax(leftClip, rite);
                        currE->fX = SkTMax(leftClip, currE->fX);
                    } else {
                        rite = SkTMin(rightClip, rite);
                        currE->fX = SkTMin(rightClip, currE->fX);
                    }
                    blit_trapezoid_row(blitter, y >> 16, left, rite, leftE->fX, currE->fX,
                            leftDY, currE->fDY, fullAlpha, maskRow, isUsingMask,
                            noRealBlitter || (fullAlpha == 0xFF && (
                                    edges_too_close(prevRite, left, leftE->fX) ||
                                    edges_too_close(currE, currE->fNext, nextY)
                            )),
                            true);
                    prevRite = SkFixedCeilToInt(SkTMax(rite, currE->fX));
                }
            } else {
                if (isLeft) {
                    // currE opens a new in-interval span.
                    left = currE->fX;
                    leftDY = currE->fDY;
                    leftE = currE;
                    leftEnds = leftE->fLowerY == nextY;
                }
                currE->goY(nextY, yShift);
            }

            SkAnalyticEdge* next = currE->fNext;
            SkFixed newX;

            // If the edge segment ends by nextY, chain to the next quad/cubic
            // segment of the same curve (keeping x/y continuous) until one
            // extends past nextY or the curve is exhausted.
            while (currE->fLowerY <= nextY) {
                if (currE->fCurveCount < 0) {
                    SkAnalyticCubicEdge* cubicEdge = (SkAnalyticCubicEdge*)currE;
                    cubicEdge->keepContinuous();
                    if (!cubicEdge->updateCubic()) {
                        break;
                    }
                } else if (currE->fCurveCount > 0) {
                    SkAnalyticQuadraticEdge* quadEdge = (SkAnalyticQuadraticEdge*)currE;
                    quadEdge->keepContinuous();
                    if (!quadEdge->updateQuadratic()) {
                        break;
                    }
                } else {
                    break;
                }
            }
            SkASSERT(currE->fY == nextY);

            if (currE->fLowerY <= nextY) {
                // Fully consumed: drop it from the active list.
                remove_edge(currE);
            } else {
                updateNextNextY(currE->fLowerY, nextY, &nextNextY);
                newX = currE->fX;
                SkASSERT(currE->fLowerY > nextY);
                if (newX < prevX) { // ripple currE backwards until it is x-sorted
                    // If the crossing edge is a right edge, blit the saved trapezoid.
                    if (leftE->fRiteE == currE && useDeferred) {
                        SkASSERT(leftE->fY == nextY && currE->fY == nextY);
                        blit_saved_trapezoid(leftE, nextY, leftE->fX, currE->fX,
                                blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
                    }
                    backward_insert_edge_based_on_x(currE);
                } else {
                    prevX = newX;
                }
                if (!skipIntersect) {
                    checkIntersection(currE, nextY, &nextNextY);
                }
            }

            currE = next;
            SkASSERT(currE);
        }

        // was our right-edge culled away?
        if (in_interval) {
            if (useDeferred) {
                deferred_blit(leftE, nextTail, left, leftDY, y, nextY, isIntegralNextY,
                        leftEnds, false, blitter, maskRow, isUsingMask, noRealBlitter,
                        leftClip, rightClip, yShift);
            } else {
                blit_trapezoid_row(blitter, y >> 16, left, rightClip, leftE->fX, rightClip,
                        leftDY, 0, fullAlpha, maskRow, isUsingMask,
                        noRealBlitter ||
                                (fullAlpha == 0xFF && edges_too_close(leftE->fPrev, leftE, nextY)),
                        true);
            }
        }

        if (forceRLE) {
            ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
        }

        y = nextY;
        if (y >= SkIntToFixed(stop_y)) {
            break;
        }

        // now currE points to the first edge with a fUpperY larger than the previous y
        insert_new_edges(currE, y, &nextNextY);
    }
}
static void aaa_fill_path(const SkPath& path, const SkIRect& clipRect,
AdditiveBlitter* blitter, int start_y, int stop_y, bool pathContainedInClip,
bool isUsingMask, bool forceRLE) { // forceRLE implies that SkAAClip is calling us
SkASSERT(blitter);
SkEdgeBuilder builder;
@ -1166,7 +1660,8 @@ void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, AdditiveBlitter*
rect.fBottom = stop_y;
}
if (!rect.isEmpty()) {
blitter->blitRect(rect.fLeft, rect.fTop, rect.width(), rect.height());
blitter->getRealBlitter()->blitRect(rect.fLeft, rect.fTop,
rect.width(), rect.height());
}
}
return;
@ -1176,6 +1671,7 @@ void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, AdditiveBlitter*
// this returns the first and last edge after they're sorted into a dlink list
SkAnalyticEdge* edge = sort_edges(list, count, &last);
headEdge.fRiteE = nullptr;
headEdge.fPrev = nullptr;
headEdge.fNext = edge;
headEdge.fUpperY = headEdge.fLowerY = SK_MinS32;
@ -1185,13 +1681,14 @@ void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, AdditiveBlitter*
headEdge.fUpperX = SK_MinS32;
edge->fPrev = &headEdge;
tailEdge.fRiteE = nullptr;
tailEdge.fPrev = last;
tailEdge.fNext = nullptr;
tailEdge.fUpperY = tailEdge.fLowerY = SK_MaxS32;
headEdge.fX = SK_MaxS32;
headEdge.fDX = 0;
headEdge.fDY = SK_MaxS32;
headEdge.fUpperX = SK_MaxS32;
tailEdge.fX = SK_MaxS32;
tailEdge.fDX = 0;
tailEdge.fDY = SK_MaxS32;
tailEdge.fUpperX = SK_MaxS32;
last->fNext = &tailEdge;
// now edge is the head of the sorted linklist
@ -1203,22 +1700,32 @@ void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, AdditiveBlitter*
stop_y = clipRect.fBottom;
}
SkFixed leftBound = SkIntToFixed(rect.fLeft);
SkFixed rightBound = SkIntToFixed(rect.fRight);
if (isUsingMask) {
// If we're using mask, then we have to limit the bound within the path bounds.
// Otherwise, the edge drift may access an invalid address inside the mask.
SkIRect ir;
path.getBounds().roundOut(&ir);
leftBound = SkTMax(leftBound, SkIntToFixed(ir.fLeft));
rightBound = SkTMin(rightBound, SkIntToFixed(ir.fRight));
}
if (!path.isInverseFillType() && path.isConvex()) {
SkASSERT(count >= 2); // convex walker does not handle missing right edges
SkFixed leftBound = SkIntToFixed(rect.fLeft);
SkFixed rightBound = SkIntToFixed(rect.fRight);
if (isUsingMask) {
// If we're using mask, then we have to limit the bound within the path bounds.
// Otherwise, the edge drift may access an invalid address inside the mask.
SkIRect ir;
path.getBounds().roundOut(&ir);
leftBound = SkTMax(leftBound, SkIntToFixed(ir.fLeft));
rightBound = SkTMin(rightBound, SkIntToFixed(ir.fRight));
}
aaa_walk_convex_edges(&headEdge, blitter, start_y, stop_y,
leftBound, rightBound, isUsingMask);
} else {
SkFAIL("Concave AAA is not yet implemented!");
// Only use deferred blitting if there are many edges.
bool useDeferred = count >
(SkFixedFloorToInt(tailEdge.fPrev->fLowerY - headEdge.fNext->fUpperY) + 1) * 4;
// We skip intersection computation if there are many points which probably already
// give us enough fractional scan lines.
bool skipIntersect = path.countPoints() > (stop_y - start_y) / 2;
aaa_walk_edges(&headEdge, &tailEdge, path.getFillType(), blitter, start_y, stop_y,
leftBound, rightBound, isUsingMask, forceRLE, useDeferred, skipIntersect);
}
}
@ -1268,11 +1775,13 @@ void SkScan::AAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter
if (origClip.isEmpty()) {
return;
}
#ifdef SK_SUPPORT_LEGACY_AAA
if (path.isInverseFillType() || !path.isConvex()) {
// Fall back as we only implemented the algorithm for convex shapes yet.
SkScan::AntiFillPath(path, origClip, blitter, forceRLE);
return;
}
#endif
const bool isInverse = path.isInverseFillType();
SkIRect ir;
@ -1337,9 +1846,7 @@ void SkScan::AAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter
blitter = clipper.getBlitter();
if (isInverse) {
// Currently, we use the old path to render the inverse path,
// so we don't need this.
// sk_blit_above(blitter, ir, *clipRgn);
sk_blit_above(blitter, ir, *clipRgn);
}
SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
@ -1348,16 +1855,18 @@ void SkScan::AAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter
MaskAdditiveBlitter additiveBlitter(blitter, ir, *clipRgn, isInverse);
aaa_fill_path(path, clipRgn->getBounds(), &additiveBlitter, ir.fTop, ir.fBottom,
clipRect == nullptr, true, forceRLE);
} else {
} else if (!isInverse && path.isConvex()) {
RunBasedAdditiveBlitter additiveBlitter(blitter, ir, *clipRgn, isInverse);
aaa_fill_path(path, clipRgn->getBounds(), &additiveBlitter, ir.fTop, ir.fBottom,
clipRect == nullptr, false, forceRLE);
} else {
SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, *clipRgn, isInverse);
aaa_fill_path(path, clipRgn->getBounds(), &additiveBlitter, ir.fTop, ir.fBottom,
clipRect == nullptr, false, forceRLE);
}
if (isInverse) {
// Currently, we use the old path to render the inverse path,
// so we don't need this.
// sk_blit_below(blitter, ir, *clipRgn);
sk_blit_below(blitter, ir, *clipRgn);
}
}

View File

@ -753,9 +753,27 @@ void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip,
}
}
// Heuristic deciding whether Analytic AA is likely to help for this path;
// if not, AntiFillPath falls back to supersampling.
static bool suitableForAAA(const SkPath& path) {
#ifdef SK_SUPPORT_LEGACY_AAA
    // Legacy guard flag: always answer yes so this heuristic cannot change
    // existing (e.g. Chromium layout-test) results while the flag is set.
    return true;
#endif
    if (gSkForceAnalyticAA.load()) {
        // Global override: force Analytic AA regardless of path complexity.
        return true;
    }
    const SkRect& bounds = path.getBounds();
    // When the path has so many points compared to the size of its bounds/resolution,
    // it indicates that the path is not quite smooth in the current resolution:
    // the expected number of turning points in every pixel row/column is significantly greater
    // than zero. Hence Analytic AA is not likely to produce visible quality improvements,
    // and Analytic AA might be slower than supersampling.
    return path.countPoints() < SkTMax(bounds.width(), bounds.height()) / 2 - 10;
}
void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
SkBlitter* blitter) {
if (gSkUseAnalyticAA.load()) {
// Do not use AAA if path is too complicated:
// there won't be any speedup or significant visual improvement.
if (gSkUseAnalyticAA.load() && suitableForAAA(path)) {
SkScan::AAAFillPath(path, clip, blitter);
return;
}

View File

@ -68,6 +68,10 @@ DEFINE_bool2(pre_log, p, false, "Log before running each test. May be incomprehe
DEFINE_bool(analyticAA, true, "If false, disable analytic anti-aliasing");

// Bug fix: the adjacent string literals below are concatenated by the
// compiler, and the original text lacked a space after "complicated",
// producing "...a path complicatedif its number...". The trailing space
// restores readable help output.
DEFINE_bool(forceAnalyticAA, false, "Force analytic anti-aliasing even if the path is complicated: "
            "whether it's concave or convex, we consider a path complicated "
            "if its number of points is comparable to its resolution.");
bool CollectImages(SkCommandLineFlags::StringArray images, SkTArray<SkString>* output) {
SkASSERT(output);

View File

@ -33,6 +33,7 @@ DECLARE_bool(veryVerbose);
DECLARE_string(writePath);
DECLARE_bool(pre_log);
DECLARE_bool(analyticAA);
DECLARE_bool(forceAnalyticAA);
DECLARE_string(key);
DECLARE_string(properties);