/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkScanPriv.h"

#include "SkAntiRun.h"
#include "SkBlitter.h"
#include "SkCoverageDelta.h"
#include "SkMatrix.h"
#include "SkPath.h"
#include "SkPathPriv.h"
#include "SkRegion.h"
#include "SkTo.h"

#define SHIFT SK_SUPERSAMPLE_SHIFT
#define SCALE (1 << SHIFT)
#define MASK  (SCALE - 1)

/** @file
    We have two techniques for capturing the output of the supersampler:
    - SUPERMASK, which records a large mask-bitmap
        this is often faster for small, complex objects
    - RLE, which records a rle-encoded scanline
        this is often faster for large objects with big spans

    These blitters use two coordinate systems:
    - destination coordinates, scale equal to the output - often
        abbreviated with 'i' or 'I' in variable names
    - supersampled coordinates, scale equal to the output * SCALE
 */
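// Illustrative sketch of the two coordinate systems (assuming SHIFT == 2, so
// SCALE == 4 and MASK == 3): destination column 10 covers supersampled columns
// 40..43; a supersampled x of 47 maps back to destination column
// 47 >> SHIFT == 11, and 47 & MASK == 3 is its sub-pixel offset in that column.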

//#define FORCE_SUPERMASK
//#define FORCE_RLE

///////////////////////////////////////////////////////////////////////////////

/// Base class for a single-pass supersampled blitter.
class BaseSuperBlitter : public SkBlitter {
public:
    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkIRect& clipBounds, bool isInverse);

    /// Must be explicitly defined on subclasses.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
                           const int16_t runs[]) override {
        SkDEBUGFAIL("How did I get here?");
    }

    /// May not be called on BaseSuperBlitter because it blits out of order.
    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("How did I get here?");
    }

protected:
    SkBlitter*  fRealBlitter;
    /// Current y coordinate, in destination coordinates.
    int         fCurrIY;
    /// Widest row of region to be blitted, in destination coordinates.
    int         fWidth;
    /// Leftmost x coordinate in any row, in destination coordinates.
    int         fLeft;
    /// Leftmost x coordinate in any row, in supersampled coordinates.
    int         fSuperLeft;

    SkDEBUGCODE(int fCurrX;)
    /// Current y coordinate in supersampled coordinates.
    int fCurrY;
    /// Initial y coordinate (top of bounds).
    int fTop;

    SkIRect fSectBounds;
};

BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlit, const SkIRect& ir,
                                   const SkIRect& clipBounds, bool isInverse) {
    fRealBlitter = realBlit;

    SkIRect sectBounds;
    if (isInverse) {
        // We use the clip bounds instead of the ir, since we may be asked to
        // draw outside of the rect when we're an inverse filltype.
        sectBounds = clipBounds;
    } else {
        if (!sectBounds.intersect(ir, clipBounds)) {
            sectBounds.setEmpty();
        }
    }

    const int left = sectBounds.left();
    const int right = sectBounds.right();

    fLeft = left;
    fSuperLeft = SkLeftShift(left, SHIFT);
    fWidth = right - left;
    fTop = sectBounds.top();
    fCurrIY = fTop - 1;
    fCurrY = SkLeftShift(fTop, SHIFT) - 1;

    SkDEBUGCODE(fCurrX = -1;)
}

/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
                 bool isInverse);

    ~SuperBlitter() override {
        this->flush();
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    void blitH(int x, int y, int width) override;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    void blitRect(int x, int y, int width, int height) override;

private:
    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int         fRunsToBuffer;
    void*       fRunsBuffer;
    int         fCurrentRun;
    SkAlphaRuns fRuns;

    // extra one to store the zero at the end
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }
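    // Rough sketch of one buffer slot (illustrative values only, assuming fWidth == 100):
    // getRunsSz() == (101 + 51) * sizeof(int16_t) == 304 bytes, of which the first
    // 2 * (fWidth + 1) bytes hold the int16_t run counts and the remaining bytes hold
    // the SkAlpha values that advanceRuns() points fRuns.fAlpha at.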

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(
            reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    int fOffsetX;
};

SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
                           bool isInverse)
        : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
{
    fRunsToBuffer = realBlitter->requestRowsPreserved();
    fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
    fCurrentRun = -1;

    this->advanceRuns();

    fOffsetX = 0;
}

void SuperBlitter::flush() {
    if (fCurrIY >= fTop) {

        SkASSERT(fCurrentRun < fRunsToBuffer);
        if (!fRuns.empty()) {
            // SkDEBUGCODE(fRuns.dump();)
            fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
            this->advanceRuns();
            fOffsetX = 0;
        }

        fCurrIY = fTop - 1;
        SkDEBUGCODE(fCurrX = -1;)
    }
}

/** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
    *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)]
    to produce a final value in [0, 255] and handles clamping 256->255
    itself, with the same (alpha - (alpha >> 8)) correction as
    coverage_to_exact_alpha().
*/
static inline int coverage_to_partial_alpha(int aa) {
    aa <<= 8 - 2*SHIFT;
    return aa;
}
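// Worked example (assuming SHIFT == 2, so SCALE == 4): one supersampled row can
// cover at most SCALE == 4 sub-pixels of a destination pixel, so aa <= 4; the
// shift by 8 - 2*SHIFT == 4 maps that to at most 64 == 256/SCALE. Accumulating
// SCALE such rows can reach 256, which SkAlphaRuns then clamps down to 255.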

/** coverage_to_exact_alpha() is being used by our blitter, which wants
    a final value in [0, 255].
*/
static inline int coverage_to_exact_alpha(int aa) {
    int alpha = (256 >> SHIFT) * aa;
    // clamp 256->255
    return alpha - (alpha >> 8);
}
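// Worked example (assuming SHIFT == 2): full coverage aa == SCALE == 4 gives
// (256 >> 2) * 4 == 256, and the subtract maps that to 255; half coverage
// aa == 2 gives 128, which is returned unchanged.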

void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    int iy = y >> SHIFT;
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        fOffsetX = 0;
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

    if (n < 0) {
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            n += 1;
        } else {
            fb = SCALE - fb;
        }
    }

    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}

#if 0 // UNUSED
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    run[0] = 0;
}
#endif

void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            xrite = SCALE;
            irite--;
        }

        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH()
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}

///////////////////////////////////////////////////////////////////////////////

/// Masked supersampling antialiased blitter.
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect&, bool isInverse);
    ~MaskSuperBlitter() override {
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    void blitH(int x, int y, int width) override;

    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask      fMask;
    SkIRect     fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t    fStorage[(kMAX_STORAGE >> 2) + 1];
};

MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                                   const SkIRect& clipBounds, bool isInverse)
    : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
{
    SkASSERT(CanHandleRect(ir));
    SkASSERT(!isInverse);

    fMask.fImage    = (uint8_t*)fStorage;
    fMask.fBounds   = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat   = SkMask::kA8_Format;

    fClipRect = ir;
    if (!fClipRect.intersect(clipBounds)) {
        SkASSERT(0);
        fClipRect.setEmpty();
    }

    // For valgrind, write 1 extra byte at the end so we don't read
    // uninitialized memory. See comment in add_aa_span and fStorage[].
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
}

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
    /* I should be able to just add alpha[x] + startAlpha.
        However, if the trailing edge of the previous span and the leading
        edge of the current span round to the same super-sampled x value,
        I might overflow to 256 with this add, hence the funny subtract.
    */
    unsigned tmp = *alpha + startAlpha;
    SkASSERT(tmp <= 256);
    *alpha = SkToU8(tmp - (tmp >> 8));
}

static inline uint32_t quadplicate_byte(U8CPU value) {
    uint32_t pair = (value << 8) | value;
    return (pair << 16) | pair;
}
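// For example, quadplicate_byte(0x3F) == 0x3F3F3F3F, which lets the quad loop in
// add_aa_span() below add maxValue to four alpha bytes with a single 32-bit add,
// relying on the caller keeping each byte at or below 255 so nothing carries
// across byte boundaries.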

// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
// only ever call us with at most enough to hit 256 (never larger), so it is
// enough to just subtract the high-bit. Actually clamping with a branch would
// be slower (e.g. if (tmp > 255) tmp = 255;)
//
static inline void saturated_add(uint8_t* ptr, U8CPU add) {
    unsigned tmp = *ptr + add;
    SkASSERT(tmp <= 256);
    *ptr = SkToU8(tmp - (tmp >> 8));
}
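// For example, if *ptr is 192 and add is 64, tmp is 256, and subtracting
// (tmp >> 8) == 1 stores 255 rather than wrapping around to 0.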

// minimum count before we want to setup an inner loop, adding 4-at-a-time
#define MIN_COUNT_FOR_QUAD_LOOP 16

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (reinterpret_cast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;
        alpha = reinterpret_cast<uint8_t*>(qptr);
        // fall through to the following while-loop
    }

    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }

    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra space
    // (see the + 1 comment in fStorage[]).
    saturated_add(alpha, stopAlpha);
}

void MaskSuperBlitter::blitH(int x, int y, int width) {
    int iy = (y >> SHIFT);

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does. Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    x -= SkLeftShift(fMask.fBounds.fLeft, SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

    if (n < 0) {
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}

///////////////////////////////////////////////////////////////////////////////

static SkIRect safeRoundOut(const SkRect& src) {
    // roundOut will pin huge floats to max/min int
    SkIRect dst = src.roundOut();

    // intersect with a smaller huge rect, so the rect will not be considered empty for being
    // too large. e.g. { -SK_MaxS32 ... SK_MaxS32 } is considered empty because its width
    // exceeds signed 32bit.
    const int32_t limit = SK_MaxS32 >> SK_SUPERSAMPLE_SHIFT;
    (void)dst.intersect({ -limit, -limit, limit, limit});

    return dst;
}

constexpr int kSampleSize = 8;
constexpr SkScalar kComplexityThreshold = 0.25;
constexpr SkScalar kSmallCubicThreshold = 16;

static inline SkScalar sqr(SkScalar x) {
    return x * x;
}

static void ComputeComplexity(const SkPath& path, SkScalar& avgLength, SkScalar& complexity) {
    int n = path.countPoints();
    if (n < kSampleSize) {
        // set to invalid value to indicate that we failed to compute
        avgLength = -1;
        complexity = -1;
        return;
    }

    SkScalar sumLength = 0;
    SkPoint lastPoint = path.getPoint(0);
    for(int i = 1; i < kSampleSize; ++i) {
        SkPoint point = path.getPoint(i);
        sumLength += SkPoint::Distance(lastPoint, point);
        lastPoint = point;
    }
    avgLength = sumLength / (kSampleSize - 1);

    SkScalar diagonalSqr = sqr(path.getBounds().width()) + sqr(path.getBounds().height());

    // If the path consists of random line segments, the number of intersections should be
    // proportional to this.
    SkScalar intersections = sqr(n) * sqr(avgLength) / diagonalSqr;

    // The number of intersections per scanline should be proportional to this number.
    complexity = intersections / path.getBounds().height();
}
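// Rough numeric sketch (illustrative values only): a path with n == 100 points
// whose sampled segments average length 2 inside a 100x100 bound gives
// intersections == 100*100 * 2*2 / (100*100 + 100*100) == 2, so
// complexity == 2 / 100 == 0.02, well under kComplexityThreshold (0.25).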

static bool ShouldUseDAA(const SkPath& path, SkScalar avgLength, SkScalar complexity) {
    if (gSkForceDeltaAA) {
        return true;
    }
    if (!gSkUseDeltaAA || SkPathPriv::IsBadForDAA(path)) {
        return false;
    }

#ifdef SK_SUPPORT_LEGACY_AA_CHOICE
    const SkRect& bounds = path.getBounds();
    return !path.isConvex() && path.countPoints() >= SkTMax(bounds.width(), bounds.height()) / 8;
#else
    if (avgLength < 0 || complexity < 0 || path.getBounds().isEmpty() || path.isConvex()) {
        return false;
    }
Reland "Use DAA for small cubics and non-convex paths that fit into a mask"
This reverts commit 107d53971dab5245e88885f095aef52fd52942d3.
Reason for revert: Ready to rebaseline
Original change's description:
> Revert "Use DAA for small cubics and non-convex paths that fit into a mask"
>
> This reverts commit 1875e053845c4d377a0f64f7233bdb9dc00bdce1.
>
> Reason for revert:
>
> I don't think there's anything wrong with this, but it looks like Yuqian is out today and there is a large number of GM, SKP, and SVG images to triage from this. This is just a triage-by-revert... should be fine to reland when you're ready to triage.
>
> Original change's description:
> > Use DAA for small cubics and non-convex paths that fit into a mask
> >
> > I forgot to benchmark svgs and it turns out that DAA is specifically
> > good for the small cubics and small non-convex paths in svgs. This
> > should make our svg performance fast again:
> >
> > 2.84% faster in svgparse_Florida-StateSeal.svg_1
> > 2.90% faster in svgparse_NewYork-StateSeal.svg_1
> > 2.95% faster in svgparse_Seal_of_Texas.svg_1
> > 3.05% faster in car.svg_1
> > 3.53% faster in svgparse_Vermont_state_seal.svg_1
> > 3.68% faster in svgparse_Wyoming-StateSeal.svg_1
> > 4.88% faster in svgparse_Minnesota-StateSeal.svg_1
> > 5.22% faster in svgparse_NewMexico-StateSeal.svg_1
> > 6.49% faster in svgparse_fsm.svg_1
> >
> >
> > Bug: skia:
> > Change-Id: Ia149944443d72c12c3dda178cb5ebc89d6d0bf18
> > Reviewed-on: https://skia-review.googlesource.com/116185
> > Reviewed-by: Cary Clark <caryclark@google.com>
> > Commit-Queue: Yuqian Li <liyuqian@google.com>
>
> TBR=caryclark@google.com,liyuqian@google.com,reed@google.com,caryclark@skia.org
>
> # Not skipping CQ checks because original CL landed > 1 day ago.
>
> Bug: skia:
> Change-Id: I232f34dcea1cdabef768879a261fe6796f3e4a79
> Reviewed-on: https://skia-review.googlesource.com/116400
> Reviewed-by: Mike Klein <mtklein@google.com>
> Commit-Queue: Mike Klein <mtklein@google.com>
TBR=mtklein@google.com,caryclark@google.com,liyuqian@google.com,reed@google.com,caryclark@skia.org
Change-Id: I6a413e3a2f1ce9182f9e209f6e2654a602170378
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: skia:
Reviewed-on: https://skia-review.googlesource.com/116620
Commit-Queue: Yuqian Li <liyuqian@google.com>
Reviewed-by: Yuqian Li <liyuqian@google.com>
2018-03-27 12:40:18 +00:00
|
|
|

    // DAA is fast with mask
    if (SkCoverageDeltaMask::CanHandle(safeRoundOut(path.getBounds()))) {
        return true;
    }

    // DAA is much faster on small cubics (since we don't have to chop them).
    // If there are many cubics, and the average length is small, use DAA.
    if (avgLength < kSmallCubicThreshold) {
        uint8_t sampleVerbs[kSampleSize];
        int verbCount = SkTMin(kSampleSize, path.getVerbs(sampleVerbs, kSampleSize));
        int cubicCount = 0;
        for(int i = 0; i < verbCount; ++i) {
            cubicCount += (sampleVerbs[i] == SkPath::kCubic_Verb);
        }
        if (cubicCount * 2 >= verbCount) {
            return true;
        }
    }

    return complexity >= kComplexityThreshold;
#endif
}

static bool ShouldUseAAA(const SkPath& path, SkScalar avgLength, SkScalar complexity) {
    if (gSkForceAnalyticAA) {
        return true;
    }
    if (!gSkUseAnalyticAA) {
        return false;
    }
    if (path.isRect(nullptr)) {
        return true;
    }

#ifdef SK_SUPPORT_LEGACY_AAA_CHOICE
    const SkRect& bounds = path.getBounds();
    // When the path has many points relative to the size of its bounds/resolution, the path
    // is not quite smooth at the current resolution: the expected number of turning points in
    // every pixel row/column is significantly greater than zero. Hence Analytic AA is not
    // likely to produce visible quality improvements, and Analytic AA might be slower than
    // supersampling.
    return path.countPoints() < SkTMax(bounds.width(), bounds.height()) / 2 - 10;
#else
    // We will use AAA if the number of verbs < kSampleSize and therefore complexity < 0
    return complexity < kComplexityThreshold;
#endif
}

void SkScan::SAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
                         const SkIRect& clipBounds, bool forceRLE) {
    bool containedInClip = clipBounds.contains(ir);
    bool isInverse = path.isInverseFillType();

    // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
    // if we're an inverse filltype
    if (!isInverse && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
        MaskSuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
        SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
        sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
    } else {
        SuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
        sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
    }
}

static int overflows_short_shift(int value, int shift) {
    const int s = 16 + shift;
    return (SkLeftShift(value, s) >> s) - value;
}
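// For example, with shift == SHIFT == 2 we have s == 18, so only values in
// [-8192, 8191] survive the shift round-trip: 8191 returns 0 (no overflow)
// while 8192 does not, matching the asserts in rect_overflows_short_shift().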

/**
  Would any of the coordinates of this rectangle not fit in a short,
  when left-shifted by shift?
*/
static int rect_overflows_short_shift(SkIRect rect, int shift) {
    SkASSERT(!overflows_short_shift(8191, shift));
    SkASSERT(overflows_short_shift(8192, shift));
    SkASSERT(!overflows_short_shift(32767, 0));
    SkASSERT(overflows_short_shift(32768, 0));

    // Since we expect these to succeed, we bit-or together
    // for a tiny extra bit of speed.
    return overflows_short_shift(rect.fLeft, shift) |
           overflows_short_shift(rect.fRight, shift) |
           overflows_short_shift(rect.fTop, shift) |
           overflows_short_shift(rect.fBottom, shift);
}

void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE, SkDAARecord* daaRecord) {
    if (origClip.isEmpty()) {
        SkDAARecord::SetEmpty(daaRecord);
        return;
    }

    const bool isInverse = path.isInverseFillType();
    SkIRect ir = safeRoundOut(path.getBounds());
    if (ir.isEmpty()) {
        if (isInverse) {
            blitter->blitRegion(origClip);
        }
        SkDAARecord::SetEmpty(daaRecord);
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (isInverse) {
        // If the path is an inverse fill, it's going to fill the entire
        // clip, and we care whether the entire clip exceeds our limits.
        clippedIR = origClip.getBounds();
    } else {
        if (!clippedIR.intersect(ir, origClip.getBounds())) {
            SkDAARecord::SetEmpty(daaRecord);
            return;
        }
    }
    if (!daaRecord && rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }

    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (the runs[] uses int16_t for its index).
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds (see overflows_short_shift)) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper clipper(blitter, clipRgn, ir);

    if (clipper.getBlitter() == nullptr) { // clipped out
        if (isInverse) {
            blitter->blitRegion(*clipRgn);
        }
        SkDAARecord::SetEmpty(daaRecord);
        return;
    }

    SkASSERT(clipper.getClipRect() == nullptr ||
            *clipper.getClipRect() == clipRgn->getBounds());

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (isInverse) {
        sk_blit_above(blitter, ir, *clipRgn);
    }

    SkScalar avgLength, complexity;
    ComputeComplexity(path, avgLength, complexity);

    if (daaRecord || ShouldUseDAA(path, avgLength, complexity)) {
        SkScan::DAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE, daaRecord);
    } else if (ShouldUseAAA(path, avgLength, complexity)) {
        // Do not use AAA if path is too complicated:
        // there won't be any speedup or significant visual improvement.
        SkScan::AAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
    } else {
        SkScan::SAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
    }

    if (isInverse) {
        sk_blit_below(blitter, ir, *clipRgn);
    }
}

///////////////////////////////////////////////////////////////////////////////

#include "SkRasterClip.h"

void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
    if (clip.isEmpty() || !path.isFinite()) {
        return;
    }

    if (clip.isBW()) {
        FillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::FillPath(path, tmp, &aaBlitter);
    }
}

void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
                          SkBlitter* blitter, SkDAARecord* daaRecord) {
    if (clip.isEmpty() || !path.isFinite()) {
        SkDAARecord::SetEmpty(daaRecord);
        return;
    }

    if (clip.isBW()) {
        AntiFillPath(path, clip.bwRgn(), blitter, false, daaRecord);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        AntiFillPath(path, tmp, &aaBlitter, true, daaRecord); // SkAAClipBlitter can blitMask, why forceRLE?
    }
}