Revert of GrBatchPrototype (patchset #32 id:630001 of https://codereview.chromium.org/845103005/)

Reason for revert:
One last try to fix the Mac perf regression.

Original issue's description:
> GrBatchPrototype
>
> BUG=skia:
>
> Committed: https://skia.googlesource.com/skia/+/d15e4e45374275c045572b304c229237c4a82be4
>
> Committed: https://skia.googlesource.com/skia/+/d5a7db4a867c7e6ccf8451a053d987b470099198

TBR=bsalomon@google.com,kkinnunen@nvidia.com,joshualitt@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=skia:

Review URL: https://codereview.chromium.org/877393002
Author: joshualitt, 2015-01-28 06:54:30 -08:00 (committed by Commit bot)
Parent: 8b0a05ae44
Commit: c2893c5e38
34 changed files with 315 additions and 1197 deletions

View File

@ -1,185 +0,0 @@
/*
* Copyright 2013 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "gm.h"
#include "SkBitmap.h"
#include "SkRandom.h"
#include "SkShader.h"
#include "SkXfermode.h"
namespace skiagm {
/**
* Renders overlapping shapes with colorburn against a checkerboard.
*/
class DstReadShuffle : public GM {
public:
DstReadShuffle() {
this->setBGColor(SkColorSetARGB(0xff, 0xff, 0, 0xff));
}
protected:
enum ShapeType {
kCircle_ShapeType,
kRoundRect_ShapeType,
kRect_ShapeType,
kConvexPath_ShapeType,
kConcavePath_ShapeType,
kText_ShapeType,
kNumShapeTypes
};
SkString onShortName() SK_OVERRIDE {
return SkString("dstreadshuffle");
}
SkISize onISize() SK_OVERRIDE {
return SkISize::Make(kWidth, kHeight);
}
void drawShape(SkCanvas* canvas,
SkPaint* paint,
ShapeType type) {
static const SkRect kRect = SkRect::MakeXYWH(SkIntToScalar(-50), SkIntToScalar(-50),
SkIntToScalar(75), SkIntToScalar(105));
switch (type) {
case kCircle_ShapeType:
canvas->drawCircle(0, 0, 50, *paint);
break;
case kRoundRect_ShapeType:
canvas->drawRoundRect(kRect, SkIntToScalar(10), SkIntToScalar(20), *paint);
break;
case kRect_ShapeType:
canvas->drawRect(kRect, *paint);
break;
case kConvexPath_ShapeType:
if (fConvexPath.isEmpty()) {
SkPoint points[4];
kRect.toQuad(points);
fConvexPath.moveTo(points[0]);
fConvexPath.quadTo(points[1], points[2]);
fConvexPath.quadTo(points[3], points[0]);
SkASSERT(fConvexPath.isConvex());
}
canvas->drawPath(fConvexPath, *paint);
break;
case kConcavePath_ShapeType:
if (fConcavePath.isEmpty()) {
SkPoint points[5] = {{0, SkIntToScalar(-50)} };
SkMatrix rot;
rot.setRotate(SkIntToScalar(360) / 5);
for (int i = 1; i < 5; ++i) {
rot.mapPoints(points + i, points + i - 1, 1);
}
fConcavePath.moveTo(points[0]);
for (int i = 0; i < 5; ++i) {
fConcavePath.lineTo(points[(2 * i) % 5]);
}
fConcavePath.setFillType(SkPath::kEvenOdd_FillType);
SkASSERT(!fConcavePath.isConvex());
}
canvas->drawPath(fConcavePath, *paint);
break;
case kText_ShapeType: {
const char* text = "Hello!";
paint->setTextSize(30);
canvas->drawText(text, strlen(text), 0, 0, *paint);
}
default:
break;
}
}
static SkColor GetColor(SkRandom* random, int i) {
SkColor color;
switch (i) {
case 0:
color = SK_ColorTRANSPARENT;
break;
case 1:
color = SkColorSetARGB(0xff,
random->nextULessThan(256),
random->nextULessThan(256),
random->nextULessThan(256));
break;
default:
uint8_t alpha = random->nextULessThan(256);
color = SkColorSetARGB(alpha,
random->nextRangeU(0, alpha),
random->nextRangeU(0, alpha),
random->nextRangeU(0, alpha));
break;
}
return color;
}
static void SetStyle(SkPaint* p, int style, int width) {
switch (style) {
case 0:
p->setStyle(SkPaint::kStroke_Style);
p->setStrokeWidth((SkScalar)width);
break;
case 1:
p->setStyle(SkPaint::kStrokeAndFill_Style);
p->setStrokeWidth((SkScalar)width);
break;
default:
p->setStyle(SkPaint::kFill_Style);
break;
}
}
void onDraw(SkCanvas* canvas) SK_OVERRIDE {
SkRandom random;
SkScalar y = 100;
for (int i = 0; i < kNumShapeTypes; i++) {
ShapeType shapeType = static_cast<ShapeType>(i);
SkScalar x = 25;
for (int style = 0; style < 3; style++) {
for (int width = 0; width <= 1; width++) {
for (int alpha = 0; alpha <= 2; alpha++) {
for (int r = 0; r <= 5; r++) {
SkColor color = GetColor(&random, alpha);
SkPaint p;
p.setAntiAlias(true);
p.setColor(color);
p.setXfermodeMode(r % 3 == 0 ? SkXfermode::kHardLight_Mode :
SkXfermode::kSrcOver_Mode);
SetStyle(&p, style, width);
canvas->save();
canvas->translate(x, y);
canvas->rotate((SkScalar)(r < 3 ? 10 : 0));
this->drawShape(canvas, &p, shapeType);
canvas->restore();
x += 8;
}
}
}
}
y += 50;
}
}
private:
enum {
kNumShapes = 100,
};
SkAutoTUnref<SkShader> fBG;
SkPath fConcavePath;
SkPath fConvexPath;
static const int kWidth = 900;
static const int kHeight = 400;
typedef GM INHERITED;
};
//////////////////////////////////////////////////////////////////////////////
static GM* MyFactory(void*) { return new DstReadShuffle; }
static GMRegistry reg(MyFactory);
}

View File

@ -64,7 +64,6 @@
'../gm/copyTo4444.cpp',
'../gm/cubicpaths.cpp',
'../gm/cmykjpeg.cpp',
'../gm/dstreadshuffle.cpp',
'../gm/degeneratesegments.cpp',
'../gm/dcshader.cpp',
'../gm/discard.cpp',
@ -78,7 +77,6 @@
'../gm/drawlooper.cpp',
'../gm/dropshadowimagefilter.cpp',
'../gm/drrect.cpp',
'../gm/dstreadshuffle.cpp',
'../gm/etc1bitmap.cpp',
'../gm/extractbitmap.cpp',
'../gm/emboss.cpp',

View File

@ -55,10 +55,6 @@
'<(skia_src_path)/gpu/GrAllocator.h',
'<(skia_src_path)/gpu/GrAtlas.cpp',
'<(skia_src_path)/gpu/GrAtlas.h',
'<(skia_src_path)/gpu/GrBatch.cpp',
'<(skia_src_path)/gpu/GrBatch.h',
'<(skia_src_path)/gpu/GrBatchTarget.cpp',
'<(skia_src_path)/gpu/GrBatchTarget.h',
'<(skia_src_path)/gpu/GrBitmapTextContext.cpp',
'<(skia_src_path)/gpu/GrBitmapTextContext.h',
'<(skia_src_path)/gpu/GrBlend.cpp',
@ -86,6 +82,7 @@
'<(skia_src_path)/gpu/GrFontScaler.cpp',
'<(skia_src_path)/gpu/GrFontScaler.h',
'<(skia_src_path)/gpu/GrGeometryBuffer.h',
'<(skia_src_path)/gpu/GrGeometryData.h',
'<(skia_src_path)/gpu/GrGeometryProcessor.h',
'<(skia_src_path)/gpu/GrGeometryProcessor.cpp',
'<(skia_src_path)/gpu/GrGlyph.h',

View File

@ -632,7 +632,7 @@ public:
return SkNEW_ARGS(GLProcessor, (*this, bt));
}
void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;

View File

@ -6,9 +6,6 @@
*/
#include "GrAARectRenderer.h"
#include "GrBatch.h"
#include "GrBatchTarget.h"
#include "GrBufferAllocPool.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrGeometryProcessor.h"
#include "GrGpu.h"
@ -20,12 +17,46 @@
///////////////////////////////////////////////////////////////////////////////
namespace {
// Should the coverage be multiplied into the color attrib or use a separate attrib.
enum CoverageAttribType {
kUseColor_CoverageAttribType,
kUseCoverage_CoverageAttribType,
};
}
static const GrGeometryProcessor* create_rect_gp(const GrPipelineBuilder& pipelineBuilder,
GrColor color,
CoverageAttribType* type,
const SkMatrix& localMatrix) {
uint32_t flags = GrDefaultGeoProcFactory::kColor_GPType;
const GrGeometryProcessor* gp;
if (pipelineBuilder.canTweakAlphaForCoverage()) {
gp = GrDefaultGeoProcFactory::Create(flags, color, SkMatrix::I(), localMatrix);
SkASSERT(gp->getVertexStride() == sizeof(GrDefaultGeoProcFactory::PositionColorAttr));
*type = kUseColor_CoverageAttribType;
} else {
flags |= GrDefaultGeoProcFactory::kCoverage_GPType;
gp = GrDefaultGeoProcFactory::Create(flags, color, SkMatrix::I(), localMatrix,
GrColorIsOpaque(color));
SkASSERT(gp->getVertexStride()==sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
*type = kUseCoverage_CoverageAttribType;
}
return gp;
}
static void set_inset_fan(SkPoint* pts, size_t stride,
const SkRect& r, SkScalar dx, SkScalar dy) {
pts->setRectFan(r.fLeft + dx, r.fTop + dy,
r.fRight - dx, r.fBottom - dy, stride);
}
void GrAARectRenderer::reset() {
SkSafeSetNull(fAAFillRectIndexBuffer);
SkSafeSetNull(fAAMiterStrokeRectIndexBuffer);
SkSafeSetNull(fAABevelStrokeRectIndexBuffer);
}
static const uint16_t gFillAARectIdx[] = {
0, 1, 5, 5, 4, 0,
1, 2, 6, 6, 5, 1,
@ -38,314 +69,6 @@ static const int kIndicesPerAAFillRect = SK_ARRAY_COUNT(gFillAARectIdx);
static const int kVertsPerAAFillRect = 8;
static const int kNumAAFillRectsInIndexBuffer = 256;
static const GrGeometryProcessor* create_fill_rect_gp(bool tweakAlphaForCoverage,
const SkMatrix& localMatrix) {
uint32_t flags = GrDefaultGeoProcFactory::kColor_GPType;
const GrGeometryProcessor* gp;
if (tweakAlphaForCoverage) {
gp = GrDefaultGeoProcFactory::Create(flags, GrColor_WHITE, SkMatrix::I(), localMatrix,
false, 0xff);
} else {
flags |= GrDefaultGeoProcFactory::kCoverage_GPType;
gp = GrDefaultGeoProcFactory::Create(flags, GrColor_WHITE, SkMatrix::I(), localMatrix,
false, 0xff);
}
return gp;
}
class AAFillRectBatch : public GrBatch {
public:
struct Geometry {
GrColor fColor;
SkMatrix fViewMatrix;
SkRect fRect;
SkRect fDevRect;
};
static GrBatch* Create(const Geometry& geometry, const GrIndexBuffer* indexBuffer) {
return SkNEW_ARGS(AAFillRectBatch, (geometry, indexBuffer));
}
const char* name() const SK_OVERRIDE { return "AAFillRectBatch"; }
void getInvariantOutputColor(GrInitInvariantOutput* out) const SK_OVERRIDE {
// When this is called on a batch, there is only one geometry bundle
if (!this->canTweakAlphaForCoverage() && GrColorIsOpaque(fGeoData[0].fColor)) {
out->setUnknownOpaqueFourComponents();
} else {
out->setUnknownFourComponents();
}
}
void getInvariantOutputCoverage(GrInitInvariantOutput* out) const SK_OVERRIDE {
if (this->canTweakAlphaForCoverage()) {
// uniform coverage
out->setKnownSingleComponent(0xff);
} else {
out->setUnknownSingleComponent();
}
}
void initBatchOpt(const GrBatchOpt& batchOpt) {
fBatchOpt = batchOpt;
}
void initBatchTracker(const GrPipelineInfo& init) SK_OVERRIDE {
// Handle any color overrides
if (init.fColorIgnored) {
fGeoData[0].fColor = GrColor_ILLEGAL;
} else if (GrColor_ILLEGAL != init.fOverrideColor) {
fGeoData[0].fColor = init.fOverrideColor;
}
// setup batch properties
fBatch.fColorIgnored = init.fColorIgnored;
fBatch.fColor = fGeoData[0].fColor;
fBatch.fUsesLocalCoords = init.fUsesLocalCoords;
fBatch.fCoverageIgnored = init.fCoverageIgnored;
}
void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) SK_OVERRIDE {
bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
SkMatrix localMatrix;
if (!this->viewMatrix().invert(&localMatrix)) {
SkDebugf("Cannot invert\n");
return;
}
const GrGeometryProcessor* gp = create_fill_rect_gp(canTweakAlphaForCoverage,
localMatrix);
batchTarget->initDraw(gp, pipeline);
gp->unref();
// TODO this is hacky, but the only way we have to initialize the GP is to use the
// GrPipelineInfo struct so we can generate the correct shader. Once we have GrBatch
// everywhere we can remove this nastiness
GrPipelineInfo init;
init.fColorIgnored = fBatch.fColorIgnored;
init.fOverrideColor = GrColor_ILLEGAL;
init.fCoverageIgnored = fBatch.fCoverageIgnored;
init.fUsesLocalCoords = this->usesLocalCoords();
gp->initBatchTracker(batchTarget->currentBatchTracker(), init);
size_t vertexStride = gp->getVertexStride();
SkASSERT(canTweakAlphaForCoverage ?
vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr) :
vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
int instanceCount = fGeoData.count();
int vertexCount = kVertsPerAAFillRect * instanceCount;
const GrVertexBuffer* vertexBuffer;
int firstVertex;
void *vertices = batchTarget->vertexPool()->makeSpace(vertexStride,
vertexCount,
&vertexBuffer,
&firstVertex);
for (int i = 0; i < instanceCount; i++) {
const Geometry& args = fGeoData[i];
this->generateAAFillRectGeometry(vertices,
i * kVertsPerAAFillRect * vertexStride,
vertexStride,
args.fColor,
args.fViewMatrix,
args.fRect,
args.fDevRect,
canTweakAlphaForCoverage);
}
GrDrawTarget::DrawInfo drawInfo;
drawInfo.setPrimitiveType(kTriangles_GrPrimitiveType);
drawInfo.setStartVertex(0);
drawInfo.setStartIndex(0);
drawInfo.setVerticesPerInstance(kVertsPerAAFillRect);
drawInfo.setIndicesPerInstance(kIndicesPerAAFillRect);
drawInfo.adjustStartVertex(firstVertex);
drawInfo.setVertexBuffer(vertexBuffer);
drawInfo.setIndexBuffer(fIndexBuffer);
int maxInstancesPerDraw = kNumAAFillRectsInIndexBuffer;
while (instanceCount) {
drawInfo.setInstanceCount(SkTMin(instanceCount, maxInstancesPerDraw));
drawInfo.setVertexCount(drawInfo.instanceCount() * drawInfo.verticesPerInstance());
drawInfo.setIndexCount(drawInfo.instanceCount() * drawInfo.indicesPerInstance());
batchTarget->draw(drawInfo);
drawInfo.setStartVertex(drawInfo.startVertex() + drawInfo.vertexCount());
instanceCount -= drawInfo.instanceCount();
}
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
private:
AAFillRectBatch(const Geometry& geometry, const GrIndexBuffer* indexBuffer)
: fIndexBuffer(indexBuffer) {
this->initClassID<AAFillRectBatch>();
fGeoData.push_back(geometry);
}
GrColor color() const { return fBatch.fColor; }
bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
bool canTweakAlphaForCoverage() const { return fBatchOpt.fCanTweakAlphaForCoverage; }
bool colorIgnored() const { return fBatch.fColorIgnored; }
const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
bool onCombineIfPossible(GrBatch* t) SK_OVERRIDE {
AAFillRectBatch* that = t->cast<AAFillRectBatch>();
if (this->canTweakAlphaForCoverage() != that->canTweakAlphaForCoverage()) {
return false;
}
if (this->colorIgnored() != that->colorIgnored()) {
return false;
}
if (this->usesLocalCoords() != that->usesLocalCoords()) {
return false;
}
// We apply the viewmatrix to the rect points on the cpu. However, if the pipeline uses
// local coords then we won't be able to batch. We could actually upload the viewmatrix
// using vertex attributes in these cases, but haven't investigated that
if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
return false;
}
if (this->color() != that->color()) {
fBatch.fColor = GrColor_ILLEGAL;
}
fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
return true;
}
void generateAAFillRectGeometry(void* vertices,
uint32_t offset,
uint32_t vertexStride,
GrColor color,
const SkMatrix& viewMatrix,
const SkRect& rect,
const SkRect& devRect,
bool tweakAlphaForCoverage) const {
intptr_t verts = reinterpret_cast<intptr_t>(vertices) + offset;
SkPoint* fan0Pos = reinterpret_cast<SkPoint*>(verts);
SkPoint* fan1Pos = reinterpret_cast<SkPoint*>(verts + 4 * vertexStride);
SkScalar inset = SkMinScalar(devRect.width(), SK_Scalar1);
inset = SK_ScalarHalf * SkMinScalar(inset, devRect.height());
if (viewMatrix.rectStaysRect()) {
set_inset_fan(fan0Pos, vertexStride, devRect, -SK_ScalarHalf, -SK_ScalarHalf);
set_inset_fan(fan1Pos, vertexStride, devRect, inset, inset);
} else {
// compute transformed (1, 0) and (0, 1) vectors
SkVector vec[2] = {
{ viewMatrix[SkMatrix::kMScaleX], viewMatrix[SkMatrix::kMSkewY] },
{ viewMatrix[SkMatrix::kMSkewX], viewMatrix[SkMatrix::kMScaleY] }
};
vec[0].normalize();
vec[0].scale(SK_ScalarHalf);
vec[1].normalize();
vec[1].scale(SK_ScalarHalf);
// create the rotated rect
fan0Pos->setRectFan(rect.fLeft, rect.fTop,
rect.fRight, rect.fBottom, vertexStride);
viewMatrix.mapPointsWithStride(fan0Pos, vertexStride, 4);
// Now create the inset points and then outset the original
// rotated points
// TL
*((SkPoint*)((intptr_t)fan1Pos + 0 * vertexStride)) =
*((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) + vec[0] + vec[1];
*((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) -= vec[0] + vec[1];
// BL
*((SkPoint*)((intptr_t)fan1Pos + 1 * vertexStride)) =
*((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) + vec[0] - vec[1];
*((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) -= vec[0] - vec[1];
// BR
*((SkPoint*)((intptr_t)fan1Pos + 2 * vertexStride)) =
*((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) - vec[0] - vec[1];
*((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) += vec[0] + vec[1];
// TR
*((SkPoint*)((intptr_t)fan1Pos + 3 * vertexStride)) =
*((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) - vec[0] + vec[1];
*((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) += vec[0] - vec[1];
}
// Make verts point to vertex color and then set all the color and coverage vertex attrs
// values.
verts += sizeof(SkPoint);
for (int i = 0; i < 4; ++i) {
if (tweakAlphaForCoverage) {
*reinterpret_cast<GrColor*>(verts + i * vertexStride) = 0;
} else {
*reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
*reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) = 0;
}
}
int scale;
if (inset < SK_ScalarHalf) {
scale = SkScalarFloorToInt(512.0f * inset / (inset + SK_ScalarHalf));
SkASSERT(scale >= 0 && scale <= 255);
} else {
scale = 0xff;
}
verts += 4 * vertexStride;
float innerCoverage = GrNormalizeByteToFloat(scale);
GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
for (int i = 0; i < 4; ++i) {
if (tweakAlphaForCoverage) {
*reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
} else {
*reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
*reinterpret_cast<float*>(verts + i * vertexStride +
sizeof(GrColor)) = innerCoverage;
}
}
}
struct BatchTracker {
GrColor fColor;
bool fUsesLocalCoords;
bool fColorIgnored;
bool fCoverageIgnored;
};
GrBatchOpt fBatchOpt;
BatchTracker fBatch;
const GrIndexBuffer* fIndexBuffer;
SkSTArray<1, Geometry, true> fGeoData;
};
namespace {
// Should the coverage be multiplied into the color attrib or use a separate attrib.
enum CoverageAttribType {
kUseColor_CoverageAttribType,
kUseCoverage_CoverageAttribType,
};
}
void GrAARectRenderer::reset() {
SkSafeSetNull(fAAFillRectIndexBuffer);
SkSafeSetNull(fAAMiterStrokeRectIndexBuffer);
SkSafeSetNull(fAABevelStrokeRectIndexBuffer);
}
static const uint16_t gMiterStrokeAARectIdx[] = {
0 + 0, 1 + 0, 5 + 0, 5 + 0, 4 + 0, 0 + 0,
1 + 0, 2 + 0, 6 + 0, 6 + 0, 5 + 0, 1 + 0,
@ -461,21 +184,135 @@ void GrAARectRenderer::geometryFillAARect(GrDrawTarget* target,
const SkMatrix& viewMatrix,
const SkRect& rect,
const SkRect& devRect) {
GrPipelineBuilder::AutoRestoreEffects are(pipelineBuilder);
SkMatrix localMatrix;
if (!viewMatrix.invert(&localMatrix)) {
SkDebugf("Cannot invert\n");
return;
}
CoverageAttribType type;
SkAutoTUnref<const GrGeometryProcessor> gp(create_rect_gp(*pipelineBuilder, color, &type,
localMatrix));
size_t vertexStride = gp->getVertexStride();
GrDrawTarget::AutoReleaseGeometry geo(target, 8, vertexStride, 0);
if (!geo.succeeded()) {
SkDebugf("Failed to get space for vertices!\n");
return;
}
if (NULL == fAAFillRectIndexBuffer) {
fAAFillRectIndexBuffer = fGpu->createInstancedIndexBuffer(gFillAARectIdx,
kIndicesPerAAFillRect,
kNumAAFillRectsInIndexBuffer,
kVertsPerAAFillRect);
}
GrIndexBuffer* indexBuffer = fAAFillRectIndexBuffer;
if (NULL == indexBuffer) {
SkDebugf("Failed to create index buffer!\n");
return;
}
AAFillRectBatch::Geometry geometry;
geometry.fRect = rect;
geometry.fViewMatrix = viewMatrix;
geometry.fDevRect = devRect;
geometry.fColor = color;
intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());
SkAutoTUnref<GrBatch> batch(AAFillRectBatch::Create(geometry, fAAFillRectIndexBuffer));
target->drawBatch(pipelineBuilder, batch, &devRect);
SkPoint* fan0Pos = reinterpret_cast<SkPoint*>(verts);
SkPoint* fan1Pos = reinterpret_cast<SkPoint*>(verts + 4 * vertexStride);
SkScalar inset = SkMinScalar(devRect.width(), SK_Scalar1);
inset = SK_ScalarHalf * SkMinScalar(inset, devRect.height());
if (viewMatrix.rectStaysRect()) {
// Temporarily #if'ed out. We don't want to pass in the devRect but
// right now it is computed in GrContext::apply_aa_to_rect and we don't
// want to throw away the work
#if 0
SkRect devRect;
combinedMatrix.mapRect(&devRect, rect);
#endif
set_inset_fan(fan0Pos, vertexStride, devRect, -SK_ScalarHalf, -SK_ScalarHalf);
set_inset_fan(fan1Pos, vertexStride, devRect, inset, inset);
} else {
// compute transformed (1, 0) and (0, 1) vectors
SkVector vec[2] = {
{ viewMatrix[SkMatrix::kMScaleX], viewMatrix[SkMatrix::kMSkewY] },
{ viewMatrix[SkMatrix::kMSkewX], viewMatrix[SkMatrix::kMScaleY] }
};
vec[0].normalize();
vec[0].scale(SK_ScalarHalf);
vec[1].normalize();
vec[1].scale(SK_ScalarHalf);
// create the rotated rect
fan0Pos->setRectFan(rect.fLeft, rect.fTop,
rect.fRight, rect.fBottom, vertexStride);
viewMatrix.mapPointsWithStride(fan0Pos, vertexStride, 4);
// Now create the inset points and then outset the original
// rotated points
// TL
*((SkPoint*)((intptr_t)fan1Pos + 0 * vertexStride)) =
*((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) + vec[0] + vec[1];
*((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) -= vec[0] + vec[1];
// BL
*((SkPoint*)((intptr_t)fan1Pos + 1 * vertexStride)) =
*((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) + vec[0] - vec[1];
*((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) -= vec[0] - vec[1];
// BR
*((SkPoint*)((intptr_t)fan1Pos + 2 * vertexStride)) =
*((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) - vec[0] - vec[1];
*((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) += vec[0] + vec[1];
// TR
*((SkPoint*)((intptr_t)fan1Pos + 3 * vertexStride)) =
*((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) - vec[0] + vec[1];
*((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) += vec[0] - vec[1];
}
// Make verts point to vertex color and then set all the color and coverage vertex attrs values.
verts += sizeof(SkPoint);
for (int i = 0; i < 4; ++i) {
if (kUseCoverage_CoverageAttribType == type) {
*reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
*reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) = 0;
} else {
*reinterpret_cast<GrColor*>(verts + i * vertexStride) = 0;
}
}
int scale;
if (inset < SK_ScalarHalf) {
scale = SkScalarFloorToInt(512.0f * inset / (inset + SK_ScalarHalf));
SkASSERT(scale >= 0 && scale <= 255);
} else {
scale = 0xff;
}
verts += 4 * vertexStride;
float innerCoverage = GrNormalizeByteToFloat(scale);
GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
for (int i = 0; i < 4; ++i) {
if (kUseCoverage_CoverageAttribType == type) {
*reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
*reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) = innerCoverage;
} else {
*reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
}
}
target->setIndexSourceToBuffer(indexBuffer);
target->drawIndexedInstances(pipelineBuilder,
gp,
kTriangles_GrPrimitiveType,
1,
kVertsPerAAFillRect,
kIndicesPerAAFillRect);
target->resetIndexSource();
}
void GrAARectRenderer::strokeAARect(GrDrawTarget* target,
@ -545,31 +382,10 @@ void GrAARectRenderer::strokeAARect(GrDrawTarget* target,
devOutsideAssist.outset(0, ry);
}
this->geometryStrokeAARect(target, pipelineBuilder, color, viewMatrix, devOutside,
devOutsideAssist, devInside, miterStroke);
this->geometryStrokeAARect(target, pipelineBuilder, color, viewMatrix, devOutside, devOutsideAssist,
devInside, miterStroke);
}
static const GrGeometryProcessor* create_rect_gp(const GrPipelineBuilder& pipelneBuilder,
GrColor color,
CoverageAttribType* type,
const SkMatrix& localMatrix) {
uint32_t flags = GrDefaultGeoProcFactory::kColor_GPType;
const GrGeometryProcessor* gp;
if (pipelneBuilder.canTweakAlphaForCoverage()) {
gp = GrDefaultGeoProcFactory::Create(flags, color, SkMatrix::I(), localMatrix);
SkASSERT(gp->getVertexStride() == sizeof(GrDefaultGeoProcFactory::PositionColorAttr));
*type = kUseColor_CoverageAttribType;
} else {
flags |= GrDefaultGeoProcFactory::kCoverage_GPType;
gp = GrDefaultGeoProcFactory::Create(flags, color, SkMatrix::I(), localMatrix,
GrColorIsOpaque(color));
SkASSERT(gp->getVertexStride()==sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
*type = kUseCoverage_CoverageAttribType;
}
return gp;
}
void GrAARectRenderer::geometryStrokeAARect(GrDrawTarget* target,
GrPipelineBuilder* pipelineBuilder,
GrColor color,
@ -578,6 +394,8 @@ void GrAARectRenderer::geometryStrokeAARect(GrDrawTarget* target,
const SkRect& devOutsideAssist,
const SkRect& devInside,
bool miterStroke) {
GrPipelineBuilder::AutoRestoreEffects are(pipelineBuilder);
SkMatrix localMatrix;
if (!viewMatrix.invert(&localMatrix)) {
SkDebugf("Cannot invert\n");

View File

@ -1,35 +0,0 @@
#include "GrBatch.h"
#include "GrMemoryPool.h"
#include "SkTLS.h"
// TODO I noticed a small benefit to using a larger exclusive pool for batches. Its very small,
// but seems to be mostly consistent. There is a lot in flux right now, but we should really
// revisit this when batch is everywhere
class GrBatch_Globals {
public:
static GrMemoryPool* GetTLS() {
return (GrMemoryPool*)SkTLS::Get(CreateTLS, DeleteTLS);
}
private:
static void* CreateTLS() {
return SkNEW_ARGS(GrMemoryPool, (16384, 16384));
}
static void DeleteTLS(void* pool) {
SkDELETE(reinterpret_cast<GrMemoryPool*>(pool));
}
};
int32_t GrBatch::gCurrBatchClassID =
GrBatch::kIllegalBatchClassID;
void* GrBatch::operator new(size_t size) {
return GrBatch_Globals::GetTLS()->allocate(size);
}
void GrBatch::operator delete(void* target) {
GrBatch_Globals::GetTLS()->release(target);
}
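
The deleted GrBatch.cpp above routes every GrBatch allocation through a per-thread GrMemoryPool by overriding the class-level operator new/delete and fetching the pool via SkTLS. A minimal self-contained sketch of the same pattern in standard C++ follows; it is an illustration only, using std::pmr::unsynchronized_pool_resource as a stand-in for GrMemoryPool, and none of the names below are Skia API.

#include <cstddef>
#include <memory_resource>
#include <new>

// Sketch: class-scoped operator new/delete backed by a per-thread pool,
// mirroring the GrBatch_Globals/SkTLS pattern above. Hypothetical, not Skia code.
class PoolAllocated {
public:
    virtual ~PoolAllocated() {}

    static void* operator new(std::size_t size) {
        // Every allocation of this hierarchy comes from the calling thread's pool.
        return ThreadPool().allocate(size, alignof(std::max_align_t));
    }
    static void operator delete(void* p, std::size_t size) {
        // Sized delete lets the pool recycle the exact block.
        ThreadPool().deallocate(p, size, alignof(std::max_align_t));
    }

private:
    static std::pmr::unsynchronized_pool_resource& ThreadPool() {
        // One pool per thread, analogous to SkTLS::Get(CreateTLS, DeleteTLS).
        static thread_local std::pmr::unsynchronized_pool_resource pool;
        return pool;
    }
};

class ExampleBatch : public PoolAllocated {
public:
    int fPayload = 0;
};

int main() {
    ExampleBatch* b = new ExampleBatch();  // allocated from the thread-local pool
    b->fPayload = 42;
    delete b;                              // released back to the same pool
    return 0;
}

Keeping the pool per thread avoids locking on the allocation path, which matches the intent of the exclusive-pool comment at the top of GrBatch.cpp.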

View File

@ -1,132 +0,0 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrBatch_DEFINED
#define GrBatch_DEFINED
#include <new>
// TODO remove this header when we move entirely to batch
#include "GrGeometryProcessor.h"
#include "SkRefCnt.h"
#include "SkThread.h"
#include "SkTypes.h"
class GrBatchTarget;
class GrGpu;
class GrIndexBufferAllocPool;
class GrPipeline;
class GrVertexBufferAllocPool;
struct GrInitInvariantOutput;
/*
* GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
* reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
* captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
* subclasses complete freedom to decide how / what they can batch.
*
* Batches are created when GrContext processes a draw call. Batches of the same subclass may be
* merged using combineIfPossible. When two batches merge, one takes on the union of the data
* and the other is left empty. The merged batch becomes responsible for drawing the data from both
* the original batches.
*
* If there are any possible optimizations which might require knowing more about the full state of
* the draw, ie whether or not the GrBatch is allowed to tweak alpha for coverage, then this
* information will be communicated to the GrBatch prior to geometry generation.
*/
struct GrBatchOpt {
bool fCanTweakAlphaForCoverage;
};
class GrBatch : public SkRefCnt {
public:
SK_DECLARE_INST_COUNT(GrBatch)
GrBatch() { SkDEBUGCODE(fUsed = false;) }
virtual ~GrBatch() {}
virtual const char* name() const = 0;
virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;
/*
* initBatchOpt is used to communicate possible optimizations to the GrBatch. initBatchTracker
* is a hook for the some additional overrides from the GrXferProcessor. This is a bit
* confusing but has to be like this until GrBatch is everywhere.
*
* TODO combine to a single init call when GrBatch is everywhere.
*/
virtual void initBatchOpt(const GrBatchOpt&) = 0;
virtual void initBatchTracker(const GrPipelineInfo& init) = 0;
bool combineIfPossible(GrBatch* that) {
if (this->classID() != that->classID()) {
return false;
}
return onCombineIfPossible(that);
}
virtual bool onCombineIfPossible(GrBatch*) = 0;
virtual void generateGeometry(GrBatchTarget*, const GrPipeline*) = 0;
void* operator new(size_t size);
void operator delete(void* target);
void* operator new(size_t size, void* placement) {
return ::operator new(size, placement);
}
void operator delete(void* target, void* placement) {
::operator delete(target, placement);
}
/**
* Helper for down-casting to a GrBatch subclass
*/
template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
template <typename T> T* cast() { return static_cast<T*>(this); }
uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; }
// TODO no GrPrimitiveProcessors yet read fragment position
bool willReadFragmentPosition() const { return false; }
SkDEBUGCODE(bool isUsed() const { return fUsed; })
protected:
template <typename PROC_SUBCLASS> void initClassID() {
static uint32_t kClassID = GenClassID();
fClassID = kClassID;
}
uint32_t fClassID;
private:
static uint32_t GenClassID() {
// fCurrProcessorClassID has been initialized to kIllegalProcessorClassID. The
// atomic inc returns the old value not the incremented value. So we add
// 1 to the returned value.
uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1;
if (!id) {
SkFAIL("This should never wrap as it should only be called once for each GrBatch "
"subclass.");
}
return id;
}
enum {
kIllegalBatchClassID = 0,
};
static int32_t gCurrBatchClassID;
SkDEBUGCODE(bool fUsed;)
typedef SkRefCnt INHERITED;
};
#endif
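
The comment block at the top of the deleted GrBatch.h describes the batching contract: batches of the same subclass (same classID) may merge via combineIfPossible, with one batch absorbing the union of the geometry and the other left empty. A rough standalone illustration of that contract in plain C++ (hypothetical names, not the Skia API):

#include <cstdint>
#include <vector>

struct ToyRectGeometry { float fLeft, fTop, fRight, fBottom; uint32_t fColor; };

// Illustrative model of the classID gate plus the onCombineIfPossible hook.
class ToyBatch {
public:
    virtual ~ToyBatch() {}
    bool combineIfPossible(ToyBatch* that) {
        // Batches of different subclasses never merge.
        if (this->classID() != that->classID()) {
            return false;
        }
        return this->onCombineIfPossible(that);
    }
    virtual uint32_t classID() const = 0;
protected:
    virtual bool onCombineIfPossible(ToyBatch*) = 0;
};

class ToyRectBatch : public ToyBatch {
public:
    explicit ToyRectBatch(const ToyRectGeometry& geo) { fGeoData.push_back(geo); }
    uint32_t classID() const override { return 1; }  // unique per subclass
protected:
    bool onCombineIfPossible(ToyBatch* t) override {
        ToyRectBatch* that = static_cast<ToyRectBatch*>(t);
        // The real AAFillRectBatch also checks coverage tweaking, local coords,
        // and view-matrix compatibility before merging; this model merges
        // unconditionally, taking the union and leaving 'that' empty.
        fGeoData.insert(fGeoData.end(), that->fGeoData.begin(), that->fGeoData.end());
        that->fGeoData.clear();
        return true;
    }
private:
    std::vector<ToyRectGeometry> fGeoData;
};

In the reverted code path, GrInOrderDrawBuffer::onDrawBatch performed exactly this check against the most recent DrawBatch command before appending a new one, as the removed GrInOrderDrawBuffer.cpp hunk later in this diff shows.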

View File

@ -1,48 +0,0 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrBatchTarget.h"
#include "GrBufferAllocPool.h"
#include "GrPipeline.h"
/*
void GrBatchTarget::flush() {
FlushBuffer::Iter iter(fFlushBuffer);
fVertexPool->unmap();
fIndexPool->unmap();
while (iter.next()) {
GrProgramDesc desc;
BufferedFlush* bf = iter.get();
const GrPipeline* pipeline = bf->fPipeline;
const GrPrimitiveProcessor* primProc = bf->fPrimitiveProcessor.get();
fGpu->buildProgramDesc(&desc, *primProc, *pipeline, pipeline->descInfo(),
bf->fBatchTracker);
GrGpu::DrawArgs args(primProc, pipeline, &desc, &bf->fBatchTracker);
for (int i = 0; i < bf->fDraws.count(); i++) {
fGpu->draw(args, bf->fDraws[i]);
}
}
fFlushBuffer.reset();
}*/
void GrBatchTarget::flushNext() {
fIter.next();
GrProgramDesc desc;
BufferedFlush* bf = fIter.get();
const GrPipeline* pipeline = bf->fPipeline;
const GrPrimitiveProcessor* primProc = bf->fPrimitiveProcessor.get();
fGpu->buildProgramDesc(&desc, *primProc, *pipeline, pipeline->descInfo(),
bf->fBatchTracker);
GrGpu::DrawArgs args(primProc, pipeline, &desc, &bf->fBatchTracker);
for (int i = 0; i < bf->fDraws.count(); i++) {
fGpu->draw(args, bf->fDraws[i]);
}
}

View File

@ -1,86 +0,0 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrBatchBuffer_DEFINED
#define GrBatchBuffer_DEFINED
#include "GrPendingProgramElement.h"
#include "GrGpu.h"
#include "GrTRecorder.h"
/*
* GrBatch instances use this object to allocate space for their geometry and to issue the draws
* that render their batch.
*/
class GrBatchTarget : public SkNoncopyable {
public:
GrBatchTarget(GrGpu* gpu,
GrVertexBufferAllocPool* vpool,
GrIndexBufferAllocPool* ipool)
: fGpu(gpu)
, fVertexPool(vpool)
, fIndexPool(ipool)
, fFlushBuffer(kFlushBufferInitialSizeInBytes)
, fIter(fFlushBuffer) {}
typedef GrDrawTarget::DrawInfo DrawInfo;
void initDraw(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline) {
GrNEW_APPEND_TO_RECORDER(fFlushBuffer, BufferedFlush, (primProc, pipeline));
}
void draw(const GrDrawTarget::DrawInfo& draw) {
fFlushBuffer.back().fDraws.push_back(draw);
}
// TODO this is temporary until batch is everywhere
//void flush();
void preFlush() { fIter = FlushBuffer::Iter(fFlushBuffer); }
void flushNext();
void postFlush() { fFlushBuffer.reset(); }
// TODO This goes away when everything uses batch
GrBatchTracker* currentBatchTracker() {
SkASSERT(!fFlushBuffer.empty());
return &fFlushBuffer.back().fBatchTracker;
}
GrVertexBufferAllocPool* vertexPool() { return fVertexPool; }
GrIndexBufferAllocPool* indexPool() { return fIndexPool; }
private:
GrGpu* fGpu;
GrVertexBufferAllocPool* fVertexPool;
GrIndexBufferAllocPool* fIndexPool;
typedef void* TBufferAlign; // This wouldn't be enough align if a command used long double.
struct BufferedFlush {
BufferedFlush(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline)
: fPrimitiveProcessor(primProc)
, fPipeline(pipeline)
, fDraws(kDrawRecorderInitialSizeInBytes) {}
typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
ProgramPrimitiveProcessor fPrimitiveProcessor;
const GrPipeline* fPipeline;
GrBatchTracker fBatchTracker;
SkSTArray<4, DrawInfo, true> fDraws;
};
enum {
kFlushBufferInitialSizeInBytes = 8 * sizeof(BufferedFlush),
kDrawRecorderInitialSizeInBytes = 8 * sizeof(DrawInfo),
};
typedef GrTRecorder<BufferedFlush, TBufferAlign> FlushBuffer;
FlushBuffer fFlushBuffer;
// TODO this is temporary
FlushBuffer::Iter fIter;
};
#endif
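
As its comment notes, the deleted GrBatchTarget is a small recorder: initDraw() begins a BufferedFlush entry holding the primitive processor and pipeline, draw() queues DrawInfos onto that entry, and preFlush()/flushNext()/postFlush() replay and then reset the buffer. A compact standard-C++ model of that record-then-replay shape (names are illustrative only, not Skia types):

#include <cstdio>
#include <string>
#include <vector>

struct ToyProgramState { std::string fDescription; };  // stand-in for primProc + pipeline
struct ToyDrawInfo { int fVertexCount; };               // stand-in for GrDrawTarget::DrawInfo

class ToyBatchTarget {
public:
    // Analogue of initDraw(): start a new buffered entry.
    void initDraw(const ToyProgramState& state) {
        fBuffer.push_back(BufferedFlush{state, {}});
    }
    // Analogue of draw(): append a draw to the most recent entry.
    void draw(const ToyDrawInfo& info) {
        fBuffer.back().fDraws.push_back(info);
    }
    // Analogue of preFlush()/flushNext()/postFlush(): replay everything, then reset.
    void flushAll() {
        for (const BufferedFlush& bf : fBuffer) {
            for (const ToyDrawInfo& d : bf.fDraws) {
                std::printf("issue %d vertices with %s\n",
                            d.fVertexCount, bf.fState.fDescription.c_str());
            }
        }
        fBuffer.clear();
    }

private:
    struct BufferedFlush {
        ToyProgramState fState;
        std::vector<ToyDrawInfo> fDraws;
    };
    std::vector<BufferedFlush> fBuffer;
};

The real class replays one entry at a time (flushNext) so that batched and non-batched commands can be interleaved, as the removed GrInOrderDrawBuffer::onFlush hunk later in this diff shows.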

View File

@ -42,7 +42,7 @@ public:
const Attribute* inCoverage() const { return fInCoverage; }
uint8_t coverage() const { return fCoverage; }
void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init,
SkToBool(fInColor));

View File

@ -6,9 +6,9 @@
* found in the LICENSE file.
*/
#include "GrDrawTarget.h"
#include "GrBatch.h"
#include "GrDrawTarget.h"
#include "GrContext.h"
#include "GrDrawTargetCaps.h"
#include "GrPath.h"
@ -526,29 +526,6 @@ void GrDrawTarget::drawNonIndexed(GrPipelineBuilder* pipelineBuilder,
}
}
void GrDrawTarget::drawBatch(GrPipelineBuilder* pipelineBuilder,
GrBatch* batch,
const SkRect* devBounds) {
SkASSERT(pipelineBuilder);
// TODO some kind of checkdraw, but not at this level
// Setup clip
GrScissorState scissorState;
GrPipelineBuilder::AutoRestoreEffects are;
GrPipelineBuilder::AutoRestoreStencil ars;
if (!this->setupClip(pipelineBuilder, &are, &ars, &scissorState, devBounds)) {
return;
}
GrDeviceCoordTexture dstCopy;
if (!this->setupDstReadIfNecessary(pipelineBuilder, &dstCopy, devBounds)) {
return;
}
this->onDrawBatch(batch, *pipelineBuilder, scissorState, dstCopy.texture() ? &dstCopy : NULL);
}
static const GrStencilSettings& winding_path_stencil_settings() {
GR_STATIC_CONST_SAME_STENCIL_STRUCT(gSettings,
kIncClamp_StencilOp,

View File

@ -26,7 +26,6 @@
#include "SkTypes.h"
#include "SkXfermode.h"
class GrBatch;
class GrClipData;
class GrDrawTargetCaps;
class GrPath;
@ -260,11 +259,6 @@ public:
int vertexCount,
const SkRect* devBounds = NULL);
// TODO devbounds should live on the batch
void drawBatch(GrPipelineBuilder*,
GrBatch*,
const SkRect* devBounds = NULL);
/**
* Draws path into the stencil buffer. The fill must be either even/odd or
* winding (not inverse or hairline). It will respect the HW antialias flag
@ -316,14 +310,14 @@ public:
* that rectangle before it is input to GrCoordTransforms that read local
* coordinates
*/
void drawRect(GrPipelineBuilder* pipelineBuilder,
void drawRect(GrPipelineBuilder* ds,
GrColor color,
const SkMatrix& viewMatrix,
const SkRect& rect,
const SkRect* localRect,
const SkMatrix* localMatrix) {
AutoGeometryPush agp(this);
this->onDrawRect(pipelineBuilder, color, viewMatrix, rect, localRect, localMatrix);
this->onDrawRect(ds, color, viewMatrix, rect, localRect, localMatrix);
}
/**
@ -533,7 +527,6 @@ public:
*/
class DrawInfo {
public:
DrawInfo() { fDevBounds = NULL; }
DrawInfo(const DrawInfo& di) { (*this) = di; }
DrawInfo& operator =(const DrawInfo& di);
@ -546,15 +539,6 @@ public:
int indicesPerInstance() const { return fIndicesPerInstance; }
int instanceCount() const { return fInstanceCount; }
void setPrimitiveType(GrPrimitiveType type) { fPrimitiveType = type; }
void setStartVertex(int startVertex) { fStartVertex = startVertex; }
void setStartIndex(int startIndex) { fStartIndex = startIndex; }
void setVertexCount(int vertexCount) { fVertexCount = vertexCount; }
void setIndexCount(int indexCount) { fIndexCount = indexCount; }
void setVerticesPerInstance(int verticesPerI) { fVerticesPerInstance = verticesPerI; }
void setIndicesPerInstance(int indicesPerI) { fIndicesPerInstance = indicesPerI; }
void setInstanceCount(int instanceCount) { fInstanceCount = instanceCount; }
bool isIndexed() const { return fIndexCount > 0; }
#ifdef SK_DEBUG
bool isInstanced() const; // this version is longer because of asserts
@ -584,6 +568,8 @@ public:
const SkRect* getDevBounds() const { return fDevBounds; }
private:
DrawInfo() { fDevBounds = NULL; }
friend class GrDrawTarget;
GrPrimitiveType fPrimitiveType;
@ -722,10 +708,6 @@ private:
const DrawInfo&,
const GrScissorState&,
const GrDeviceCoordTexture* dstCopy) = 0;
virtual void onDrawBatch(GrBatch*,
const GrPipelineBuilder&,
const GrScissorState&,
const GrDeviceCoordTexture* dstCopy) = 0;
// TODO copy in order drawbuffer onDrawRect to here
virtual void onDrawRect(GrPipelineBuilder*,
GrColor color,

View File

@ -87,6 +87,8 @@ void GrFlushToGpuDrawTarget::flush() {
fFlushing = true;
fGpu->getContext()->getFontCache()->updateTextures();
fVertexPool->unmap();
fIndexPool->unmap();
fGpu->saveActiveTraceMarkers();

View File

@ -46,9 +46,6 @@ protected:
GrGpu* getGpu() { return fGpu; }
const GrGpu* getGpu() const{ return fGpu; }
GrVertexBufferAllocPool* getVertexAllocPool() { return fVertexPool; }
GrIndexBufferAllocPool* getIndexAllocPool() { return fIndexPool; }
private:
enum {
kGeoPoolStatePreAllocCnt = 4,

src/gpu/GrGeometryData.h (new file, 42 lines added)
View File

@ -0,0 +1,42 @@
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrGeometryData_DEFINED
#define GrGeometryData_DEFINED
#include <new>
#include "SkTypes.h"
/*
* A super lightweight base class for GeometryProcessor's to use to store draw data in a reorderable
* fashion. Its most important feature is a pool allocator. Its virtual, but only so subclasses
* will have their destructors called.
*/
class GrGeometryData : SkNoncopyable {
public:
virtual ~GrGeometryData() {}
/**
* Helper for down-casting to a GrGeometryData subclass
*/
template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
void* operator new(size_t size);
void operator delete(void* target);
void* operator new(size_t size, void* placement) {
return ::operator new(size, placement);
}
void operator delete(void* target, void* placement) {
::operator delete(target, placement);
}
};
#endif

View File

@ -516,7 +516,7 @@ void GrPathProcessor::getInvariantOutputCoverage(GrInitInvariantOutput* out) con
out->setKnownSingleComponent(0xff);
}
void GrPathProcessor::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
void GrPathProcessor::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
PathBatchTracker* local = bt->cast<PathBatchTracker>();
if (init.fColorIgnored) {
local->fInputColorType = kIgnored_GrGPInput;

View File

@ -9,6 +9,7 @@
#define GrGeometryProcessor_DEFINED
#include "GrColor.h"
#include "GrGeometryData.h"
#include "GrProcessor.h"
#include "GrShaderVar.h"
@ -45,8 +46,6 @@
/*
* A struct for tracking batching decisions. While this lives on GrOptState, it is managed
* entirely by the derived classes of the GP.
* // TODO this was an early attempt at handling out of order batching. It should be
* used carefully as it is being replaced by GrBatch
*/
class GrBatchTracker {
public:
@ -66,24 +65,12 @@ private:
SkAlignedSStorage<kMaxSize> fData;
};
class GrIndexBufferAllocPool;
class GrGLCaps;
class GrGLPrimitiveProcessor;
class GrVertexBufferAllocPool;
class GrOptDrawState;
struct GrInitInvariantOutput;
/*
* This struct allows the GrPipeline to communicate information about the pipeline. Most of this
* is overrides, but some of it is general information. Logically it should live in GrPipeline.h,
* but this is problematic due to circular dependencies.
*/
struct GrPipelineInfo {
bool fColorIgnored;
bool fCoverageIgnored;
GrColor fOverrideColor;
bool fUsesLocalCoords;
};
/*
* This enum is shared by GrPrimitiveProcessors and GrGLPrimitiveProcessors to coordinate shaders
@ -108,7 +95,17 @@ public:
const SkMatrix& viewMatrix() const { return fViewMatrix; }
const SkMatrix& localMatrix() const { return fLocalMatrix; }
virtual void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const = 0;
/*
* This struct allows the optstate to communicate requirements to the GrPrimitiveProcessor.
*/
struct InitBT {
bool fColorIgnored;
bool fCoverageIgnored;
GrColor fOverrideColor;
bool fUsesLocalCoords;
};
virtual void initBatchTracker(GrBatchTracker*, const InitBT&) const = 0;
virtual bool canMakeEqual(const GrBatchTracker& mine,
const GrPrimitiveProcessor& that,
@ -307,8 +304,7 @@ protected:
* TODO this function changes quite a bit with deferred geometry. There the GrGeometryProcessor
* can upload a new color via attribute if needed.
*/
static GrGPInput GetColorInputType(GrColor* color, GrColor primitiveColor,
const GrPipelineInfo& init,
static GrGPInput GetColorInputType(GrColor* color, GrColor primitiveColor, const InitBT& init,
bool hasVertexColor) {
if (init.fColorIgnored) {
*color = GrColor_ILLEGAL;
@ -382,7 +378,7 @@ public:
return SkNEW_ARGS(GrPathProcessor, (color, viewMatrix, localMatrix));
}
void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
bool canMakeEqual(const GrBatchTracker& mine,
const GrPrimitiveProcessor& that,

View File

@ -7,7 +7,6 @@
#include "GrInOrderDrawBuffer.h"
#include "GrBufferAllocPool.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
@ -21,9 +20,7 @@ GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
: INHERITED(gpu, vertexPool, indexPool)
, fCmdBuffer(kCmdBufferInitialSizeInBytes)
, fPrevState(NULL)
, fDrawID(0)
, fBatchTarget(gpu, vertexPool, indexPool)
, fFlushBatches(false) {
, fDrawID(0) {
SkASSERT(vertexPool);
SkASSERT(indexPool);
@ -213,7 +210,6 @@ int GrInOrderDrawBuffer::concatInstancedDraw(const GrPipelineBuilder& pipelineBu
Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());
if (!draw->fInfo.isInstanced() ||
draw->fInfo.primitiveType() != info.primitiveType() ||
draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
draw->fInfo.vertexBuffer() != info.vertexBuffer() ||
@ -270,32 +266,6 @@ void GrInOrderDrawBuffer::onDraw(const GrPipelineBuilder& pipelineBuilder,
this->recordTraceMarkersIfNecessary();
}
void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch,
const GrPipelineBuilder& pipelineBuilder,
const GrScissorState& scissorState,
const GrDeviceCoordTexture* dstCopy) {
if (!this->recordStateAndShouldDraw(batch, pipelineBuilder, scissorState, dstCopy)) {
return;
}
// TODO hack until batch is everywhere
fFlushBatches = true;
// Check if there is a Batch Draw we can batch with
if (kDrawBatch_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) {
GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch));
return;
}
DrawBatch* draw = static_cast<DrawBatch*>(&fCmdBuffer.back());
if (draw->fBatch->combineIfPossible(batch)) {
return;
} else {
GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch));
}
this->recordTraceMarkersIfNecessary();
}
void GrInOrderDrawBuffer::onStencilPath(const GrPipelineBuilder& pipelineBuilder,
const GrPathProcessor* pathProc,
const GrPath* path,
@ -441,39 +411,14 @@ void GrInOrderDrawBuffer::onFlush() {
}
// Updated every time we find a set state cmd to reflect the current state in the playback
// stream.
SetState* currentState = NULL;
// TODO we noticed a huge regression on MacMinis with the initial implementation of GrBatch
// Because of vertex buffer mismanagement between batch and non batch. To compensate we
// flush all the batches into one contigous buffer
if (fFlushBatches) {
fFlushBatches = false;
CmdBuffer::Iter preflush(fCmdBuffer);
while(preflush.next()) {
bool isSetState = kSetState_Cmd == strip_trace_bit(preflush->fType);
if (isSetState) {
SetState* ss = reinterpret_cast<SetState*>(preflush.get());
if (!ss->fPrimitiveProcessor) {
currentState = ss;
}
} else if (kDrawBatch_Cmd == strip_trace_bit(preflush->fType)) {
preflush->execute(this, currentState);
}
}
}
// TODO this is temporary while batch is being rolled out
this->getVertexAllocPool()->unmap();
this->getIndexAllocPool()->unmap();
fBatchTarget.preFlush();
currentState = NULL;
CmdBuffer::Iter iter(fCmdBuffer);
int currCmdMarker = 0;
// Updated every time we find a set state cmd to reflect the current state in the playback
// stream.
SetState* currentState = NULL;
while (iter.next()) {
GrGpuTraceMarker newMarker("", -1);
SkString traceString;
@ -484,25 +429,13 @@ void GrInOrderDrawBuffer::onFlush() {
++currCmdMarker;
}
// TODO temporary hack
if (kDrawBatch_Cmd == strip_trace_bit(iter->fType)) {
fBatchTarget.flushNext();
continue;
}
bool isSetState = kSetState_Cmd == strip_trace_bit(iter->fType);
if (isSetState) {
if (kSetState_Cmd == strip_trace_bit(iter->fType)) {
SetState* ss = reinterpret_cast<SetState*>(iter.get());
// TODO sometimes we have a prim proc, othertimes we have a GrBatch. Eventually we will
// only have GrBatch and we can delete this
if (ss->fPrimitiveProcessor) {
this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor,
ss->fPipeline,
ss->fPipeline.descInfo(),
ss->fBatchTracker);
}
this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, ss->fPipeline,
ss->fPipeline.descInfo(), ss->fBatchTracker);
currentState = ss;
} else {
iter->execute(this, currentState);
}
@ -512,9 +445,6 @@ void GrInOrderDrawBuffer::onFlush() {
}
}
// TODO see copious notes about hack
fBatchTarget.postFlush();
SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);
++fDrawID;
}
@ -554,11 +484,6 @@ void GrInOrderDrawBuffer::DrawPaths::execute(GrInOrderDrawBuffer* buf, const Set
fCount, fStencilSettings);
}
void GrInOrderDrawBuffer::DrawBatch::execute(GrInOrderDrawBuffer* buf, const SetState* state) {
SkASSERT(state);
fBatch->generateGeometry(buf->getBatchTarget(), &state->fPipeline);
}
void GrInOrderDrawBuffer::SetState::execute(GrInOrderDrawBuffer*, const SetState*) {}
void GrInOrderDrawBuffer::Clear::execute(GrInOrderDrawBuffer* buf, const SetState*) {
@ -606,7 +531,7 @@ bool GrInOrderDrawBuffer::recordStateAndShouldDraw(const GrPipelineBuilder& pipe
ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
ss->fPipeline.getInitBatchTracker());
if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
if (fPrevState &&
fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
*ss->fPrimitiveProcessor,
ss->fBatchTracker) &&
@ -619,33 +544,6 @@ bool GrInOrderDrawBuffer::recordStateAndShouldDraw(const GrPipelineBuilder& pipe
return true;
}
bool GrInOrderDrawBuffer::recordStateAndShouldDraw(GrBatch* batch,
const GrPipelineBuilder& pipelineBuilder,
const GrScissorState& scissor,
const GrDeviceCoordTexture* dstCopy) {
// TODO this gets much simpler when we have batches everywhere.
// If the previous command is also a set state, then we check to see if it has a Batch. If so,
// and we can make the two batches equal, and we can combine the states, then we make them equal
SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState,
(batch, pipelineBuilder, *this->getGpu()->caps(), scissor,
dstCopy));
if (ss->fPipeline.mustSkip()) {
fCmdBuffer.pop_back();
return false;
}
batch->initBatchTracker(ss->fPipeline.getInitBatchTracker());
if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
fPrevState->fPipeline.isEqual(ss->fPipeline)) {
fCmdBuffer.pop_back();
} else {
fPrevState = ss;
this->recordTraceMarkersIfNecessary();
}
return true;
}
void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() {
SkASSERT(!fCmdBuffer.empty());
SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType));

View File

@ -9,9 +9,6 @@
#define GrInOrderDrawBuffer_DEFINED
#include "GrFlushToGpuDrawTarget.h"
#include "GrBatch.h"
#include "GrBatchTarget.h"
#include "GrPipeline.h"
#include "GrPath.h"
#include "GrTRecorder.h"
@ -56,14 +53,13 @@ public:
private:
typedef GrGpu::DrawArgs DrawArgs;
enum {
kDraw_Cmd = 1,
kStencilPath_Cmd = 2,
kSetState_Cmd = 3,
kClear_Cmd = 4,
kCopySurface_Cmd = 5,
kDrawPath_Cmd = 6,
kDrawPaths_Cmd = 7,
kDrawBatch_Cmd = 8,
kDraw_Cmd = 1,
kStencilPath_Cmd = 2,
kSetState_Cmd = 3,
kClear_Cmd = 4,
kCopySurface_Cmd = 5,
kDrawPath_Cmd = 6,
kDrawPaths_Cmd = 7,
};
struct SetState;
@ -184,7 +180,6 @@ private:
// TODO: rename to SetPipeline once pp, batch tracker, and desc are removed
struct SetState : public Cmd {
// TODO get rid of the prim proc version of this when we use batch everywhere
SetState(const GrPipelineBuilder& pipelineBuilder, const GrPrimitiveProcessor* primProc,
const GrDrawTargetCaps& caps,
const GrScissorState& scissor, const GrDeviceCoordTexture* dstCopy)
@ -192,13 +187,6 @@ private:
, fPrimitiveProcessor(primProc)
, fPipeline(pipelineBuilder, primProc, caps, scissor, dstCopy) {}
SetState(GrBatch* batch,
const GrPipelineBuilder& pipelineBuilder,
const GrDrawTargetCaps& caps,
const GrScissorState& scissor, const GrDeviceCoordTexture* dstCopy)
: Cmd(kSetState_Cmd)
, fPipeline(batch, pipelineBuilder, caps, scissor, dstCopy) {}
void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE;
typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
@ -208,17 +196,6 @@ private:
GrBatchTracker fBatchTracker;
};
struct DrawBatch : public Cmd {
DrawBatch(GrBatch* batch) : Cmd(kDrawBatch_Cmd), fBatch(SkRef(batch)) {
SkASSERT(!batch->isUsed());
}
void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE;
// TODO it wouldn't be too hard to let batches allocate in the cmd buffer
SkAutoTUnref<GrBatch> fBatch;
};
typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
@ -231,10 +208,6 @@ private:
const DrawInfo&,
const GrScissorState&,
const GrDeviceCoordTexture* dstCopy) SK_OVERRIDE;
void onDrawBatch(GrBatch*,
const GrPipelineBuilder&,
const GrScissorState&,
const GrDeviceCoordTexture* dstCopy) SK_OVERRIDE;
void onDrawRect(GrPipelineBuilder*,
GrColor,
const SkMatrix& viewMatrix,
@ -280,16 +253,10 @@ private:
// Determines whether the current draw operation requires a new GrPipeline and if so
// records it. If the draw can be skipped false is returned and no new GrPipeline is
// recorded.
// TODO delete the primproc variant when we have batches everywhere
bool SK_WARN_UNUSED_RESULT recordStateAndShouldDraw(const GrPipelineBuilder&,
const GrPrimitiveProcessor*,
const GrScissorState&,
const GrDeviceCoordTexture*);
bool SK_WARN_UNUSED_RESULT recordStateAndShouldDraw(GrBatch*,
const GrPipelineBuilder&,
const GrScissorState&,
const GrDeviceCoordTexture*);
// We lazily record clip changes in order to skip clips that have no effect.
void recordClipIfNecessary();
// Records any trace markers for a command after adding it to the buffer.
@ -297,8 +264,6 @@ private:
bool isIssued(uint32_t drawID) SK_OVERRIDE { return drawID != fDrawID; }
GrBatchTarget* getBatchTarget() { return &fBatchTarget; }
// TODO: Use a single allocator for commands and records
enum {
kCmdBufferInitialSizeInBytes = 8 * 1024,
@ -312,9 +277,6 @@ private:
SkTDArray<char> fPathIndexBuffer;
SkTDArray<float> fPathTransformBuffer;
uint32_t fDrawID;
GrBatchTarget fBatchTarget;
// TODO hack until batch is everywhere
bool fFlushBatches;
typedef GrFlushToGpuDrawTarget INHERITED;
};

View File

@ -166,7 +166,7 @@ public:
return SkNEW_ARGS(GLProcessor, (*this, bt));
}
void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
@ -365,7 +365,7 @@ public:
return SkNEW_ARGS(GLProcessor, (*this, bt));
}
void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
@ -584,7 +584,7 @@ public:
return SkNEW_ARGS(GLProcessor, (*this, bt));
}
void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;

View File

@ -7,7 +7,6 @@
#include "GrPipeline.h"
#include "GrBatch.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrPipelineBuilder.h"
@ -15,37 +14,13 @@
#include "GrXferProcessor.h"
GrPipeline::GrPipeline(const GrPipelineBuilder& pipelineBuilder,
const GrPrimitiveProcessor* primProc,
const GrDrawTargetCaps& caps,
const GrScissorState& scissorState,
const GrDeviceCoordTexture* dstCopy) {
const GrPrimitiveProcessor* primProc,
const GrDrawTargetCaps& caps,
const GrScissorState& scissorState,
const GrDeviceCoordTexture* dstCopy) {
const GrProcOptInfo& colorPOI = pipelineBuilder.colorProcInfo(primProc);
const GrProcOptInfo& coveragePOI = pipelineBuilder.coverageProcInfo(primProc);
this->internalConstructor(pipelineBuilder, colorPOI, coveragePOI, caps, scissorState, dstCopy);
}
GrPipeline::GrPipeline(GrBatch* batch,
const GrPipelineBuilder& pipelineBuilder,
const GrDrawTargetCaps& caps,
const GrScissorState& scissorState,
const GrDeviceCoordTexture* dstCopy) {
GrBatchOpt batchOpt;
batchOpt.fCanTweakAlphaForCoverage = pipelineBuilder.canTweakAlphaForCoverage();
batch->initBatchOpt(batchOpt);
const GrProcOptInfo& colorPOI = pipelineBuilder.colorProcInfo(batch);
const GrProcOptInfo& coveragePOI = pipelineBuilder.coverageProcInfo(batch);
this->internalConstructor(pipelineBuilder, colorPOI, coveragePOI, caps, scissorState, dstCopy);
}
void GrPipeline::internalConstructor(const GrPipelineBuilder& pipelineBuilder,
const GrProcOptInfo& colorPOI,
const GrProcOptInfo& coveragePOI,
const GrDrawTargetCaps& caps,
const GrScissorState& scissorState,
const GrDeviceCoordTexture* dstCopy) {
// Create XferProcessor from DS's XPFactory
SkAutoTUnref<GrXferProcessor> xferProcessor(
pipelineBuilder.getXPFactory()->createXferProcessor(colorPOI, coveragePOI));

View File

@ -17,8 +17,8 @@
#include "SkMatrix.h"
#include "SkRefCnt.h"
class GrBatch;
class GrDeviceCoordTexture;
class GrPathProcessor;
class GrPipelineBuilder;
/**
@ -29,14 +29,10 @@ class GrPipeline {
public:
SK_DECLARE_INST_COUNT(GrPipeline)
// TODO get rid of this version of the constructor when we use batch everywhere
GrPipeline(const GrPipelineBuilder& pipelineBuilder, const GrPrimitiveProcessor*,
const GrDrawTargetCaps&, const GrScissorState&,
const GrDeviceCoordTexture* dstCopy);
GrPipeline(GrBatch*, const GrPipelineBuilder&, const GrDrawTargetCaps&,
const GrScissorState&, const GrDeviceCoordTexture* dstCopy);
/*
* Returns true if it is possible to combine the two GrPipelines and it will update 'this'
* to subsume 'that''s draw.
@ -136,17 +132,9 @@ public:
const GrProgramDesc::DescInfo& descInfo() const { return fDescInfo; }
const GrPipelineInfo& getInitBatchTracker() const { return fInitBT; }
const GrGeometryProcessor::InitBT& getInitBatchTracker() const { return fInitBT; }
private:
// TODO we can have one constructor once GrBatch is complete
void internalConstructor(const GrPipelineBuilder&,
const GrProcOptInfo& colorPOI,
const GrProcOptInfo& coveragePOI,
const GrDrawTargetCaps&,
const GrScissorState&,
const GrDeviceCoordTexture* dstCopy);
/**
* Alter the program desc and inputs (attribs and processors) based on the blend optimization.
*/
@ -176,13 +164,13 @@ private:
RenderTarget fRenderTarget;
GrScissorState fScissorState;
GrStencilSettings fStencilSettings;
GrPipelineBuilder::DrawFace fDrawFace;
GrPipelineBuilder::DrawFace fDrawFace;
GrDeviceCoordTexture fDstCopy;
uint32_t fFlags;
ProgramXferProcessor fXferProcessor;
FragmentStageArray fFragmentStages;
GrProgramDesc::DescInfo fDescInfo;
GrPipelineInfo fInitBT;
GrGeometryProcessor::InitBT fInitBT;
// This function is equivalent to the offset into fFragmentStages where coverage stages begin.
int fNumColorStages;

View File

@ -20,7 +20,9 @@ GrPipelineBuilder::GrPipelineBuilder()
, fColorProcInfoValid(false)
, fCoverageProcInfoValid(false)
, fColorCache(GrColor_ILLEGAL)
, fCoverageCache(GrColor_ILLEGAL) {
, fCoverageCache(GrColor_ILLEGAL)
, fColorPrimProc(NULL)
, fCoveragePrimProc(NULL) {
SkDEBUGCODE(fBlockEffectRemovalCnt = 0;)
}
@ -37,6 +39,8 @@ GrPipelineBuilder& GrPipelineBuilder::operator=(const GrPipelineBuilder& that) {
fCoverageProcInfoValid = that.fCoverageProcInfoValid;
fColorCache = that.fColorCache;
fCoverageCache = that.fCoverageCache;
fColorPrimProc = that.fColorPrimProc;
fCoveragePrimProc = that.fCoveragePrimProc;
if (fColorProcInfoValid) {
fColorProcInfo = that.fColorProcInfo;
}
@@ -80,6 +84,9 @@ void GrPipelineBuilder::setFromPaint(const GrPaint& paint, GrRenderTarget* rt) {
fColorCache = GrColor_ILLEGAL;
fCoverageCache = GrColor_ILLEGAL;
fColorPrimProc = NULL;
fCoveragePrimProc = NULL;
}
////////////////////////////////////////////////////////////////////////////////
@@ -154,29 +161,22 @@ bool GrPipelineBuilder::willBlendWithDst(const GrPrimitiveProcessor* pp) const {
}
void GrPipelineBuilder::calcColorInvariantOutput(const GrPrimitiveProcessor* pp) const {
fColorProcInfo.calcColorWithPrimProc(pp, fColorStages.begin(), this->numColorStages());
fColorProcInfoValid = false;
if (!fColorProcInfoValid || fColorPrimProc != pp) {
fColorProcInfo.calcColorWithPrimProc(pp, fColorStages.begin(), this->numColorStages());
fColorProcInfoValid = true;
fColorPrimProc = pp;
}
}
void GrPipelineBuilder::calcCoverageInvariantOutput(const GrPrimitiveProcessor* pp) const {
fCoverageProcInfo.calcCoverageWithPrimProc(pp, fCoverageStages.begin(),
this->numCoverageStages());
fCoverageProcInfoValid = false;
if (!fCoverageProcInfoValid || fCoveragePrimProc != pp) {
fCoverageProcInfo.calcCoverageWithPrimProc(pp, fCoverageStages.begin(),
this->numCoverageStages());
fCoverageProcInfoValid = true;
fCoveragePrimProc = pp;
}
}
void GrPipelineBuilder::calcColorInvariantOutput(const GrBatch* batch) const {
fColorProcInfo.calcColorWithBatch(batch, fColorStages.begin(), this->numColorStages());
fColorProcInfoValid = false;
}
void GrPipelineBuilder::calcCoverageInvariantOutput(const GrBatch* batch) const {
fCoverageProcInfo.calcCoverageWithBatch(batch, fCoverageStages.begin(),
this->numCoverageStages());
fCoverageProcInfoValid = false;
}
void GrPipelineBuilder::calcColorInvariantOutput(GrColor color) const {
if (!fColorProcInfoValid || color != fColorCache) {
GrColorComponentFlags flags = kRGBA_GrColorComponentFlags;
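
The restored bodies of calcColorInvariantOutput and calcCoverageInvariantOutput above are plain memoization: recompute only when the cached result is invalid or was produced for a different primitive processor. A stripped-down sketch of the same guard, with hypothetical types in place of GrProcOptInfo and GrPrimitiveProcessor:

#include <cstdint>

struct InvariantOutputStandIn {
    uint32_t fColor = 0;
};

class CachedProcInfoStandIn {
public:
    // Recompute only when the cache is stale or keyed to a different processor,
    // mirroring the !fColorProcInfoValid || fColorPrimProc != pp guard above.
    const InvariantOutputStandIn& colorProcInfo(const void* primProc) const {
        if (!fValid || fCachedPrimProc != primProc) {
            fInfo = InvariantOutputStandIn();  // placeholder for the expensive per-stage walk
            fCachedPrimProc = primProc;
            fValid = true;
        }
        return fInfo;
    }

private:
    // mutable so a const accessor can refresh the cache, as GrPipelineBuilder does.
    mutable bool fValid = false;
    mutable const void* fCachedPrimProc = nullptr;
    mutable InvariantOutputStandIn fInfo;
};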


@@ -8,7 +8,7 @@
#ifndef GrPipelineBuilder_DEFINED
#define GrPipelineBuilder_DEFINED
#include "GrBatch.h"
#include "GrBlend.h"
#include "GrDrawTargetCaps.h"
#include "GrGeometryProcessor.h"
@@ -391,15 +391,6 @@ public:
GrPipelineBuilder& operator= (const GrPipelineBuilder& that);
private:
// Calculating invariant color / coverage information is expensive, so we partially cache the
// results.
//
// canUseFracCoveragePrimProc() - Called in regular skia draw, caches results but only for a
// specific color and coverage. May be called multiple times
// willBlendWithDst() - only called by Nvpr, does not cache results
// GrOptDrawState constructor - never caches results
// TODO delete when we have Batch
const GrProcOptInfo& colorProcInfo(const GrPrimitiveProcessor* pp) const {
this->calcColorInvariantOutput(pp);
return fColorProcInfo;
@@ -410,28 +401,17 @@ private:
return fCoverageProcInfo;
}
const GrProcOptInfo& colorProcInfo(const GrBatch* batch) const {
this->calcColorInvariantOutput(batch);
return fColorProcInfo;
}
const GrProcOptInfo& coverageProcInfo(const GrBatch* batch) const {
this->calcCoverageInvariantOutput(batch);
return fCoverageProcInfo;
}
/**
* Primproc variants of the calc functions
* TODO remove these when batch is everywhere
* If fColorProcInfoValid is false, function calculates the invariant output for the color
* stages and results are stored in fColorProcInfo.
*/
void calcColorInvariantOutput(const GrPrimitiveProcessor*) const;
void calcCoverageInvariantOutput(const GrPrimitiveProcessor*) const;
/**
* GrBatch provides the initial seed for these loops based off of its initial geometry data
* If fCoverageProcInfoValid is false, function calculates the invariant output for the coverage
* stages and results are stored in fCoverageProcInfo.
*/
void calcColorInvariantOutput(const GrBatch*) const;
void calcCoverageInvariantOutput(const GrBatch*) const;
void calcCoverageInvariantOutput(const GrPrimitiveProcessor*) const;
/**
* If fColorProcInfoValid is false, function calculates the invariant output for the color
@@ -465,6 +445,8 @@ private:
mutable bool fCoverageProcInfoValid;
mutable GrColor fColorCache;
mutable GrColor fCoverageCache;
mutable const GrPrimitiveProcessor* fColorPrimProc;
mutable const GrPrimitiveProcessor* fCoveragePrimProc;
friend class GrPipeline;
};


@@ -7,29 +7,10 @@
#include "GrProcOptInfo.h"
#include "GrBatch.h"
#include "GrFragmentProcessor.h"
#include "GrFragmentStage.h"
#include "GrGeometryProcessor.h"
void GrProcOptInfo::calcColorWithBatch(const GrBatch* batch,
const GrFragmentStage* stages,
int stageCount) {
GrInitInvariantOutput out;
batch->getInvariantOutputColor(&out);
fInOut.reset(out);
this->internalCalc(stages, stageCount, batch->willReadFragmentPosition());
}
void GrProcOptInfo::calcCoverageWithBatch(const GrBatch* batch,
const GrFragmentStage* stages,
int stageCount) {
GrInitInvariantOutput out;
batch->getInvariantOutputCoverage(&out);
fInOut.reset(out);
this->internalCalc(stages, stageCount, batch->willReadFragmentPosition());
}
void GrProcOptInfo::calcColorWithPrimProc(const GrPrimitiveProcessor* primProc,
const GrFragmentStage* stages,
int stageCount) {
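
The removed calcColorWithBatch / calcCoverageWithBatch above share one shape with the primProc variants that remain: ask the source for its invariant output, reset the accumulator with it, then fold every fragment stage into the result. A compact sketch of that seed-then-fold flow, with invented stand-ins for GrInitInvariantOutput and the stage list:

#include <cstdint>
#include <vector>

// Invented stand-ins; the real GrInitInvariantOutput and GrFragmentStage carry far more state.
struct InvariantOutputSeed {
    uint32_t fColor = 0xffffffff;
    bool fIsOpaque = true;
};

struct BatchSource {
    // Plays the role of batch->getInvariantOutputColor(&out) in the removed code.
    void getInvariantOutputColor(InvariantOutputSeed* out) const { out->fColor = 0; }
};

struct StageFold {
    // Each fragment stage folds its own effect into the running output.
    void apply(InvariantOutputSeed* inout) const { inout->fIsOpaque = false; }
};

class ProcInfoFold {
public:
    template <typename Source>
    void calc(const Source& source, const std::vector<StageFold>& stages) {
        source.getInvariantOutputColor(&fInOut);  // 1) seed from the color/coverage source
        for (const StageFold& stage : stages) {
            stage.apply(&fInOut);                 // 2) fold every stage into the result
        }
    }
    const InvariantOutputSeed& inOut() const { return fInOut; }

private:
    InvariantOutputSeed fInOut;
};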


@@ -11,7 +11,6 @@
#include "GrColor.h"
#include "GrInvariantOutput.h"
class GrBatch;
class GrFragmentStage;
class GrFragmentProcessor;
class GrPrimitiveProcessor;
@@ -34,10 +33,6 @@ public:
void calcWithInitialValues(const GrFragmentStage*, int stageCount, GrColor startColor,
GrColorComponentFlags flags, bool areCoverageStages);
void calcColorWithBatch(const GrBatch*, const GrFragmentStage*, int stagecount);
void calcCoverageWithBatch(const GrBatch*, const GrFragmentStage*, int stagecount);
// TODO delete these when batch is everywhere
void calcColorWithPrimProc(const GrPrimitiveProcessor*, const GrFragmentStage*, int stagecount);
void calcCoverageWithPrimProc(const GrPrimitiveProcessor*, const GrFragmentStage*,
int stagecount);


@@ -8,6 +8,7 @@
#include "GrProcessor.h"
#include "GrContext.h"
#include "GrCoordTransform.h"
#include "GrGeometryData.h"
#include "GrGeometryProcessor.h"
#include "GrInvariantOutput.h"
#include "GrMemoryPool.h"
@@ -171,6 +172,19 @@ void GrFragmentProcessor::computeInvariantOutput(GrInvariantOutput* inout) const
///////////////////////////////////////////////////////////////////////////////////////////////////
/*
* GrGeometryData shares the same pool so it lives in this file too
*/
void* GrGeometryData::operator new(size_t size) {
return GrProcessor_Globals::GetTLS()->allocate(size);
}
void GrGeometryData::operator delete(void* target) {
GrProcessor_Globals::GetTLS()->release(target);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// Initial static variable from GrXPFactory
int32_t GrXPFactory::gCurrXPFClassID =
GrXPFactory::kIllegalXPFClassID;
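
The GrGeometryData hunk above overloads class-level operator new and operator delete so instances are carved out of the same thread-local pool the processors use (GrProcessor_Globals::GetTLS()). A self-contained sketch of that idiom, with a trivial malloc-backed stand-in for GrMemoryPool:

#include <cstddef>
#include <cstdlib>

// Trivial stand-in for a per-thread allocation pool; the real code uses GrMemoryPool via TLS.
class TinyPool {
public:
    void* allocate(std::size_t size) { return std::malloc(size); }
    void release(void* ptr) { std::free(ptr); }
};

inline TinyPool* threadLocalPool() {
    static thread_local TinyPool pool;  // one pool per thread, like the GetTLS() lookup
    return &pool;
}

struct PooledGeometryData {
    // Route every allocation of this type through the shared pool.
    void* operator new(std::size_t size) { return threadLocalPool()->allocate(size); }
    void operator delete(void* ptr) { threadLocalPool()->release(ptr); }

    int fExample = 0;
};

// Usage: plain new/delete on PooledGeometryData now hits the pool transparently.
//   PooledGeometryData* data = new PooledGeometryData;
//   delete data;

Routing allocation through the class keeps call sites on plain new/delete while the choice of pool stays in one place.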


@@ -208,7 +208,7 @@ bool GrConicEffect::onIsEqual(const GrGeometryProcessor& other) const {
return (ce.fEdgeType == fEdgeType);
}
void GrConicEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
void GrConicEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
ConicBatchTracker* local = bt->cast<ConicBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fCoverageScale = fCoverageScale;
@@ -432,7 +432,7 @@ bool GrQuadEffect::onIsEqual(const GrGeometryProcessor& other) const {
return (ce.fEdgeType == fEdgeType);
}
void GrQuadEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
void GrQuadEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
QuadBatchTracker* local = bt->cast<QuadBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fCoverageScale = fCoverageScale;
@@ -677,7 +677,7 @@ bool GrCubicEffect::onIsEqual(const GrGeometryProcessor& other) const {
return (ce.fEdgeType == fEdgeType);
}
void GrCubicEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
void GrCubicEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
CubicBatchTracker* local = bt->cast<CubicBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
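
Each initBatchTracker override above writes its per-draw state into an opaque GrBatchTracker by casting the storage to the effect's own tracker struct (bt->cast<ConicBatchTracker>() and so on). A rough, simplified sketch of that opaque-storage-plus-cast idea; note that this stand-in placement-constructs the tracker for clarity rather than reproducing the real cast<T>():

#include <cstddef>
#include <cstdint>
#include <new>
#include <type_traits>

// Opaque, fixed-size storage that each geometry processor reinterprets as its own tracker.
class BatchTrackerStandIn {
public:
    template <typename T>
    T* cast() {
        static_assert(std::is_trivially_destructible<T>::value, "trackers must stay POD-like");
        static_assert(sizeof(T) <= sizeof(fStorage), "tracker too large for inline storage");
        return new (fStorage) T();  // construct the concrete tracker in the opaque storage
    }

private:
    alignas(std::max_align_t) char fStorage[64];
};

// One effect's tracker, mirroring the shape of ConicBatchTracker / QuadBatchTracker above.
struct ExampleTracker {
    uint32_t fColor = 0;
    bool fUsesLocalCoords = false;
};

// initBatchTracker-style use: fill the tracker through the cast.
inline void initExampleTracker(BatchTrackerStandIn* bt, uint32_t color, bool usesLocalCoords) {
    ExampleTracker* local = bt->cast<ExampleTracker>();
    local->fColor = color;
    local->fUsesLocalCoords = usesLocalCoords;
}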


@@ -105,7 +105,7 @@ public:
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
const GrBatchTracker&) const SK_OVERRIDE;
@@ -190,7 +190,7 @@ public:
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
const GrBatchTracker&) const SK_OVERRIDE;
@@ -271,7 +271,7 @@ public:
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
const GrBatchTracker&) const SK_OVERRIDE;


@@ -145,7 +145,7 @@ GrBitmapTextGeoProc::createGLInstance(const GrBatchTracker& bt,
return SkNEW_ARGS(GrGLBitmapTextGeoProc, (*this, bt));
}
void GrBitmapTextGeoProc::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
void GrBitmapTextGeoProc::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
BitmapTextBatchTracker* local = bt->cast<BitmapTextBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init,
SkToBool(fInColor));
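
GetColorInputType above decides where the generated shader reads its color from: a per-vertex attribute when one is present (SkToBool(fInColor)), otherwise a uniform seeded from the processor or the pipeline init info. Its exact logic is not shown in this hunk, so the following is only a guess at the general shape, with an invented enum and field names:

#include <cstdint>

enum class ColorInput { kUniformColor, kAttributeColor, kIgnoredColor };

struct InitInfoStandIn {            // loosely modeled on the init argument above
    bool fColorIgnored = false;
    bool fUseOverrideColor = false;
    uint32_t fOverrideColor = 0;
};

// Pick the color source and, for the uniform case, report which color to upload.
inline ColorInput chooseColorInput(uint32_t* uniformColor,
                                   uint32_t gpColor,
                                   const InitInfoStandIn& init,
                                   bool hasVertexColorAttrib) {
    if (init.fColorIgnored) {
        return ColorInput::kIgnoredColor;
    }
    if (hasVertexColorAttrib && !init.fUseOverrideColor) {
        return ColorInput::kAttributeColor;   // read per-vertex color
    }
    *uniformColor = init.fUseOverrideColor ? init.fOverrideColor : gpColor;
    return ColorInput::kUniformColor;         // read a single uniform color
}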


@@ -43,7 +43,7 @@ public:
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps& caps) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
const GrBatchTracker&) const SK_OVERRIDE;


@@ -501,7 +501,7 @@ public:
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker&,
const GrGLCaps&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
@@ -706,7 +706,7 @@ bool DashingCircleEffect::onIsEqual(const GrGeometryProcessor& other) const {
fCenterX == dce.fCenterX);
}
void DashingCircleEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
void DashingCircleEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
DashingCircleBatchTracker* local = bt->cast<DashingCircleBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
@@ -795,7 +795,7 @@ public:
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
@@ -1013,7 +1013,7 @@ bool DashingLineEffect::onIsEqual(const GrGeometryProcessor& other) const {
fIntervalLength == de.fIntervalLength);
}
void DashingLineEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
void DashingLineEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
DashingLineBatchTracker* local = bt->cast<DashingLineBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;


@@ -258,7 +258,7 @@ GrDistanceFieldTextureEffect::createGLInstance(const GrBatchTracker& bt,
return SkNEW_ARGS(GrGLDistanceFieldTextureEffect, (*this, bt));
}
void GrDistanceFieldTextureEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
void GrDistanceFieldTextureEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
DistanceFieldBatchTracker* local = bt->cast<DistanceFieldBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init,
SkToBool(fInColor));
@@ -520,7 +520,7 @@ GrDistanceFieldNoGammaTextureEffect::createGLInstance(const GrBatchTracker& bt,
}
void GrDistanceFieldNoGammaTextureEffect::initBatchTracker(GrBatchTracker* bt,
const GrPipelineInfo& init) const {
const InitBT& init) const {
DistanceFieldNoGammaBatchTracker* local = bt->cast<DistanceFieldNoGammaBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init,
SkToBool(fInColor));
@@ -841,7 +841,7 @@ GrDistanceFieldLCDTextureEffect::createGLInstance(const GrBatchTracker& bt,
}
void GrDistanceFieldLCDTextureEffect::initBatchTracker(GrBatchTracker* bt,
const GrPipelineInfo& init) const {
const InitBT& init) const {
DistanceFieldLCDBatchTracker* local = bt->cast<DistanceFieldLCDBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;


@@ -83,7 +83,7 @@ public:
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
@@ -148,7 +148,7 @@ public:
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
@@ -206,7 +206,7 @@ public:
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,