2016-07-01 15:22:01 +00:00
|
|
|
/*
|
|
|
|
* Copyright 2016 Google Inc.
|
|
|
|
*
|
|
|
|
* Use of this source code is governed by a BSD-style license that can be
|
|
|
|
* found in the LICENSE file.
|
|
|
|
*/
|
2016-10-26 14:35:22 +00:00
|
|
|
|
2019-04-23 17:05:21 +00:00
|
|
|
#include "src/sksl/SkSLCompiler.h"
|
|
|
|
|
2020-08-03 17:21:46 +00:00
|
|
|
#include <memory>
|
2020-08-11 22:05:39 +00:00
|
|
|
#include <unordered_set>
|
2020-08-03 17:21:46 +00:00
|
|
|
|
2020-08-31 22:09:01 +00:00
|
|
|
#include "src/sksl/SkSLAnalysis.h"
|
2019-04-23 17:05:21 +00:00
|
|
|
#include "src/sksl/SkSLByteCodeGenerator.h"
|
|
|
|
#include "src/sksl/SkSLCFGGenerator.h"
|
|
|
|
#include "src/sksl/SkSLCPPCodeGenerator.h"
|
|
|
|
#include "src/sksl/SkSLGLSLCodeGenerator.h"
|
|
|
|
#include "src/sksl/SkSLHCodeGenerator.h"
|
|
|
|
#include "src/sksl/SkSLIRGenerator.h"
|
|
|
|
#include "src/sksl/SkSLMetalCodeGenerator.h"
|
|
|
|
#include "src/sksl/SkSLPipelineStageCodeGenerator.h"
|
2020-07-28 18:46:53 +00:00
|
|
|
#include "src/sksl/SkSLRehydrator.h"
|
2019-04-23 17:05:21 +00:00
|
|
|
#include "src/sksl/SkSLSPIRVCodeGenerator.h"
|
2020-02-19 20:35:26 +00:00
|
|
|
#include "src/sksl/SkSLSPIRVtoHLSL.h"
|
2019-04-23 17:05:21 +00:00
|
|
|
#include "src/sksl/ir/SkSLEnum.h"
|
|
|
|
#include "src/sksl/ir/SkSLExpression.h"
|
|
|
|
#include "src/sksl/ir/SkSLExpressionStatement.h"
|
|
|
|
#include "src/sksl/ir/SkSLFunctionCall.h"
|
|
|
|
#include "src/sksl/ir/SkSLIntLiteral.h"
|
|
|
|
#include "src/sksl/ir/SkSLModifiersDeclaration.h"
|
|
|
|
#include "src/sksl/ir/SkSLNop.h"
|
|
|
|
#include "src/sksl/ir/SkSLSymbolTable.h"
|
|
|
|
#include "src/sksl/ir/SkSLTernaryExpression.h"
|
|
|
|
#include "src/sksl/ir/SkSLUnresolvedFunction.h"
|
|
|
|
#include "src/sksl/ir/SkSLVarDeclarations.h"
|
2020-10-05 16:03:53 +00:00
|
|
|
#include "src/utils/SkBitSet.h"
|
2016-07-01 15:22:01 +00:00
|
|
|
|
2020-08-06 17:00:19 +00:00
|
|
|
#include <fstream>
|
|
|
|
|
2019-11-26 21:27:47 +00:00
|
|
|
#if !defined(SKSL_STANDALONE) & SK_SUPPORT_GPU
|
|
|
|
#include "include/gpu/GrContextOptions.h"
|
|
|
|
#include "src/gpu/GrShaderCaps.h"
|
|
|
|
#endif
|
|
|
|
|
2017-03-16 13:56:54 +00:00
|
|
|
#ifdef SK_ENABLE_SPIRV_VALIDATION
|
|
|
|
#include "spirv-tools/libspirv.hpp"
|
|
|
|
#endif
|
|
|
|
|
2020-10-08 15:50:22 +00:00
|
|
|
#if defined(SKSL_STANDALONE)
|
2017-11-16 16:20:11 +00:00
|
|
|
|
2020-10-08 15:50:22 +00:00
|
|
|
// In standalone mode, we load the textual sksl source files. GN generates or copies these files
|
|
|
|
// to the skslc executable directory. The "data" in this mode is just the filename.
|
|
|
|
#define MODULE_DATA(name) MakeModulePath("sksl_" #name ".sksl")
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
// At runtime, we load the dehydrated sksl data files. The data is a (pointer, size) pair.
|
2020-07-28 18:46:53 +00:00
|
|
|
#include "src/sksl/generated/sksl_fp.dehydrated.sksl"
|
|
|
|
#include "src/sksl/generated/sksl_frag.dehydrated.sksl"
|
|
|
|
#include "src/sksl/generated/sksl_geom.dehydrated.sksl"
|
|
|
|
#include "src/sksl/generated/sksl_gpu.dehydrated.sksl"
|
|
|
|
#include "src/sksl/generated/sksl_interp.dehydrated.sksl"
|
|
|
|
#include "src/sksl/generated/sksl_pipeline.dehydrated.sksl"
|
2020-11-06 16:45:36 +00:00
|
|
|
#include "src/sksl/generated/sksl_public.dehydrated.sksl"
|
2020-07-28 18:46:53 +00:00
|
|
|
#include "src/sksl/generated/sksl_vert.dehydrated.sksl"
|
2019-05-24 15:01:59 +00:00
|
|
|
|
2020-10-08 15:50:22 +00:00
|
|
|
#define MODULE_DATA(name) MakeModuleData(SKSL_INCLUDE_sksl_##name,\
|
|
|
|
SKSL_INCLUDE_sksl_##name##_LENGTH)
|
2020-07-28 18:46:53 +00:00
|
|
|
|
|
|
|
#endif
|
2019-04-08 13:46:01 +00:00
|
|
|
|
2016-07-01 15:22:01 +00:00
|
|
|
namespace SkSL {
|
|
|
|
|
Reorganization of IR generator's API and interaction with compiler
- Move all of IR generator's fields private (except for fContext, which
is used ~everywhere).
- Eliminate start() and finish(), fold this logic into convertProgram.
The division of what was set/reset in different places was pretty
arbitrary. Now, convertProgram does everything. Along that line, have
it actually return the "outputs" as an IRBundle (a small collection of
the things that the compiler needs). This seems better than the
compiler ripping out IR generator's internals.
- IR generator's POD field initialization was a mix of in-class and
constructor. Move all the constant initialization to declarations.
- No need to look up sk_PerVertex at start (or convertProgram) time, so
remove fSkPerVertex, and just do the lookup when we're about to use
it.
- IRGenerator::convertProgram is fairly long now, but all the code is in
one place. You don't have to think about the order that three
different member functions are called (along with the caller mutating
the internal state between those three calls).
- In the compiler, add an AutoSource helper to manage changing and
restoring the fSource pointer everywhere.
- Rename the loadXXXIntrinsics functions to loadXXXModule, have them
return the module, and wrap the whole thing up in a single
moduleForProgramKind() helper.
Change-Id: I0c9b6702f8786792963e3d9408d6619e5ab393e2
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/324696
Reviewed-by: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
2020-10-09 16:05:16 +00:00
|
|
|
class AutoSource {
|
|
|
|
public:
|
|
|
|
AutoSource(Compiler* compiler, const String* source)
|
|
|
|
: fCompiler(compiler), fOldSource(fCompiler->fSource) {
|
|
|
|
fCompiler->fSource = source;
|
|
|
|
}
|
|
|
|
|
|
|
|
~AutoSource() { fCompiler->fSource = fOldSource; }
|
|
|
|
|
|
|
|
Compiler* fCompiler;
|
|
|
|
const String* fOldSource;
|
|
|
|
};
|
|
|
|
|
2020-11-02 17:26:22 +00:00
|
|
|
// Constructs a Compiler: builds the shared Context, the root and private
// builtin symbol tables, the IR generator, and registers all builtin types.
// Registration order is load-bearing: the private table chains to the root
// table, and the IR generator must exist before sk_Caps is added (its
// modifier pool is used below).
Compiler::Compiler(const ShaderCapsClass* caps, Flags flags)
        : fContext(std::make_shared<Context>())
        , fCaps(caps)
        , fInliner(fContext.get(), fCaps)
        , fFlags(flags)
        , fErrorCount(0) {
    SkASSERT(fCaps);
    // Root table holds types visible to all program kinds (including user
    // code); the private table chains off it and adds GPU/internal-only types.
    fRootSymbolTable = std::make_shared<SymbolTable>(this, /*builtin=*/true);
    fPrivateSymbolTable = std::make_shared<SymbolTable>(fRootSymbolTable, /*builtin=*/true);
    fIRGenerator = std::make_unique<IRGenerator>(fContext.get(), fCaps, *this);

// Shorthand: fetch the raw Type* for a builtin type owned by the Context.
#define TYPE(t) fContext->f##t##_Type.get()

    // Types available in every module, including public-facing ones.
    const SkSL::Symbol* rootTypes[] = {
        TYPE(Void),

        TYPE( Float), TYPE( Float2), TYPE( Float3), TYPE( Float4),
        TYPE( Half), TYPE( Half2), TYPE( Half3), TYPE( Half4),
        TYPE( Int), TYPE( Int2), TYPE( Int3), TYPE( Int4),
        TYPE( UInt), TYPE( UInt2), TYPE( UInt3), TYPE( UInt4),
        TYPE( Short), TYPE( Short2), TYPE( Short3), TYPE( Short4),
        TYPE(UShort), TYPE(UShort2), TYPE(UShort3), TYPE(UShort4),
        TYPE( Byte), TYPE( Byte2), TYPE( Byte3), TYPE( Byte4),
        TYPE( UByte), TYPE( UByte2), TYPE( UByte3), TYPE( UByte4),
        TYPE( Bool), TYPE( Bool2), TYPE( Bool3), TYPE( Bool4),

        TYPE(Float2x2), TYPE(Float2x3), TYPE(Float2x4),
        TYPE(Float3x2), TYPE(Float3x3), TYPE(Float3x4),
        TYPE(Float4x2), TYPE(Float4x3), TYPE(Float4x4),

        TYPE(Half2x2), TYPE(Half2x3), TYPE(Half2x4),
        TYPE(Half3x2), TYPE(Half3x3), TYPE(Half3x4),
        TYPE(Half4x2), TYPE(Half4x3), TYPE(Half4x4),

        // Generic placeholder types used by intrinsic signatures.
        TYPE(GenType), TYPE(GenHType), TYPE(GenIType), TYPE(GenUType), TYPE(GenBType),
        TYPE(Mat), TYPE(Vec),
        TYPE(GVec), TYPE(GVec2), TYPE(GVec3), TYPE(GVec4),
        TYPE(HVec), TYPE(IVec), TYPE(UVec), TYPE(SVec), TYPE(USVec),
        TYPE(ByteVec), TYPE(UByteVec), TYPE(BVec),

        TYPE(FragmentProcessor),
    };

    // Types only visible to internal (non-public) modules: samplers, images,
    // subpass inputs, and their generic/shadow variants.
    const SkSL::Symbol* privateTypes[] = {
        TYPE(Sampler1D), TYPE(Sampler2D), TYPE(Sampler3D),
        TYPE(SamplerExternalOES),
        TYPE(SamplerCube),
        TYPE(Sampler2DRect),
        TYPE(Sampler1DArray), TYPE(Sampler2DArray), TYPE(SamplerCubeArray),
        TYPE(SamplerBuffer),
        TYPE(Sampler2DMS), TYPE(Sampler2DMSArray),

        TYPE(ISampler2D),
        TYPE(Image2D), TYPE(IImage2D),
        TYPE(SubpassInput), TYPE(SubpassInputMS),

        TYPE(GSampler1D), TYPE(GSampler2D), TYPE(GSampler3D),
        TYPE(GSamplerCube),
        TYPE(GSampler2DRect),
        TYPE(GSampler1DArray), TYPE(GSampler2DArray), TYPE(GSamplerCubeArray),
        TYPE(GSamplerBuffer),
        TYPE(GSampler2DMS), TYPE(GSampler2DMSArray),

        TYPE(Sampler1DShadow), TYPE(Sampler2DShadow), TYPE(SamplerCubeShadow),
        TYPE(Sampler2DRectShadow),
        TYPE(Sampler1DArrayShadow), TYPE(Sampler2DArrayShadow), TYPE(SamplerCubeArrayShadow),

        TYPE(GSampler2DArrayShadow), TYPE(GSamplerCubeArrayShadow),
        TYPE(Sampler),
        TYPE(Texture2D),
    };

    // The Context owns these types; the tables only hold borrowed pointers.
    for (const SkSL::Symbol* type : rootTypes) {
        fRootSymbolTable->addWithoutOwnership(type);
    }
    for (const SkSL::Symbol* type : privateTypes) {
        fPrivateSymbolTable->addWithoutOwnership(type);
    }

#undef TYPE

    // sk_Caps is "builtin", but all references to it are resolved to Settings, so we don't need to
    // treat it as builtin (ie, no need to clone it into the Program).
    fPrivateSymbolTable->add(
            std::make_unique<Variable>(/*offset=*/-1,
                                       fIRGenerator->fModifiers->addToPool(Modifiers()),
                                       "sk_Caps",
                                       fContext->fSkCaps_Type.get(),
                                       /*builtin=*/false,
                                       Variable::Storage::kGlobal));

    // Base modules (symbols only, no intrinsics) that the lazily-parsed
    // modules below chain from.
    fRootModule = {fRootSymbolTable, /*fIntrinsics=*/nullptr};
    fPrivateModule = {fPrivateSymbolTable, /*fIntrinsics=*/nullptr};
}
|
|
|
|
|
2020-10-29 14:45:34 +00:00
|
|
|
Compiler::~Compiler() {}
|
2016-07-01 15:22:01 +00:00
|
|
|
|
2020-11-20 17:38:07 +00:00
|
|
|
// Lazily parses the shared GPU intrinsics module (chained off the private
// module) on first use; subsequent calls return the cached result.
const ParsedModule& Compiler::loadGPUModule() {
    if (fGPUModule.fSymbols) {
        return fGPUModule;
    }
    fGPUModule = this->parseModule(Program::kFragment_Kind, MODULE_DATA(gpu), fPrivateModule);
    return fGPUModule;
}
|
|
|
|
|
|
|
|
// Lazily parses the fragment-shader module (chained off the GPU module) on
// first use; subsequent calls return the cached result.
const ParsedModule& Compiler::loadFragmentModule() {
    if (fFragmentModule.fSymbols) {
        return fFragmentModule;
    }
    fFragmentModule = this->parseModule(Program::kFragment_Kind, MODULE_DATA(frag),
                                        this->loadGPUModule());
    return fFragmentModule;
}
|
|
|
|
|
|
|
|
// Lazily parses the vertex-shader module (chained off the GPU module) on
// first use; subsequent calls return the cached result.
const ParsedModule& Compiler::loadVertexModule() {
    if (fVertexModule.fSymbols) {
        return fVertexModule;
    }
    fVertexModule = this->parseModule(Program::kVertex_Kind, MODULE_DATA(vert),
                                      this->loadGPUModule());
    return fVertexModule;
}
|
|
|
|
|
Reorganization of IR generator's API and interaction with compiler
- Move all of IR generator's fields private (except for fContext, which
is used ~everywhere).
- Eliminate start() and finish(), fold this logic into convertProgram.
The division of what was set/reset in different places was pretty
arbitrary. Now, convertProgram does everything. Along that line, have
it actually return the "outputs" as an IRBundle (a small collection of
the things that the compiler needs). This seems better than the
compiler ripping out IR generator's internals.
- IR generator's POD field initialization was a mix of in-class and
constructor. Move all the constant initialization to declarations.
- No need to look up sk_PerVertex at start (or convertProgram) time, so
remove fSkPerVertex, and just do the lookup when we're about to use
it.
- IRGenerator::convertProgram is fairly long now, but all the code is in
one place. You don't have to think about the order that three
different member functions are called (along with the caller mutating
the internal state between those three calls).
- In the compiler, add an AutoSource helper to manage changing and
restoring the fSource pointer everywhere.
- Rename the loadXXXIntrinsics functions to loadXXXModule, have them
return the module, and wrap the whole thing up in a single
moduleForProgramKind() helper.
Change-Id: I0c9b6702f8786792963e3d9408d6619e5ab393e2
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/324696
Reviewed-by: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
2020-10-09 16:05:16 +00:00
|
|
|
// Lazily parses the geometry-shader module (chained off the GPU module) on
// first use; subsequent calls return the cached result.
const ParsedModule& Compiler::loadGeometryModule() {
    if (fGeometryModule.fSymbols) {
        return fGeometryModule;
    }
    fGeometryModule = this->parseModule(Program::kGeometry_Kind, MODULE_DATA(geom),
                                        this->loadGPUModule());
    return fGeometryModule;
}
|
|
|
|
|
Reorganization of IR generator's API and interaction with compiler
- Move all of IR generator's fields private (except for fContext, which
is used ~everywhere).
- Eliminate start() and finish(), fold this logic into convertProgram.
The division of what was set/reset in different places was pretty
arbitrary. Now, convertProgram does everything. Along that line, have
it actually return the "outputs" as an IRBundle (a small collection of
the things that the compiler needs). This seems better than the
compiler ripping out IR generator's internals.
- IR generator's POD field initialization was a mix of in-class and
constructor. Move all the constant initialization to declarations.
- No need to look up sk_PerVertex at start (or convertProgram) time, so
remove fSkPerVertex, and just do the lookup when we're about to use
it.
- IRGenerator::convertProgram is fairly long now, but all the code is in
one place. You don't have to think about the order that three
different member functions are called (along with the caller mutating
the internal state between those three calls).
- In the compiler, add an AutoSource helper to manage changing and
restoring the fSource pointer everywhere.
- Rename the loadXXXIntrinsics functions to loadXXXModule, have them
return the module, and wrap the whole thing up in a single
moduleForProgramKind() helper.
Change-Id: I0c9b6702f8786792963e3d9408d6619e5ab393e2
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/324696
Reviewed-by: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
2020-10-09 16:05:16 +00:00
|
|
|
// Lazily parses the fragment-processor module (chained off the GPU module)
// on first use; subsequent calls return the cached result.
const ParsedModule& Compiler::loadFPModule() {
    if (fFPModule.fSymbols) {
        return fFPModule;
    }
    fFPModule = this->parseModule(Program::kFragmentProcessor_Kind, MODULE_DATA(fp),
                                  this->loadGPUModule());
    return fFPModule;
}
|
|
|
|
|
2020-11-06 16:45:36 +00:00
|
|
|
// Lazily parses the public-SkSL module (chained off the root module, so it
// sees only public types) on first use; subsequent calls return the cache.
const ParsedModule& Compiler::loadPublicModule() {
    if (fPublicModule.fSymbols) {
        return fPublicModule;
    }
    fPublicModule = this->parseModule(Program::kGeneric_Kind, MODULE_DATA(public), fRootModule);
    return fPublicModule;
}
|
|
|
|
|
Reorganization of IR generator's API and interaction with compiler
- Move all of IR generator's fields private (except for fContext, which
is used ~everywhere).
- Eliminate start() and finish(), fold this logic into convertProgram.
The division of what was set/reset in different places was pretty
arbitrary. Now, convertProgram does everything. Along that line, have
it actually return the "outputs" as an IRBundle (a small collection of
the things that the compiler needs). This seems better than the
compiler ripping out IR generator's internals.
- IR generator's POD field initialization was a mix of in-class and
constructor. Move all the constant initialization to declarations.
- No need to look up sk_PerVertex at start (or convertProgram) time, so
remove fSkPerVertex, and just do the lookup when we're about to use
it.
- IRGenerator::convertProgram is fairly long now, but all the code is in
one place. You don't have to think about the order that three
different member functions are called (along with the caller mutating
the internal state between those three calls).
- In the compiler, add an AutoSource helper to manage changing and
restoring the fSource pointer everywhere.
- Rename the loadXXXIntrinsics functions to loadXXXModule, have them
return the module, and wrap the whole thing up in a single
moduleForProgramKind() helper.
Change-Id: I0c9b6702f8786792963e3d9408d6619e5ab393e2
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/324696
Reviewed-by: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
2020-10-09 16:05:16 +00:00
|
|
|
// Lazily parses the pipeline-stage module (chained off the public module) on
// first use, then installs GLSL-style type aliases so runtime-effect source
// reads more naturally. Subsequent calls return the cached result.
const ParsedModule& Compiler::loadPipelineModule() {
    if (fPipelineModule.fSymbols) {
        return fPipelineModule;
    }
    fPipelineModule = this->parseModule(Program::kPipelineStage_Kind, MODULE_DATA(pipeline),
                                        this->loadPublicModule());

    // Add some aliases to the pipeline module so that it's friendlier, and more like GLSL.
    SymbolTable& symbols = *fPipelineModule.fSymbols;
    symbols.addAlias("shader", fContext->fFragmentProcessor_Type.get());

    symbols.addAlias("vec2", fContext->fFloat2_Type.get());
    symbols.addAlias("vec3", fContext->fFloat3_Type.get());
    symbols.addAlias("vec4", fContext->fFloat4_Type.get());

    symbols.addAlias("bvec2", fContext->fBool2_Type.get());
    symbols.addAlias("bvec3", fContext->fBool3_Type.get());
    symbols.addAlias("bvec4", fContext->fBool4_Type.get());

    // Square matrices get both the short and the explicit NxN spelling.
    symbols.addAlias("mat2", fContext->fFloat2x2_Type.get());
    symbols.addAlias("mat3", fContext->fFloat3x3_Type.get());
    symbols.addAlias("mat4", fContext->fFloat4x4_Type.get());

    symbols.addAlias("mat2x2", fContext->fFloat2x2_Type.get());
    symbols.addAlias("mat2x3", fContext->fFloat2x3_Type.get());
    symbols.addAlias("mat2x4", fContext->fFloat2x4_Type.get());

    symbols.addAlias("mat3x2", fContext->fFloat3x2_Type.get());
    symbols.addAlias("mat3x3", fContext->fFloat3x3_Type.get());
    symbols.addAlias("mat3x4", fContext->fFloat3x4_Type.get());

    symbols.addAlias("mat4x2", fContext->fFloat4x2_Type.get());
    symbols.addAlias("mat4x3", fContext->fFloat4x3_Type.get());
    symbols.addAlias("mat4x4", fContext->fFloat4x4_Type.get());

    return fPipelineModule;
}
|
|
|
|
|
Reorganization of IR generator's API and interaction with compiler
- Move all of IR generator's fields private (except for fContext, which
is used ~everywhere).
- Eliminate start() and finish(), fold this logic into convertProgram.
The division of what was set/reset in different places was pretty
arbitrary. Now, convertProgram does everything. Along that line, have
it actually return the "outputs" as an IRBundle (a small collection of
the things that the compiler needs). This seems better than the
compiler ripping out IR generator's internals.
- IR generator's POD field initialization was a mix of in-class and
constructor. Move all the constant initialization to declarations.
- No need to look up sk_PerVertex at start (or convertProgram) time, so
remove fSkPerVertex, and just do the lookup when we're about to use
it.
- IRGenerator::convertProgram is fairly long now, but all the code is in
one place. You don't have to think about the order that three
different member functions are called (along with the caller mutating
the internal state between those three calls).
- In the compiler, add an AutoSource helper to manage changing and
restoring the fSource pointer everywhere.
- Rename the loadXXXIntrinsics functions to loadXXXModule, have them
return the module, and wrap the whole thing up in a single
moduleForProgramKind() helper.
Change-Id: I0c9b6702f8786792963e3d9408d6619e5ab393e2
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/324696
Reviewed-by: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
2020-10-09 16:05:16 +00:00
|
|
|
// Lazily parses the interpreter module (chained off the public module) on
// first use; subsequent calls return the cached result.
const ParsedModule& Compiler::loadInterpreterModule() {
    if (fInterpreterModule.fSymbols) {
        return fInterpreterModule;
    }
    fInterpreterModule = this->parseModule(Program::kGeneric_Kind, MODULE_DATA(interp),
                                           this->loadPublicModule());
    return fInterpreterModule;
}
|
|
|
|
|
|
|
|
// Returns the built-in module appropriate for the given program kind, loading it
// lazily via the corresponding loadXXXModule() helper.
// Fix: the original placed a `break` after each `return`; those statements were
// unreachable dead code and have been removed. The switch intentionally has no
// default case so the compiler can warn if a new Program::Kind is added; falling
// out of the switch means `kind` held an invalid value.
const ParsedModule& Compiler::moduleForProgramKind(Program::Kind kind) {
    switch (kind) {
        case Program::kVertex_Kind:            return this->loadVertexModule();
        case Program::kFragment_Kind:          return this->loadFragmentModule();
        case Program::kGeometry_Kind:          return this->loadGeometryModule();
        case Program::kFragmentProcessor_Kind: return this->loadFPModule();
        case Program::kPipelineStage_Kind:     return this->loadPipelineModule();
        case Program::kGeneric_Kind:           return this->loadInterpreterModule();
    }
    SkUNREACHABLE;
}
|
|
|
|
|
2020-10-08 15:50:22 +00:00
|
|
|
// Loads one built-in module. In standalone (skslc) builds the module source is read
// from disk (data.fPath) and compiled with the IR generator; in normal builds the
// module is rehydrated from precompiled binary data (data.fData/fSize).
// `base` is the symbol table the module's symbols are layered on top of; if null,
// the private symbol table is substituted (see workaround note below).
LoadedModule Compiler::loadModule(Program::Kind kind,
                                  ModuleData data,
                                  std::shared_ptr<SymbolTable> base) {
    if (!base) {
        // NOTE: This is a workaround. The only time 'base' is null is when dehydrating includes.
        // In that case, skslc doesn't know which module it's preparing, nor what the correct base
        // module is. We can't use 'Root', because many GPU intrinsics reference private types,
        // like samplers or textures. Today, 'Private' does contain the union of all known types,
        // so this is safe. If we ever have types that only exist in 'Public' (for example), this
        // logic needs to be smarter (by choosing the correct base for the module we're compiling).
        base = fPrivateSymbolTable;
    }

#if defined(SKSL_STANDALONE)
    SkASSERT(data.fPath);
    std::ifstream in(data.fPath);
    // Slurp the whole file into a single String.
    std::unique_ptr<String> text = std::make_unique<String>(std::istreambuf_iterator<char>(in),
                                                            std::istreambuf_iterator<char>());
    if (in.rdstate()) {
        printf("error reading %s\n", data.fPath);
        abort();
    }
    // The root symbol table owns the source text so it outlives this function
    // (IR nodes keep offsets into it).
    const String* source = fRootSymbolTable->takeOwnershipOfString(std::move(text));
    AutoSource as(this, source);
    Program::Settings settings;
    // Built-in modules must not be inlined while being compiled; restore the flag below.
    SkASSERT(fIRGenerator->fCanInline);
    fIRGenerator->fCanInline = false;
    ParsedModule baseModule = {base, /*fIntrinsics=*/nullptr};
    IRGenerator::IRBundle ir =
            fIRGenerator->convertProgram(kind, &settings, baseModule,
                                         /*isBuiltinCode=*/true, source->c_str(), source->length(),
                                         /*externalValues=*/nullptr);
    // Built-in modules should never produce shared elements.
    SkASSERT(ir.fSharedElements.empty());
    LoadedModule module = { kind, std::move(ir.fSymbolTable), std::move(ir.fElements) };
    fIRGenerator->fCanInline = true;
    if (this->fErrorCount) {
        // A built-in module failing to compile is a build-time bug, not a user error.
        printf("Unexpected errors: %s\n", this->fErrorText.c_str());
        SkDEBUGFAILF("%s %s\n", data.fPath, this->fErrorText.c_str());
    }
    fModifiers.push_back(std::move(ir.fModifiers));
#else
    SkASSERT(data.fData && (data.fSize != 0));
    // Rehydrate the precompiled module from its serialized representation.
    Rehydrator rehydrator(fContext.get(), fIRGenerator->fModifiers.get(), base, this,
                          data.fData, data.fSize);
    LoadedModule module = { kind, rehydrator.symbolTable(), rehydrator.elements() };
    fModifiers.push_back(fIRGenerator->releaseModifiers());
#endif

    return module;
}
|
|
|
|
|
|
|
|
// Loads and optimizes a built-in module, then indexes its program elements (function
// definitions, enums, global variables, interface blocks) by name into an intrinsic map.
// Programs compiled against this module later pull in only the intrinsics they reference.
ParsedModule Compiler::parseModule(Program::Kind kind, ModuleData data, const ParsedModule& base) {
    LoadedModule module = this->loadModule(kind, data, base.fSymbols);
    this->optimize(module);

    // For modules that just declare (but don't define) intrinsic functions, there will be no new
    // program elements. In that case, we can share our parent's intrinsic map:
    if (module.fElements.empty()) {
        return {module.fSymbols, base.fIntrinsics};
    }

    // Chain onto the parent's intrinsics so lookups fall through to the base module.
    auto intrinsics = std::make_shared<IRIntrinsicMap>(base.fIntrinsics.get());

    // Now, transfer all of the program elements to an intrinsic map. This maps certain types of
    // global objects to the declaring ProgramElement. Note that each element is moved out of
    // `module.fElements`, so the vector is consumed by this loop.
    for (std::unique_ptr<ProgramElement>& element : module.fElements) {
        switch (element->kind()) {
            case ProgramElement::Kind::kFunction: {
                // Keyed by the full description (signature), not just the name, so
                // overloads get distinct entries.
                const FunctionDefinition& f = element->as<FunctionDefinition>();
                SkASSERT(f.declaration().isBuiltin());
                intrinsics->insertOrDie(f.declaration().description(), std::move(element));
                break;
            }
            case ProgramElement::Kind::kFunctionPrototype: {
                // These are already in the symbol table.
                break;
            }
            case ProgramElement::Kind::kEnum: {
                const Enum& e = element->as<Enum>();
                SkASSERT(e.isBuiltin());
                intrinsics->insertOrDie(e.typeName(), std::move(element));
                break;
            }
            case ProgramElement::Kind::kGlobalVar: {
                const GlobalVarDeclaration& global = element->as<GlobalVarDeclaration>();
                const Variable& var = global.declaration()->as<VarDeclaration>().var();
                SkASSERT(var.isBuiltin());
                intrinsics->insertOrDie(var.name(), std::move(element));
                break;
            }
            case ProgramElement::Kind::kInterfaceBlock: {
                const Variable& var = element->as<InterfaceBlock>().variable();
                SkASSERT(var.isBuiltin());
                intrinsics->insertOrDie(var.name(), std::move(element));
                break;
            }
            default:
                // Any other element kind in a built-in module is unexpected.
                printf("Unsupported element: %s\n", element->description().c_str());
                SkASSERT(false);
                break;
        }
    }

    return {module.fSymbols, std::move(intrinsics)};
}
|
|
|
|
|
2016-10-13 20:25:34 +00:00
|
|
|
// add the definition created by assigning to the lvalue to the definition set
|
2017-01-19 18:32:00 +00:00
|
|
|
// add the definition created by assigning to the lvalue to the definition set
//
// Records in `definitions` that `lvalue` now holds `expr`. For compound lvalues
// (swizzles, index/field accesses, ternaries) the write only covers part of the
// underlying variable, so the base is conservatively marked with the sentinel
// fDefined_Expression ("written, but exact value unknown") rather than `expr`.
void Compiler::addDefinition(const Expression* lvalue, std::unique_ptr<Expression>* expr,
                             DefinitionMap* definitions) {
    switch (lvalue->kind()) {
        case Expression::Kind::kVariableReference: {
            const Variable& var = *lvalue->as<VariableReference>().variable();
            // Only local variables participate in this analysis.
            if (var.storage() == Variable::Storage::kLocal) {
                definitions->set(&var, expr);
            }
            break;
        }
        case Expression::Kind::kSwizzle:
            // We consider the variable written to as long as at least some of its components have
            // been written to. This will lead to some false negatives (we won't catch it if you
            // write to foo.x and then read foo.y), but being stricter could lead to false positives
            // (we write to foo.x, and then pass foo to a function which happens to only read foo.x,
            // but since we pass foo as a whole it is flagged as an error) unless we perform a much
            // more complicated whole-program analysis. This is probably good enough.
            // NOTE(review): the C-style cast below appears to strip constness from the
            // sentinel member — confirm before replacing with a named cast.
            this->addDefinition(lvalue->as<Swizzle>().base().get(),
                                (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
                                definitions);
            break;
        case Expression::Kind::kIndex:
            // see comments in Swizzle
            this->addDefinition(lvalue->as<IndexExpression>().base().get(),
                                (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
                                definitions);
            break;
        case Expression::Kind::kFieldAccess:
            // see comments in Swizzle
            this->addDefinition(lvalue->as<FieldAccess>().base().get(),
                                (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
                                definitions);
            break;
        case Expression::Kind::kTernary:
            // To simplify analysis, we just pretend that we write to both sides of the ternary.
            // This allows for false positives (meaning we fail to detect that a variable might not
            // have been assigned), but is preferable to false negatives.
            this->addDefinition(lvalue->as<TernaryExpression>().ifTrue().get(),
                                (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
                                definitions);
            this->addDefinition(lvalue->as<TernaryExpression>().ifFalse().get(),
                                (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
                                definitions);
            break;
        case Expression::Kind::kExternalValue:
            // External values live outside the program; nothing to track.
            break;
        default:
            // not an lvalue, can't happen
            SkASSERT(false);
    }
}
|
|
|
|
|
|
|
|
// add local variables defined by this node to the set
|
2020-10-08 16:06:53 +00:00
|
|
|
// add local variables defined by this node to the set
//
// Inspects a single CFG node and records any definitions it produces: assignments
// (simple and compound), writes through 'out' function parameters, pre/post
// increment/decrement, non-read variable references, and variable declarations
// with initializers.
void Compiler::addDefinitions(const BasicBlock::Node& node, DefinitionMap* definitions) {
    if (node.isExpression()) {
        Expression* expr = node.expression()->get();
        switch (expr->kind()) {
            case Expression::Kind::kBinary: {
                BinaryExpression* b = &expr->as<BinaryExpression>();
                if (b->getOperator() == Token::Kind::TK_EQ) {
                    // Plain assignment: the LHS now holds exactly the RHS expression.
                    this->addDefinition(b->left().get(), &b->right(), definitions);
                } else if (Compiler::IsAssignment(b->getOperator())) {
                    // Compound assignment (+=, etc.): resulting value isn't a plain
                    // expression, so mark the LHS as "defined, value unknown".
                    this->addDefinition(
                            b->left().get(),
                            (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
                            definitions);

                }
                break;
            }
            case Expression::Kind::kFunctionCall: {
                const FunctionCall& c = expr->as<FunctionCall>();
                const std::vector<const Variable*>& parameters = c.function().parameters();
                for (size_t i = 0; i < parameters.size(); ++i) {
                    // An 'out' parameter writes back to its argument with an unknown value.
                    if (parameters[i]->modifiers().fFlags & Modifiers::kOut_Flag) {
                        this->addDefinition(
                                c.arguments()[i].get(),
                                (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
                                definitions);
                    }
                }
                break;
            }
            case Expression::Kind::kPrefix: {
                const PrefixExpression* p = &expr->as<PrefixExpression>();
                // ++x / --x writes its operand.
                if (p->getOperator() == Token::Kind::TK_MINUSMINUS ||
                    p->getOperator() == Token::Kind::TK_PLUSPLUS) {
                    this->addDefinition(
                            p->operand().get(),
                            (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
                            definitions);
                }
                break;
            }
            case Expression::Kind::kPostfix: {
                const PostfixExpression* p = &expr->as<PostfixExpression>();
                // x++ / x-- writes its operand.
                if (p->getOperator() == Token::Kind::TK_MINUSMINUS ||
                    p->getOperator() == Token::Kind::TK_PLUSPLUS) {
                    this->addDefinition(
                            p->operand().get(),
                            (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
                            definitions);
                }
                break;
            }
            case Expression::Kind::kVariableReference: {
                const VariableReference* v = &expr->as<VariableReference>();
                // Any non-read reference (write or read-write) counts as a definition.
                if (v->refKind() != VariableReference::RefKind::kRead) {
                    this->addDefinition(
                            v,
                            (std::unique_ptr<Expression>*) &fContext->fDefined_Expression,
                            definitions);
                }
                break;
            }
            default:
                break;
        }
    } else if (node.isStatement()) {
        Statement* stmt = node.statement()->get();
        if (stmt->is<VarDeclaration>()) {
            VarDeclaration& vd = stmt->as<VarDeclaration>();
            // A declaration with an initializer defines the variable to that value;
            // without one, the variable stays in its "unknown" start state.
            if (vd.value()) {
                definitions->set(&vd.var(), &vd.value());
            }
        }
    }
}
|
|
|
|
|
2020-10-05 16:03:53 +00:00
|
|
|
// Processes one basic block of the dataflow analysis: applies each node's definitions
// to the block's entry state, then merges the resulting exit state into every successor.
// Any successor whose entry state changes has its bit cleared in `processedSet`, which
// schedules it for re-scanning by computeDataFlow().
void Compiler::scanCFG(CFG* cfg, BlockId blockId, SkBitSet* processedSet) {
    BasicBlock& block = cfg->fBlocks[blockId];

    // compute definitions after this block
    DefinitionMap after = block.fBefore;
    for (const BasicBlock::Node& n : block.fNodes) {
        this->addDefinitions(n, &after);
    }

    // propagate definitions to exits
    for (BlockId exitId : block.fExits) {
        if (exitId == blockId) {
            // Skip self-loops; merging a block's exit state into its own entry is handled
            // by reprocessing via other predecessors.
            continue;
        }
        BasicBlock& exit = cfg->fBlocks[exitId];
        after.foreach([&](const Variable* var, std::unique_ptr<Expression>** e1Ptr) {
            std::unique_ptr<Expression>* e1 = *e1Ptr;
            std::unique_ptr<Expression>** exitDef = exit.fBefore.find(var);
            if (!exitDef) {
                // exit has no definition for it, just copy it and reprocess exit block
                processedSet->reset(exitId);
                exit.fBefore[var] = e1;
            } else {
                // exit has a (possibly different) value already defined
                std::unique_ptr<Expression>* e2 = *exitDef;
                if (e1 != e2) {
                    // definition has changed, merge and reprocess the exit block
                    processedSet->reset(exitId);
                    if (e1 && e2) {
                        // Both paths define the variable but disagree on the value:
                        // record the sentinel meaning "defined, value unknown".
                        *exitDef = (std::unique_ptr<Expression>*)&fContext->fDefined_Expression;
                    } else {
                        // At least one path leaves it undefined; treat it as undefined.
                        *exitDef = nullptr;
                    }
                }
            }
        });
    }
}
|
|
|
|
|
|
|
|
// returns a map which maps all local variables in the function to null, indicating that their value
|
|
|
|
// is initially unknown
|
2017-01-19 18:32:00 +00:00
|
|
|
// returns a map which maps all local variables in the function to null, indicating that their
// value is initially unknown
static DefinitionMap compute_start_state(const CFG& cfg) {
    DefinitionMap startState;
    for (const BasicBlock& block : cfg.fBlocks) {
        for (const BasicBlock::Node& node : block.fNodes) {
            if (!node.isStatement()) {
                continue;
            }
            const Statement* stmt = node.statement()->get();
            if (stmt->is<VarDeclaration>()) {
                // Declared but not yet assigned: value unknown.
                startState[&stmt->as<VarDeclaration>().var()] = nullptr;
            }
        }
    }
    return startState;
}
|
|
|
|
|
2017-04-20 23:31:52 +00:00
|
|
|
/**
|
|
|
|
* Returns true if assigning to this lvalue has no effect.
|
|
|
|
*/
|
2020-10-19 20:34:10 +00:00
|
|
|
static bool is_dead(const Expression& lvalue, ProgramUsage* usage) {
    switch (lvalue.kind()) {
        case Expression::Kind::kVariableReference:
            // A write to a plain variable is dead iff the variable is never read.
            return usage->isDead(*lvalue.as<VariableReference>().variable());
        case Expression::Kind::kSwizzle:
            return is_dead(*lvalue.as<Swizzle>().base(), usage);
        case Expression::Kind::kFieldAccess:
            return is_dead(*lvalue.as<FieldAccess>().base(), usage);
        case Expression::Kind::kIndex: {
            // The index subexpression still executes, so it must be free of side effects
            // for the whole assignment to be removable.
            const IndexExpression& idx = lvalue.as<IndexExpression>();
            return is_dead(*idx.base(), usage) &&
                   !idx.index()->hasProperty(Expression::Property::kSideEffects);
        }
        case Expression::Kind::kTernary: {
            // Both potential targets must be dead, and the condition must be effect-free.
            const TernaryExpression& t = lvalue.as<TernaryExpression>();
            return !t.test()->hasSideEffects() &&
                   is_dead(*t.ifTrue(), usage) &&
                   is_dead(*t.ifFalse(), usage);
        }
        case Expression::Kind::kExternalValue:
            // Writes to external values are observable outside the program; never dead.
            return false;
        default:
#ifdef SK_DEBUG
            ABORT("invalid lvalue: %s\n", lvalue.description().c_str());
#endif
            return false;
    }
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Returns true if this is an assignment which can be collapsed down to just the right hand side due
|
|
|
|
* to a dead target and lack of side effects on the left hand side.
|
|
|
|
*/
|
2020-10-19 20:34:10 +00:00
|
|
|
static bool dead_assignment(const BinaryExpression& b, ProgramUsage* usage) {
|
2020-09-22 19:05:37 +00:00
|
|
|
if (!Compiler::IsAssignment(b.getOperator())) {
|
2017-04-20 23:31:52 +00:00
|
|
|
return false;
|
|
|
|
}
|
2020-10-30 14:29:12 +00:00
|
|
|
return is_dead(*b.left(), usage);
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
2016-10-13 20:25:34 +00:00
|
|
|
|
2017-04-20 23:31:52 +00:00
|
|
|
// Runs the definition-propagation analysis over the whole CFG to a fixed point.
void Compiler::computeDataFlow(CFG* cfg) {
    // Every local variable starts out with an unknown value in the entry block.
    cfg->fBlocks[cfg->fStart].fBefore = compute_start_state(*cfg);

    // A block's bit is set once it has been scanned; scanCFG clears the bit of any
    // successor whose entry state changed, so we loop until every bit stays set.
    SkBitSet processedSet(cfg->fBlocks.size());
    for (;;) {
        SkBitSet::OptionalIndex blockId = processedSet.findFirstUnset();
        if (!blockId) {
            break;
        }
        processedSet.set(*blockId);
        this->scanCFG(cfg, *blockId, &processedSet);
    }
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Attempts to replace the expression pointed to by iter with a new one (in both the CFG and the
|
|
|
|
* IR). If the expression can be cleanly removed, returns true and updates the iterator to point to
|
|
|
|
* the newly-inserted element. Otherwise updates only the IR and returns false (and the CFG will
|
|
|
|
* need to be regenerated).
|
|
|
|
*/
|
2020-08-18 14:08:21 +00:00
|
|
|
static bool try_replace_expression(BasicBlock* b,
                                   std::vector<BasicBlock::Node>::iterator* iter,
                                   std::unique_ptr<Expression>* newExpression) {
    // Capture the IR slot before removal; tryRemoveExpression may invalidate *iter
    // on failure, but `target` stays valid.
    std::unique_ptr<Expression>* target = (*iter)->expression();
    if (!b->tryRemoveExpression(iter)) {
        // CFG removal failed: still perform the IR replacement, but report that the
        // CFG is now stale and must be regenerated by the caller.
        *target = std::move(*newExpression);
        return false;
    }
    // CFG node removed: install the replacement in the IR, then attempt to re-insert
    // it into the CFG at the current position.
    *target = std::move(*newExpression);
    return b->tryInsertExpression(iter, target);
}
|
|
|
|
|
|
|
|
/**
|
2017-04-27 20:24:51 +00:00
|
|
|
* Returns true if the expression is a constant numeric literal with the specified value, or a
|
|
|
|
* constant vector with all elements equal to the specified value.
|
2017-04-20 23:31:52 +00:00
|
|
|
*/
|
2020-10-01 16:13:17 +00:00
|
|
|
template <typename T = SKSL_FLOAT>
static bool is_constant(const Expression& expr, T value) {
    switch (expr.kind()) {
        case Expression::Kind::kIntLiteral:
            return expr.as<IntLiteral>().value() == value;

        case Expression::Kind::kFloatLiteral:
            return expr.as<FloatLiteral>().value() == value;

        case Expression::Kind::kConstructor: {
            const Constructor& constructor = expr.as<Constructor>();
            // Only fully compile-time-constant constructors can be compared.
            if (constructor.isCompileTimeConstant()) {
                const Type& constructorType = constructor.type();
                switch (constructorType.typeKind()) {
                    case Type::TypeKind::kVector:
                        // Every component must equal `value` for the vector to match.
                        if (constructor.componentType().isFloat()) {
                            for (int i = 0; i < constructorType.columns(); ++i) {
                                if (constructor.getFVecComponent(i) != value) {
                                    return false;
                                }
                            }
                            return true;
                        } else if (constructor.componentType().isInteger()) {
                            for (int i = 0; i < constructorType.columns(); ++i) {
                                if (constructor.getIVecComponent(i) != value) {
                                    return false;
                                }
                            }
                            return true;
                        }
                        // Other types (e.g. boolean) might occur, but aren't supported here.
                        return false;

                    case Type::TypeKind::kScalar:
                        // A scalar constructor wraps exactly one argument; compare it directly.
                        SkASSERT(constructor.arguments().size() == 1);
                        return is_constant<T>(*constructor.arguments()[0], value);

                    default:
                        return false;
                }
            }
            return false;
        }
        default:
            return false;
    }
}
|
|
|
|
|
|
|
|
/**
 * Collapses the binary expression pointed to by iter down to just the right side (in both the IR
 * and CFG structures).
 */
static void delete_left(BasicBlock* b,
                        std::vector<BasicBlock::Node>::iterator* iter,
                        Compiler::OptimizationContext* optimizationContext) {
    // Something is changing; make sure the optimizer loop knows to run again.
    optimizationContext->fUpdated = true;
    std::unique_ptr<Expression>* target = (*iter)->expression();
    BinaryExpression& bin = (*target)->as<BinaryExpression>();
    Expression& left = *bin.left();
    std::unique_ptr<Expression>& rightPointer = bin.right();
    // Deleting a side that has side effects would change program behavior.
    SkASSERT(!left.hasSideEffects());
    bool result;
    if (bin.getOperator() == Token::Kind::TK_EQ) {
        // For plain assignment (`lhs = rhs`), the LHS is an lvalue; remove it as such.
        result = b->tryRemoveLValueBefore(iter, &left);
    } else {
        result = b->tryRemoveExpressionBefore(iter, &left);
    }
    // Remove references within LHS.
    optimizationContext->fUsage->remove(&left);
    // Replace the whole binary expression with its RHS in the IR.
    *target = std::move(rightPointer);
    if (!result) {
        // CFG removal failed; the CFG no longer matches the IR, so a rescan is needed.
        optimizationContext->fNeedsRescan = true;
        return;
    }
    if (*iter == b->fNodes.begin()) {
        // Can't step back to locate the RHS node; fall back to a rescan.
        optimizationContext->fNeedsRescan = true;
        return;
    }
    --(*iter);
    // The previous node should be the RHS expression we just promoted; if not, rescan.
    if (!(*iter)->isExpression() || (*iter)->expression() != &rightPointer) {
        optimizationContext->fNeedsRescan = true;
        return;
    }
    // Erase the now-stale RHS node; iter ends up on the node holding the collapsed expression.
    *iter = b->fNodes.erase(*iter);
    SkASSERT((*iter)->expression() == target);
}
|
|
|
|
|
|
|
|
/**
 * Collapses the binary expression pointed to by iter down to just the left side (in both the IR and
 * CFG structures).
 */
static void delete_right(BasicBlock* b,
                         std::vector<BasicBlock::Node>::iterator* iter,
                         Compiler::OptimizationContext* optimizationContext) {
    // Something is changing; make sure the optimizer loop knows to run again.
    optimizationContext->fUpdated = true;
    std::unique_ptr<Expression>* target = (*iter)->expression();
    BinaryExpression& bin = (*target)->as<BinaryExpression>();
    std::unique_ptr<Expression>& leftPointer = bin.left();
    Expression& right = *bin.right();
    // Deleting a side that has side effects would change program behavior.
    SkASSERT(!right.hasSideEffects());
    // Remove references within RHS.
    optimizationContext->fUsage->remove(&right);
    if (!b->tryRemoveExpressionBefore(iter, &right)) {
        // CFG removal failed; still collapse the IR, but request a rescan.
        *target = std::move(leftPointer);
        optimizationContext->fNeedsRescan = true;
        return;
    }
    // Replace the whole binary expression with its LHS in the IR.
    *target = std::move(leftPointer);
    if (*iter == b->fNodes.begin()) {
        // Can't step back to locate the LHS node; fall back to a rescan.
        optimizationContext->fNeedsRescan = true;
        return;
    }
    --(*iter);
    // The previous node should be the LHS expression we just promoted; if not, rescan.
    if ((!(*iter)->isExpression() || (*iter)->expression() != &leftPointer)) {
        optimizationContext->fNeedsRescan = true;
        return;
    }
    // Erase the now-stale LHS node; iter ends up on the node holding the collapsed expression.
    *iter = b->fNodes.erase(*iter);
    SkASSERT((*iter)->expression() == target);
}
|
|
|
|
|
2017-04-27 20:24:51 +00:00
|
|
|
/**
|
|
|
|
* Constructs the specified type using a single argument.
|
|
|
|
*/
|
2020-09-11 16:27:26 +00:00
|
|
|
static std::unique_ptr<Expression> construct(const Type* type, std::unique_ptr<Expression> v) {
|
2020-10-13 15:14:08 +00:00
|
|
|
ExpressionArray args;
|
2017-04-27 20:24:51 +00:00
|
|
|
args.push_back(std::move(v));
|
2020-09-08 14:22:09 +00:00
|
|
|
std::unique_ptr<Expression> result = std::make_unique<Constructor>(-1, type, std::move(args));
|
2017-04-27 20:24:51 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Used in the implementations of vectorize_left and vectorize_right. Given a vector type and an
|
|
|
|
* expression x, deletes the expression pointed to by iter and replaces it with <type>(x).
|
|
|
|
*/
|
|
|
|
static void vectorize(BasicBlock* b,
|
|
|
|
std::vector<BasicBlock::Node>::iterator* iter,
|
|
|
|
const Type& type,
|
|
|
|
std::unique_ptr<Expression>* otherExpression,
|
2020-10-22 18:39:46 +00:00
|
|
|
Compiler::OptimizationContext* optimizationContext) {
|
2020-09-08 14:22:09 +00:00
|
|
|
SkASSERT((*(*iter)->expression())->kind() == Expression::Kind::kBinary);
|
2020-11-24 22:36:06 +00:00
|
|
|
SkASSERT(type.isVector());
|
|
|
|
SkASSERT((*otherExpression)->type().isScalar());
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext->fUpdated = true;
|
2017-04-27 20:24:51 +00:00
|
|
|
std::unique_ptr<Expression>* target = (*iter)->expression();
|
|
|
|
if (!b->tryRemoveExpression(iter)) {
|
2020-09-11 16:27:26 +00:00
|
|
|
*target = construct(&type, std::move(*otherExpression));
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext->fNeedsRescan = true;
|
2017-04-27 20:24:51 +00:00
|
|
|
} else {
|
2020-09-11 16:27:26 +00:00
|
|
|
*target = construct(&type, std::move(*otherExpression));
|
2017-04-27 20:24:51 +00:00
|
|
|
if (!b->tryInsertExpression(iter, target)) {
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext->fNeedsRescan = true;
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Given a binary expression of the form x <op> vec<n>(y), deletes the right side and vectorizes the
|
|
|
|
* left to yield vec<n>(x).
|
|
|
|
*/
|
|
|
|
static void vectorize_left(BasicBlock* b,
|
|
|
|
std::vector<BasicBlock::Node>::iterator* iter,
|
2020-10-22 18:39:46 +00:00
|
|
|
Compiler::OptimizationContext* optimizationContext) {
|
2020-08-18 15:19:07 +00:00
|
|
|
BinaryExpression& bin = (*(*iter)->expression())->as<BinaryExpression>();
|
2020-10-19 20:34:10 +00:00
|
|
|
// Remove references within RHS. Vectorization of LHS doesn't change reference counts.
|
2020-10-30 14:29:12 +00:00
|
|
|
optimizationContext->fUsage->remove(bin.right().get());
|
|
|
|
vectorize(b, iter, bin.right()->type(), &bin.left(), optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Given a binary expression of the form vec<n>(x) <op> y, deletes the left side and vectorizes the
|
|
|
|
* right to yield vec<n>(y).
|
|
|
|
*/
|
|
|
|
static void vectorize_right(BasicBlock* b,
|
|
|
|
std::vector<BasicBlock::Node>::iterator* iter,
|
2020-10-22 18:39:46 +00:00
|
|
|
Compiler::OptimizationContext* optimizationContext) {
|
2020-08-18 15:19:07 +00:00
|
|
|
BinaryExpression& bin = (*(*iter)->expression())->as<BinaryExpression>();
|
2020-10-19 20:34:10 +00:00
|
|
|
// Remove references within LHS. Vectorization of RHS doesn't change reference counts.
|
2020-10-30 14:29:12 +00:00
|
|
|
optimizationContext->fUsage->remove(bin.left().get());
|
|
|
|
vectorize(b, iter, bin.left()->type(), &bin.right(), optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Mark that an expression which we were writing to is no longer being written to
|
2020-08-18 15:19:07 +00:00
|
|
|
static void clear_write(Expression& expr) {
|
2020-09-08 14:22:09 +00:00
|
|
|
switch (expr.kind()) {
|
|
|
|
case Expression::Kind::kVariableReference: {
|
2020-10-09 14:43:45 +00:00
|
|
|
expr.as<VariableReference>().setRefKind(VariableReference::RefKind::kRead);
|
2017-04-27 20:24:51 +00:00
|
|
|
break;
|
|
|
|
}
|
2020-09-08 14:22:09 +00:00
|
|
|
case Expression::Kind::kFieldAccess:
|
2020-10-09 15:55:40 +00:00
|
|
|
clear_write(*expr.as<FieldAccess>().base());
|
2017-04-27 20:24:51 +00:00
|
|
|
break;
|
2020-09-08 14:22:09 +00:00
|
|
|
case Expression::Kind::kSwizzle:
|
2020-10-12 20:11:51 +00:00
|
|
|
clear_write(*expr.as<Swizzle>().base());
|
2017-04-27 20:24:51 +00:00
|
|
|
break;
|
2020-09-08 14:22:09 +00:00
|
|
|
case Expression::Kind::kIndex:
|
2020-10-08 19:35:56 +00:00
|
|
|
clear_write(*expr.as<IndexExpression>().base());
|
2017-04-27 20:24:51 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
ABORT("shouldn't be writing to this kind of expression\n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-20 23:31:52 +00:00
|
|
|
void Compiler::simplifyExpression(DefinitionMap& definitions,
|
|
|
|
BasicBlock& b,
|
|
|
|
std::vector<BasicBlock::Node>::iterator* iter,
|
2020-10-22 18:39:46 +00:00
|
|
|
OptimizationContext* optimizationContext) {
|
2017-04-20 23:31:52 +00:00
|
|
|
Expression* expr = (*iter)->expression()->get();
|
2018-06-12 15:05:59 +00:00
|
|
|
SkASSERT(expr);
|
2020-11-18 16:10:38 +00:00
|
|
|
|
2017-04-20 23:31:52 +00:00
|
|
|
if ((*iter)->fConstantPropagation) {
|
2020-10-22 18:39:46 +00:00
|
|
|
std::unique_ptr<Expression> optimized = expr->constantPropagate(*fIRGenerator,
|
|
|
|
definitions);
|
2017-04-20 23:31:52 +00:00
|
|
|
if (optimized) {
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext->fUpdated = true;
|
2020-09-11 16:27:26 +00:00
|
|
|
optimized = fIRGenerator->coerce(std::move(optimized), expr->type());
|
2020-09-11 13:32:54 +00:00
|
|
|
SkASSERT(optimized);
|
2020-10-19 20:34:10 +00:00
|
|
|
// Remove references within 'expr', add references within 'optimized'
|
|
|
|
optimizationContext->fUsage->replace(expr, optimized.get());
|
2017-04-20 23:31:52 +00:00
|
|
|
if (!try_replace_expression(&b, iter, &optimized)) {
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext->fNeedsRescan = true;
|
2017-05-17 14:52:55 +00:00
|
|
|
return;
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
2020-09-28 20:08:58 +00:00
|
|
|
SkASSERT((*iter)->isExpression());
|
2017-04-20 23:31:52 +00:00
|
|
|
expr = (*iter)->expression()->get();
|
|
|
|
}
|
|
|
|
}
|
2020-09-08 14:22:09 +00:00
|
|
|
switch (expr->kind()) {
|
|
|
|
case Expression::Kind::kVariableReference: {
|
2020-08-18 15:19:07 +00:00
|
|
|
const VariableReference& ref = expr->as<VariableReference>();
|
2020-10-08 10:46:27 +00:00
|
|
|
const Variable* var = ref.variable();
|
2020-10-09 14:43:45 +00:00
|
|
|
if (ref.refKind() != VariableReference::RefKind::kWrite &&
|
|
|
|
ref.refKind() != VariableReference::RefKind::kPointer &&
|
|
|
|
var->storage() == Variable::Storage::kLocal && !definitions[var] &&
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext->fSilences.find(var) == optimizationContext->fSilences.end()) {
|
|
|
|
optimizationContext->fSilences.insert(var);
|
2017-11-07 14:42:10 +00:00
|
|
|
this->error(expr->fOffset,
|
2020-10-05 15:49:11 +00:00
|
|
|
"'" + var->name() + "' has not been assigned");
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2020-09-08 14:22:09 +00:00
|
|
|
case Expression::Kind::kTernary: {
|
2020-08-20 16:11:48 +00:00
|
|
|
TernaryExpression* t = &expr->as<TernaryExpression>();
|
2020-10-08 09:48:01 +00:00
|
|
|
if (t->test()->is<BoolLiteral>()) {
|
2017-04-20 23:31:52 +00:00
|
|
|
// ternary has a constant test, replace it with either the true or
|
|
|
|
// false branch
|
2020-10-08 09:48:01 +00:00
|
|
|
if (t->test()->as<BoolLiteral>().value()) {
|
2020-10-19 20:34:10 +00:00
|
|
|
(*iter)->setExpression(std::move(t->ifTrue()), optimizationContext->fUsage);
|
2017-04-20 23:31:52 +00:00
|
|
|
} else {
|
2020-10-19 20:34:10 +00:00
|
|
|
(*iter)->setExpression(std::move(t->ifFalse()), optimizationContext->fUsage);
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext->fUpdated = true;
|
|
|
|
optimizationContext->fNeedsRescan = true;
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2020-09-08 14:22:09 +00:00
|
|
|
case Expression::Kind::kBinary: {
|
2020-08-20 16:11:48 +00:00
|
|
|
BinaryExpression* bin = &expr->as<BinaryExpression>();
|
2020-10-19 20:34:10 +00:00
|
|
|
if (dead_assignment(*bin, optimizationContext->fUsage)) {
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_left(&b, iter, optimizationContext);
|
2017-05-05 14:04:06 +00:00
|
|
|
break;
|
|
|
|
}
|
2020-10-30 14:29:12 +00:00
|
|
|
Expression& left = *bin->left();
|
|
|
|
Expression& right = *bin->right();
|
2020-09-22 19:05:37 +00:00
|
|
|
const Type& leftType = left.type();
|
|
|
|
const Type& rightType = right.type();
|
2017-05-05 14:04:06 +00:00
|
|
|
// collapse useless expressions like x * 1 or x + 0
|
2020-11-24 22:36:06 +00:00
|
|
|
if ((!leftType.isScalar() && !leftType.isVector()) ||
|
|
|
|
(!rightType.isScalar() && !rightType.isVector())) {
|
2017-04-27 20:24:51 +00:00
|
|
|
break;
|
|
|
|
}
|
2020-09-22 19:05:37 +00:00
|
|
|
switch (bin->getOperator()) {
|
2020-04-17 16:45:51 +00:00
|
|
|
case Token::Kind::TK_STAR:
|
2020-09-22 19:05:37 +00:00
|
|
|
if (is_constant(left, 1)) {
|
2020-11-24 22:36:06 +00:00
|
|
|
if (leftType.isVector() && rightType.isScalar()) {
|
2017-07-28 19:19:46 +00:00
|
|
|
// float4(1) * x -> float4(x)
|
2020-10-22 18:39:46 +00:00
|
|
|
vectorize_right(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
} else {
|
|
|
|
// 1 * x -> x
|
2017-07-28 19:19:46 +00:00
|
|
|
// 1 * float4(x) -> float4(x)
|
|
|
|
// float4(1) * float4(x) -> float4(x)
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_left(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
|
|
|
}
|
2020-09-22 19:05:37 +00:00
|
|
|
else if (is_constant(left, 0)) {
|
2020-11-24 22:36:06 +00:00
|
|
|
if (leftType.isScalar() && rightType.isVector() &&
|
2020-09-22 19:05:37 +00:00
|
|
|
!right.hasSideEffects()) {
|
2017-07-28 19:19:46 +00:00
|
|
|
// 0 * float4(x) -> float4(0)
|
2020-10-22 18:39:46 +00:00
|
|
|
vectorize_left(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
} else {
|
|
|
|
// 0 * x -> 0
|
2017-07-28 19:19:46 +00:00
|
|
|
// float4(0) * x -> float4(0)
|
|
|
|
// float4(0) * float4(x) -> float4(0)
|
2020-09-22 19:05:37 +00:00
|
|
|
if (!right.hasSideEffects()) {
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_right(&b, iter, optimizationContext);
|
2017-12-11 17:34:33 +00:00
|
|
|
}
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
2020-09-22 19:05:37 +00:00
|
|
|
else if (is_constant(right, 1)) {
|
2020-11-24 22:36:06 +00:00
|
|
|
if (leftType.isScalar() && rightType.isVector()) {
|
2017-07-28 19:19:46 +00:00
|
|
|
// x * float4(1) -> float4(x)
|
2020-10-22 18:39:46 +00:00
|
|
|
vectorize_left(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
} else {
|
|
|
|
// x * 1 -> x
|
2017-07-28 19:19:46 +00:00
|
|
|
// float4(x) * 1 -> float4(x)
|
|
|
|
// float4(x) * float4(1) -> float4(x)
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_right(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
|
|
|
}
|
2020-09-22 19:05:37 +00:00
|
|
|
else if (is_constant(right, 0)) {
|
2020-11-24 22:36:06 +00:00
|
|
|
if (leftType.isVector() && rightType.isScalar() && !left.hasSideEffects()) {
|
2017-07-28 19:19:46 +00:00
|
|
|
// float4(x) * 0 -> float4(0)
|
2020-10-22 18:39:46 +00:00
|
|
|
vectorize_right(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
} else {
|
|
|
|
// x * 0 -> 0
|
2017-07-28 19:19:46 +00:00
|
|
|
// x * float4(0) -> float4(0)
|
|
|
|
// float4(x) * float4(0) -> float4(0)
|
2020-09-22 19:05:37 +00:00
|
|
|
if (!left.hasSideEffects()) {
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_left(&b, iter, optimizationContext);
|
2017-12-11 17:34:33 +00:00
|
|
|
}
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
|
|
|
break;
|
2020-04-17 16:45:51 +00:00
|
|
|
case Token::Kind::TK_PLUS:
|
2020-09-22 19:05:37 +00:00
|
|
|
if (is_constant(left, 0)) {
|
2020-11-24 22:36:06 +00:00
|
|
|
if (leftType.isVector() && rightType.isScalar()) {
|
2017-07-28 19:19:46 +00:00
|
|
|
// float4(0) + x -> float4(x)
|
2020-10-22 18:39:46 +00:00
|
|
|
vectorize_right(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
} else {
|
|
|
|
// 0 + x -> x
|
2017-07-28 19:19:46 +00:00
|
|
|
// 0 + float4(x) -> float4(x)
|
|
|
|
// float4(0) + float4(x) -> float4(x)
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_left(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
2020-09-22 19:05:37 +00:00
|
|
|
} else if (is_constant(right, 0)) {
|
2020-11-24 22:36:06 +00:00
|
|
|
if (leftType.isScalar() && rightType.isVector()) {
|
2017-07-28 19:19:46 +00:00
|
|
|
// x + float4(0) -> float4(x)
|
2020-10-22 18:39:46 +00:00
|
|
|
vectorize_left(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
} else {
|
|
|
|
// x + 0 -> x
|
2017-07-28 19:19:46 +00:00
|
|
|
// float4(x) + 0 -> float4(x)
|
|
|
|
// float4(x) + float4(0) -> float4(x)
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_right(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
2017-04-27 20:24:51 +00:00
|
|
|
break;
|
2020-04-17 16:45:51 +00:00
|
|
|
case Token::Kind::TK_MINUS:
|
2020-09-22 19:05:37 +00:00
|
|
|
if (is_constant(right, 0)) {
|
2020-11-24 22:36:06 +00:00
|
|
|
if (leftType.isScalar() && rightType.isVector()) {
|
2017-07-28 19:19:46 +00:00
|
|
|
// x - float4(0) -> float4(x)
|
2020-10-22 18:39:46 +00:00
|
|
|
vectorize_left(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
} else {
|
|
|
|
// x - 0 -> x
|
2017-07-28 19:19:46 +00:00
|
|
|
// float4(x) - 0 -> float4(x)
|
|
|
|
// float4(x) - float4(0) -> float4(x)
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_right(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
2020-04-17 16:45:51 +00:00
|
|
|
case Token::Kind::TK_SLASH:
|
2020-09-22 19:05:37 +00:00
|
|
|
if (is_constant(right, 1)) {
|
2020-11-24 22:36:06 +00:00
|
|
|
if (leftType.isScalar() && rightType.isVector()) {
|
2017-07-28 19:19:46 +00:00
|
|
|
// x / float4(1) -> float4(x)
|
2020-10-22 18:39:46 +00:00
|
|
|
vectorize_left(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
} else {
|
|
|
|
// x / 1 -> x
|
2017-07-28 19:19:46 +00:00
|
|
|
// float4(x) / 1 -> float4(x)
|
|
|
|
// float4(x) / float4(1) -> float4(x)
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_right(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
2020-09-22 19:05:37 +00:00
|
|
|
} else if (is_constant(left, 0)) {
|
2020-11-24 22:36:06 +00:00
|
|
|
if (leftType.isScalar() && rightType.isVector() &&
|
2020-09-22 19:05:37 +00:00
|
|
|
!right.hasSideEffects()) {
|
2017-07-28 19:19:46 +00:00
|
|
|
// 0 / float4(x) -> float4(0)
|
2020-10-22 18:39:46 +00:00
|
|
|
vectorize_left(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
} else {
|
|
|
|
// 0 / x -> 0
|
2017-07-28 19:19:46 +00:00
|
|
|
// float4(0) / x -> float4(0)
|
|
|
|
// float4(0) / float4(x) -> float4(0)
|
2020-09-22 19:05:37 +00:00
|
|
|
if (!right.hasSideEffects()) {
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_right(&b, iter, optimizationContext);
|
2017-12-11 17:34:33 +00:00
|
|
|
}
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
2020-04-17 16:45:51 +00:00
|
|
|
case Token::Kind::TK_PLUSEQ:
|
2020-09-22 19:05:37 +00:00
|
|
|
if (is_constant(right, 0)) {
|
|
|
|
clear_write(left);
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_right(&b, iter, optimizationContext);
|
2017-04-21 14:23:37 +00:00
|
|
|
}
|
|
|
|
break;
|
2020-04-17 16:45:51 +00:00
|
|
|
case Token::Kind::TK_MINUSEQ:
|
2020-09-22 19:05:37 +00:00
|
|
|
if (is_constant(right, 0)) {
|
|
|
|
clear_write(left);
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_right(&b, iter, optimizationContext);
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
|
|
|
break;
|
2020-04-17 16:45:51 +00:00
|
|
|
case Token::Kind::TK_STAREQ:
|
2020-09-22 19:05:37 +00:00
|
|
|
if (is_constant(right, 1)) {
|
|
|
|
clear_write(left);
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_right(&b, iter, optimizationContext);
|
2017-04-27 20:24:51 +00:00
|
|
|
}
|
|
|
|
break;
|
2020-04-17 16:45:51 +00:00
|
|
|
case Token::Kind::TK_SLASHEQ:
|
2020-09-22 19:05:37 +00:00
|
|
|
if (is_constant(right, 1)) {
|
|
|
|
clear_write(left);
|
2020-10-22 18:39:46 +00:00
|
|
|
delete_right(&b, iter, optimizationContext);
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2019-09-17 16:34:39 +00:00
|
|
|
break;
|
|
|
|
}
|
Flatten out constructors nested inside constructors.
- float4(float2(1, 2), 3, 4) --> float4(1, 2, 3, 4)
- half3(z, half2(fn(x), y*2)) --> half3(z, fn(x), y*2)
Single-argument constructors will be ignored by this optimization; these
might be casts or splats.
This had an unexpected side benefit of simplifying some Metal output,
as we need to output fewer Metal matrix construction helper functions
when matrices use more simple scalars for construction.
Change-Id: I0a161db060c107e35247901619291bf83801cb11
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/337400
Auto-Submit: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
2020-11-22 04:26:07 +00:00
|
|
|
case Expression::Kind::kConstructor: {
|
|
|
|
// Find constructors embedded inside constructors and flatten them out where possible.
|
|
|
|
// - float4(float2(1, 2), 3, 4) --> float4(1, 2, 3, 4)
|
|
|
|
// - float4(w, float3(sin(x), cos(y), tan(z))) --> float4(w, sin(x), cos(y), tan(z))
|
|
|
|
// Leave single-argument constructors alone, though. These might be casts or splats.
|
|
|
|
Constructor& c = expr->as<Constructor>();
|
|
|
|
if (c.type().columns() > 1) {
|
|
|
|
// Inspect each constructor argument to see if it's a candidate for flattening.
|
|
|
|
// Remember matched arguments in a bitfield, "argsToOptimize".
|
|
|
|
int argsToOptimize = 0;
|
|
|
|
int currBit = 1;
|
|
|
|
for (const std::unique_ptr<Expression>& arg : c.arguments()) {
|
|
|
|
if (arg->is<Constructor>()) {
|
|
|
|
Constructor& inner = arg->as<Constructor>();
|
|
|
|
if (inner.arguments().size() > 1 &&
|
|
|
|
inner.type().componentType() == c.type().componentType()) {
|
|
|
|
argsToOptimize |= currBit;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
currBit <<= 1;
|
|
|
|
}
|
|
|
|
if (argsToOptimize) {
|
|
|
|
// We found at least one argument that could be flattened out. Re-walk the
|
|
|
|
// constructor args and flatten the candidates we found during our initial pass.
|
|
|
|
ExpressionArray flattened;
|
|
|
|
flattened.reserve_back(c.type().columns());
|
|
|
|
currBit = 1;
|
|
|
|
for (const std::unique_ptr<Expression>& arg : c.arguments()) {
|
|
|
|
if (argsToOptimize & currBit) {
|
|
|
|
Constructor& inner = arg->as<Constructor>();
|
|
|
|
for (const std::unique_ptr<Expression>& innerArg : inner.arguments()) {
|
|
|
|
flattened.push_back(innerArg->clone());
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
flattened.push_back(arg->clone());
|
|
|
|
}
|
|
|
|
currBit <<= 1;
|
|
|
|
}
|
|
|
|
auto optimized = std::unique_ptr<Expression>(
|
|
|
|
new Constructor(c.fOffset, &c.type(), std::move(flattened)));
|
|
|
|
// No fUsage change; no references have been added or removed anywhere.
|
|
|
|
optimizationContext->fUpdated = true;
|
|
|
|
if (!try_replace_expression(&b, iter, &optimized)) {
|
|
|
|
optimizationContext->fNeedsRescan = true;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
SkASSERT((*iter)->isExpression());
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2020-09-08 14:22:09 +00:00
|
|
|
case Expression::Kind::kSwizzle: {
|
2020-08-20 16:11:48 +00:00
|
|
|
Swizzle& s = expr->as<Swizzle>();
|
2020-11-18 16:10:38 +00:00
|
|
|
// Detect identity swizzles like `foo.rgba`.
|
2020-10-12 20:11:51 +00:00
|
|
|
if ((int) s.components().size() == s.base()->type().columns()) {
|
2019-09-17 16:34:39 +00:00
|
|
|
bool identity = true;
|
2020-10-12 20:11:51 +00:00
|
|
|
for (int i = 0; i < (int) s.components().size(); ++i) {
|
|
|
|
if (s.components()[i] != i) {
|
2019-09-17 16:34:39 +00:00
|
|
|
identity = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (identity) {
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext->fUpdated = true;
|
2020-10-19 20:34:10 +00:00
|
|
|
// No fUsage change: foo.rgba and foo have equivalent reference counts
|
2020-10-12 20:11:51 +00:00
|
|
|
if (!try_replace_expression(&b, iter, &s.base())) {
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext->fNeedsRescan = true;
|
2019-09-17 16:34:39 +00:00
|
|
|
return;
|
|
|
|
}
|
2020-09-28 20:08:58 +00:00
|
|
|
SkASSERT((*iter)->isExpression());
|
2019-09-17 16:34:39 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2020-11-18 16:10:38 +00:00
|
|
|
// Detect swizzles of swizzles, e.g. replace `foo.argb.r000` with `foo.a000`.
|
|
|
|
if (s.base()->is<Swizzle>()) {
|
2020-10-12 20:11:51 +00:00
|
|
|
Swizzle& base = s.base()->as<Swizzle>();
|
2020-10-30 17:45:46 +00:00
|
|
|
ComponentArray final;
|
2020-10-12 20:11:51 +00:00
|
|
|
for (int c : s.components()) {
|
|
|
|
final.push_back(base.components()[c]);
|
2019-09-17 16:34:39 +00:00
|
|
|
}
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext->fUpdated = true;
|
2020-10-12 20:11:51 +00:00
|
|
|
std::unique_ptr<Expression> replacement(new Swizzle(*fContext, base.base()->clone(),
|
2020-10-30 17:45:46 +00:00
|
|
|
final));
|
2020-11-18 16:10:38 +00:00
|
|
|
// No fUsage change: `foo.gbr.gbr` and `foo.brg` have equivalent reference counts
|
2019-09-17 16:34:39 +00:00
|
|
|
if (!try_replace_expression(&b, iter, &replacement)) {
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext->fNeedsRescan = true;
|
2019-09-17 16:34:39 +00:00
|
|
|
return;
|
|
|
|
}
|
2020-09-28 20:08:58 +00:00
|
|
|
SkASSERT((*iter)->isExpression());
|
2020-11-18 16:10:38 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
// Optimize swizzles of constructors.
|
|
|
|
if (s.base()->is<Constructor>()) {
|
|
|
|
Constructor& base = s.base()->as<Constructor>();
|
|
|
|
std::unique_ptr<Expression> replacement;
|
|
|
|
const Type& componentType = base.type().componentType();
|
|
|
|
int swizzleSize = s.components().size();
|
|
|
|
|
|
|
|
// The IR generator has already converted any zero/one swizzle components into
|
|
|
|
// constructors containing zero/one args. Confirm that this is true by checking that
|
|
|
|
// our swizzle components are all `xyzw` (values 0 through 3).
|
|
|
|
SkASSERT(std::all_of(s.components().begin(), s.components().end(),
|
|
|
|
[](int8_t c) { return c >= 0 && c <= 3; }));
|
|
|
|
|
2020-11-24 22:36:06 +00:00
|
|
|
if (base.arguments().size() == 1 && base.arguments().front()->type().isScalar()) {
|
2020-11-18 16:10:38 +00:00
|
|
|
// `half4(scalar).zyy` can be optimized to `half3(scalar)`. The swizzle
|
|
|
|
// components don't actually matter since all fields are the same.
|
|
|
|
ExpressionArray newArgs;
|
|
|
|
newArgs.push_back(base.arguments().front()->clone());
|
|
|
|
replacement = std::make_unique<Constructor>(
|
|
|
|
base.fOffset,
|
|
|
|
&componentType.toCompound(*fContext, swizzleSize, /*rows=*/1),
|
|
|
|
std::move(newArgs));
|
|
|
|
|
|
|
|
// No fUsage change: `half4(foo).xy` and `half2(foo)` have equivalent reference
|
|
|
|
// counts.
|
Optimize swizzled multiple-argument constructors.
This will reorder constructors with swizzles applied, such as
`half4(1, 2, 3, 4).xxyz` --> `half4(1, 1, 2, 3)`
`half4(1, colRGB).yzwx` --> `half4(colRGB.x, colRGB.y, colRGB.z, 1)`
Note that, depending on the swizzle components, some elements of the
constructor may be duplicated and others may be eliminated. The
optimizer makes sure to leave the swizzle alone if it would duplicate
anything non-trivial, or if it would eliminate anything with a side
effect.
Change-Id: I470fda217ae8cf5828406b89a5696ca6aebf608d
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335860
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 16:06:47 +00:00
|
|
|
optimizationContext->fUpdated = true;
|
2020-11-18 16:10:38 +00:00
|
|
|
if (!try_replace_expression(&b, iter, &replacement)) {
|
|
|
|
optimizationContext->fNeedsRescan = true;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
SkASSERT((*iter)->isExpression());
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
Optimize swizzled multiple-argument constructors.
This will reorder constructors with swizzles applied, such as
`half4(1, 2, 3, 4).xxyz` --> `half4(1, 1, 2, 3)`
`half4(1, colRGB).yzwx` --> `half4(colRGB.x, colRGB.y, colRGB.z, 1)`
Note that, depending on the swizzle components, some elements of the
constructor may be duplicated and others may be eliminated. The
optimizer makes sure to leave the swizzle alone if it would duplicate
anything non-trivial, or if it would eliminate anything with a side
effect.
Change-Id: I470fda217ae8cf5828406b89a5696ca6aebf608d
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335860
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 16:06:47 +00:00
|
|
|
// Swizzles can duplicate some elements and discard others, e.g.
|
|
|
|
// `half4(1, 2, 3, 4).xxz` --> `half3(1, 1, 3)`. However, there are constraints:
|
|
|
|
// - Expressions with side effects need to occur exactly once, even if they
|
|
|
|
// would otherwise be swizzle-eliminated
|
|
|
|
// - Non-trivial expressions should not be repeated, but elimination is OK.
|
|
|
|
//
|
|
|
|
// Look up the argument for the constructor at each index. This is typically simple
|
|
|
|
// but for weird cases like `half4(bar.yz, half2(foo))`, it can be harder than it
|
|
|
|
// seems. This example would result in:
|
|
|
|
// argMap[0] = {.fArgIndex = 0, .fComponent = 0} (bar.yz .x)
|
|
|
|
// argMap[1] = {.fArgIndex = 0, .fComponent = 1} (bar.yz .y)
|
|
|
|
// argMap[2] = {.fArgIndex = 1, .fComponent = 0} (half2(foo) .x)
|
|
|
|
// argMap[3] = {.fArgIndex = 1, .fComponent = 1} (half2(foo) .y)
|
|
|
|
struct ConstructorArgMap {
|
|
|
|
int8_t fArgIndex;
|
|
|
|
int8_t fComponent;
|
|
|
|
};
|
|
|
|
|
|
|
|
int numConstructorArgs = base.type().columns();
|
|
|
|
ConstructorArgMap argMap[4] = {};
|
|
|
|
int writeIdx = 0;
|
|
|
|
for (int argIdx = 0; argIdx < (int) base.arguments().size(); ++argIdx) {
|
|
|
|
const Expression& expr = *base.arguments()[argIdx];
|
|
|
|
int argWidth = expr.type().columns();
|
|
|
|
for (int componentIdx = 0; componentIdx < argWidth; ++componentIdx) {
|
|
|
|
argMap[writeIdx].fArgIndex = argIdx;
|
|
|
|
argMap[writeIdx].fComponent = componentIdx;
|
|
|
|
++writeIdx;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
SkASSERT(writeIdx == numConstructorArgs);
|
|
|
|
|
|
|
|
// Count up the number of times each constructor argument is used by the
|
|
|
|
// swizzle.
|
|
|
|
// `half4(bar.yz, half2(foo)).xwxy` -> { 3, 1 }
|
|
|
|
// - bar.yz is referenced 3 times, by `.x_xy`
|
|
|
|
// - half(foo) is referenced 1 time, by `._w__`
|
|
|
|
int8_t exprUsed[4] = {};
|
|
|
|
for (int c : s.components()) {
|
|
|
|
exprUsed[argMap[c].fArgIndex]++;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool safeToOptimize = true;
|
|
|
|
for (int index = 0; index < numConstructorArgs; ++index) {
|
|
|
|
int8_t constructorArgIndex = argMap[index].fArgIndex;
|
|
|
|
const Expression& baseArg = *base.arguments()[constructorArgIndex];
|
|
|
|
|
|
|
|
// Check that non-trivial expressions are not swizzled in more than once.
|
2020-11-19 21:25:49 +00:00
|
|
|
if (exprUsed[constructorArgIndex] > 1 &&
|
|
|
|
!Analysis::IsTrivialExpression(baseArg)) {
|
Optimize swizzled multiple-argument constructors.
This will reorder constructors with swizzles applied, such as
`half4(1, 2, 3, 4).xxyz` --> `half4(1, 1, 2, 3)`
`half4(1, colRGB).yzwx` --> `half4(colRGB.x, colRGB.y, colRGB.z, 1)`
Note that, depending on the swizzle components, some elements of the
constructor may be duplicated and others may be eliminated. The
optimizer makes sure to leave the swizzle alone if it would duplicate
anything non-trivial, or if it would eliminate anything with a side
effect.
Change-Id: I470fda217ae8cf5828406b89a5696ca6aebf608d
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335860
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 16:06:47 +00:00
|
|
|
safeToOptimize = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
// Check that side-effect-bearing expressions are swizzled in exactly once.
|
|
|
|
if (exprUsed[constructorArgIndex] != 1 && baseArg.hasSideEffects()) {
|
|
|
|
safeToOptimize = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (safeToOptimize) {
|
Merge `foo.x, foo.y, foo.z` into `foo.xyz` when optimizing swizzles.
When values from the same argument are used consecutively by the outer
swizzle, they can be merged in the inner swizzle. Merging isn't always
possible, of course, but it will be used where it can be:
`half4(1, colRGB).yzwx` --> `half4(colRGB.xyz, 1)`
`half4(1, colRGB).yxzw` --> `half4(colRGB.x, 1, colRGB.yz)`
Change-Id: Id164b046bc15022ded331c06d722f1ae3605a3bd
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335872
Commit-Queue: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 17:18:36 +00:00
|
|
|
struct ReorderedArgument {
|
|
|
|
int8_t fArgIndex;
|
|
|
|
ComponentArray fComponents;
|
|
|
|
};
|
|
|
|
SkSTArray<4, ReorderedArgument> reorderedArgs;
|
Optimize swizzled multiple-argument constructors.
This will reorder constructors with swizzles applied, such as
`half4(1, 2, 3, 4).xxyz` --> `half4(1, 1, 2, 3)`
`half4(1, colRGB).yzwx` --> `half4(colRGB.x, colRGB.y, colRGB.z, 1)`
Note that, depending on the swizzle components, some elements of the
constructor may be duplicated and others may be eliminated. The
optimizer makes sure to leave the swizzle alone if it would duplicate
anything non-trivial, or if it would eliminate anything with a side
effect.
Change-Id: I470fda217ae8cf5828406b89a5696ca6aebf608d
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335860
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 16:06:47 +00:00
|
|
|
for (int c : s.components()) {
|
|
|
|
const ConstructorArgMap& argument = argMap[c];
|
|
|
|
const Expression& baseArg = *base.arguments()[argument.fArgIndex];
|
|
|
|
|
2020-11-24 22:36:06 +00:00
|
|
|
if (baseArg.type().isScalar()) {
|
Merge `foo.x, foo.y, foo.z` into `foo.xyz` when optimizing swizzles.
When values from the same argument are used consecutively by the outer
swizzle, they can be merged in the inner swizzle. Merging isn't always
possible, of course, but it will be used where it can be:
`half4(1, colRGB).yzwx` --> `half4(colRGB.xyz, 1)`
`half4(1, colRGB).yxzw` --> `half4(colRGB.x, 1, colRGB.yz)`
Change-Id: Id164b046bc15022ded331c06d722f1ae3605a3bd
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335872
Commit-Queue: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 17:18:36 +00:00
|
|
|
// This argument is a scalar; add it to the list as-is.
|
Optimize swizzled multiple-argument constructors.
This will reorder constructors with swizzles applied, such as
`half4(1, 2, 3, 4).xxyz` --> `half4(1, 1, 2, 3)`
`half4(1, colRGB).yzwx` --> `half4(colRGB.x, colRGB.y, colRGB.z, 1)`
Note that, depending on the swizzle components, some elements of the
constructor may be duplicated and others may be eliminated. The
optimizer makes sure to leave the swizzle alone if it would duplicate
anything non-trivial, or if it would eliminate anything with a side
effect.
Change-Id: I470fda217ae8cf5828406b89a5696ca6aebf608d
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335860
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 16:06:47 +00:00
|
|
|
SkASSERT(argument.fComponent == 0);
|
Merge `foo.x, foo.y, foo.z` into `foo.xyz` when optimizing swizzles.
When values from the same argument are used consecutively by the outer
swizzle, they can be merged in the inner swizzle. Merging isn't always
possible, of course, but it will be used where it can be:
`half4(1, colRGB).yzwx` --> `half4(colRGB.xyz, 1)`
`half4(1, colRGB).yxzw` --> `half4(colRGB.x, 1, colRGB.yz)`
Change-Id: Id164b046bc15022ded331c06d722f1ae3605a3bd
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335872
Commit-Queue: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 17:18:36 +00:00
|
|
|
reorderedArgs.push_back({argument.fArgIndex,
|
|
|
|
ComponentArray{}});
|
Optimize swizzled multiple-argument constructors.
This will reorder constructors with swizzles applied, such as
`half4(1, 2, 3, 4).xxyz` --> `half4(1, 1, 2, 3)`
`half4(1, colRGB).yzwx` --> `half4(colRGB.x, colRGB.y, colRGB.z, 1)`
Note that, depending on the swizzle components, some elements of the
constructor may be duplicated and others may be eliminated. The
optimizer makes sure to leave the swizzle alone if it would duplicate
anything non-trivial, or if it would eliminate anything with a side
effect.
Change-Id: I470fda217ae8cf5828406b89a5696ca6aebf608d
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335860
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 16:06:47 +00:00
|
|
|
} else {
|
Merge `foo.x, foo.y, foo.z` into `foo.xyz` when optimizing swizzles.
When values from the same argument are used consecutively by the outer
swizzle, they can be merged in the inner swizzle. Merging isn't always
possible, of course, but it will be used where it can be:
`half4(1, colRGB).yzwx` --> `half4(colRGB.xyz, 1)`
`half4(1, colRGB).yxzw` --> `half4(colRGB.x, 1, colRGB.yz)`
Change-Id: Id164b046bc15022ded331c06d722f1ae3605a3bd
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335872
Commit-Queue: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 17:18:36 +00:00
|
|
|
// This argument is a component from a vector.
|
Optimize swizzled multiple-argument constructors.
This will reorder constructors with swizzles applied, such as
`half4(1, 2, 3, 4).xxyz` --> `half4(1, 1, 2, 3)`
`half4(1, colRGB).yzwx` --> `half4(colRGB.x, colRGB.y, colRGB.z, 1)`
Note that, depending on the swizzle components, some elements of the
constructor may be duplicated and others may be eliminated. The
optimizer makes sure to leave the swizzle alone if it would duplicate
anything non-trivial, or if it would eliminate anything with a side
effect.
Change-Id: I470fda217ae8cf5828406b89a5696ca6aebf608d
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335860
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 16:06:47 +00:00
|
|
|
SkASSERT(argument.fComponent < baseArg.type().columns());
|
Merge `foo.x, foo.y, foo.z` into `foo.xyz` when optimizing swizzles.
When values from the same argument are used consecutively by the outer
swizzle, they can be merged in the inner swizzle. Merging isn't always
possible, of course, but it will be used where it can be:
`half4(1, colRGB).yzwx` --> `half4(colRGB.xyz, 1)`
`half4(1, colRGB).yxzw` --> `half4(colRGB.x, 1, colRGB.yz)`
Change-Id: Id164b046bc15022ded331c06d722f1ae3605a3bd
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335872
Commit-Queue: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 17:18:36 +00:00
|
|
|
if (reorderedArgs.empty() ||
|
|
|
|
reorderedArgs.back().fArgIndex != argument.fArgIndex) {
|
|
|
|
// This can't be combined with the previous argument. Add a new one.
|
|
|
|
reorderedArgs.push_back({argument.fArgIndex,
|
|
|
|
ComponentArray{argument.fComponent}});
|
|
|
|
} else {
|
|
|
|
// Since we know this argument uses components, it should already
|
|
|
|
// have at least one component set.
|
|
|
|
SkASSERT(!reorderedArgs.back().fComponents.empty());
|
|
|
|
// Build up the current argument with one more component.
|
|
|
|
reorderedArgs.back().fComponents.push_back(argument.fComponent);
|
|
|
|
}
|
Optimize swizzled multiple-argument constructors.
This will reorder constructors with swizzles applied, such as
`half4(1, 2, 3, 4).xxyz` --> `half4(1, 1, 2, 3)`
`half4(1, colRGB).yzwx` --> `half4(colRGB.x, colRGB.y, colRGB.z, 1)`
Note that, depending on the swizzle components, some elements of the
constructor may be duplicated and others may be eliminated. The
optimizer makes sure to leave the swizzle alone if it would duplicate
anything non-trivial, or if it would eliminate anything with a side
effect.
Change-Id: I470fda217ae8cf5828406b89a5696ca6aebf608d
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335860
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 16:06:47 +00:00
|
|
|
}
|
|
|
|
}
|
Merge `foo.x, foo.y, foo.z` into `foo.xyz` when optimizing swizzles.
When values from the same argument are used consecutively by the outer
swizzle, they can be merged in the inner swizzle. Merging isn't always
possible, of course, but it will be used where it can be:
`half4(1, colRGB).yzwx` --> `half4(colRGB.xyz, 1)`
`half4(1, colRGB).yxzw` --> `half4(colRGB.x, 1, colRGB.yz)`
Change-Id: Id164b046bc15022ded331c06d722f1ae3605a3bd
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335872
Commit-Queue: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 17:18:36 +00:00
|
|
|
|
|
|
|
// Convert our reordered argument list to an actual array of expressions, with
|
|
|
|
// the new order and any new inner swizzles that need to be applied. Note that
|
|
|
|
// we expect followup passes to clean up the inner swizzles.
|
|
|
|
ExpressionArray newArgs;
|
|
|
|
newArgs.reserve_back(swizzleSize);
|
|
|
|
for (const ReorderedArgument& reorderedArg : reorderedArgs) {
|
|
|
|
const Expression& baseArg = *base.arguments()[reorderedArg.fArgIndex];
|
|
|
|
if (reorderedArg.fComponents.empty()) {
|
|
|
|
newArgs.push_back(baseArg.clone());
|
|
|
|
} else {
|
|
|
|
newArgs.push_back(std::make_unique<Swizzle>(*fContext, baseArg.clone(),
|
|
|
|
reorderedArg.fComponents));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a new constructor.
|
Optimize swizzled multiple-argument constructors.
This will reorder constructors with swizzles applied, such as
`half4(1, 2, 3, 4).xxyz` --> `half4(1, 1, 2, 3)`
`half4(1, colRGB).yzwx` --> `half4(colRGB.x, colRGB.y, colRGB.z, 1)`
Note that, depending on the swizzle components, some elements of the
constructor may be duplicated and others may be eliminated. The
optimizer makes sure to leave the swizzle alone if it would duplicate
anything non-trivial, or if it would eliminate anything with a side
effect.
Change-Id: I470fda217ae8cf5828406b89a5696ca6aebf608d
Bug: skia:10954
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/335860
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
2020-11-19 16:06:47 +00:00
|
|
|
replacement = std::make_unique<Constructor>(
|
|
|
|
base.fOffset,
|
|
|
|
&componentType.toCompound(*fContext, swizzleSize, /*rows=*/1),
|
|
|
|
std::move(newArgs));
|
|
|
|
|
|
|
|
// Remove references within 'expr', add references within 'optimized'
|
|
|
|
optimizationContext->fUpdated = true;
|
|
|
|
optimizationContext->fUsage->replace(expr, replacement.get());
|
|
|
|
if (!try_replace_expression(&b, iter, &replacement)) {
|
|
|
|
optimizationContext->fNeedsRescan = true;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
SkASSERT((*iter)->isExpression());
|
|
|
|
}
|
2020-11-18 16:10:38 +00:00
|
|
|
break;
|
2019-09-17 16:34:39 +00:00
|
|
|
}
|
2020-06-11 21:55:07 +00:00
|
|
|
break;
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
|
|
|
default:
|
|
|
|
break;
|
2016-10-13 20:25:34 +00:00
|
|
|
}
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
|
|
|
|
2020-08-31 22:09:01 +00:00
|
|
|
// Returns true if this statement could potentially execute a break at the current level. We ignore
|
|
|
|
// nested loops and switches, since any breaks inside of them will merely break the loop / switch.
|
|
|
|
static bool contains_conditional_break(Statement& stmt) {
|
|
|
|
class ContainsConditionalBreak : public ProgramVisitor {
|
|
|
|
public:
|
|
|
|
bool visitStatement(const Statement& stmt) override {
|
2020-09-08 14:22:09 +00:00
|
|
|
switch (stmt.kind()) {
|
|
|
|
case Statement::Kind::kBlock:
|
2020-08-31 22:09:01 +00:00
|
|
|
return this->INHERITED::visitStatement(stmt);
|
|
|
|
|
2020-09-08 14:22:09 +00:00
|
|
|
case Statement::Kind::kBreak:
|
2020-08-31 22:09:01 +00:00
|
|
|
return fInConditional > 0;
|
|
|
|
|
2020-09-08 14:22:09 +00:00
|
|
|
case Statement::Kind::kIf: {
|
2020-08-31 22:09:01 +00:00
|
|
|
++fInConditional;
|
|
|
|
bool result = this->INHERITED::visitStatement(stmt);
|
|
|
|
--fInConditional;
|
|
|
|
return result;
|
2017-05-10 19:06:17 +00:00
|
|
|
}
|
2020-06-15 16:32:24 +00:00
|
|
|
|
2020-08-31 22:09:01 +00:00
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
2017-05-10 19:06:17 +00:00
|
|
|
}
|
2020-06-15 16:32:24 +00:00
|
|
|
|
2020-08-31 22:09:01 +00:00
|
|
|
int fInConditional = 0;
|
|
|
|
using INHERITED = ProgramVisitor;
|
|
|
|
};
|
2017-05-10 19:06:17 +00:00
|
|
|
|
2020-08-31 22:09:01 +00:00
|
|
|
return ContainsConditionalBreak{}.visitStatement(stmt);
|
2020-06-15 16:32:24 +00:00
|
|
|
}
|
|
|
|
|
2018-08-24 17:06:27 +00:00
|
|
|
// returns true if this statement definitely executes a break at the current level (we ignore
|
|
|
|
// nested loops and switches, since any breaks inside of them will merely break the loop / switch)
|
2020-08-31 22:09:01 +00:00
|
|
|
static bool contains_unconditional_break(Statement& stmt) {
|
|
|
|
class ContainsUnconditionalBreak : public ProgramVisitor {
|
|
|
|
public:
|
|
|
|
bool visitStatement(const Statement& stmt) override {
|
2020-09-08 14:22:09 +00:00
|
|
|
switch (stmt.kind()) {
|
|
|
|
case Statement::Kind::kBlock:
|
2020-08-31 22:09:01 +00:00
|
|
|
return this->INHERITED::visitStatement(stmt);
|
|
|
|
|
2020-09-08 14:22:09 +00:00
|
|
|
case Statement::Kind::kBreak:
|
2018-08-24 17:06:27 +00:00
|
|
|
return true;
|
2020-08-31 22:09:01 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
return false;
|
2018-08-24 17:06:27 +00:00
|
|
|
}
|
2020-08-31 22:09:01 +00:00
|
|
|
}
|
2020-06-15 16:32:24 +00:00
|
|
|
|
2020-08-31 22:09:01 +00:00
|
|
|
using INHERITED = ProgramVisitor;
|
|
|
|
};
|
2020-06-15 16:32:24 +00:00
|
|
|
|
2020-08-31 22:09:01 +00:00
|
|
|
return ContainsUnconditionalBreak{}.visitStatement(stmt);
|
2018-08-24 17:06:27 +00:00
|
|
|
}
|
|
|
|
|
2020-10-13 16:48:21 +00:00
|
|
|
// Transfers `stmt` into `target`, except that any `break` statements — at this
// level or inside nested blocks — are discarded rather than moved.
static void move_all_but_break(std::unique_ptr<Statement>& stmt, StatementArray* target) {
    const Statement::Kind kind = stmt->kind();
    if (kind == Statement::Kind::kBreak) {
        // Breaks are dropped entirely; nothing is appended to the target.
        return;
    }
    if (kind != Statement::Kind::kBlock) {
        // Ordinary statements are moved over verbatim.
        target->push_back(std::move(stmt));
        return;
    }

    // Blocks are rebuilt child by child so that breaks nested inside them are
    // stripped as well.
    Block& block = static_cast<Block&>(*stmt);
    StatementArray rebuilt;
    rebuilt.reserve_back(block.children().size());
    for (std::unique_ptr<Statement>& child : block.children()) {
        move_all_but_break(child, &rebuilt);
    }

    // Re-wrap the filtered children in a block with the same offset, symbol
    // table, and scoping as the original.
    target->push_back(std::make_unique<Block>(block.fOffset, std::move(rebuilt),
                                              block.symbolTable(), block.isScope()));
}
|
|
|
|
|
2017-05-10 19:06:17 +00:00
|
|
|
// Returns a block containing all of the statements that will be run if the given case matches
// (which, owing to the statements being owned by unique_ptrs, means the switch itself will be
// broken by this call and must then be discarded).
// Returns null (and leaves the switch unmodified) if no such simple reduction is possible, such as
// when break statements appear inside conditionals.
static std::unique_ptr<Statement> block_for_case(SwitchStatement* switchStatement,
                                                 SwitchCase* caseToCapture) {
    // We have to be careful to not move any of the pointers until after we're sure we're going to
    // succeed, so before we make any changes at all, we check the switch-cases to decide on a plan
    // of action. First, find the switch-case we are interested in.
    auto iter = switchStatement->cases().begin();
    for (; iter != switchStatement->cases().end(); ++iter) {
        if (iter->get() == caseToCapture) {
            break;
        }
    }

    // Next, walk forward through the rest of the switch. If we find a conditional break, we're
    // stuck and can't simplify at all. If we find an unconditional break, we have a range of
    // statements that we can use for simplification.
    auto startIter = iter;
    Statement* unconditionalBreakStmt = nullptr;
    for (; iter != switchStatement->cases().end(); ++iter) {
        for (std::unique_ptr<Statement>& stmt : (*iter)->statements()) {
            if (contains_conditional_break(*stmt)) {
                // We can't reduce switch-cases to a block when they have conditional breaks.
                // Nothing has been moved yet, so the switch is left intact.
                return nullptr;
            }

            if (contains_unconditional_break(*stmt)) {
                // We found an unconditional break. We can use this block, but we need to strip
                // out the break statement.
                unconditionalBreakStmt = stmt.get();
                break;
            }
        }

        if (unconditionalBreakStmt != nullptr) {
            break;
        }
    }

    // We fell off the bottom of the switch or encountered a break. We know the range of statements
    // that we need to move over, and we know it's safe to do so. From here on, the switch's
    // statements are consumed (moved out), so the caller must discard the switch.
    StatementArray caseStmts;

    // We can move over most of the statements as-is: every case from the matched one up to (but
    // not including) the case containing the unconditional break.
    while (startIter != iter) {
        for (std::unique_ptr<Statement>& stmt : (*startIter)->statements()) {
            caseStmts.push_back(std::move(stmt));
        }
        ++startIter;
    }

    // If we found an unconditional break at the end, we need to move what we can while avoiding
    // that break.
    if (unconditionalBreakStmt != nullptr) {
        for (std::unique_ptr<Statement>& stmt : (*startIter)->statements()) {
            if (stmt.get() == unconditionalBreakStmt) {
                // Strip the break (and any breaks nested inside this statement) while moving.
                move_all_but_break(stmt, &caseStmts);
                unconditionalBreakStmt = nullptr;
                break;
            }

            caseStmts.push_back(std::move(stmt));
        }
    }

    SkASSERT(unconditionalBreakStmt == nullptr);  // Verify that we fixed the unconditional break.

    // Return our newly-synthesized block, sharing the switch's symbol table. The offset is -1
    // because this block does not correspond to a single source location.
    return std::make_unique<Block>(/*offset=*/-1, std::move(caseStmts), switchStatement->symbols());
}
|
|
|
|
|
2017-04-20 23:31:52 +00:00
|
|
|
// Simplifies a single statement node within a basic block during optimization: removes dead
// variable declarations, collapses constant `if`s and `switch`es, and deletes side-effect-free
// expression statements. Sets fUpdated when a change was made and fNeedsRescan when the CFG no
// longer matches the IR and must be rebuilt.
void Compiler::simplifyStatement(DefinitionMap& definitions,
                                 BasicBlock& b,
                                 std::vector<BasicBlock::Node>::iterator* iter,
                                 OptimizationContext* optimizationContext) {
    ProgramUsage* usage = optimizationContext->fUsage;
    Statement* stmt = (*iter)->statement()->get();
    switch (stmt->kind()) {
        case Statement::Kind::kVarDeclaration: {
            // A declaration of a variable that is never used, whose initializer (if any) has no
            // side effects, can be removed outright.
            const auto& varDecl = stmt->as<VarDeclaration>();
            if (usage->isDead(varDecl.var()) &&
                (!varDecl.value() ||
                 !varDecl.value()->hasSideEffects())) {
                if (varDecl.value()) {
                    SkASSERT((*iter)->statement()->get() == stmt);
                    // The initializer expression also lives in the CFG; remove it there too.
                    if (!b.tryRemoveExpressionBefore(iter, varDecl.value().get())) {
                        optimizationContext->fNeedsRescan = true;
                    }
                }
                (*iter)->setStatement(std::make_unique<Nop>(), usage);
                optimizationContext->fUpdated = true;
            }
            break;
        }
        case Statement::Kind::kIf: {
            IfStatement& i = stmt->as<IfStatement>();
            if (i.test()->kind() == Expression::Kind::kBoolLiteral) {
                // constant if, collapse down to a single branch
                if (i.test()->as<BoolLiteral>().value()) {
                    SkASSERT(i.ifTrue());
                    (*iter)->setStatement(std::move(i.ifTrue()), usage);
                } else {
                    if (i.ifFalse()) {
                        (*iter)->setStatement(std::move(i.ifFalse()), usage);
                    } else {
                        // `if (false)` with no else: the whole statement vanishes.
                        (*iter)->setStatement(std::make_unique<Nop>(), usage);
                    }
                }
                optimizationContext->fUpdated = true;
                optimizationContext->fNeedsRescan = true;
                break;
            }
            if (i.ifFalse() && i.ifFalse()->isEmpty()) {
                // else block doesn't do anything, remove it
                i.ifFalse().reset();
                optimizationContext->fUpdated = true;
                optimizationContext->fNeedsRescan = true;
            }
            if (!i.ifFalse() && i.ifTrue()->isEmpty()) {
                // if block doesn't do anything, no else block
                if (i.test()->hasSideEffects()) {
                    // test has side effects, keep it
                    (*iter)->setStatement(
                            std::make_unique<ExpressionStatement>(std::move(i.test())), usage);
                } else {
                    // no if, no else, no test side effects, kill the whole if
                    // statement
                    (*iter)->setStatement(std::make_unique<Nop>(), usage);
                }
                optimizationContext->fUpdated = true;
                optimizationContext->fNeedsRescan = true;
            }
            break;
        }
        case Statement::Kind::kSwitch: {
            SwitchStatement& s = stmt->as<SwitchStatement>();
            int64_t switchValue;
            if (fIRGenerator->getConstantInt(*s.value(), &switchValue)) {
                // switch is constant, replace it with the case that matches
                bool found = false;
                SwitchCase* defaultCase = nullptr;
                for (const std::unique_ptr<SwitchCase>& c : s.cases()) {
                    if (!c->value()) {
                        // Remember the default case in case no value matches.
                        defaultCase = c.get();
                        continue;
                    }
                    int64_t caseValue;
                    // Case labels in a constant switch must themselves be constant.
                    SkAssertResult(fIRGenerator->getConstantInt(*c->value(), &caseValue));
                    if (caseValue == switchValue) {
                        // block_for_case consumes the switch's statements on success; on
                        // failure (conditional break inside) it leaves the switch intact.
                        std::unique_ptr<Statement> newBlock = block_for_case(&s, c.get());
                        if (newBlock) {
                            (*iter)->setStatement(std::move(newBlock), usage);
                            found = true;
                            break;
                        } else {
                            // Report the error for a static switch only once per statement,
                            // using fSilences to suppress duplicates across passes.
                            if (s.isStatic() && !(fFlags & kPermitInvalidStaticTests_Flag) &&
                                optimizationContext->fSilences.find(&s) ==
                                optimizationContext->fSilences.end()) {
                                this->error(s.fOffset,
                                            "static switch contains non-static conditional break");
                                optimizationContext->fSilences.insert(&s);
                            }
                            return; // can't simplify
                        }
                    }
                }
                if (!found) {
                    // no matching case. use default if it exists, or kill the whole thing
                    if (defaultCase) {
                        std::unique_ptr<Statement> newBlock = block_for_case(&s, defaultCase);
                        if (newBlock) {
                            (*iter)->setStatement(std::move(newBlock), usage);
                        } else {
                            if (s.isStatic() && !(fFlags & kPermitInvalidStaticTests_Flag) &&
                                optimizationContext->fSilences.find(&s) ==
                                optimizationContext->fSilences.end()) {
                                this->error(s.fOffset,
                                            "static switch contains non-static conditional break");
                                optimizationContext->fSilences.insert(&s);
                            }
                            return; // can't simplify
                        }
                    } else {
                        (*iter)->setStatement(std::make_unique<Nop>(), usage);
                    }
                }
                optimizationContext->fUpdated = true;
                optimizationContext->fNeedsRescan = true;
            }
            break;
        }
        case Statement::Kind::kExpression: {
            ExpressionStatement& e = stmt->as<ExpressionStatement>();
            SkASSERT((*iter)->statement()->get() == &e);
            if (!e.expression()->hasSideEffects()) {
                // Expression statement with no side effects, kill it
                if (!b.tryRemoveExpressionBefore(iter, e.expression().get())) {
                    optimizationContext->fNeedsRescan = true;
                }
                SkASSERT((*iter)->statement()->get() == stmt);
                (*iter)->setStatement(std::make_unique<Nop>(), usage);
                optimizationContext->fUpdated = true;
            }
            break;
        }
        default:
            break;
    }
}
|
|
|
|
|
2020-10-19 20:34:10 +00:00
|
|
|
bool Compiler::scanCFG(FunctionDefinition& f, ProgramUsage* usage) {
|
2020-09-09 13:39:34 +00:00
|
|
|
bool madeChanges = false;
|
|
|
|
|
2017-04-20 23:31:52 +00:00
|
|
|
CFG cfg = CFGGenerator().getCFG(f);
|
|
|
|
this->computeDataFlow(&cfg);
|
2016-10-13 20:25:34 +00:00
|
|
|
|
|
|
|
// check for unreachable code
|
|
|
|
for (size_t i = 0; i < cfg.fBlocks.size(); i++) {
|
2020-09-09 13:39:34 +00:00
|
|
|
const BasicBlock& block = cfg.fBlocks[i];
|
2020-10-01 19:42:37 +00:00
|
|
|
if (i != cfg.fStart && !block.fIsReachable && block.fNodes.size()) {
|
2017-09-11 20:50:14 +00:00
|
|
|
int offset;
|
2020-09-09 13:39:34 +00:00
|
|
|
const BasicBlock::Node& node = block.fNodes[0];
|
2020-09-28 20:08:58 +00:00
|
|
|
if (node.isStatement()) {
|
|
|
|
offset = (*node.statement())->fOffset;
|
|
|
|
} else {
|
|
|
|
offset = (*node.expression())->fOffset;
|
|
|
|
if ((*node.expression())->is<BoolLiteral>()) {
|
|
|
|
// Function inlining can generate do { ... } while(false) loops which always
|
|
|
|
// break, so the boolean condition is considered unreachable. Since not being
|
|
|
|
// able to reach a literal is a non-issue in the first place, we don't report an
|
|
|
|
// error in this case.
|
|
|
|
continue;
|
|
|
|
}
|
2017-01-19 18:32:00 +00:00
|
|
|
}
|
2017-09-11 20:50:14 +00:00
|
|
|
this->error(offset, String("unreachable"));
|
2016-10-13 20:25:34 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (fErrorCount) {
|
2020-09-09 13:39:34 +00:00
|
|
|
return madeChanges;
|
2016-10-13 20:25:34 +00:00
|
|
|
}
|
|
|
|
|
2017-04-20 23:31:52 +00:00
|
|
|
// check for dead code & undefined variables, perform constant propagation
|
2020-10-22 18:39:46 +00:00
|
|
|
OptimizationContext optimizationContext;
|
2020-10-19 20:34:10 +00:00
|
|
|
optimizationContext.fUsage = usage;
|
2020-11-03 16:35:01 +00:00
|
|
|
SkBitSet eliminatedBlockIds(cfg.fBlocks.size());
|
2017-04-20 23:31:52 +00:00
|
|
|
do {
|
2020-10-22 18:39:46 +00:00
|
|
|
if (optimizationContext.fNeedsRescan) {
|
2017-04-20 23:31:52 +00:00
|
|
|
cfg = CFGGenerator().getCFG(f);
|
|
|
|
this->computeDataFlow(&cfg);
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext.fNeedsRescan = false;
|
2017-04-20 23:31:52 +00:00
|
|
|
}
|
|
|
|
|
2020-11-03 16:35:01 +00:00
|
|
|
eliminatedBlockIds.reset();
|
2020-10-22 18:39:46 +00:00
|
|
|
optimizationContext.fUpdated = false;
|
2020-11-03 16:35:01 +00:00
|
|
|
|
|
|
|
for (BlockId blockId = 0; blockId < cfg.fBlocks.size(); ++blockId) {
|
|
|
|
if (eliminatedBlockIds.test(blockId)) {
|
|
|
|
// We reached a block ID that might have been eliminated. Be cautious and rescan.
|
|
|
|
optimizationContext.fUpdated = true;
|
|
|
|
optimizationContext.fNeedsRescan = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
BasicBlock& b = cfg.fBlocks[blockId];
|
|
|
|
if (blockId > 0 && !b.fIsReachable) {
|
2020-06-19 19:32:49 +00:00
|
|
|
// Block was reachable before optimization, but has since become unreachable. In
|
|
|
|
// addition to being dead code, it's broken - since control flow can't reach it, no
|
|
|
|
// prior variable definitions can reach it, and therefore variables might look to
|
2020-10-06 18:43:32 +00:00
|
|
|
// have not been properly assigned. Kill it by replacing all statements with Nops.
|
2020-06-19 19:32:49 +00:00
|
|
|
for (BasicBlock::Node& node : b.fNodes) {
|
2020-09-28 20:08:58 +00:00
|
|
|
if (node.isStatement() && !(*node.statement())->is<Nop>()) {
|
2020-11-03 16:35:01 +00:00
|
|
|
// Eliminating a node runs the risk of eliminating that node's exits as
|
|
|
|
// well. Keep track of this and do a rescan if we are about to access one
|
|
|
|
// of these.
|
|
|
|
for (BlockId id : b.fExits) {
|
|
|
|
eliminatedBlockIds.set(id);
|
|
|
|
}
|
2020-10-19 20:34:10 +00:00
|
|
|
node.setStatement(std::make_unique<Nop>(), usage);
|
2020-09-09 13:39:34 +00:00
|
|
|
madeChanges = true;
|
2020-06-19 19:32:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
2017-04-20 23:31:52 +00:00
|
|
|
DefinitionMap definitions = b.fBefore;
|
|
|
|
|
2020-10-22 18:39:46 +00:00
|
|
|
for (auto iter = b.fNodes.begin(); iter != b.fNodes.end() &&
|
|
|
|
!optimizationContext.fNeedsRescan; ++iter) {
|
2020-09-28 20:08:58 +00:00
|
|
|
if (iter->isExpression()) {
|
2020-10-22 18:39:46 +00:00
|
|
|
this->simplifyExpression(definitions, b, &iter, &optimizationContext);
|
2017-04-20 23:31:52 +00:00
|
|
|
} else {
|
2020-10-22 18:39:46 +00:00
|
|
|
this->simplifyStatement(definitions, b, &iter, &optimizationContext);
|
2016-10-13 20:25:34 +00:00
|
|
|
}
|
2020-10-22 18:39:46 +00:00
|
|
|
if (optimizationContext.fNeedsRescan) {
|
2017-05-17 14:52:55 +00:00
|
|
|
break;
|
|
|
|
}
|
2017-04-20 23:31:52 +00:00
|
|
|
this->addDefinitions(*iter, &definitions);
|
2016-10-13 20:25:34 +00:00
|
|
|
}
|
2020-09-14 15:32:49 +00:00
|
|
|
|
2020-10-22 18:39:46 +00:00
|
|
|
if (optimizationContext.fNeedsRescan) {
|
2020-09-14 15:32:49 +00:00
|
|
|
break;
|
|
|
|
}
|
2016-10-13 20:25:34 +00:00
|
|
|
}
|
2020-10-22 18:39:46 +00:00
|
|
|
madeChanges |= optimizationContext.fUpdated;
|
|
|
|
} while (optimizationContext.fUpdated);
|
|
|
|
SkASSERT(!optimizationContext.fNeedsRescan);
|
2016-10-13 20:25:34 +00:00
|
|
|
|
2017-06-22 15:24:38 +00:00
|
|
|
// verify static ifs & switches, clean up dead variable decls
|
2017-05-10 19:06:17 +00:00
|
|
|
for (BasicBlock& b : cfg.fBlocks) {
|
2020-10-22 18:39:46 +00:00
|
|
|
for (auto iter = b.fNodes.begin(); iter != b.fNodes.end() &&
|
|
|
|
!optimizationContext.fNeedsRescan;) {
|
2020-09-28 20:08:58 +00:00
|
|
|
if (iter->isStatement()) {
|
2017-05-10 19:06:17 +00:00
|
|
|
const Statement& s = **iter->statement();
|
2020-09-08 14:22:09 +00:00
|
|
|
switch (s.kind()) {
|
|
|
|
case Statement::Kind::kIf:
|
2020-10-07 20:47:09 +00:00
|
|
|
if (s.as<IfStatement>().isStatic() &&
|
2017-07-14 14:12:15 +00:00
|
|
|
!(fFlags & kPermitInvalidStaticTests_Flag)) {
|
2017-09-11 20:50:14 +00:00
|
|
|
this->error(s.fOffset, "static if has non-static test");
|
2017-05-10 19:06:17 +00:00
|
|
|
}
|
2017-06-22 15:24:38 +00:00
|
|
|
++iter;
|
2017-05-10 19:06:17 +00:00
|
|
|
break;
|
2020-09-08 14:22:09 +00:00
|
|
|
case Statement::Kind::kSwitch:
|
2020-10-22 19:53:41 +00:00
|
|
|
if (s.as<SwitchStatement>().isStatic() &&
|
|
|
|
!(fFlags & kPermitInvalidStaticTests_Flag) &&
|
|
|
|
optimizationContext.fSilences.find(&s) ==
|
|
|
|
optimizationContext.fSilences.end()) {
|
2017-09-11 20:50:14 +00:00
|
|
|
this->error(s.fOffset, "static switch has non-static test");
|
2017-05-10 19:06:17 +00:00
|
|
|
}
|
2017-06-22 15:24:38 +00:00
|
|
|
++iter;
|
|
|
|
break;
|
2017-05-10 19:06:17 +00:00
|
|
|
default:
|
2017-06-22 15:24:38 +00:00
|
|
|
++iter;
|
2017-05-10 19:06:17 +00:00
|
|
|
break;
|
|
|
|
}
|
2017-06-22 15:24:38 +00:00
|
|
|
} else {
|
|
|
|
++iter;
|
2017-05-10 19:06:17 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-13 20:25:34 +00:00
|
|
|
// check for missing return
|
2020-10-14 17:33:18 +00:00
|
|
|
if (f.declaration().returnType() != *fContext->fVoid_Type) {
|
2020-10-01 19:42:37 +00:00
|
|
|
if (cfg.fBlocks[cfg.fExit].fIsReachable) {
|
2020-10-14 17:33:18 +00:00
|
|
|
this->error(f.fOffset, String("function '" + String(f.declaration().name()) +
|
2019-11-22 19:06:12 +00:00
|
|
|
"' can exit without returning a value"));
|
2016-10-13 20:25:34 +00:00
|
|
|
}
|
|
|
|
}
|
2020-09-09 13:39:34 +00:00
|
|
|
|
|
|
|
return madeChanges;
|
2016-10-13 20:25:34 +00:00
|
|
|
}
|
|
|
|
|
2020-09-23 17:55:20 +00:00
|
|
|
std::unique_ptr<Program> Compiler::convertProgram(
|
|
|
|
Program::Kind kind,
|
|
|
|
String text,
|
|
|
|
const Program::Settings& settings,
|
|
|
|
const std::vector<std::unique_ptr<ExternalValue>>* externalValues) {
|
|
|
|
SkASSERT(!externalValues || (kind == Program::kGeneric_Kind));
|
2019-05-15 19:29:54 +00:00
|
|
|
|
2020-11-18 20:38:39 +00:00
|
|
|
// Loading and optimizing our base module might reset the inliner, so do that first,
|
|
|
|
// *then* configure the inliner with the settings for this program.
|
|
|
|
const ParsedModule& baseModule = this->moduleForProgramKind(kind);
|
|
|
|
|
2016-07-01 15:22:01 +00:00
|
|
|
fErrorText = "";
|
|
|
|
fErrorCount = 0;
|
2020-11-18 20:38:39 +00:00
|
|
|
fInliner.reset(fIRGenerator->fModifiers.get(), &settings);
|
Reorganization of IR generator's API and interaction with compiler
- Move all of IR generator's fields private (except for fContext, which
is used ~everywhere).
- Eliminate start() and finish(), fold this logic into convertProgram.
The division of what was set/reset in different places was pretty
arbitrary. Now, convertProgram does everything. Along that line, have
it actually return the "outputs" as an IRBundle (a small collection of
the things that the compiler needs). This seems better than the
compiler ripping out IR generator's internals.
- IR generator's POD field initialization was a mix of in-class and
constructor. Move all the constant initialization to declarations.
- No need to look up sk_PerVertex at start (or convertProgram) time, so
remove fSkPerVertex, and just do the lookup when we're about to use
it.
- IRGenerator::convertProgram is fairly long now, but all the code is in
one place. You don't have to think about the order that three
different member functions are called (along with the caller mutating
the internal state between those three calls).
- In the compiler, add an AutoSource helper to manage changing and
restoring the fSource pointer everywhere.
- Rename the loadXXXIntrinsics functions to loadXXXModule, have them
return the module, and wrap the whole thing up in a single
moduleForProgramKind() helper.
Change-Id: I0c9b6702f8786792963e3d9408d6619e5ab393e2
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/324696
Reviewed-by: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
2020-10-09 16:05:16 +00:00
|
|
|
|
|
|
|
// Not using AutoSource, because caller is likely to call errorText() if we fail to compile
|
2017-09-11 20:50:14 +00:00
|
|
|
std::unique_ptr<String> textPtr(new String(std::move(text)));
|
|
|
|
fSource = textPtr.get();
|
Reorganization of IR generator's API and interaction with compiler
- Move all of IR generator's fields private (except for fContext, which
is used ~everywhere).
- Eliminate start() and finish(), fold this logic into convertProgram.
The division of what was set/reset in different places was pretty
arbitrary. Now, convertProgram does everything. Along that line, have
it actually return the "outputs" as an IRBundle (a small collection of
the things that the compiler needs). This seems better than the
compiler ripping out IR generator's internals.
- IR generator's POD field initialization was a mix of in-class and
constructor. Move all the constant initialization to declarations.
- No need to look up sk_PerVertex at start (or convertProgram) time, so
remove fSkPerVertex, and just do the lookup when we're about to use
it.
- IRGenerator::convertProgram is fairly long now, but all the code is in
one place. You don't have to think about the order that three
different member functions are called (along with the caller mutating
the internal state between those three calls).
- In the compiler, add an AutoSource helper to manage changing and
restoring the fSource pointer everywhere.
- Rename the loadXXXIntrinsics functions to loadXXXModule, have them
return the module, and wrap the whole thing up in a single
moduleForProgramKind() helper.
Change-Id: I0c9b6702f8786792963e3d9408d6619e5ab393e2
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/324696
Reviewed-by: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
2020-10-09 16:05:16 +00:00
|
|
|
|
Reland "Create a basic IRNode pooling system."
This is a reland of e16eca95f5c08c2bdf72cc0b04af62a1071afd8d
This fixes the no-op (iOS) implementation of CreatePoolOnThread.
Original change's description:
> Create a basic IRNode pooling system.
>
> Allocations are redirected by overriding `operator new` and `operator
> delete` on the IRNode class. This allows us to use our existing
> `unique_ptr` and `make_unique` calls as-is. The Pool class is simple;
> it holds a fixed number of nodes and recycles them as they are returned.
>
> A fixed pool size of 2000 nodes was chosen. That is large enough to hold
> the contents of `sksl_large` during compilation, but it can be
> overflowed by very large shaders, or if multiple programs are converted
> at the same time. Exhausting the pool is not a problem; if this happens,
> additional nodes will be allocated via the system allocator as usual.
> More elaborate schemes are possible but might not add a lot of value.
>
> Thread safety is accomplished by placing the pool in a `thread_local`
> static during a Program's creation and destruction; the pool is freed
> when the program is destroyed. One important consequence of this
> strategy is that a program must free every node that it allocated during
> its creation, or else the node will be leaked. In debug, leaking a node
> will be detected and causes a DEBUGFAIL. In release, the pool will be
> freed despite having a live node in it, and if that node is later freed,
> that pointer will be passed to the system `free` (which is likely to
> cause a crash).
>
> In this CL, iOS does not support pooling, since support for
> `thread_local` was only added on iOS 9. This is fixed in the followup
> CL, http://review.skia.org/328837, which uses pthread keys on iOS.
>
> Nanobench shows ~15% improvement:
> (last week) http://screen/5CNBhTaZApcDA8h
> (today) http://screen/8ti5Rymvf6LUs8i
>
> Change-Id: I559de73606ee1be54e5eae7f82129dc928a63e3c
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/326876
> Commit-Queue: John Stiles <johnstiles@google.com>
> Reviewed-by: Ethan Nicholas <ethannicholas@google.com>
> Auto-Submit: John Stiles <johnstiles@google.com>
Change-Id: I8623a574a7e92332ff00b83982497863c8953929
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329171
Commit-Queue: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
2020-10-22 15:09:15 +00:00
|
|
|
// Enable node pooling while converting and optimizing the program for a performance boost.
|
|
|
|
// The Program will take ownership of the pool.
|
2020-10-22 19:42:27 +00:00
|
|
|
std::unique_ptr<Pool> pool = Pool::Create();
|
|
|
|
pool->attachToThread();
|
2020-11-18 20:38:39 +00:00
|
|
|
IRGenerator::IRBundle ir =
|
|
|
|
fIRGenerator->convertProgram(kind, &settings, baseModule, /*isBuiltinCode=*/false,
|
|
|
|
textPtr->c_str(), textPtr->size(), externalValues);
|
Reland "Create a basic IRNode pooling system."
This is a reland of e16eca95f5c08c2bdf72cc0b04af62a1071afd8d
This fixes the no-op (iOS) implementation of CreatePoolOnThread.
Original change's description:
> Create a basic IRNode pooling system.
>
> Allocations are redirected by overriding `operator new` and `operator
> delete` on the IRNode class. This allows us to use our existing
> `unique_ptr` and `make_unique` calls as-is. The Pool class is simple;
> it holds a fixed number of nodes and recycles them as they are returned.
>
> A fixed pool size of 2000 nodes was chosen. That is large enough to hold
> the contents of `sksl_large` during compilation, but it can be
> overflowed by very large shaders, or if multiple programs are converted
> at the same time. Exhausting the pool is not a problem; if this happens,
> additional nodes will be allocated via the system allocator as usual.
> More elaborate schemes are possible but might not add a lot of value.
>
> Thread safety is accomplished by placing the pool in a `thread_local`
> static during a Program's creation and destruction; the pool is freed
> when the program is destroyed. One important consequence of this
> strategy is that a program must free every node that it allocated during
> its creation, or else the node will be leaked. In debug, leaking a node
> will be detected and causes a DEBUGFAIL. In release, the pool will be
> freed despite having a live node in it, and if that node is later freed,
> that pointer will be passed to the system `free` (which is likely to
> cause a crash).
>
> In this CL, iOS does not support pooling, since support for
> `thread_local` was only added on iOS 9. This is fixed in the followup
> CL, http://review.skia.org/328837, which uses pthread keys on iOS.
>
> Nanobench shows ~15% improvement:
> (last week) http://screen/5CNBhTaZApcDA8h
> (today) http://screen/8ti5Rymvf6LUs8i
>
> Change-Id: I559de73606ee1be54e5eae7f82129dc928a63e3c
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/326876
> Commit-Queue: John Stiles <johnstiles@google.com>
> Reviewed-by: Ethan Nicholas <ethannicholas@google.com>
> Auto-Submit: John Stiles <johnstiles@google.com>
Change-Id: I8623a574a7e92332ff00b83982497863c8953929
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329171
Commit-Queue: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
2020-10-22 15:09:15 +00:00
|
|
|
auto program = std::make_unique<Program>(kind,
|
|
|
|
std::move(textPtr),
|
|
|
|
settings,
|
2020-11-02 17:26:22 +00:00
|
|
|
fCaps,
|
Reland "Create a basic IRNode pooling system."
This is a reland of e16eca95f5c08c2bdf72cc0b04af62a1071afd8d
This fixes the no-op (iOS) implementation of CreatePoolOnThread.
Original change's description:
> Create a basic IRNode pooling system.
>
> Allocations are redirected by overriding `operator new` and `operator
> delete` on the IRNode class. This allows us to use our existing
> `unique_ptr` and `make_unique` calls as-is. The Pool class is simple;
> it holds a fixed number of nodes and recycles them as they are returned.
>
> A fixed pool size of 2000 nodes was chosen. That is large enough to hold
> the contents of `sksl_large` during compilation, but it can be
> overflowed by very large shaders, or if multiple programs are converted
> at the same time. Exhausting the pool is not a problem; if this happens,
> additional nodes will be allocated via the system allocator as usual.
> More elaborate schemes are possible but might not add a lot of value.
>
> Thread safety is accomplished by placing the pool in a `thread_local`
> static during a Program's creation and destruction; the pool is freed
> when the program is destroyed. One important consequence of this
> strategy is that a program must free every node that it allocated during
> its creation, or else the node will be leaked. In debug, leaking a node
> will be detected and causes a DEBUGFAIL. In release, the pool will be
> freed despite having a live node in it, and if that node is later freed,
> that pointer will be passed to the system `free` (which is likely to
> cause a crash).
>
> In this CL, iOS does not support pooling, since support for
> `thread_local` was only added on iOS 9. This is fixed in the followup
> CL, http://review.skia.org/328837, which uses pthread keys on iOS.
>
> Nanobench shows ~15% improvement:
> (last week) http://screen/5CNBhTaZApcDA8h
> (today) http://screen/8ti5Rymvf6LUs8i
>
> Change-Id: I559de73606ee1be54e5eae7f82129dc928a63e3c
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/326876
> Commit-Queue: John Stiles <johnstiles@google.com>
> Reviewed-by: Ethan Nicholas <ethannicholas@google.com>
> Auto-Submit: John Stiles <johnstiles@google.com>
Change-Id: I8623a574a7e92332ff00b83982497863c8953929
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329171
Commit-Queue: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
2020-10-22 15:09:15 +00:00
|
|
|
fContext,
|
|
|
|
std::move(ir.fElements),
|
2020-10-28 18:14:39 +00:00
|
|
|
std::move(ir.fSharedElements),
|
Reland "Create a basic IRNode pooling system."
This is a reland of e16eca95f5c08c2bdf72cc0b04af62a1071afd8d
This fixes the no-op (iOS) implementation of CreatePoolOnThread.
Original change's description:
> Create a basic IRNode pooling system.
>
> Allocations are redirected by overriding `operator new` and `operator
> delete` on the IRNode class. This allows us to use our existing
> `unique_ptr` and `make_unique` calls as-is. The Pool class is simple;
> it holds a fixed number of nodes and recycles them as they are returned.
>
> A fixed pool size of 2000 nodes was chosen. That is large enough to hold
> the contents of `sksl_large` during compilation, but it can be
> overflowed by very large shaders, or if multiple programs are converted
> at the same time. Exhausting the pool is not a problem; if this happens,
> additional nodes will be allocated via the system allocator as usual.
> More elaborate schemes are possible but might not add a lot of value.
>
> Thread safety is accomplished by placing the pool in a `thread_local`
> static during a Program's creation and destruction; the pool is freed
> when the program is destroyed. One important consequence of this
> strategy is that a program must free every node that it allocated during
> its creation, or else the node will be leaked. In debug, leaking a node
> will be detected and causes a DEBUGFAIL. In release, the pool will be
> freed despite having a live node in it, and if that node is later freed,
> that pointer will be passed to the system `free` (which is likely to
> cause a crash).
>
> In this CL, iOS does not support pooling, since support for
> `thread_local` was only added on iOS 9. This is fixed in the followup
> CL, http://review.skia.org/328837, which uses pthread keys on iOS.
>
> Nanobench shows ~15% improvement:
> (last week) http://screen/5CNBhTaZApcDA8h
> (today) http://screen/8ti5Rymvf6LUs8i
>
> Change-Id: I559de73606ee1be54e5eae7f82129dc928a63e3c
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/326876
> Commit-Queue: John Stiles <johnstiles@google.com>
> Reviewed-by: Ethan Nicholas <ethannicholas@google.com>
> Auto-Submit: John Stiles <johnstiles@google.com>
Change-Id: I8623a574a7e92332ff00b83982497863c8953929
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329171
Commit-Queue: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
2020-10-22 15:09:15 +00:00
|
|
|
std::move(ir.fModifiers),
|
|
|
|
std::move(ir.fSymbolTable),
|
|
|
|
std::move(pool),
|
|
|
|
ir.fInputs);
|
|
|
|
bool success = false;
|
2016-12-12 20:33:30 +00:00
|
|
|
if (fErrorCount) {
|
Reland "Create a basic IRNode pooling system."
This is a reland of e16eca95f5c08c2bdf72cc0b04af62a1071afd8d
This fixes the no-op (iOS) implementation of CreatePoolOnThread.
Original change's description:
> Create a basic IRNode pooling system.
>
> Allocations are redirected by overriding `operator new` and `operator
> delete` on the IRNode class. This allows us to use our existing
> `unique_ptr` and `make_unique` calls as-is. The Pool class is simple;
> it holds a fixed number of nodes and recycles them as they are returned.
>
> A fixed pool size of 2000 nodes was chosen. That is large enough to hold
> the contents of `sksl_large` during compilation, but it can be
> overflowed by very large shaders, or if multiple programs are converted
> at the same time. Exhausting the pool is not a problem; if this happens,
> additional nodes will be allocated via the system allocator as usual.
> More elaborate schemes are possible but might not add a lot of value.
>
> Thread safety is accomplished by placing the pool in a `thread_local`
> static during a Program's creation and destruction; the pool is freed
> when the program is destroyed. One important consequence of this
> strategy is that a program must free every node that it allocated during
> its creation, or else the node will be leaked. In debug, leaking a node
> will be detected and causes a DEBUGFAIL. In release, the pool will be
> freed despite having a live node in it, and if that node is later freed,
> that pointer will be passed to the system `free` (which is likely to
> cause a crash).
>
> In this CL, iOS does not support pooling, since support for
> `thread_local` was only added on iOS 9. This is fixed in the followup
> CL, http://review.skia.org/328837, which uses pthread keys on iOS.
>
> Nanobench shows ~15% improvement:
> (last week) http://screen/5CNBhTaZApcDA8h
> (today) http://screen/8ti5Rymvf6LUs8i
>
> Change-Id: I559de73606ee1be54e5eae7f82129dc928a63e3c
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/326876
> Commit-Queue: John Stiles <johnstiles@google.com>
> Reviewed-by: Ethan Nicholas <ethannicholas@google.com>
> Auto-Submit: John Stiles <johnstiles@google.com>
Change-Id: I8623a574a7e92332ff00b83982497863c8953929
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329171
Commit-Queue: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
2020-10-22 15:09:15 +00:00
|
|
|
// Do not return programs that failed to compile.
|
|
|
|
} else if (settings.fOptimize && !this->optimize(*program)) {
|
|
|
|
// Do not return programs that failed to optimize.
|
|
|
|
} else {
|
|
|
|
// We have a successful program!
|
|
|
|
success = true;
|
Revert "Reland "Create a basic IRNode pooling system.""
This reverts commit 5b09e6a00788d08a89604bd77f154c0e11dda035.
Reason for revert: breaking g3
Original change's description:
> Reland "Create a basic IRNode pooling system."
>
> This is a reland of e16eca95f5c08c2bdf72cc0b04af62a1071afd8d
>
> Original change's description:
> > Create a basic IRNode pooling system.
> >
> > Allocations are redirected by overriding `operator new` and `operator
> > delete` on the IRNode class. This allows us to use our existing
> > `unique_ptr` and `make_unique` calls as-is. The Pool class is simple;
> > it holds a fixed number of nodes and recycles them as they are returned.
> >
> > A fixed pool size of 2000 nodes was chosen. That is large enough to hold
> > the contents of `sksl_large` during compilation, but it can be
> > overflowed by very large shaders, or if multiple programs are converted
> > at the same time. Exhausting the pool is not a problem; if this happens,
> > additional nodes will be allocated via the system allocator as usual.
> > More elaborate schemes are possible but might not add a lot of value.
> >
> > Thread safety is accomplished by placing the pool in a `thread_local`
> > static during a Program's creation and destruction; the pool is freed
> > when the program is destroyed. One important consequence of this
> > strategy is that a program must free every node that it allocated during
> > its creation, or else the node will be leaked. In debug, leaking a node
> > will be detected and causes a DEBUGFAIL. In release, the pool will be
> > freed despite having a live node in it, and if that node is later freed,
> > that pointer will be passed to the system `free` (which is likely to
> > cause a crash).
> >
> > In this CL, iOS does not support pooling, since support for
> > `thread_local` was only added on iOS 9. This is fixed in the followup
> > CL, http://review.skia.org/328837, which uses pthread keys on iOS.
> >
> > Nanobench shows ~15% improvement:
> > (last week) http://screen/5CNBhTaZApcDA8h
> > (today) http://screen/8ti5Rymvf6LUs8i
> >
> > Change-Id: I559de73606ee1be54e5eae7f82129dc928a63e3c
> > Reviewed-on: https://skia-review.googlesource.com/c/skia/+/326876
> > Commit-Queue: John Stiles <johnstiles@google.com>
> > Reviewed-by: Ethan Nicholas <ethannicholas@google.com>
> > Auto-Submit: John Stiles <johnstiles@google.com>
>
> Change-Id: I114971e8e7ac0fabaf26216ae8813eeeaad0d4a2
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329086
> Reviewed-by: Ethan Nicholas <ethannicholas@google.com>
> Commit-Queue: John Stiles <johnstiles@google.com>
TBR=brianosman@google.com,ethannicholas@google.com,johnstiles@google.com
Change-Id: Ie77a23366f2ba52fcbb0a751d11ca2792790a30c
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329165
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
2020-10-22 14:04:32 +00:00
|
|
|
}
|
Reland "Create a basic IRNode pooling system."
This is a reland of e16eca95f5c08c2bdf72cc0b04af62a1071afd8d
This fixes the no-op (iOS) implementation of CreatePoolOnThread.
Original change's description:
> Create a basic IRNode pooling system.
>
> Allocations are redirected by overriding `operator new` and `operator
> delete` on the IRNode class. This allows us to use our existing
> `unique_ptr` and `make_unique` calls as-is. The Pool class is simple;
> it holds a fixed number of nodes and recycles them as they are returned.
>
> A fixed pool size of 2000 nodes was chosen. That is large enough to hold
> the contents of `sksl_large` during compilation, but it can be
> overflowed by very large shaders, or if multiple programs are converted
> at the same time. Exhausting the pool is not a problem; if this happens,
> additional nodes will be allocated via the system allocator as usual.
> More elaborate schemes are possible but might not add a lot of value.
>
> Thread safety is accomplished by placing the pool in a `thread_local`
> static during a Program's creation and destruction; the pool is freed
> when the program is destroyed. One important consequence of this
> strategy is that a program must free every node that it allocated during
> its creation, or else the node will be leaked. In debug, leaking a node
> will be detected and causes a DEBUGFAIL. In release, the pool will be
> freed despite having a live node in it, and if that node is later freed,
> that pointer will be passed to the system `free` (which is likely to
> cause a crash).
>
> In this CL, iOS does not support pooling, since support for
> `thread_local` was only added on iOS 9. This is fixed in the followup
> CL, http://review.skia.org/328837, which uses pthread keys on iOS.
>
> Nanobench shows ~15% improvement:
> (last week) http://screen/5CNBhTaZApcDA8h
> (today) http://screen/8ti5Rymvf6LUs8i
>
> Change-Id: I559de73606ee1be54e5eae7f82129dc928a63e3c
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/326876
> Commit-Queue: John Stiles <johnstiles@google.com>
> Reviewed-by: Ethan Nicholas <ethannicholas@google.com>
> Auto-Submit: John Stiles <johnstiles@google.com>
Change-Id: I8623a574a7e92332ff00b83982497863c8953929
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329171
Commit-Queue: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
2020-10-22 15:09:15 +00:00
|
|
|
|
|
|
|
program->fPool->detachFromThread();
|
|
|
|
return success ? std::move(program) : nullptr;
|
2016-07-01 15:22:01 +00:00
|
|
|
}
|
|
|
|
|
2020-11-18 20:38:39 +00:00
|
|
|
/**
 * Performs optimization on a loaded built-in module (as opposed to a user Program).
 * Repeatedly scans each function's control-flow graph and runs the inliner until the
 * module reaches a fixed point (no further changes) or an error is reported.
 * Returns true if optimization completed without errors.
 */
bool Compiler::optimize(LoadedModule& module) {
    SkASSERT(!fErrorCount);
    // Temporarily swap in default settings for module optimization; the IR generator's
    // settings pointer is restored before returning.
    const Program::Settings* oldSettings = fIRGenerator->fSettings;
    Program::Settings settings;
    fIRGenerator->fKind = module.fKind;
    fIRGenerator->fSettings = &settings;
    std::unique_ptr<ProgramUsage> usage = Analysis::GetUsage(module);

    fInliner.reset(fModifiers.back().get(), &settings);

    while (fErrorCount == 0) {
        bool madeChanges = false;

        // Scan and optimize based on the control-flow graph for each function.
        for (const auto& element : module.fElements) {
            if (element->is<FunctionDefinition>()) {
                madeChanges |= this->scanCFG(element->as<FunctionDefinition>(), usage.get());
            }
        }

        // Perform inline-candidate analysis and inline any functions deemed suitable.
        madeChanges |= fInliner.analyze(module.fElements, module.fSymbols.get(), usage.get());

        // Fixed point reached: nothing changed on this pass, so we are done.
        if (!madeChanges) {
            break;
        }
    }
    fIRGenerator->fSettings = oldSettings;
    return fErrorCount == 0;
}
|
|
|
|
|
2018-07-31 13:44:36 +00:00
|
|
|
// Runs the optimizer over a fully-compiled Program until it reaches a fixed point:
// CFG-based per-function optimization, inlining, then dead-function and dead-global
// elimination. Returns true if no errors were reported during optimization.
bool Compiler::optimize(Program& program) {
    SkASSERT(!fErrorCount);
    // Point the IR generator at this program's kind/settings for the duration of the pass.
    fIRGenerator->fKind = program.fKind;
    fIRGenerator->fSettings = &program.fSettings;
    // Usage counts are shared with the program and kept up to date as elements are removed.
    ProgramUsage* usage = program.fUsage.get();

    // Iterate until a full round of passes makes no changes (or an error occurs).
    while (fErrorCount == 0) {
        bool madeChanges = false;

        // Scan and optimize based on the control-flow graph for each function.
        for (const auto& element : program.ownedElements()) {
            if (element->is<FunctionDefinition>()) {
                madeChanges |= this->scanCFG(element->as<FunctionDefinition>(), usage);
            }
        }

        // Perform inline-candidate analysis and inline any functions deemed suitable.
        madeChanges |= fInliner.analyze(program.ownedElements(), program.fSymbols.get(), usage);

        // Remove dead functions. We wait until after analysis so that we still report errors,
        // even in unused code.
        if (program.fSettings.fRemoveDeadFunctions) {
            // A function is dead when it is not `main` and has a zero usage count.
            // Removing it also updates the usage data (which may expose more dead code
            // on the next iteration).
            auto isDeadFunction = [&](const ProgramElement* element) {
                if (!element->is<FunctionDefinition>()) {
                    return false;
                }
                const FunctionDefinition& fn = element->as<FunctionDefinition>();
                if (fn.declaration().name() != "main" && usage->get(fn.declaration()) == 0) {
                    usage->remove(*element);
                    madeChanges = true;
                    return true;
                }
                return false;
            };
            // Erase dead functions from both the owned and shared element lists.
            program.fElements.erase(
                    std::remove_if(program.fElements.begin(), program.fElements.end(),
                                   [&](const std::unique_ptr<ProgramElement>& element) {
                                       return isDeadFunction(element.get());
                                   }),
                    program.fElements.end());
            program.fSharedElements.erase(
                    std::remove_if(program.fSharedElements.begin(), program.fSharedElements.end(),
                                   isDeadFunction),
                    program.fSharedElements.end());
        }

        // Fragment processors keep their globals; otherwise, prune unused global variables.
        if (program.fKind != Program::kFragmentProcessor_Kind) {
            // Remove declarations of dead global variables
            auto isDeadVariable = [&](const ProgramElement* element) {
                if (!element->is<GlobalVarDeclaration>()) {
                    return false;
                }
                const GlobalVarDeclaration& global = element->as<GlobalVarDeclaration>();
                const VarDeclaration& varDecl = global.declaration()->as<VarDeclaration>();
                if (usage->isDead(varDecl.var())) {
                    madeChanges = true;
                    return true;
                }
                return false;
            };
            program.fElements.erase(
                    std::remove_if(program.fElements.begin(), program.fElements.end(),
                                   [&](const std::unique_ptr<ProgramElement>& element) {
                                       return isDeadVariable(element.get());
                                   }),
                    program.fElements.end());
            program.fSharedElements.erase(
                    std::remove_if(program.fSharedElements.begin(), program.fSharedElements.end(),
                                   isDeadVariable),
                    program.fSharedElements.end());
        }

        // Fixed point reached: nothing changed this round, so stop.
        if (!madeChanges) {
            break;
        }
    }
    return fErrorCount == 0;
}
|
|
|
|
|
2019-06-18 14:14:20 +00:00
|
|
|
#if defined(SKSL_STANDALONE) || SK_SUPPORT_GPU
|
|
|
|
|
2018-07-31 13:44:36 +00:00
|
|
|
// Emits the program as SPIR-V binary into `out`. When SK_ENABLE_SPIRV_VALIDATION is
// defined, the output is first generated into a buffer and run through SPIRV-Tools
// validation before being forwarded. Returns true on success.
bool Compiler::toSPIRV(Program& program, OutputStream& out) {
#ifdef SK_ENABLE_SPIRV_VALIDATION
    // Generate into a temporary buffer so the words can be validated before writing.
    StringStream buffer;
    AutoSource as(this, program.fSource.get());
    SPIRVCodeGenerator cg(fContext.get(), &program, this, &buffer);
    bool result = cg.generateCode();
    // NOTE(review): the buffered output is only forwarded to `out` inside this branch;
    // if fValidateSPIRV is false here, nothing is written — confirm this is intentional.
    if (result && program.fSettings.fValidateSPIRV) {
        spvtools::SpirvTools tools(SPV_ENV_VULKAN_1_0);
        const String& data = buffer.str();
        // SPIR-V is a stream of 32-bit words; the byte size must be word-aligned.
        SkASSERT(0 == data.size() % 4);
        String errors;
        // Collect every validator diagnostic into `errors` for a single report.
        auto dumpmsg = [&errors](spv_message_level_t, const char*, const spv_position_t&,
                                 const char* m) {
            errors.appendf("SPIR-V validation error: %s\n", m);
        };
        tools.SetMessageConsumer(dumpmsg);

        // Verify that the SPIR-V we produced is valid. At runtime, we will abort() with a message
        // explaining the error. In standalone mode (skslc), we will send the message, plus the
        // entire disassembled SPIR-V (for easier context & debugging) as *our* error message.
        result = tools.Validate((const uint32_t*) data.c_str(), data.size() / 4);

        if (!result) {
#if defined(SKSL_STANDALONE)
            // Convert the string-stream to a SPIR-V disassembly.
            std::string disassembly;
            if (tools.Disassemble((const uint32_t*)data.data(), data.size() / 4, &disassembly)) {
                errors.append(disassembly);
            }
            this->error(-1, errors);
#else
            SkDEBUGFAILF("%s", errors.c_str());
#endif
        }
        // Forward the (possibly invalid) binary to the caller's stream.
        out.write(data.c_str(), data.size());
    }
#else
    // Validation disabled: generate directly into the caller's stream.
    AutoSource as(this, program.fSource.get());
    SPIRVCodeGenerator cg(fContext.get(), &program, this, &out);
    bool result = cg.generateCode();
#endif
    return result;
}
|
|
|
|
|
2018-07-31 13:44:36 +00:00
|
|
|
// Convenience overload: emits SPIR-V into a String instead of an OutputStream.
// Returns true on success; `*out` is untouched on failure.
bool Compiler::toSPIRV(Program& program, String* out) {
    StringStream buffer;
    if (!this->toSPIRV(program, buffer)) {
        return false;
    }
    *out = buffer.str();
    return true;
}
|
|
|
|
|
2018-07-31 13:44:36 +00:00
|
|
|
// Emits the program as GLSL text into `out`. Returns true on success.
bool Compiler::toGLSL(Program& program, OutputStream& out) {
    AutoSource as(this, program.fSource.get());
    GLSLCodeGenerator cg(fContext.get(), &program, this, &out);
    return cg.generateCode();
}
|
|
|
|
|
2018-07-31 13:44:36 +00:00
|
|
|
// Convenience overload: emits GLSL into a String instead of an OutputStream.
// Returns true on success; `*out` is untouched on failure.
bool Compiler::toGLSL(Program& program, String* out) {
    StringStream buffer;
    if (!this->toGLSL(program, buffer)) {
        return false;
    }
    *out = buffer.str();
    return true;
}
|
|
|
|
|
2020-02-19 20:35:26 +00:00
|
|
|
// Emits the program as HLSL by first compiling to SPIR-V and then cross-compiling
// via SPIRV-Cross. Returns true on success.
bool Compiler::toHLSL(Program& program, String* out) {
    String spirv;
    bool compiled = this->toSPIRV(program, &spirv);
    // Only attempt the SPIR-V -> HLSL translation if SPIR-V generation succeeded.
    return compiled && SPIRVtoHLSL(spirv, out);
}
|
|
|
|
|
2018-07-31 13:44:36 +00:00
|
|
|
// Emits the program as Metal Shading Language text into `out`. Returns true on success.
// NOTE(review): unlike toSPIRV/toGLSL, no AutoSource is installed here — confirm that
// error positions are not needed (or already set) on this path.
bool Compiler::toMetal(Program& program, OutputStream& out) {
    MetalCodeGenerator cg(fContext.get(), &program, this, &out);
    return cg.generateCode();
}
|
|
|
|
|
2018-07-31 13:44:36 +00:00
|
|
|
// Convenience overload: emits Metal source into a String instead of an OutputStream.
// Returns true on success; `*out` is untouched on failure.
bool Compiler::toMetal(Program& program, String* out) {
    StringStream buffer;
    if (!this->toMetal(program, buffer)) {
        return false;
    }
    *out = buffer.str();
    return true;
}
|
|
|
|
|
2020-09-25 15:12:56 +00:00
|
|
|
#if defined(SKSL_STANDALONE) || GR_TEST_UTILS
|
2018-07-31 13:44:36 +00:00
|
|
|
// Emits a fragment-processor .cpp file for the program (used by the FP build step).
// `name` is the generated class name. Returns true on success.
bool Compiler::toCPP(Program& program, String name, OutputStream& out) {
    AutoSource as(this, program.fSource.get());
    CPPCodeGenerator cg(fContext.get(), &program, this, name, &out);
    return cg.generateCode();
}
|
|
|
|
|
2018-07-31 13:44:36 +00:00
|
|
|
// Emits a fragment-processor .h header for the program (used by the FP build step).
// `name` is the generated class name. Returns true on success.
bool Compiler::toH(Program& program, String name, OutputStream& out) {
    AutoSource as(this, program.fSource.get());
    HCodeGenerator cg(fContext.get(), &program, this, name, &out);
    return cg.generateCode();
}
|
2020-09-25 15:12:56 +00:00
|
|
|
#endif // defined(SKSL_STANDALONE) || GR_TEST_UTILS
|
2018-07-31 13:44:36 +00:00
|
|
|
|
2020-08-18 20:29:45 +00:00
|
|
|
#endif // defined(SKSL_STANDALONE) || SK_SUPPORT_GPU
|
2019-09-20 16:19:11 +00:00
|
|
|
|
|
|
|
#if !defined(SKSL_STANDALONE) && SK_SUPPORT_GPU
|
Remove 'in' variables from SkRuntimeEffect
Runtime effects previously allowed two kinds of global input variables:
'in' variables could be bool, int, or float. 'uniform' could be float,
vector, or matrix. Uniform variables worked like you'd expect, but 'in'
variables were baked into the program statically. There was a large
amount of machinery to make this work, and it meant that 'in' variables
needed to have values before we could make decisions about program
caching, and before we could catch some errors. It was also essentially
syntactic sugar over the client just inserting the value into their SkSL
as a string. Finally: No one was using the feature.
To simplify the mental model, and make the API much more predictable,
this CL removes 'in' variables entirely. We no longer need to
"specialize" runtime effect programs, which means we can catch more
errors up front (those not detected until optimization). All of the API
that referred to "inputs" (the previous term that unified 'in' and
'uniform') now just refers to "uniforms".
Bug: skia:10593
Change-Id: I971f620d868b259e652b3114f0b497c2620f4b0c
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/309050
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Reviewed-by: John Stiles <johnstiles@google.com>
2020-08-10 18:26:16 +00:00
|
|
|
// Emits the program as a pipeline-stage fragment (used for runtime effects),
// filling in `outArgs` with the generated code and metadata. Returns true on
// success; on failure `outArgs->fCode` is untouched.
bool Compiler::toPipelineStage(Program& program, PipelineStageArgs* outArgs) {
    AutoSource as(this, program.fSource.get());
    StringStream buffer;
    PipelineStageCodeGenerator cg(fContext.get(), &program, this, &buffer, outArgs);
    if (!cg.generateCode()) {
        return false;
    }
    outArgs->fCode = buffer.str();
    return true;
}
|
2019-06-18 14:14:20 +00:00
|
|
|
#endif
|
|
|
|
|
2019-03-21 15:05:37 +00:00
|
|
|
// Compiles the program to SkSL bytecode for the interpreter. Returns the bytecode
// on success, or nullptr if code generation failed.
std::unique_ptr<ByteCode> Compiler::toByteCode(Program& program) {
    AutoSource as(this, program.fSource.get());
    auto result = std::make_unique<ByteCode>();
    ByteCodeGenerator cg(fContext.get(), &program, this, result.get());
    if (!cg.generateCode()) {
        return nullptr;
    }
    return result;
}
|
|
|
|
|
2020-09-10 18:47:24 +00:00
|
|
|
// Returns the printable source-text spelling of an operator token.
// Aborts on tokens that are not operators.
const char* Compiler::OperatorName(Token::Kind op) {
    switch (op) {
        // Arithmetic
        case Token::Kind::TK_PLUS: return "+";
        case Token::Kind::TK_MINUS: return "-";
        case Token::Kind::TK_STAR: return "*";
        case Token::Kind::TK_SLASH: return "/";
        case Token::Kind::TK_PERCENT: return "%";
        // Shifts
        case Token::Kind::TK_SHL: return "<<";
        case Token::Kind::TK_SHR: return ">>";
        // Logical
        case Token::Kind::TK_LOGICALNOT: return "!";
        case Token::Kind::TK_LOGICALAND: return "&&";
        case Token::Kind::TK_LOGICALOR: return "||";
        case Token::Kind::TK_LOGICALXOR: return "^^";
        // Bitwise
        case Token::Kind::TK_BITWISENOT: return "~";
        case Token::Kind::TK_BITWISEAND: return "&";
        case Token::Kind::TK_BITWISEOR: return "|";
        case Token::Kind::TK_BITWISEXOR: return "^";
        // Assignment & comparison
        case Token::Kind::TK_EQ: return "=";
        case Token::Kind::TK_EQEQ: return "==";
        case Token::Kind::TK_NEQ: return "!=";
        case Token::Kind::TK_LT: return "<";
        case Token::Kind::TK_GT: return ">";
        case Token::Kind::TK_LTEQ: return "<=";
        case Token::Kind::TK_GTEQ: return ">=";
        // Compound assignment
        case Token::Kind::TK_PLUSEQ: return "+=";
        case Token::Kind::TK_MINUSEQ: return "-=";
        case Token::Kind::TK_STAREQ: return "*=";
        case Token::Kind::TK_SLASHEQ: return "/=";
        case Token::Kind::TK_PERCENTEQ: return "%=";
        case Token::Kind::TK_SHLEQ: return "<<=";
        case Token::Kind::TK_SHREQ: return ">>=";
        case Token::Kind::TK_BITWISEANDEQ: return "&=";
        case Token::Kind::TK_BITWISEOREQ: return "|=";
        case Token::Kind::TK_BITWISEXOREQ: return "^=";
        // Increment/decrement and comma
        case Token::Kind::TK_PLUSPLUS: return "++";
        case Token::Kind::TK_MINUSMINUS: return "--";
        case Token::Kind::TK_COMMA: return ",";
        default:
            ABORT("unsupported operator: %d\n", (int) op);
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
// Reports whether `op` is an assignment operator (plain `=` or any compound
// assignment such as `+=`, `<<=`, `&=`, ...).
bool Compiler::IsAssignment(Token::Kind op) {
    switch (op) {
        case Token::Kind::TK_EQ:
        case Token::Kind::TK_PLUSEQ:
        case Token::Kind::TK_MINUSEQ:
        case Token::Kind::TK_STAREQ:
        case Token::Kind::TK_SLASHEQ:
        case Token::Kind::TK_PERCENTEQ:
        case Token::Kind::TK_SHLEQ:
        case Token::Kind::TK_SHREQ:
        case Token::Kind::TK_BITWISEOREQ:
        case Token::Kind::TK_BITWISEXOREQ:
        case Token::Kind::TK_BITWISEANDEQ:
            return true;
        default:
            return false;
    }
}
|
|
|
|
|
2020-09-10 18:47:24 +00:00
|
|
|
// Maps a compound-assignment operator to its underlying binary operator
// (e.g. `+=` -> `+`). Any other token, including plain `=`, is returned unchanged.
Token::Kind Compiler::RemoveAssignment(Token::Kind op) {
    switch (op) {
        case Token::Kind::TK_PLUSEQ: return Token::Kind::TK_PLUS;
        case Token::Kind::TK_MINUSEQ: return Token::Kind::TK_MINUS;
        case Token::Kind::TK_STAREQ: return Token::Kind::TK_STAR;
        case Token::Kind::TK_SLASHEQ: return Token::Kind::TK_SLASH;
        case Token::Kind::TK_PERCENTEQ: return Token::Kind::TK_PERCENT;
        case Token::Kind::TK_SHLEQ: return Token::Kind::TK_SHL;
        case Token::Kind::TK_SHREQ: return Token::Kind::TK_SHR;
        case Token::Kind::TK_BITWISEOREQ: return Token::Kind::TK_BITWISEOR;
        case Token::Kind::TK_BITWISEXOREQ: return Token::Kind::TK_BITWISEXOR;
        case Token::Kind::TK_BITWISEANDEQ: return Token::Kind::TK_BITWISEAND;
        // Not a compound assignment: hand the token back untouched.
        default: return op;
    }
}
|
|
|
|
|
2017-09-11 20:50:14 +00:00
|
|
|
// Converts a character offset into the current source text to a 1-based
// (line, column) Position. Returns Position(-1, -1) when there is no source
// or the offset is negative.
Position Compiler::position(int offset) {
    if (!fSource || offset < 0) {
        return Position(-1, -1);
    }
    int line = 1;
    int column = 1;
    // Walk the source up to `offset`, counting newlines; O(offset), but only
    // used on the error-reporting path.
    for (int i = 0; i < offset; i++) {
        if ((*fSource)[i] == '\n') {
            ++line;
            column = 1;
        } else {
            ++column;
        }
    }
    return Position(line, column);
}
|
|
|
|
|
|
|
|
void Compiler::error(int offset, String msg) {
|
2016-12-12 20:33:30 +00:00
|
|
|
fErrorCount++;
|
2017-09-11 20:50:14 +00:00
|
|
|
Position pos = this->position(offset);
|
2020-12-07 17:47:17 +00:00
|
|
|
fErrorText += "error: " + (pos.fLine >= 1 ? to_string(pos.fLine) + ": " : "") + msg + "\n";
|
2016-11-21 20:59:48 +00:00
|
|
|
}
|
|
|
|
|
2020-12-09 21:09:41 +00:00
|
|
|
// Returns the accumulated error text (optionally appending a trailing error
// count) and resets the compiler's error state for the next compilation.
String Compiler::errorText(bool showCount) {
    if (showCount) {
        this->writeErrorCount();
    }
    fErrorCount = 0;
    String result(fErrorText);
    fErrorText.clear();
    return result;
}
|
|
|
|
|
2016-12-12 20:33:30 +00:00
|
|
|
void Compiler::writeErrorCount() {
|
|
|
|
if (fErrorCount) {
|
|
|
|
fErrorText += to_string(fErrorCount) + " error";
|
|
|
|
if (fErrorCount > 1) {
|
|
|
|
fErrorText += "s";
|
|
|
|
}
|
|
|
|
fErrorText += "\n";
|
2016-08-03 19:43:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-06 18:11:56 +00:00
|
|
|
} // namespace SkSL
|