skia2/fuzz/oss_fuzz/FuzzSKSL2GLSL.cpp
Brian Osman d7e7659cad Move GrShaderCaps from Program::Settings to Compiler
This ties the caps to the compiler instance, paving the way for
pre-optimizing the shared code. Most of the time, the compiler is
created and owned by the GPU instance, so this is fine. For runtime
effects, we now use the shared (device-agnostic) compiler instance
for the first compile, even on GPU. It's configured with caps that
apply no workarounds. We pass the user's SkSL to the backend as
cleanly as possible, and then apply any workarounds once it's part
of the full program.

Bug: skia:10905
Bug: skia:10868
Change-Id: Ifcf8d7ebda5d43ad8e180f06700a261811da83de
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/331493
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Reviewed-by: John Stiles <johnstiles@google.com>
2020-11-04 19:38:33 +00:00
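
The API shift described above is the one the fuzzer source below now relies on: the shader caps are handed to the SkSL::Compiler constructor instead of riding along with each program's settings. A minimal before/after sketch, assuming the pre-change Settings struct exposed a caps pointer (the field name fCaps is illustrative and may differ):

// Before (illustrative): caps were supplied per program via Settings.
//     SkSL::Compiler compiler;
//     SkSL::Program::Settings settings;
//     settings.fCaps = caps.get();

// After this change: caps are bound to the compiler instance at construction.
sk_sp<GrShaderCaps> caps = SkSL::ShaderCapsFactory::Default();
SkSL::Compiler compiler(caps.get());
SkSL::Program::Settings settings;   // no caps needed here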


/*
 * Copyright 2019 Google, LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrShaderCaps.h"
#include "src/sksl/SkSLCompiler.h"

#include "fuzz/Fuzz.h"

bool FuzzSKSL2GLSL(sk_sp<SkData> bytes) {
    sk_sp<GrShaderCaps> caps = SkSL::ShaderCapsFactory::Default();
    SkSL::Compiler compiler(caps.get());
    SkSL::String output;
    SkSL::Program::Settings settings;
    // Treat the raw fuzzer bytes as SkSL source for a fragment program.
    std::unique_ptr<SkSL::Program> program = compiler.convertProgram(
                                                    SkSL::Program::kFragment_Kind,
                                                    SkSL::String((const char*) bytes->data(),
                                                                 bytes->size()),
                                                    settings);
    // Fail if the source does not compile or cannot be lowered to GLSL.
    if (!program || !compiler.toGLSL(*program, &output)) {
        return false;
    }
    return true;
}

#if defined(SK_BUILD_FOR_LIBFUZZER)
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
    // Skip oversized inputs to keep individual fuzz iterations cheap.
    if (size > 3000) {
        return 0;
    }
    // Wrap the input buffer without copying; libFuzzer owns the memory.
    auto bytes = SkData::MakeWithoutCopy(data, size);
    FuzzSKSL2GLSL(bytes);
    return 0;
}
#endif
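
When SK_BUILD_FOR_LIBFUZZER is not defined, a crashing corpus entry can still be replayed through the same code path with a small standalone driver. The sketch below is hypothetical and not part of the Skia tree; it assumes the translation unit above is linked in, and uses SkData::MakeFromFileName to load one input file named on the command line:

#include <cstdio>
#include <utility>

#include "include/core/SkData.h"

// Declared here for the sketch; the definition lives in FuzzSKSL2GLSL.cpp above.
bool FuzzSKSL2GLSL(sk_sp<SkData> bytes);

int main(int argc, char** argv) {
    if (argc < 2) {
        std::fprintf(stderr, "usage: %s <sksl-input-file>\n", argv[0]);
        return 1;
    }
    // MakeFromFileName returns nullptr if the file cannot be read.
    sk_sp<SkData> bytes = SkData::MakeFromFileName(argv[1]);
    if (!bytes) {
        std::fprintf(stderr, "could not read %s\n", argv[1]);
        return 1;
    }
    // Same call the libFuzzer entry point makes; false means the input
    // failed to compile or could not be translated to GLSL.
    bool ok = FuzzSKSL2GLSL(std::move(bytes));
    std::fprintf(stderr, "FuzzSKSL2GLSL returned %s\n", ok ? "true" : "false");
    return 0;
}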