[infra] Overhaul gen_tasks_logic

- Add jobBuilder and taskBuilder for convenience.
- Add parts type with convenience matching funcs.

Change-Id: I8213a1e3742d055515e7b8520b2dec0c06c6f2d3
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/275076
Commit-Queue: Eric Boren <borenet@google.com>
Reviewed-by: Ben Wagner aka dogben <benjaminwagner@google.com>
Author: Eric Boren, 2020-03-09 08:43:45 -04:00 (committed by Skia Commit-Bot)
Parent: 424d28b070
Commit: c27e70235f
6 changed files with 1756 additions and 1348 deletions


@ -41,10 +41,10 @@ func keyParams(parts map[string]string) []string {
}
// dmFlags generates flags to DM based on the given task properties.
func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwareLabel string) ([]string, map[string]string) {
func (b *taskBuilder) dmFlags(internalHardwareLabel string) ([]string, map[string]string) {
properties := map[string]string{
"gitHash": specs.PLACEHOLDER_REVISION,
"builder": bot, //specs.PLACEHOLDER_TASK_NAME,
"builder": b.Name,
"buildbucket_build_id": specs.PLACEHOLDER_BUILDBUCKET_BUILD_ID,
"task_id": specs.PLACEHOLDER_TASK_ID,
"issue": specs.PLACEHOLDER_ISSUE,
@ -62,9 +62,6 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
configs := []string{}
blacklisted := []string{}
has := func(keyword string) bool {
return strings.Contains(bot, keyword)
}
hasConfig := func(cfg string) bool {
for _, c := range configs {
if c == cfg {
@ -73,14 +70,6 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
}
return false
}
hasExtraConfig := func(ec string) bool {
for _, e := range strings.Split(parts["extra_config"], "_") {
if e == ec {
return true
}
}
return false
}
filter := func(slice []string, elems ...string) []string {
m := make(map[string]bool, len(elems))
for _, e := range elems {
@ -138,14 +127,12 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
}
}
isLinux := has("Debian") || has("Ubuntu") || has("Housekeeper")
// Keys.
keys := keyParams(parts)
if has("Lottie") {
keys := keyParams(b.parts)
if b.extraConfig("Lottie") {
keys = append(keys, "renderer", "skottie")
}
if strings.Contains(parts["extra_config"], "DDL") {
if b.matchExtraConfig("DDL") {
// 'DDL' style means "--skpViewportSize 2048 --pr ~small"
keys = append(keys, "style", "DDL")
} else {
@ -161,11 +148,11 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
// - https://skia.googlesource.com/skia/+/ce06e261e68848ae21cac1052abc16bc07b961bf/tests/ProcessorTest.cpp#307
// Not MSAN due to:
// - https://skia.googlesource.com/skia/+/0ac06e47269a40c177747310a613d213c95d1d6d/infra/bots/recipe_modules/flavor/gn_flavor.py#80
if !has("Android") && !has("MSAN") {
if !b.os("Android") && !b.extraConfig("MSAN") {
args = append(args, "--randomProcessorTest")
}
if has("Pixel3") && has("Vulkan") {
if b.model("Pixel3", "Pixel3a") && b.extraConfig("Vulkan") {
args = append(args, "--dontReduceOpsTaskSplitting")
}
@ -174,17 +161,17 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
// 32-bit desktop bots tend to run out of memory, because they have relatively
// far more cores than RAM (e.g. 32 cores, 3G RAM). Hold them back a bit.
if has("-x86-") {
if b.arch("x86") {
threadLimit = 4
}
// These bots run out of memory easily.
if has("MotoG4") || has("Nexus7") {
if b.model("MotoG4", "Nexus7") {
threadLimit = MAIN_THREAD_ONLY
}
// Avoid issues with dynamically exceeding resource cache limits.
if has("Test") && has("DISCARDABLE") {
if b.matchExtraConfig("DISCARDABLE") {
threadLimit = MAIN_THREAD_ONLY
}
@ -194,15 +181,15 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
sampleCount := 0
glPrefix := ""
if has("SwiftShader") {
if b.extraConfig("SwiftShader") {
configs = append(configs, "gles", "glesdft")
args = append(args, "--disableDriverCorrectnessWorkarounds")
} else if parts["cpu_or_gpu"] == "CPU" {
} else if b.cpu() {
args = append(args, "--nogpu")
configs = append(configs, "8888")
if has("BonusConfigs") {
if b.extraConfig("BonusConfigs") {
configs = []string{
"g8", "565",
"pic-8888", "serialize-8888",
@ -210,7 +197,7 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
"p3", "ep3", "rec2020", "erec2020"}
}
if has("PDF") {
if b.extraConfig("PDF") {
configs = []string{"pdf"}
args = append(args, "--rasterize_pdf") // Works only on Mac.
// Take ~forever to rasterize:
@ -219,30 +206,30 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
blacklist("pdf gm _ longpathdash")
}
} else if parts["cpu_or_gpu"] == "GPU" {
} else if b.gpu() {
args = append(args, "--nocpu")
// Add in either gles or gl configs to the canonical set based on OS
sampleCount = 8
glPrefix = "gl"
if has("Android") || has("iOS") {
if b.os("Android", "iOS") {
sampleCount = 4
// We want to test the OpenGL config not the GLES config on the Shield
if !has("NVIDIA_Shield") {
if !b.model("NVIDIA_Shield") {
glPrefix = "gles"
}
// MSAA is disabled on Pixel3a (https://b.corp.google.com/issues/143074513).
if has("Pixel3a") {
if b.model("Pixel3a") {
sampleCount = 0
}
} else if has("Intel") {
} else if b.matchGpu("Intel") {
// MSAA doesn't work well on Intel GPUs chromium:527565, chromium:983926
sampleCount = 0
} else if has("ChromeOS") {
} else if b.os("ChromeOS") {
glPrefix = "gles"
}
if has("NativeFonts") {
if b.extraConfig("NativeFonts") {
configs = append(configs, glPrefix)
} else {
configs = append(configs, glPrefix, glPrefix+"dft", glPrefix+"srgb")
@ -252,15 +239,11 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
}
// The Tegra3 doesn't support MSAA
if has("Tegra3") ||
if b.gpu("Tegra3") ||
// We aren't interested in fixing msaa bugs on current iOS devices.
has("iPad4") ||
has("iPadPro") ||
has("iPhone6") ||
has("iPhone7") ||
b.model("iPad4", "iPadPro", "iPhone6", "iPhone7") ||
// skia:5792
has("IntelHD530") ||
has("IntelIris540") {
b.gpu("IntelHD530", "IntelIris540") {
configs = removeContains(configs, "msaa")
}
@ -268,7 +251,7 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
// GL is used by Chrome, GLES is used by ChromeOS.
// Also do the Ganesh threading verification test (render with and without
// worker threads, using only the SW path renderer, and compare the results).
if has("Intel") && isLinux {
if b.matchGpu("Intel") && b.isLinux() {
configs = append(configs, "gles", "glesdft", "glessrgb", "gltestthreading")
// skbug.com/6333, skbug.com/6419, skbug.com/6702
blacklist("gltestthreading gm _ lcdblendmodes")
@ -291,12 +274,12 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
}
// CommandBuffer bot *only* runs the command_buffer config.
if has("CommandBuffer") {
if b.extraConfig("CommandBuffer") {
configs = []string{"commandbuffer"}
}
// ANGLE bot *only* runs the angle configs
if has("ANGLE") {
if b.extraConfig("ANGLE") {
configs = []string{"angle_d3d11_es2",
"angle_d3d9_es2",
"angle_gl_es2",
@ -305,14 +288,14 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
configs = append(configs, fmt.Sprintf("angle_d3d11_es2_msaa%d", sampleCount))
configs = append(configs, fmt.Sprintf("angle_d3d11_es3_msaa%d", sampleCount))
}
if has("LenovoYogaC630") {
if b.model("LenovoYogaC630") {
// LenovoYogaC630 only supports D3D11, and to save time, we only test ES3
configs = []string{
"angle_d3d11_es3",
fmt.Sprintf("angle_d3d11_es3_msaa%d", sampleCount),
}
}
if has("GTX") || has("Quadro") {
if b.matchGpu("GTX", "Quadro") {
// See skia:7823 and chromium:693090.
configs = append(configs, "angle_gl_es3")
if sampleCount > 0 {
@ -320,61 +303,59 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
configs = append(configs, fmt.Sprintf("angle_gl_es3_msaa%d", sampleCount))
}
}
if has("NUC5i7RYH") {
if b.model("NUC5i7RYH") {
// skbug.com/7376
blacklist("_ test _ ProcessorCloneTest")
}
}
if has("AndroidOne") || (has("Nexus") && !has("Nexus5x")) || has("GalaxyS6") {
if b.model("AndroidOne", "GalaxyS6") || (b.model("Nexus5", "Nexus7")) {
// skbug.com/9019
blacklist("_ test _ ProcessorCloneTest")
blacklist("_ test _ Programs")
blacklist("_ test _ ProcessorOptimizationValidationTest")
}
if has("CommandBuffer") && has("MacBook10.1-") {
if b.extraConfig("CommandBuffer") && b.model("MacBook10.1") {
// skbug.com/9235
blacklist("_ test _ Programs")
}
// skbug.com/9033 - these devices run out of memory on this test
// when opList splitting reduction is enabled
if has("GPU") && (has("Nexus7") ||
has("NVIDIA_Shield") ||
has("Nexus5x") ||
(has("Win10") && has("GTX660") && has("Vulkan"))) {
if b.gpu() && (b.model("Nexus7", "NVIDIA_Shield", "Nexus5x") ||
(b.os("Win10") && b.gpu("GTX660") && b.extraConfig("Vulkan"))) {
blacklist("_", "gm", "_", "savelayer_clipmask")
}
// skbug.com/9123
if has("CommandBuffer") && has("IntelIris5100") {
if b.extraConfig("CommandBuffer") && b.gpu("IntelIris5100") {
blacklist("_", "test", "_", "AsyncReadPixels")
}
// skbug.com/9043 - these devices render this test incorrectly
// when opList splitting reduction is enabled
if has("GPU") && has("Vulkan") && (has("RadeonR9M470X") || has("RadeonHD7770")) {
if b.gpu() && b.extraConfig("Vulkan") && (b.gpu("RadeonR9M470X", "RadeonHD7770")) {
blacklist("_", "tests", "_", "VkDrawableImportTest")
}
if has("Vulkan") {
if b.extraConfig("Vulkan") {
configs = []string{"vk"}
if has("Android") {
if b.os("Android") {
configs = append(configs, "vkmsaa4")
} else {
// MSAA doesn't work well on Intel GPUs chromium:527565, chromium:983926, skia:9023
if !has("Intel") {
if !b.matchGpu("Intel") {
configs = append(configs, "vkmsaa8")
}
}
}
if has("Metal") {
if b.extraConfig("Metal") {
configs = []string{"mtl"}
if has("iOS") {
if b.os("iOS") {
configs = append(configs, "mtlmsaa4")
} else {
// MSAA doesn't work well on Intel GPUs chromium:527565, chromium:983926
if !has("Intel") {
if !b.matchGpu("Intel") {
configs = append(configs, "mtlmsaa8")
}
}
@ -382,8 +363,8 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
// Test 1010102 on our Linux/NVIDIA bots and the persistent cache config
// on the GL bots.
if has("QuadroP400") && !has("PreAbandonGpuContext") && !has("TSAN") && isLinux {
if has("Vulkan") {
if b.gpu("QuadroP400") && !b.extraConfig("PreAbandonGpuContext") && !b.extraConfig("TSAN") && b.isLinux() {
if b.extraConfig("Vulkan") {
configs = append(configs, "vk1010102")
// Decoding transparent images to 1010102 just looks bad
blacklist("vk1010102 image _ _")
@ -410,47 +391,47 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
// We also test the SkSL precompile config on Pixel2XL as a representative
// Android device - this feature is primarily used by Flutter.
if has("Pixel2XL") && !has("Vulkan") {
if b.model("Pixel2XL") && !b.extraConfig("Vulkan") {
configs = append(configs, "glestestprecompile")
}
// Test rendering to wrapped dsts on a few bots
// Also test "glenarrow", which hits F16 surfaces and F16 vertex colors.
if has("BonusConfigs") {
if b.extraConfig("BonusConfigs") {
configs = []string{"glbetex", "glbert", "glenarrow"}
}
if has("ChromeOS") {
if b.os("ChromeOS") {
// Just run GLES for now - maybe add gles_msaa4 in the future
configs = []string{"gles"}
}
// Test coverage counting path renderer.
if has("CCPR") {
if b.extraConfig("CCPR") {
configs = filter(configs, "gl", "gles")
args = append(args, "--pr", "ccpr", "--cc", "true", "--cachePathMasks", "false")
}
// Test GPU tessellation path renderer.
if has("GpuTess") {
if b.extraConfig("GpuTess") {
configs = []string{glPrefix + "msaa4"}
args = append(args, "--pr", "gtess")
}
// Test non-nvpr on NVIDIA.
if has("NonNVPR") {
if b.extraConfig("NonNVPR") {
configs = []string{"gl", "glmsaa4"}
args = append(args, "--pr", "~nvpr")
}
// DDL is a GPU-only feature
if has("DDL1") {
if b.extraConfig("DDL1") {
// This bot generates gl and vk comparison images for the large skps
configs = filter(configs, "gl", "vk", "mtl")
args = append(args, "--skpViewportSize", "2048")
args = append(args, "--pr", "~small")
}
if has("DDL3") {
if b.extraConfig("DDL3") {
// This bot generates the ddl-gl and ddl-vk images for the
// large skps and the gms
ddlConfigs := prefix(filter(configs, "gl", "vk", "mtl"), "ddl-")
@ -462,7 +443,7 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
}
// Sharding.
tf := parts["test_filter"]
tf := b.parts["test_filter"]
if tf != "" && tf != "All" {
// Expected format: shard_XX_YY
split := strings.Split(tf, "_")
@ -483,10 +464,10 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
// Run tests, gms, and image decoding tests everywhere.
args = append(args, "--src", "tests", "gm", "image", "lottie", "colorImage", "svg", "skp")
if has("GPU") {
if b.gpu() {
// Don't run the "svgparse_*" svgs on GPU.
blacklist("_ svg _ svgparse_")
} else if bot == "Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-ASAN" {
} else if b.Name == "Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-ASAN" {
// Only run the CPU SVGs on 8888.
blacklist("~8888 svg _ _")
} else {
@ -495,16 +476,16 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
}
// Eventually I'd like these to pass, but for now just skip 'em.
if has("SK_FORCE_RASTER_PIPELINE_BLITTER") {
if b.extraConfig("SK_FORCE_RASTER_PIPELINE_BLITTER") {
removeFromArgs("tests")
}
if has("NativeFonts") { // images won't exercise native font integration :)
if b.extraConfig("NativeFonts") { // images won't exercise native font integration :)
removeFromArgs("image")
removeFromArgs("colorImage")
}
if has("DDL") || has("PDF") {
if b.matchExtraConfig("DDL", "PDF") {
// The DDL and PDF bots just render the large skps and the gms
removeFromArgs("tests")
removeFromArgs("image")
@ -515,7 +496,7 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
removeFromArgs("skp")
}
if has("Lottie") {
if b.extraConfig("Lottie") {
// Only run the lotties on Lottie bots.
removeFromArgs("tests")
removeFromArgs("gm")
@ -536,7 +517,7 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
blacklist("g8 image _ _")
blacklist("g8 colorImage _ _")
if has("Valgrind") {
if b.extraConfig("Valgrind") {
// These take 18+ hours to run.
blacklist("pdf gm _ fontmgr_iter")
blacklist("pdf _ _ PANO_20121023_214540.jpg")
@ -548,16 +529,16 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
blacklist("_ test _ InitialTextureClear")
}
if has("TecnoSpark3Pro") {
if b.model("TecnoSpark3Pro") {
// skbug.com/9421
blacklist("_ test _ InitialTextureClear")
}
if has("iOS") {
if b.os("iOS") {
blacklist(glPrefix + " skp _ _")
}
if has("Mac") || has("iOS") {
if b.matchOs("Mac", "iOS") {
// CG fails on questionable bmps
blacklist("_ image gen_platf rgba32abf.bmp")
blacklist("_ image gen_platf rgb24prof.bmp")
@ -598,7 +579,7 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
}
// WIC fails on questionable bmps
if has("Win") {
if b.matchOs("Win") {
blacklist("_ image gen_platf pal8os2v2.bmp")
blacklist("_ image gen_platf pal8os2v2-16.bmp")
blacklist("_ image gen_platf rgba32abf.bmp")
@ -608,13 +589,13 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
blacklist("_ image gen_platf 4bpp-pixeldata-cropped.bmp")
blacklist("_ image gen_platf 32bpp-pixeldata-cropped.bmp")
blacklist("_ image gen_platf 24bpp-pixeldata-cropped.bmp")
if has("x86_64") && has("CPU") {
if b.arch("x86_64") && b.cpu() {
// This GM triggers a SkSmallAllocator assert.
blacklist("_ gm _ composeshader_bitmap")
}
}
if has("Win") || has("Mac") {
if b.matchOs("Win", "Mac") {
// WIC and CG fail on arithmetic jpegs
blacklist("_ image gen_platf testimgari.jpg")
// More questionable bmps that fail on Mac, too. skbug.com/6984
@ -627,7 +608,7 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
// avoid lots of images on Gold.
blacklist("_ image gen_platf error")
if has("Android") || has("iOS") {
if b.os("Android", "iOS") {
// This test crashes the N9 (perhaps because of large malloc/frees). It also
// is fairly slow and not platform-specific. So we just disable it on all of
// Android and iOS. skia:5438
@ -712,18 +693,18 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
blacklist("serialize-8888", "gm", "_", test)
}
if !has("Mac") {
if !b.matchOs("Mac") {
for _, test := range []string{"bleed_alpha_image", "bleed_alpha_image_shader"} {
blacklist("serialize-8888", "gm", "_", test)
}
}
// It looks like we skip these only for out-of-memory concerns.
if has("Win") || has("Android") {
if b.matchOs("Win", "Android") {
for _, test := range []string{"verylargebitmap", "verylarge_picture_image"} {
blacklist("serialize-8888", "gm", "_", test)
}
}
if has("Mac") && has("CPU") {
if b.matchOs("Mac") && b.cpu() {
// skia:6992
blacklist("pic-8888", "gm", "_", "encode-platform")
blacklist("serialize-8888", "gm", "_", "encode-platform")
@ -768,7 +749,7 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
// skbug.com/4888
// Blacklist RAW images (and a few large PNGs) on GPU bots
// until we can resolve failures.
if has("GPU") {
if b.gpu() {
blacklist("_ image _ interlaced1.png")
blacklist("_ image _ interlaced2.png")
blacklist("_ image _ interlaced3.png")
@ -778,7 +759,7 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
}
// Blacklist memory intensive tests on 32-bit bots.
if has("Win8") && has("x86-") {
if b.os("Win8") && b.arch("x86") {
blacklist("_ image f16 _")
blacklist("_ image _ abnormal.wbmp")
blacklist("_ image _ interlaced1.png")
@ -789,12 +770,12 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
}
}
if has("Nexus5") && has("GPU") {
if b.model("Nexus5", "Nexus5x") && b.gpu() {
// skia:5876
blacklist("_", "gm", "_", "encode-platform")
}
if has("AndroidOne-GPU") { // skia:4697, skia:4704, skia:4694, skia:4705
if b.model("AndroidOne") && b.gpu() { // skia:4697, skia:4704, skia:4694, skia:4705
blacklist("_", "gm", "_", "bigblurs")
blacklist("_", "gm", "_", "bleed")
blacklist("_", "gm", "_", "bleed_alpha_bmp")
@ -818,31 +799,31 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
}
match := []string{}
if has("Valgrind") { // skia:3021
if b.extraConfig("Valgrind") { // skia:3021
match = append(match, "~Threaded")
}
if has("Valgrind") && has("PreAbandonGpuContext") {
if b.extraConfig("Valgrind") && b.extraConfig("PreAbandonGpuContext") {
// skia:6575
match = append(match, "~multipicturedraw_")
}
if has("AndroidOne") {
if b.model("AndroidOne") {
match = append(match, "~WritePixels") // skia:4711
match = append(match, "~PremulAlphaRoundTrip_Gpu") // skia:7501
match = append(match, "~ReimportImageTextureWithMipLevels") // skia:8090
}
if has("GalaxyS6") {
if b.model("GalaxyS6") {
match = append(match, "~SpecialImage") // skia:6338
match = append(match, "~skbug6653") // skia:6653
}
if has("MSAN") {
if b.extraConfig("MSAN") {
match = append(match, "~Once", "~Shared") // Not sure what's up with these tests.
}
if has("TSAN") {
if b.extraConfig("TSAN") {
match = append(match, "~ReadWriteAlpha") // Flaky on TSAN-covered on nvidia bots.
match = append(match, "~RGBA4444TextureTest", // Flakier than they are important.
"~RGB565TextureTest")
@ -850,38 +831,38 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
// By default, we test with GPU threading enabled, unless specifically
// disabled.
if has("NoGPUThreads") {
if b.extraConfig("NoGPUThreads") {
args = append(args, "--gpuThreads", "0")
}
if has("Vulkan") && has("Adreno530") {
if b.extraConfig("Vulkan") && b.gpu("Adreno530") {
// skia:5777
match = append(match, "~CopySurface")
}
if has("Vulkan") && has("Adreno") {
if b.extraConfig("Vulkan") && b.matchGpu("Adreno") {
// skia:7663
match = append(match, "~WritePixelsNonTextureMSAA_Gpu")
match = append(match, "~WritePixelsMSAA_Gpu")
}
if has("Vulkan") && isLinux && has("IntelIris640") {
if b.extraConfig("Vulkan") && b.isLinux() && b.gpu("IntelIris640") {
match = append(match, "~VkHeapTests") // skia:6245
}
if isLinux && has("IntelIris640") {
if b.isLinux() && b.gpu("IntelIris640") {
match = append(match, "~Programs") // skia:7849
}
if has("TecnoSpark3Pro") {
if b.model("TecnoSpark3Pro") {
match = append(match, "~Programs") // skia:9814
}
if has("IntelIris640") || has("IntelHD615") || has("IntelHDGraphics615") {
if b.gpu("IntelIris640", "IntelHD615", "IntelHDGraphics615") {
match = append(match, "~^SRGBReadWritePixels$") // skia:9225
}
if has("Vulkan") && isLinux && has("IntelHD405") {
if b.extraConfig("Vulkan") && b.isLinux() && b.gpu("IntelHD405") {
// skia:7322
blacklist("vk", "gm", "_", "skbug_257")
blacklist("vk", "gm", "_", "filltypespersp")
@ -900,52 +881,52 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
match = append(match, "~^WritePixelsMSAA_Gpu$")
}
if has("Vulkan") && has("GTX660") && has("Win") {
if b.extraConfig("Vulkan") && b.gpu("GTX660") && b.matchOs("Win") {
// skbug.com/8047
match = append(match, "~FloatingPointTextureTest$")
}
if has("Metal") && has("HD8870M") && has("Mac") {
if b.extraConfig("Metal") && b.gpu("RadeonHD8870M") && b.matchOs("Mac") {
// skia:9255
match = append(match, "~WritePixelsNonTextureMSAA_Gpu")
}
if has("ANGLE") {
if b.extraConfig("ANGLE") {
// skia:7835
match = append(match, "~BlurMaskBiggerThanDest")
}
if has("IntelIris6100") && has("ANGLE") && has("Release") {
if b.gpu("IntelIris6100") && b.extraConfig("ANGLE") && !b.debug() {
// skia:7376
match = append(match, "~^ProcessorOptimizationValidationTest$")
}
if (has("IntelIris6100") || has("IntelHD4400")) && has("ANGLE") {
if b.gpu("IntelIris6100", "IntelHD4400") && b.extraConfig("ANGLE") {
// skia:6857
blacklist("angle_d3d9_es2", "gm", "_", "lighting")
}
if has("PowerVRGX6250") {
if b.gpu("PowerVRGX6250") {
match = append(match, "~gradients_view_perspective_nodither") //skia:6972
}
if has("-arm-") && has("ASAN") {
if b.arch("arm") && b.extraConfig("ASAN") {
// TODO: can we run with env allocator_may_return_null=1 instead?
match = append(match, "~BadImage")
}
if has("Mac") && has("IntelHD6000") {
if b.matchOs("Mac") && b.gpu("IntelHD6000") {
// skia:7574
match = append(match, "~^ProcessorCloneTest$")
match = append(match, "~^GrMeshTest$")
}
if has("Mac") && has("IntelHD615") {
if b.matchOs("Mac") && b.gpu("IntelHD615") {
// skia:7603
match = append(match, "~^GrMeshTest$")
}
if has("LenovoYogaC630") && has("ANGLE") {
if b.model("LenovoYogaC630") && b.extraConfig("ANGLE") {
// skia:9275
blacklist("_", "tests", "_", "Programs")
// skia:8976
@ -966,22 +947,25 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
// These bots run out of memory running RAW codec tests. Do not run them in
// parallel
if has("Nexus5") || has("Nexus9") {
// TODO(borenet): Previously this was `'Nexus5' in bot or 'Nexus9' in bot`
// which also matched 'Nexus5x'. I added that here to maintain the
// existing behavior, but we should verify that it's needed.
if b.model("Nexus5", "Nexus5x", "Nexus9") {
args = append(args, "--noRAW_threading")
}
if has("FSAA") {
if b.extraConfig("FSAA") {
args = append(args, "--analyticAA", "false")
}
if has("FAAA") {
if b.extraConfig("FAAA") {
args = append(args, "--forceAnalyticAA")
}
if !has("NativeFonts") {
if !b.extraConfig("NativeFonts") {
args = append(args, "--nonativeFonts")
}
if has("GDI") {
if b.extraConfig("GDI") {
args = append(args, "--gdi")
}
@ -989,13 +973,13 @@ func dmFlags(bot string, parts map[string]string, doUpload bool, internalHardwar
args = append(args, "--verbose")
// See skia:2789.
if hasExtraConfig("AbandonGpuContext") {
if b.extraConfig("AbandonGpuContext") {
args = append(args, "--abandonGpuContext")
}
if hasExtraConfig("PreAbandonGpuContext") {
if b.extraConfig("PreAbandonGpuContext") {
args = append(args, "--preAbandonGpuContext")
}
if hasExtraConfig("ReleaseAndAbandonGpuContext") {
if b.extraConfig("ReleaseAndAbandonGpuContext") {
args = append(args, "--releaseAndAbandonGpuContext")
}
return args, properties
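
For orientation, a minimal sketch (editorial, not part of the commit) of the pattern this rewrite applies throughout dmFlags: the old helper matched arbitrary substrings of the bot name, while the new taskBuilder methods test specific parts of the name. Both snippets below are taken from the diff above.

// Old style: a closure over the raw bot name, matching any substring.
has := func(keyword string) bool {
	return strings.Contains(bot, keyword)
}
if has("Vulkan") && has("Adreno530") {
	match = append(match, "~CopySurface") // skia:5777
}

// New style: part-aware matchers on the taskBuilder.
if b.extraConfig("Vulkan") && b.gpu("Adreno530") {
	match = append(match, "~CopySurface") // skia:5777
}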

File diff suppressed because it is too large.


@ -0,0 +1,210 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package gen_tasks_logic
import (
"log"
"go.skia.org/infra/task_scheduler/go/specs"
)
// jobBuilder provides helpers for creating a job.
type jobBuilder struct {
*builder
parts
Name string
Spec *specs.JobSpec
}
// newJobBuilder returns a jobBuilder for the given job name.
func newJobBuilder(b *builder, name string) *jobBuilder {
p, err := b.jobNameSchema.ParseJobName(name)
if err != nil {
log.Fatal(err)
}
return &jobBuilder{
builder: b,
parts: p,
Name: name,
Spec: &specs.JobSpec{},
}
}
// priority sets the priority of the job.
func (b *jobBuilder) priority(p float64) {
b.Spec.Priority = p
}
// trigger dictates when the job should be triggered.
func (b *jobBuilder) trigger(trigger string) {
b.Spec.Trigger = trigger
}
// Create a taskBuilder and run the given function for it.
func (b *jobBuilder) addTask(name string, fn func(*taskBuilder)) {
tb := newTaskBuilder(b, name)
fn(tb)
b.MustAddTask(tb.Name, tb.Spec)
// Add the task to the Job's dependency set, removing any which are
// accounted for by the new task's dependencies.
b.Spec.TaskSpecs = append(b.Spec.TaskSpecs, tb.Name)
newSpecs := make([]string, 0, len(b.Spec.TaskSpecs))
for _, t := range b.Spec.TaskSpecs {
if !In(t, tb.Spec.Dependencies) {
newSpecs = append(newSpecs, t)
}
}
b.Spec.TaskSpecs = newSpecs
}
// isolateCIPDAsset generates a task to isolate the given CIPD asset. Returns
// the name of the task.
func (b *jobBuilder) isolateCIPDAsset(asset string) string {
cfg, ok := ISOLATE_ASSET_MAPPING[asset]
if !ok {
log.Fatalf("No isolate task for asset %q", asset)
}
b.addTask(cfg.isolateTaskName, func(b *taskBuilder) {
b.asset(asset)
b.cmd("/bin/cp", "-rL", cfg.path, "${ISOLATED_OUTDIR}")
b.linuxGceDimensions(MACHINE_TYPE_SMALL)
b.idempotent()
b.isolate("empty.isolate")
})
return cfg.isolateTaskName
}
// genTasksForJob generates the tasks needed by this job.
func (b *jobBuilder) genTasksForJob() {
// Bundle Recipes.
if b.Name == BUNDLE_RECIPES_NAME {
b.bundleRecipes()
return
}
if b.Name == BUILD_TASK_DRIVERS_NAME {
b.buildTaskDrivers()
return
}
// Isolate CIPD assets.
if b.matchExtraConfig("Isolate") {
for asset, cfg := range ISOLATE_ASSET_MAPPING {
if cfg.isolateTaskName == b.Name {
b.isolateCIPDAsset(asset)
return
}
}
}
// RecreateSKPs.
if b.extraConfig("RecreateSKPs") {
b.recreateSKPs()
return
}
// Update Go Dependencies.
if b.extraConfig("UpdateGoDeps") {
b.updateGoDeps()
return
}
// Create docker image.
if b.extraConfig("CreateDockerImage") {
b.createDockerImage(b.extraConfig("WASM"))
return
}
// Push apps from docker image.
if b.extraConfig("PushAppsFromSkiaDockerImage") {
b.createPushAppsFromSkiaDockerImage()
return
} else if b.extraConfig("PushAppsFromWASMDockerImage") {
b.createPushAppsFromWASMDockerImage()
return
} else if b.extraConfig("PushAppsFromSkiaWASMDockerImages") {
b.createPushAppsFromSkiaWASMDockerImages()
return
}
// Infra tests.
if b.extraConfig("InfraTests") {
b.infra()
return
}
// Housekeepers.
if b.Name == "Housekeeper-PerCommit" {
b.housekeeper()
return
}
if b.Name == "Housekeeper-PerCommit-CheckGeneratedFiles" {
b.checkGeneratedFiles()
return
}
if b.Name == "Housekeeper-OnDemand-Presubmit" {
b.priority(1)
b.presubmit()
return
}
// Compile bots.
if b.role("Build") {
if b.extraConfig("Android") && b.extraConfig("Framework") {
// Android Framework compile tasks use a different recipe.
b.androidFrameworkCompile()
return
} else if b.extraConfig("G3") && b.extraConfig("Framework") {
// G3 compile tasks use a different recipe.
b.g3FrameworkCompile()
return
} else {
b.compile()
return
}
}
// BuildStats bots. This computes things like binary size.
if b.role("BuildStats") {
b.buildstats()
return
}
// Valgrind runs at a low priority so that it doesn't occupy all the bots.
if b.extraConfig("Valgrind") {
// Priority of 0.085 should result in Valgrind tasks with a blamelist of ~10 commits having the
// same score as other tasks with a blamelist of 1 commit, when we have insufficient bot
// capacity to run more frequently.
b.priority(0.085)
}
// Test bots.
if b.role("Test") {
b.test()
return
}
// Perf bots.
if b.role("Perf") {
b.perf()
return
}
log.Fatalf("Don't know how to handle job %q", b.Name)
}
func (b *jobBuilder) finish() {
// Add the Job spec.
if b.frequency("Nightly") {
b.trigger(specs.TRIGGER_NIGHTLY)
} else if b.frequency("Weekly") {
b.trigger(specs.TRIGGER_WEEKLY)
} else if b.extraConfig("Flutter", "CommandBuffer") {
b.trigger(specs.TRIGGER_MASTER_ONLY)
} else if b.frequency("OnDemand") || (b.extraConfig("Framework") && b.extraConfig("Android", "G3")) {
b.trigger(specs.TRIGGER_ON_DEMAND)
} else {
b.trigger(specs.TRIGGER_ANY_BRANCH)
}
b.MustAddJob(b.Name, b.Spec)
}
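
How these pieces are driven is outside this file, but the intended flow is roughly the following sketch; `b` is assumed to be the shared *builder and jobNames the list of job names from jobs.json, neither of which appears in this diff.

// Hypothetical driver loop (sketch only).
for _, name := range jobNames {
	jb := newJobBuilder(b, name)
	jb.genTasksForJob() // adds the task(s) this job depends on
	jb.finish()         // sets the trigger and registers the JobSpec
}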


@ -6,38 +6,29 @@ package gen_tasks_logic
import (
"fmt"
"sort"
"strings"
"go.skia.org/infra/task_scheduler/go/specs"
)
// nanobenchFlags generates flags to Nanobench based on the given task properties.
func nanobenchFlags(bot string, parts map[string]string, doUpload bool) ([]string, map[string]string) {
has := func(keyword string) bool {
return strings.Contains(bot, keyword)
}
// TODO(borenet): This duplicates code in recipes_modules/vars/api.py and will
// be removed soon.
isLinux := has("Ubuntu") || has("Debian") || has("Housekeeper")
func (b *taskBuilder) nanobenchFlags(doUpload bool) ([]string, map[string]string) {
args := []string{
"nanobench",
"--pre_log",
}
if has("GPU") {
if b.gpu() {
args = append(args, "--gpuStatsDump", "true")
}
args = append(args, "--scales", "1.0", "1.1")
configs := []string{}
if parts["cpu_or_gpu"] == "CPU" {
if b.cpu() {
args = append(args, "--nogpu")
configs = append(configs, "8888", "nonrendering")
if has("BonusConfigs") {
if b.extraConfig("BonusConfigs") {
configs = []string{
"f16",
"srgb",
@ -47,32 +38,32 @@ func nanobenchFlags(bot string, parts map[string]string, doUpload bool) ([]strin
}
}
if has("Nexus7") {
if b.model("Nexus7") {
args = append(args, "--purgeBetweenBenches") // Debugging skia:8929
}
} else if parts["cpu_or_gpu"] == "GPU" {
} else if b.gpu() {
args = append(args, "--nocpu")
glPrefix := "gl"
sampleCount := 8
if has("Android") || has("iOS") {
if b.os("Android", "iOS") {
sampleCount = 4
// The NVIDIA_Shield has a regular OpenGL implementation. We bench that
// instead of ES.
if !has("NVIDIA_Shield") {
if !b.model("NVIDIA_Shield") {
glPrefix = "gles"
}
// iOS crashes with MSAA (skia:6399)
// Nexus7 (Tegra3) does not support MSAA.
// MSAA is disabled on Pixel3a (https://b.corp.google.com/issues/143074513).
if has("iOS") || has("Nexus7") || has("Pixel3a") {
if b.os("iOS") || b.model("Nexus7", "Pixel3a") {
sampleCount = 0
}
} else if has("Intel") {
} else if b.matchGpu("Intel") {
// MSAA doesn't work well on Intel GPUs chromium:527565, chromium:983926
sampleCount = 0
} else if has("ChromeOS") {
} else if b.os("ChromeOS") {
glPrefix = "gles"
}
@ -83,44 +74,44 @@ func nanobenchFlags(bot string, parts map[string]string, doUpload bool) ([]strin
// We want to test both the OpenGL config and the GLES config on Linux Intel:
// GL is used by Chrome, GLES is used by ChromeOS.
if has("Intel") && isLinux {
if b.matchGpu("Intel") && b.isLinux() {
configs = append(configs, "gles", "glessrgb")
}
if has("CommandBuffer") {
if b.extraConfig("CommandBuffer") {
configs = []string{"commandbuffer"}
}
if has("Vulkan") {
if b.extraConfig("Vulkan") {
configs = []string{"vk"}
if has("Android") {
if b.os("Android") {
// skbug.com/9274
if !has("Pixel2XL") {
if !b.model("Pixel2XL") {
configs = append(configs, "vkmsaa4")
}
} else {
// MSAA doesn't work well on Intel GPUs chromium:527565, chromium:983926, skia:9023
if !has("Intel") {
if !b.matchGpu("Intel") {
configs = append(configs, "vkmsaa8")
}
}
}
if has("Metal") {
if b.extraConfig("Metal") {
configs = []string{"mtl"}
if has("iOS") {
if b.os("iOS") {
configs = append(configs, "mtlmsaa4")
} else {
configs = append(configs, "mtlmsaa8")
}
}
if has("ANGLE") {
if b.extraConfig("ANGLE") {
// Test only ANGLE configs.
configs = []string{"angle_d3d11_es2"}
if sampleCount > 0 {
configs = append(configs, fmt.Sprintf("angle_d3d11_es2_msaa%d", sampleCount))
}
if has("QuadroP400") {
if b.gpu("QuadroP400") {
// See skia:7823 and chromium:693090.
configs = append(configs, "angle_gl_es2")
if sampleCount > 0 {
@ -128,7 +119,7 @@ func nanobenchFlags(bot string, parts map[string]string, doUpload bool) ([]strin
}
}
}
if has("ChromeOS") {
if b.os("ChromeOS") {
// Just run GLES for now - maybe add gles_msaa4 in the future
configs = []string{"gles"}
}
@ -139,11 +130,11 @@ func nanobenchFlags(bot string, parts map[string]string, doUpload bool) ([]strin
// By default, we test with GPU threading enabled, unless specifically
// disabled.
if has("NoGPUThreads") {
if b.extraConfig("NoGPUThreads") {
args = append(args, "--gpuThreads", "0")
}
if has("Debug") || has("ASAN") || has("Valgrind") {
if b.debug() || b.extraConfig("ASAN") || b.extraConfig("Valgrind") {
args = append(args, "--loops", "1")
args = append(args, "--samples", "1")
// Ensure that the bot framework does not think we have timed out.
@ -151,7 +142,7 @@ func nanobenchFlags(bot string, parts map[string]string, doUpload bool) ([]strin
}
// skia:9036
if has("NVIDIA_Shield") {
if b.model("NVIDIA_Shield") {
args = append(args, "--dontReduceOpsTaskSplitting")
}
@ -159,16 +150,16 @@ func nanobenchFlags(bot string, parts map[string]string, doUpload bool) ([]strin
verbose := false
match := []string{}
if has("Android") {
if b.os("Android") {
// Segfaults when run as GPU bench. Very large texture?
match = append(match, "~blurroundrect")
match = append(match, "~patch_grid") // skia:2847
match = append(match, "~desk_carsvg")
}
if has("Nexus5") {
if b.matchModel("Nexus5") {
match = append(match, "~keymobi_shop_mobileweb_ebay_com.skp") // skia:5178
}
if has("iOS") {
if b.os("iOS") {
match = append(match, "~blurroundrect")
match = append(match, "~patch_grid") // skia:2847
match = append(match, "~desk_carsvg")
@ -176,15 +167,15 @@ func nanobenchFlags(bot string, parts map[string]string, doUpload bool) ([]strin
match = append(match, "~path_hairline")
match = append(match, "~GLInstancedArraysBench") // skia:4714
}
if has("iOS") && has("Metal") {
if b.os("iOS") && b.extraConfig("Metal") {
// skia:9799
match = append(match, "~compositing_images_tile_size")
}
if has("Intel") && isLinux && !has("Vulkan") {
if b.matchGpu("Intel") && b.isLinux() && !b.extraConfig("Vulkan") {
// TODO(dogben): Track down what's causing bots to die.
verbose = true
}
if has("IntelHD405") && isLinux && has("Vulkan") {
if b.gpu("IntelHD405") && b.isLinux() && b.extraConfig("Vulkan") {
// skia:7322
match = append(match, "~desk_carsvg.skp_1")
match = append(match, "~desk_googlehome.skp")
@ -211,27 +202,27 @@ func nanobenchFlags(bot string, parts map[string]string, doUpload bool) ([]strin
match = append(match, "~top25desk_ebay.skp_1.1")
match = append(match, "~top25desk_ebay.skp_1.1_mpd")
}
if has("Vulkan") && has("GTX660") {
if b.extraConfig("Vulkan") && b.gpu("GTX660") {
// skia:8523 skia:9271
match = append(match, "~compositing_images")
}
if has("MacBook10.1") && has("CommandBuffer") {
if b.model("MacBook10.1") && b.extraConfig("CommandBuffer") {
match = append(match, "~^desk_micrographygirlsvg.skp_1.1$")
}
if has("ASAN") && has("CPU") {
if b.extraConfig("ASAN") && b.cpu() {
// floor2int_undef benches undefined behavior, so ASAN correctly complains.
match = append(match, "~^floor2int_undef$")
}
if has("AcerChromebook13_CB5_311-GPU-TegraK1") {
if b.model("AcerChromebook13_CB5_311") && b.gpu() {
// skia:7551
match = append(match, "~^shapes_rrect_inner_rrect_50_500x500$")
}
if has("Perf-Android-Clang-Pixel3a-GPU-Adreno615-arm64-Release-All-Android") {
if b.model("Pixel3a") {
// skia:9413
match = append(match, "~^path_text$")
match = append(match, "~^path_text_clipped_uncached$")
}
if has("Perf-Android-Clang-Pixel3-GPU-Adreno630-arm64-Release-All-Android_Vulkan") {
if b.model("Pixel3") && b.extraConfig("Vulkan") {
// skia:9972
match = append(match, "~^path_text_clipped_uncached$")
}
@ -282,15 +273,15 @@ func nanobenchFlags(bot string, parts map[string]string, doUpload bool) ([]strin
"role": true,
"test_filter": true,
}
keys := make([]string, 0, len(parts))
for k := range parts {
keys := make([]string, 0, len(b.parts))
for k := range b.parts {
keys = append(keys, k)
}
sort.Strings(keys)
args = append(args, "--key")
for _, k := range keys {
if !keysBlacklist[k] {
args = append(args, k, parts[k])
args = append(args, k, b.parts[k])
}
}
}
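
As in dm_flags, checks that previously matched an entire bot name now test only the parts that matter. A before/after excerpt from the diff above:

// Before: substring match against the full bot name.
if has("Perf-Android-Clang-Pixel3a-GPU-Adreno615-arm64-Release-All-Android") {
	// skia:9413
	match = append(match, "~^path_text$")
}

// After: only the relevant part is consulted.
if b.model("Pixel3a") {
	// skia:9413
	match = append(match, "~^path_text$")
}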


@ -0,0 +1,421 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package gen_tasks_logic
/*
This file contains logic related to task/job name schemas.
*/
import (
"encoding/json"
"fmt"
"log"
"os"
"regexp"
"strings"
)
// parts represents the key/value pairs which make up task and job names.
type parts map[string]string
// equal returns true if the given part of this job's name equals any of the
// given values. Panics if no values are provided or the given part is not
// defined, except for "extra_config" which is allowed not to be defined.
func (p parts) equal(part string, eq ...string) bool {
if len(eq) == 0 {
log.Fatal("No values provided for equal!")
}
v := p[part]
for _, e := range eq {
if v == e {
return true
}
}
return false
}
// role returns true if the role for this job equals any of the given values.
func (p parts) role(eq ...string) bool {
return p.equal("role", eq...)
}
// os returns true if the OS for this job equals any of the given values.
func (p parts) os(eq ...string) bool {
return p.equal("os", eq...)
}
// compiler returns true if the compiler for this job equals any of the given
// values.
func (p parts) compiler(eq ...string) bool {
return p.equal("compiler", eq...)
}
// model returns true if the model for this job equals any of the given values.
func (p parts) model(eq ...string) bool {
return p.equal("model", eq...)
}
// frequency returns true if the frequency for this job equals any of the given
// values.
func (p parts) frequency(eq ...string) bool {
return p.equal("frequency", eq...)
}
// cpu returns true if the task's cpu_or_gpu is "CPU" and the CPU for this
// task equals any of the given values. If no values are provided, cpu returns
// true if this task runs on CPU.
func (p parts) cpu(eq ...string) bool {
if p["cpu_or_gpu"] == "CPU" {
if len(eq) == 0 {
return true
}
return p.equal("cpu_or_gpu_value", eq...)
}
return false
}
// gpu returns true if the task's cpu_or_gpu is "GPU" and the GPU for this task
// equals any of the given values. If no values are provided, gpu returns true
// if this task runs on GPU.
func (p parts) gpu(eq ...string) bool {
if p["cpu_or_gpu"] == "GPU" {
if len(eq) == 0 {
return true
}
return p.equal("cpu_or_gpu_value", eq...)
}
return false
}
// arch returns true if the architecture for this job equals any of the
// given values.
func (p parts) arch(eq ...string) bool {
return p.equal("arch", eq...) || p.equal("target_arch", eq...)
}
// extraConfig returns true if any of the extra_configs for this job equals
// any of the given values. If the extra_config starts with "SK_",
// it is considered to be a single config.
func (p parts) extraConfig(eq ...string) bool {
if len(eq) == 0 {
log.Fatal("No values provided for extraConfig()!")
}
ec := p["extra_config"]
if ec == "" {
return false
}
var cfgs []string
if strings.HasPrefix(ec, "SK_") {
cfgs = []string{ec}
} else {
cfgs = strings.Split(ec, "_")
}
for _, c := range cfgs {
for _, e := range eq {
if c == e {
return true
}
}
}
return false
}
// matchPart returns true if the given part of this job's name matches any of
// the given regular expressions. Note that a regular expression might match any
// substring, so if you need an exact match on the entire string you'll need to
// use `^` and `$`.
func (p parts) matchPart(part string, re ...string) bool {
if len(re) == 0 {
log.Fatal("No values provided for matchPart()!")
}
v := p[part]
for _, r := range re {
if regexp.MustCompile(r).MatchString(v) {
return true
}
}
return false
}
// matchRole returns true if the role for this job matches any of the given
// regular expressions.
func (p parts) matchRole(re ...string) bool {
return p.matchPart("role", re...)
}
// matchOs returns true if the OS for this job matches any of the given regular
// expressions.
func (p parts) matchOs(re ...string) bool {
return p.matchPart("os", re...)
}
// matchCompiler returns true if the compiler for this job matches any of the
// given regular expressions.
func (p parts) matchCompiler(re ...string) bool {
return p.matchPart("compiler", re...)
}
// matchModel returns true if the model for this job matches any of the given
// regular expressions.
func (p parts) matchModel(re ...string) bool {
return p.matchPart("model", re...)
}
// matchCpu returns true if the task's cpu_or_gpu is "CPU" and the CPU for this
// task matches any of the given regular expressions. If no regular expressions
// are provided, cpu returns true if this task runs on CPU.
func (p parts) matchCpu(re ...string) bool {
if p["cpu_or_gpu"] == "CPU" {
if len(re) == 0 {
return true
}
return p.matchPart("cpu_or_gpu_value", re...)
}
return false
}
// matchGpu returns true if the task's cpu_or_gpu is "GPU" and the GPU for this task
// matches any of the given regular expressions. If no regular expressions are
// provided, gpu returns true if this task runs on GPU.
func (p parts) matchGpu(re ...string) bool {
if p["cpu_or_gpu"] == "GPU" {
if len(re) == 0 {
return true
}
return p.matchPart("cpu_or_gpu_value", re...)
}
return false
}
// matchArch returns true if the architecture for this job matches any of the
// given regular expressions.
func (p parts) matchArch(re ...string) bool {
return p.matchPart("arch", re...) || p.matchPart("target_arch", re...)
}
// matchExtraConfig returns true if any of the extra_configs for this job matches
// any of the given regular expressions. If the extra_config starts with "SK_",
// it is considered to be a single config.
func (p parts) matchExtraConfig(re ...string) bool {
if len(re) == 0 {
log.Fatal("No regular expressions provided for matchExtraConfig()!")
}
ec := p["extra_config"]
if ec == "" {
return false
}
var cfgs []string
if strings.HasPrefix(ec, "SK_") {
cfgs = []string{ec}
} else {
cfgs = strings.Split(ec, "_")
}
compiled := make([]*regexp.Regexp, 0, len(re))
for _, r := range re {
compiled = append(compiled, regexp.MustCompile(r))
}
for _, c := range cfgs {
for _, r := range compiled {
if r.MatchString(c) {
return true
}
}
}
return false
}
// debug returns true if this task runs in debug mode.
func (p parts) debug() bool {
return p["configuration"] == "Debug"
}
// release returns true if this task runs in release mode.
func (p parts) release() bool {
return p["configuration"] == "Release"
}
// isLinux returns true if the task runs on Linux.
func (p parts) isLinux() bool {
return p.matchOs("Debian", "Ubuntu")
}
// TODO(borenet): The below really belongs in its own file, probably next to the
// builder_name_schema.json file.
// schema is a sub-struct of JobNameSchema.
type schema struct {
Keys []string `json:"keys"`
OptionalKeys []string `json:"optional_keys"`
RecurseRoles []string `json:"recurse_roles"`
}
// JobNameSchema is a struct used for (de)constructing Job names in a
// predictable format.
type JobNameSchema struct {
Schema map[string]*schema `json:"builder_name_schema"`
Sep string `json:"builder_name_sep"`
}
// NewJobNameSchema returns a JobNameSchema instance based on the given JSON
// file.
func NewJobNameSchema(jsonFile string) (*JobNameSchema, error) {
var rv JobNameSchema
f, err := os.Open(jsonFile)
if err != nil {
return nil, err
}
defer func() {
if err := f.Close(); err != nil {
log.Println(fmt.Sprintf("Failed to close %s: %s", jsonFile, err))
}
}()
if err := json.NewDecoder(f).Decode(&rv); err != nil {
return nil, err
}
return &rv, nil
}
// ParseJobName splits the given Job name into its component parts, according
// to the schema.
func (s *JobNameSchema) ParseJobName(n string) (map[string]string, error) {
popFront := func(items []string) (string, []string, error) {
if len(items) == 0 {
return "", nil, fmt.Errorf("Invalid job name: %s (not enough parts)", n)
}
return items[0], items[1:], nil
}
result := map[string]string{}
var parse func(int, string, []string) ([]string, error)
parse = func(depth int, role string, parts []string) ([]string, error) {
s, ok := s.Schema[role]
if !ok {
return nil, fmt.Errorf("Invalid job name; %q is not a valid role.", role)
}
if depth == 0 {
result["role"] = role
} else {
result[fmt.Sprintf("sub-role-%d", depth)] = role
}
var err error
for _, key := range s.Keys {
var value string
value, parts, err = popFront(parts)
if err != nil {
return nil, err
}
result[key] = value
}
for _, subRole := range s.RecurseRoles {
if len(parts) > 0 && parts[0] == subRole {
parts, err = parse(depth+1, parts[0], parts[1:])
if err != nil {
return nil, err
}
}
}
for _, key := range s.OptionalKeys {
if len(parts) > 0 {
var value string
value, parts, err = popFront(parts)
if err != nil {
return nil, err
}
result[key] = value
}
}
if len(parts) > 0 {
return nil, fmt.Errorf("Invalid job name: %s (too many parts)", n)
}
return parts, nil
}
split := strings.Split(n, s.Sep)
if len(split) < 2 {
return nil, fmt.Errorf("Invalid job name: %s (not enough parts)", n)
}
role := split[0]
split = split[1:]
_, err := parse(0, role, split)
return result, err
}
// MakeJobName assembles the given parts of a Job name, according to the schema.
func (s *JobNameSchema) MakeJobName(parts map[string]string) (string, error) {
rvParts := make([]string, 0, len(parts))
var process func(int, map[string]string) (map[string]string, error)
process = func(depth int, parts map[string]string) (map[string]string, error) {
roleKey := "role"
if depth != 0 {
roleKey = fmt.Sprintf("sub-role-%d", depth)
}
role, ok := parts[roleKey]
if !ok {
return nil, fmt.Errorf("Invalid job parts; missing key %q", roleKey)
}
s, ok := s.Schema[role]
if !ok {
return nil, fmt.Errorf("Invalid job parts; unknown role %q", role)
}
rvParts = append(rvParts, role)
delete(parts, roleKey)
for _, key := range s.Keys {
value, ok := parts[key]
if !ok {
return nil, fmt.Errorf("Invalid job parts; missing %q", key)
}
rvParts = append(rvParts, value)
delete(parts, key)
}
if len(s.RecurseRoles) > 0 {
subRoleKey := fmt.Sprintf("sub-role-%d", depth+1)
subRole, ok := parts[subRoleKey]
if !ok {
return nil, fmt.Errorf("Invalid job parts; missing %q", subRoleKey)
}
rvParts = append(rvParts, subRole)
delete(parts, subRoleKey)
found := false
for _, recurseRole := range s.RecurseRoles {
if recurseRole == subRole {
found = true
var err error
parts, err = process(depth+1, parts)
if err != nil {
return nil, err
}
break
}
}
if !found {
return nil, fmt.Errorf("Invalid job parts; unknown sub-role %q", subRole)
}
}
for _, key := range s.OptionalKeys {
if value, ok := parts[key]; ok {
rvParts = append(rvParts, value)
delete(parts, key)
}
}
if len(parts) > 0 {
return nil, fmt.Errorf("Invalid job parts: too many parts: %v", parts)
}
return parts, nil
}
// Copy the parts map, so that we can modify at will.
partsCpy := make(map[string]string, len(parts))
for k, v := range parts {
partsCpy[k] = v
}
if _, err := process(0, partsCpy); err != nil {
return "", err
}
return strings.Join(rvParts, s.Sep), nil
}
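
To make the matchers concrete, here is an editorial sketch of the parts map that ParseJobName would produce for a job name used elsewhere in this change. The exact key set comes from builder_name_schema.json, which is not part of this diff, so treat the keys as illustrative.

// Sketch: illustrative parse of
// "Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-ASAN".
func examplePartsUsage() {
	p := parts{
		"role": "Test", "os": "Debian9", "compiler": "Clang", "model": "GCE",
		"cpu_or_gpu": "CPU", "cpu_or_gpu_value": "AVX2", "arch": "x86_64",
		"configuration": "Debug", "test_filter": "All", "extra_config": "ASAN",
	}
	fmt.Println(p.role("Test"))        // true: exact match on the "role" part
	fmt.Println(p.cpu())               // true: cpu_or_gpu is "CPU"
	fmt.Println(p.cpu("AVX2"))         // true: CPU task with matching cpu_or_gpu_value
	fmt.Println(p.extraConfig("ASAN")) // true: one of the underscore-separated extra_configs
	fmt.Println(p.matchOs("Debian"))   // true: regexp match against "Debian9"
	fmt.Println(p.isLinux())           // true: matchOs("Debian", "Ubuntu")
	fmt.Println(p.debug())             // true: configuration is "Debug"
}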


@ -0,0 +1,250 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package gen_tasks_logic
import (
"log"
"reflect"
"time"
"go.skia.org/infra/task_scheduler/go/specs"
)
// taskBuilder is a helper for creating a task.
type taskBuilder struct {
*jobBuilder
parts
Name string
Spec *specs.TaskSpec
}
// newTaskBuilder returns a taskBuilder instance.
func newTaskBuilder(b *jobBuilder, name string) *taskBuilder {
parts, err := b.jobNameSchema.ParseJobName(name)
if err != nil {
log.Fatal(err)
}
return &taskBuilder{
jobBuilder: b,
parts: parts,
Name: name,
Spec: &specs.TaskSpec{},
}
}
// attempts sets the desired MaxAttempts for this task.
func (b *taskBuilder) attempts(a int) {
b.Spec.MaxAttempts = a
}
// cache adds the given caches to the task.
func (b *taskBuilder) cache(caches ...*specs.Cache) {
for _, c := range caches {
alreadyHave := false
for _, exist := range b.Spec.Caches {
if c.Name == exist.Name {
if !reflect.DeepEqual(c, exist) {
log.Fatalf("Already have cache %s with a different definition!", c.Name)
}
alreadyHave = true
break
}
}
if !alreadyHave {
b.Spec.Caches = append(b.Spec.Caches, c)
}
}
}
// cmd sets the command for the task.
func (b *taskBuilder) cmd(c ...string) {
b.Spec.Command = c
}
// dimension adds the given dimensions to the task.
func (b *taskBuilder) dimension(dims ...string) {
for _, dim := range dims {
if !In(dim, b.Spec.Dimensions) {
b.Spec.Dimensions = append(b.Spec.Dimensions, dim)
}
}
}
// expiration sets the expiration of the task.
func (b *taskBuilder) expiration(e time.Duration) {
b.Spec.Expiration = e
}
// idempotent marks the task as idempotent.
func (b *taskBuilder) idempotent() {
b.Spec.Idempotent = true
}
// isolate sets the isolate file used by the task.
func (b *taskBuilder) isolate(i string) {
b.Spec.Isolate = b.relpath(i)
}
// env appends the given values to the given environment variable for the task.
func (b *taskBuilder) env(key string, values ...string) {
if b.Spec.EnvPrefixes == nil {
b.Spec.EnvPrefixes = map[string][]string{}
}
for _, value := range values {
if !In(value, b.Spec.EnvPrefixes[key]) {
b.Spec.EnvPrefixes[key] = append(b.Spec.EnvPrefixes[key], value)
}
}
}
// addToPATH adds the given locations to PATH for the task.
func (b *taskBuilder) addToPATH(loc ...string) {
b.env("PATH", loc...)
}
// output adds the given paths as outputs to the task, which results in their
// contents being uploaded to the isolate server.
func (b *taskBuilder) output(paths ...string) {
for _, path := range paths {
if !In(path, b.Spec.Outputs) {
b.Spec.Outputs = append(b.Spec.Outputs, path)
}
}
}
// serviceAccount sets the service account for this task.
func (b *taskBuilder) serviceAccount(sa string) {
b.Spec.ServiceAccount = sa
}
// timeout sets the timeout(s) for this task.
func (b *taskBuilder) timeout(timeout time.Duration) {
b.Spec.ExecutionTimeout = timeout
b.Spec.IoTimeout = timeout // With kitchen, step logs don't count toward IoTimeout.
}
// dep adds the given tasks as dependencies of this task.
func (b *taskBuilder) dep(tasks ...string) {
for _, task := range tasks {
if !In(task, b.Spec.Dependencies) {
b.Spec.Dependencies = append(b.Spec.Dependencies, task)
}
}
}
// cipd adds the given CIPD packages to the task.
func (b *taskBuilder) cipd(pkgs ...*specs.CipdPackage) {
for _, pkg := range pkgs {
alreadyHave := false
for _, exist := range b.Spec.CipdPackages {
if pkg.Name == exist.Name {
if !reflect.DeepEqual(pkg, exist) {
log.Fatalf("Already have package %s with a different definition!", pkg.Name)
}
alreadyHave = true
break
}
}
if !alreadyHave {
b.Spec.CipdPackages = append(b.Spec.CipdPackages, pkg)
}
}
}
// useIsolatedAssets returns true if this task should use assets which are
// isolated rather than downloading directly from CIPD.
func (b *taskBuilder) useIsolatedAssets() bool {
// TODO(borenet): Do we need the isolate tasks for non-RPi Skpbench?
if b.extraConfig("Skpbench") {
return true
}
// Only do this on the RPIs for now. Other, faster machines shouldn't
// see much benefit and we don't need the extra complexity, for now.
if b.os("Android", "ChromeOS", "iOS") {
return true
}
// TODO(borenet): Do we need the isolate tasks for Windows builds?
if b.matchOs("Win") && b.role("Build") {
return true
}
return false
}
// isolateAssetConfig represents a task which copies a CIPD package into
// isolate.
type isolateAssetCfg struct {
isolateTaskName string
path string
}
// asset adds the given assets to the task as CIPD packages.
func (b *taskBuilder) asset(assets ...string) {
shouldIsolate := b.useIsolatedAssets()
pkgs := make([]*specs.CipdPackage, 0, len(assets))
for _, asset := range assets {
if _, ok := ISOLATE_ASSET_MAPPING[asset]; ok && shouldIsolate {
b.dep(b.isolateCIPDAsset(asset))
} else {
pkgs = append(pkgs, b.MustGetCipdPackageFromAsset(asset))
}
}
b.cipd(pkgs...)
}
// usesCCache adds attributes to tasks which use ccache.
func (b *taskBuilder) usesCCache() {
b.cache(CACHES_CCACHE...)
}
// usesGit adds attributes to tasks which use git.
func (b *taskBuilder) usesGit() {
// TODO(borenet): Why aren't these tasks using the Git cache?
if !b.extraConfig("UpdateGoDeps", "BuildTaskDrivers", "CreateDockerImage", "PushAppsFromSkiaDockerImage", "PushAppsFromSkiaWASMDockerImages", "PushAppsFromWASMDockerImage") {
b.cache(CACHES_GIT...)
// TODO(borenet): Move this conditional into compile().
if !b.extraConfig("NoDEPS") {
b.cache(CACHES_WORKDIR...)
}
}
b.cipd(specs.CIPD_PKGS_GIT...)
}
// usesGo adds attributes to tasks which use go. Recipes should use
// "with api.context(env=api.infra.go_env)".
func (b *taskBuilder) usesGo() {
b.usesGit() // Go requires Git.
b.cache(CACHES_GO...)
pkg := b.MustGetCipdPackageFromAsset("go")
if b.matchOs("Win") || b.matchExtraConfig("Win") {
pkg = b.MustGetCipdPackageFromAsset("go_win")
pkg.Path = "go"
}
b.cipd(pkg)
}
// usesDocker adds attributes to tasks which use docker.
func (b *taskBuilder) usesDocker() {
// The "docker" cache is used as a persistent working directory for
// tasks which use Docker. It is not to be confused with Docker's own
// cache, which stores images. We do not currently use a named Swarming
// cache for the latter.
// TODO(borenet): We should ensure that any task which uses Docker does
// not also use the normal "work" cache, to prevent issues like
// https://bugs.chromium.org/p/skia/issues/detail?id=9749.
if b.role("Build") || (b.role("Housekeeper") && b.matchExtraConfig("DockerImage")) {
b.cache(&specs.Cache{
Name: "docker",
Path: "cache/docker",
})
}
// TODO(borenet): Why aren't these using the Docker dimension?
if b.extraConfig("SKQP") ||
b.model("Golo") ||
b.role("Perf", "Test") && b.cpu() && b.extraConfig("CanvasKit", "PathKit", "SkottieWASM", "LottieWeb") {
} else {
b.dimension("docker_installed:true")
}
}
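
Finally, a sketch of how a task is typically declared inside a job using these helpers, modeled on isolateCIPDAsset in the job builder above. The task name, command, asset, and attempt count here are placeholders, and linuxGceDimensions/MACHINE_TYPE_SMALL come from elsewhere in gen_tasks_logic; treat the whole block as an assumption about intended usage rather than code from this commit.

// Hypothetical task definition (sketch only).
b.addTask(name, func(b *taskBuilder) {
	b.usesGit()                    // Git caches + CIPD git packages
	b.asset("skp")                 // isolated or CIPD asset, per useIsolatedAssets()
	b.cmd("./example_runner", "--out", "${ISOLATED_OUTDIR}") // placeholder command
	b.linuxGceDimensions(MACHINE_TYPE_SMALL)
	b.timeout(4 * time.Hour)
	b.attempts(2)
	b.isolate("empty.isolate")
})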