2016-09-30 19:53:12 +00:00
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package main
/*
Generate the tasks.json file.
*/
import (
"encoding/json"
2017-02-01 20:56:55 +00:00
"flag"
2016-09-30 19:53:12 +00:00
"fmt"
2017-02-01 20:56:55 +00:00
"io/ioutil"
2016-09-30 19:53:12 +00:00
"os"
"path"
2017-06-14 19:25:31 +00:00
"path/filepath"
2017-02-01 20:56:55 +00:00
"regexp"
2017-06-14 19:25:31 +00:00
"runtime"
2016-09-30 19:53:12 +00:00
"sort"
2017-10-17 17:40:52 +00:00
"strconv"
2016-09-30 19:53:12 +00:00
"strings"
2016-11-08 17:55:32 +00:00
"time"
2016-09-30 19:53:12 +00:00
"github.com/skia-dev/glog"
2017-06-14 19:25:31 +00:00
"go.skia.org/infra/go/sklog"
2016-09-30 19:53:12 +00:00
"go.skia.org/infra/go/util"
"go.skia.org/infra/task_scheduler/go/specs"
)
const (
2017-11-29 19:45:14 +00:00
BUNDLE_RECIPES_NAME = "Housekeeper-PerCommit-BundleRecipes"
ISOLATE_SKIMAGE_NAME = "Housekeeper-PerCommit-IsolateSkImage"
ISOLATE_SKP_NAME = "Housekeeper-PerCommit-IsolateSKP"
ISOLATE_SVG_NAME = "Housekeeper-PerCommit-IsolateSVG"
ISOLATE_NDK_LINUX_NAME = "Housekeeper-PerCommit-IsolateAndroidNDKLinux"
ISOLATE_WIN_TOOLCHAIN_NAME = "Housekeeper-PerCommit-IsolateWinToolchain"
ISOLATE_WIN_VULKAN_SDK_NAME = "Housekeeper-PerCommit-IsolateWinVulkanSDK"
2017-04-04 13:06:16 +00:00
2017-11-15 16:22:57 +00:00
DEFAULT_OS_DEBIAN = "Debian-9.1"
DEFAULT_OS_LINUX_GCE = "Debian-9.2"
2018-02-09 16:21:15 +00:00
DEFAULT_OS_MAC = "Mac-10.13.3"
2017-11-15 16:22:57 +00:00
DEFAULT_OS_UBUNTU = "Ubuntu-14.04"
DEFAULT_OS_WIN = "Windows-2016Server-14393"
2016-09-30 19:53:12 +00:00
// Name prefix for upload jobs.
PREFIX_UPLOAD = "Upload"
)
var (
// "Constants"
2017-02-01 20:56:55 +00:00
// Top-level list of all jobs to run at each commit; loaded from
// jobs.json.
JOBS [ ] string
// General configuration information.
CONFIG struct {
2017-10-09 19:26:19 +00:00
GsBucketCoverage string ` json:"gs_bucket_coverage" `
GsBucketGm string ` json:"gs_bucket_gm" `
GsBucketNano string ` json:"gs_bucket_nano" `
2017-10-24 13:43:21 +00:00
GsBucketCalm string ` json:"gs_bucket_calm" `
2017-10-09 19:26:19 +00:00
NoUpload [ ] string ` json:"no_upload" `
Pool string ` json:"pool" `
2016-09-30 19:53:12 +00:00
}
2017-06-13 21:01:16 +00:00
// alternateSwarmDimensions can be set in an init function to override the default swarming bot
// dimensions for the given task.
alternateSwarmDimensions func ( parts map [ string ] string ) [ ] string
2017-09-15 14:35:44 +00:00
// internalHardwareLabelFn can be set in an init function to provide an
// internal_hardware_label variable to the recipe.
2017-09-15 18:09:07 +00:00
internalHardwareLabelFn func ( parts map [ string ] string ) * int
2017-09-15 12:35:31 +00:00
2016-09-30 19:53:12 +00:00
// Defines the structure of job names.
jobNameSchema * JobNameSchema
2017-02-01 20:56:55 +00:00
2017-06-06 12:27:09 +00:00
// Git 2.13.
2017-12-07 14:54:05 +00:00
cipdGit1 = & specs . CipdPackage {
Name : fmt . Sprintf ( "infra/git/${platform}" ) ,
Path : "git" ,
Version : fmt . Sprintf ( "version:2.13.0.chromium9" ) ,
}
cipdGit2 = & specs . CipdPackage {
Name : fmt . Sprintf ( "infra/tools/git/${platform}" ) ,
Path : "git" ,
Version : fmt . Sprintf ( "git_revision:a78b5f3658c0578a017db48df97d20ac09822bcd" ) ,
2017-06-06 12:27:09 +00:00
}
2017-12-07 14:21:07 +00:00
2017-02-01 20:56:55 +00:00
// Flags.
2017-02-07 14:16:30 +00:00
builderNameSchemaFile = flag . String ( "builder_name_schema" , "" , "Path to the builder_name_schema.json file. If not specified, uses infra/bots/recipe_modules/builder_name_schema/builder_name_schema.json from this repo." )
assetsDir = flag . String ( "assets_dir" , "" , "Directory containing assets." )
cfgFile = flag . String ( "cfg_file" , "" , "JSON file containing general configuration information." )
jobsFile = flag . String ( "jobs" , "" , "JSON file containing jobs to run." )
2016-09-30 19:53:12 +00:00
)
2017-09-15 14:35:44 +00:00
// internalHardwareLabel returns the internal ID for the bot, if any.
2017-09-15 18:09:07 +00:00
func internalHardwareLabel ( parts map [ string ] string ) * int {
2017-09-15 14:35:44 +00:00
if internalHardwareLabelFn != nil {
return internalHardwareLabelFn ( parts )
2017-09-15 12:35:31 +00:00
}
return nil
}
2017-02-01 20:56:55 +00:00
// linuxGceDimensions are the Swarming dimensions for Linux GCE
// instances.
func linuxGceDimensions ( ) [ ] string {
return [ ] string {
2017-09-25 16:56:53 +00:00
// Specify CPU to avoid running builds on bots with a more unique CPU.
"cpu:x86-64-Haswell_GCE" ,
2017-02-01 20:56:55 +00:00
"gpu:none" ,
2017-11-15 16:22:57 +00:00
fmt . Sprintf ( "os:%s" , DEFAULT_OS_LINUX_GCE ) ,
2017-02-01 20:56:55 +00:00
fmt . Sprintf ( "pool:%s" , CONFIG . Pool ) ,
}
}
2016-09-30 19:53:12 +00:00
// deriveCompileTaskName returns the name of a compile task based on the given
// job name.
func deriveCompileTaskName ( jobName string , parts map [ string ] string ) string {
2018-01-02 19:54:43 +00:00
if strings . Contains ( jobName , "Bookmaker" ) {
2017-12-08 17:58:20 +00:00
return "Build-Debian9-GCC-x86_64-Release"
} else if parts [ "role" ] == "Housekeeper" {
2017-06-28 15:45:54 +00:00
return "Build-Debian9-GCC-x86_64-Release-Shared"
2018-01-05 16:13:43 +00:00
} else if parts [ "role" ] == "Test" || parts [ "role" ] == "Perf" || parts [ "role" ] == "Calmbench" {
2016-09-30 19:53:12 +00:00
task_os := parts [ "os" ]
2017-04-27 17:08:50 +00:00
ec := [ ] string { }
if val := parts [ "extra_config" ] ; val != "" {
ec = strings . Split ( val , "_" )
2017-12-11 18:27:27 +00:00
ignore := [ ] string { "Skpbench" , "AbandonGpuContext" , "PreAbandonGpuContext" , "Valgrind" , "ReleaseAndAbandonGpuContext" , "CCPR" , "FSAA" , "FAAA" , "FDAA" , "NativeFonts" , "GDI" , "NoGPUThreads" }
2017-04-27 17:08:50 +00:00
keep := make ( [ ] string , 0 , len ( ec ) )
for _ , part := range ec {
if ! util . In ( part , ignore ) {
keep = append ( keep , part )
}
}
ec = keep
2017-04-26 18:25:29 +00:00
}
2016-09-30 19:53:12 +00:00
if task_os == "Android" {
2017-04-27 17:08:50 +00:00
if ! util . In ( "Android" , ec ) {
ec = append ( [ ] string { "Android" } , ec ... )
2016-09-30 19:53:12 +00:00
}
2017-06-28 15:45:54 +00:00
task_os = "Debian9"
2017-03-08 19:01:01 +00:00
} else if task_os == "Chromecast" {
2017-06-28 15:45:54 +00:00
task_os = "Debian9"
2017-04-27 17:08:50 +00:00
ec = append ( [ ] string { "Chromecast" } , ec ... )
2017-04-07 14:04:08 +00:00
} else if strings . Contains ( task_os , "ChromeOS" ) {
2017-11-02 13:34:08 +00:00
ec = append ( [ ] string { "Chromebook" , "GLES" } , ec ... )
2017-06-28 15:45:54 +00:00
task_os = "Debian9"
2016-09-30 19:53:12 +00:00
} else if task_os == "iOS" {
2017-04-27 17:08:50 +00:00
ec = append ( [ ] string { task_os } , ec ... )
2016-09-30 19:53:12 +00:00
task_os = "Mac"
} else if strings . Contains ( task_os , "Win" ) {
task_os = "Win"
2017-06-28 15:45:54 +00:00
} else if strings . Contains ( task_os , "Ubuntu" ) || strings . Contains ( task_os , "Debian" ) {
task_os = "Debian9"
2016-09-30 19:53:12 +00:00
}
2016-11-18 18:10:51 +00:00
jobNameMap := map [ string ] string {
2016-09-30 19:53:12 +00:00
"role" : "Build" ,
"os" : task_os ,
"compiler" : parts [ "compiler" ] ,
"target_arch" : parts [ "arch" ] ,
"configuration" : parts [ "configuration" ] ,
2016-11-18 18:10:51 +00:00
}
2017-04-27 17:08:50 +00:00
if len ( ec ) > 0 {
jobNameMap [ "extra_config" ] = strings . Join ( ec , "_" )
2016-11-18 18:10:51 +00:00
}
name , err := jobNameSchema . MakeJobName ( jobNameMap )
2016-09-30 19:53:12 +00:00
if err != nil {
glog . Fatal ( err )
}
return name
} else {
return jobName
}
}
// swarmDimensions generates swarming bot dimensions for the given task.
func swarmDimensions ( parts map [ string ] string ) [ ] string {
2017-06-13 21:01:16 +00:00
if alternateSwarmDimensions != nil {
return alternateSwarmDimensions ( parts )
}
return defaultSwarmDimensions ( parts )
}
// defaultSwarmDimensions generates default swarming bot dimensions for the given task.
func defaultSwarmDimensions ( parts map [ string ] string ) [ ] string {
2016-09-30 19:53:12 +00:00
d := map [ string ] string {
2017-02-01 20:56:55 +00:00
"pool" : CONFIG . Pool ,
2016-09-30 19:53:12 +00:00
}
if os , ok := parts [ "os" ] ; ok {
2017-06-13 21:01:16 +00:00
d [ "os" ] , ok = map [ string ] string {
2017-03-21 13:25:34 +00:00
"Android" : "Android" ,
"Chromecast" : "Android" ,
2017-04-07 14:04:08 +00:00
"ChromeOS" : "ChromeOS" ,
2017-06-28 15:45:54 +00:00
"Debian9" : DEFAULT_OS_DEBIAN ,
2017-11-14 18:30:04 +00:00
"Mac" : DEFAULT_OS_MAC ,
2017-06-28 15:45:54 +00:00
"Ubuntu14" : DEFAULT_OS_UBUNTU ,
2017-03-21 13:25:34 +00:00
"Ubuntu16" : "Ubuntu-16.10" ,
2017-07-11 12:11:15 +00:00
"Ubuntu17" : "Ubuntu-17.04" ,
2017-11-14 18:30:04 +00:00
"Win" : DEFAULT_OS_WIN ,
2017-05-02 17:13:13 +00:00
"Win10" : "Windows-10-15063" ,
2017-03-21 13:25:34 +00:00
"Win2k8" : "Windows-2008ServerR2-SP1" ,
2017-11-14 18:30:04 +00:00
"Win2016" : DEFAULT_OS_WIN ,
2017-04-18 19:38:15 +00:00
"Win7" : "Windows-7-SP1" ,
2017-03-21 13:25:34 +00:00
"Win8" : "Windows-8.1-SP0" ,
2017-05-19 17:08:19 +00:00
"iOS" : "iOS-10.3.1" ,
2016-12-02 17:09:10 +00:00
} [ os ]
2017-06-13 21:01:16 +00:00
if ! ok {
glog . Fatalf ( "Entry %q not found in OS mapping." , os )
}
2016-09-30 19:53:12 +00:00
} else {
2017-06-28 15:45:54 +00:00
d [ "os" ] = DEFAULT_OS_DEBIAN
2016-09-30 19:53:12 +00:00
}
2017-11-02 17:48:23 +00:00
if parts [ "role" ] == "Test" || parts [ "role" ] == "Perf" || parts [ "role" ] == "Calmbench" {
2017-03-21 13:25:34 +00:00
if strings . Contains ( parts [ "os" ] , "Android" ) || strings . Contains ( parts [ "os" ] , "Chromecast" ) {
2016-09-30 19:53:12 +00:00
// For Android, the device type is a better dimension
// than CPU or GPU.
2017-06-14 14:01:45 +00:00
deviceInfo , ok := map [ string ] [ ] string {
2017-06-13 21:01:16 +00:00
"AndroidOne" : { "sprout" , "MOB30Q" } ,
2018-01-24 20:48:26 +00:00
"Chorizo" : { "chorizo" , "1.30_109591" } ,
2017-11-28 14:41:48 +00:00
"GalaxyS6" : { "zerofltetmo" , "NRD90M_G920TUVU5FQK1" } ,
2017-06-13 21:01:16 +00:00
"GalaxyS7_G930A" : { "heroqlteatt" , "NRD90M_G930AUCS4BQC2" } ,
"GalaxyS7_G930FD" : { "herolte" , "NRD90M_G930FXXU1DQAS" } ,
"MotoG4" : { "athene" , "NPJ25.93-14" } ,
2017-12-06 21:29:04 +00:00
"NVIDIA_Shield" : { "foster" , "NRD90M_1915764_848" } ,
"Nexus5" : { "hammerhead" , "M4B30Z_3437181" } ,
2017-11-17 13:59:44 +00:00
"Nexus5x" : { "bullhead" , "OPR6.170623.023" } ,
2017-12-06 21:29:04 +00:00
"Nexus7" : { "grouper" , "LMY47V_1836172" } , // 2012 Nexus 7
2017-11-02 18:09:32 +00:00
"NexusPlayer" : { "fugu" , "OPR6.170623.021" } ,
2017-09-26 18:07:38 +00:00
"Pixel" : { "sailfish" , "OPR3.170623.008" } ,
2017-10-31 18:44:08 +00:00
"Pixel2XL" : { "taimen" , "OPD1.170816.023" } ,
2017-10-31 13:47:38 +00:00
"PixelC" : { "dragon" , "OPR1.170623.034" } ,
2017-09-26 18:07:38 +00:00
"PixelXL" : { "marlin" , "OPR3.170623.008" } ,
2017-06-14 14:01:45 +00:00
} [ parts [ "model" ] ]
2017-02-01 20:56:55 +00:00
if ! ok {
2017-06-14 14:01:45 +00:00
glog . Fatalf ( "Entry %q not found in Android mapping." , parts [ "model" ] )
2017-02-01 20:56:55 +00:00
}
2016-11-08 17:55:32 +00:00
d [ "device_type" ] = deviceInfo [ 0 ]
d [ "device_os" ] = deviceInfo [ 1 ]
2017-12-07 16:19:31 +00:00
// TODO(kjlubick): Remove the python dimension after we have removed the
// Nexus5x devices from the local lab (on Monday, Dec 11, 2017 should be fine).
2017-12-06 20:05:29 +00:00
d [ "python" ] = "2.7.9" // This indicates a RPI, e.g. in Skolo. Golo is 2.7.12
2017-12-07 16:19:31 +00:00
if parts [ "model" ] == "Nexus5x" {
d [ "python" ] = "2.7.12"
}
2016-09-30 19:53:12 +00:00
} else if strings . Contains ( parts [ "os" ] , "iOS" ) {
2017-06-13 21:01:16 +00:00
device , ok := map [ string ] string {
2016-11-09 19:03:20 +00:00
"iPadMini4" : "iPad5,1" ,
2017-04-25 15:38:38 +00:00
"iPhone6" : "iPhone7,2" ,
"iPhone7" : "iPhone9,1" ,
"iPadPro" : "iPad6,3" ,
2016-09-30 19:53:12 +00:00
} [ parts [ "model" ] ]
2017-06-13 21:01:16 +00:00
if ! ok {
glog . Fatalf ( "Entry %q not found in iOS mapping." , parts [ "model" ] )
}
d [ "device" ] = device
2016-09-30 19:53:12 +00:00
} else if parts [ "cpu_or_gpu" ] == "CPU" {
2017-09-25 16:56:53 +00:00
modelMapping , ok := map [ string ] map [ string ] string {
"AVX" : {
"MacMini7.1" : "x86-64-E5-2697_v2" ,
"Golo" : "x86-64-E5-2670" ,
} ,
"AVX2" : {
2017-11-07 21:57:37 +00:00
"GCE" : "x86-64-Haswell_GCE" ,
2017-11-03 16:19:54 +00:00
"NUC5i7RYH" : "x86-64-i7-5557U" ,
2017-09-25 16:56:53 +00:00
} ,
2017-09-28 20:38:34 +00:00
"AVX512" : {
"GCE" : "x86-64-Skylake_GCE" ,
} ,
2016-09-30 19:53:12 +00:00
} [ parts [ "cpu_or_gpu_value" ] ]
2017-06-13 21:01:16 +00:00
if ! ok {
glog . Fatalf ( "Entry %q not found in CPU mapping." , parts [ "cpu_or_gpu_value" ] )
}
2017-09-25 16:56:53 +00:00
cpu , ok := modelMapping [ parts [ "model" ] ]
if ! ok {
glog . Fatalf ( "Entry %q not found in %q model mapping." , parts [ "model" ] , parts [ "cpu_or_gpu_value" ] )
2016-09-30 19:53:12 +00:00
}
2017-09-25 16:56:53 +00:00
d [ "cpu" ] = cpu
2017-11-15 16:22:57 +00:00
if parts [ "model" ] == "GCE" && d [ "os" ] == DEFAULT_OS_DEBIAN {
d [ "os" ] = DEFAULT_OS_LINUX_GCE
}
2017-12-14 18:15:01 +00:00
if parts [ "model" ] == "GCE" && d [ "os" ] == DEFAULT_OS_WIN {
2017-12-15 16:23:40 +00:00
// Use normal-size machines for Test and Perf tasks on Win GCE.
2017-12-14 18:15:01 +00:00
d [ "machine_type" ] = "n1-standard-16"
}
2016-09-30 19:53:12 +00:00
} else {
2017-06-14 14:34:18 +00:00
if strings . Contains ( parts [ "os" ] , "Win" ) {
gpu , ok := map [ string ] string {
2017-08-15 16:43:55 +00:00
"GT610" : "10de:104a-22.21.13.8205" ,
2017-11-07 21:57:37 +00:00
"GTX1070" : "10de:1ba1-23.21.13.8813" ,
"GTX660" : "10de:11c0-23.21.13.8813" ,
"GTX960" : "10de:1401-23.21.13.8813" ,
2017-06-14 14:34:18 +00:00
"IntelHD530" : "8086:1912-21.20.16.4590" ,
2017-08-04 15:25:33 +00:00
"IntelHD4400" : "8086:0a16-20.19.15.4703" ,
2017-08-11 14:35:57 +00:00
"IntelHD4600" : "8086:0412-20.19.15.4703" ,
2017-06-14 14:34:18 +00:00
"IntelIris540" : "8086:1926-21.20.16.4590" ,
2017-08-11 14:35:57 +00:00
"IntelIris6100" : "8086:162b-20.19.15.4703" ,
2017-11-22 16:22:22 +00:00
"RadeonHD7770" : "1002:683d-22.19.165.512" ,
2017-06-14 14:34:18 +00:00
"RadeonR9M470X" : "1002:6646-22.19.165.512" ,
2017-08-03 03:24:16 +00:00
"QuadroP400" : "10de:1cb3-22.21.13.8205" ,
2017-06-14 14:34:18 +00:00
} [ parts [ "cpu_or_gpu_value" ] ]
if ! ok {
glog . Fatalf ( "Entry %q not found in Win GPU mapping." , parts [ "cpu_or_gpu_value" ] )
}
d [ "gpu" ] = gpu
2017-02-19 04:28:26 +00:00
2017-06-15 16:28:04 +00:00
// Specify cpu dimension for NUCs and ShuttleCs. We temporarily have two
// types of machines with a GTX960.
cpu , ok := map [ string ] string {
"NUC6i7KYK" : "x86-64-i7-6770HQ" ,
"ShuttleC" : "x86-64-i7-6700K" ,
2017-06-14 14:34:18 +00:00
} [ parts [ "model" ] ]
if ok {
2017-06-15 16:28:04 +00:00
d [ "cpu" ] = cpu
2017-06-14 14:34:18 +00:00
}
2017-06-28 15:45:54 +00:00
} else if strings . Contains ( parts [ "os" ] , "Ubuntu" ) || strings . Contains ( parts [ "os" ] , "Debian" ) {
2017-06-14 14:34:18 +00:00
gpu , ok := map [ string ] string {
// Intel drivers come from CIPD, so no need to specify the version here.
"IntelBayTrail" : "8086:0f31" ,
"IntelHD2000" : "8086:0102" ,
"IntelHD405" : "8086:22b1" ,
2017-12-19 20:14:12 +00:00
"IntelIris640" : "8086:5926" ,
2017-08-04 02:29:22 +00:00
"QuadroP400" : "10de:1cb3-384.59" ,
2017-06-14 14:34:18 +00:00
} [ parts [ "cpu_or_gpu_value" ] ]
if ! ok {
glog . Fatalf ( "Entry %q not found in Ubuntu GPU mapping." , parts [ "cpu_or_gpu_value" ] )
}
d [ "gpu" ] = gpu
} else if strings . Contains ( parts [ "os" ] , "Mac" ) {
gpu , ok := map [ string ] string {
2018-02-02 19:47:31 +00:00
"IntelHD6000" : "8086:1626" ,
2018-02-08 21:57:04 +00:00
"IntelHD615" : "8086:591e" ,
2017-08-17 21:29:04 +00:00
"IntelIris5100" : "8086:0a2e" ,
2017-06-14 14:34:18 +00:00
} [ parts [ "cpu_or_gpu_value" ] ]
if ! ok {
glog . Fatalf ( "Entry %q not found in Mac GPU mapping." , parts [ "cpu_or_gpu_value" ] )
}
d [ "gpu" ] = gpu
2018-02-09 15:29:09 +00:00
// Yuck. We have two different types of MacMini7,1 with the same GPU but different CPUs.
if parts [ "cpu_or_gpu_value" ] == "IntelIris5100" {
// Run all tasks on Golo machines for now.
d [ "cpu" ] = "x86-64-i7-4578U"
}
2017-06-14 14:34:18 +00:00
} else if strings . Contains ( parts [ "os" ] , "ChromeOS" ) {
2017-12-04 20:43:31 +00:00
version , ok := map [ string ] string {
"MaliT604" : "9901.12.0" ,
"MaliT764" : "10172.0.0" ,
"MaliT860" : "10172.0.0" ,
2017-12-08 19:14:38 +00:00
"PowerVRGX6250" : "10176.5.0" ,
2017-12-04 20:43:31 +00:00
"TegraK1" : "10172.0.0" ,
"IntelHDGraphics615" : "10032.17.0" ,
2017-06-14 14:34:18 +00:00
} [ parts [ "cpu_or_gpu_value" ] ]
if ! ok {
glog . Fatalf ( "Entry %q not found in ChromeOS GPU mapping." , parts [ "cpu_or_gpu_value" ] )
}
2017-12-04 20:43:31 +00:00
d [ "gpu" ] = parts [ "cpu_or_gpu_value" ]
d [ "release_version" ] = version
2017-06-14 14:34:18 +00:00
} else {
glog . Fatalf ( "Unknown GPU mapping for OS %q." , parts [ "os" ] )
2017-02-19 04:28:26 +00:00
}
2016-09-30 19:53:12 +00:00
}
} else {
d [ "gpu" ] = "none"
2017-06-28 15:45:54 +00:00
if d [ "os" ] == DEFAULT_OS_DEBIAN {
2017-03-22 19:54:54 +00:00
return linuxGceDimensions ( )
2017-11-14 18:30:04 +00:00
} else if d [ "os" ] == DEFAULT_OS_WIN {
// Windows CPU bots.
2017-09-26 22:19:56 +00:00
d [ "cpu" ] = "x86-64-Haswell_GCE"
2017-12-15 16:23:40 +00:00
// Use many-core machines for Build tasks on Win GCE, except for Goma.
if strings . Contains ( parts [ "extra_config" ] , "Goma" ) {
d [ "machine_type" ] = "n1-standard-16"
} else {
d [ "machine_type" ] = "n1-highcpu-64"
}
2017-11-14 18:30:04 +00:00
} else if d [ "os" ] == DEFAULT_OS_MAC {
// Mac CPU bots.
2017-09-26 22:19:56 +00:00
d [ "cpu" ] = "x86-64-E5-2697_v2"
2017-03-22 19:54:54 +00:00
}
2016-09-30 19:53:12 +00:00
}
2017-03-22 19:54:54 +00:00
2016-09-30 19:53:12 +00:00
rv := make ( [ ] string , 0 , len ( d ) )
for k , v := range d {
rv = append ( rv , fmt . Sprintf ( "%s:%s" , k , v ) )
}
sort . Strings ( rv )
return rv
}
2017-06-14 19:25:31 +00:00
// relpath returns the relative path to the given file from the config file
// (or from this source file's directory when no --cfg_file was given).
func relpath(f string) string {
	_, thisFile, _, _ := runtime.Caller(0)
	dir := path.Dir(thisFile)
	base := dir
	if *cfgFile != "" {
		base = path.Dir(*cfgFile)
	}
	rel, err := filepath.Rel(base, path.Join(dir, f))
	if err != nil {
		sklog.Fatal(err)
	}
	return rel
}
2017-04-04 13:06:16 +00:00
// bundleRecipes generates the task to bundle and isolate the recipes.
func bundleRecipes ( b * specs . TasksCfgBuilder ) string {
b . MustAddTask ( BUNDLE_RECIPES_NAME , & specs . TaskSpec {
2017-12-07 14:54:05 +00:00
CipdPackages : [ ] * specs . CipdPackage { cipdGit1 , cipdGit2 } ,
Dimensions : linuxGceDimensions ( ) ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "bundle_recipes" ,
fmt . Sprintf ( "buildername=%s" , BUNDLE_RECIPES_NAME ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
2017-04-04 13:06:16 +00:00
} ,
2017-12-07 14:54:05 +00:00
Isolate : relpath ( "bundle_recipes.isolate" ) ,
2017-05-11 17:35:23 +00:00
Priority : 0.7 ,
2017-04-04 13:06:16 +00:00
} )
return BUNDLE_RECIPES_NAME
}
2017-04-07 12:31:22 +00:00
// useBundledRecipes returns true iff the given bot should use bundled recipes
// instead of syncing recipe DEPS itself. Currently all bots do, so the parts
// are ignored.
func useBundledRecipes(parts map[string]string) bool {
	// Use bundled recipes for all test/perf tasks.
	return true
}
2017-05-11 17:35:23 +00:00
type isolateAssetCfg struct {
2017-12-07 14:54:05 +00:00
isolateFile string
cipdPkg string
2017-05-11 17:35:23 +00:00
}
var ISOLATE_ASSET_MAPPING = map [ string ] isolateAssetCfg {
ISOLATE_SKIMAGE_NAME : {
2017-12-07 14:54:05 +00:00
isolateFile : "isolate_skimage.isolate" ,
cipdPkg : "skimage" ,
2017-05-11 17:35:23 +00:00
} ,
ISOLATE_SKP_NAME : {
2017-12-07 14:54:05 +00:00
isolateFile : "isolate_skp.isolate" ,
cipdPkg : "skp" ,
2017-05-11 17:35:23 +00:00
} ,
ISOLATE_SVG_NAME : {
2017-12-07 14:54:05 +00:00
isolateFile : "isolate_svg.isolate" ,
cipdPkg : "svg" ,
2017-05-11 17:35:23 +00:00
} ,
2017-11-29 19:45:14 +00:00
ISOLATE_NDK_LINUX_NAME : {
2017-12-07 14:54:05 +00:00
isolateFile : "isolate_ndk_linux.isolate" ,
cipdPkg : "android_ndk_linux" ,
2017-11-29 19:45:14 +00:00
} ,
ISOLATE_WIN_TOOLCHAIN_NAME : {
2017-12-07 14:54:05 +00:00
isolateFile : "isolate_win_toolchain.isolate" ,
cipdPkg : "win_toolchain" ,
2017-11-29 19:45:14 +00:00
} ,
ISOLATE_WIN_VULKAN_SDK_NAME : {
2017-12-07 14:54:05 +00:00
isolateFile : "isolate_win_vulkan_sdk.isolate" ,
cipdPkg : "win_vulkan_sdk" ,
2017-11-29 19:45:14 +00:00
} ,
2017-05-11 17:35:23 +00:00
}
2017-12-07 14:54:05 +00:00
// bundleRecipes generates the task to bundle and isolate the recipes.
2017-05-11 17:35:23 +00:00
func isolateCIPDAsset ( b * specs . TasksCfgBuilder , name string ) string {
b . MustAddTask ( name , & specs . TaskSpec {
CipdPackages : [ ] * specs . CipdPackage {
2017-12-07 14:54:05 +00:00
b . MustGetCipdPackageFromAsset ( ISOLATE_ASSET_MAPPING [ name ] . cipdPkg ) ,
2017-05-11 17:35:23 +00:00
} ,
Dimensions : linuxGceDimensions ( ) ,
2017-12-07 14:54:05 +00:00
Isolate : relpath ( ISOLATE_ASSET_MAPPING [ name ] . isolateFile ) ,
2017-05-11 17:35:23 +00:00
Priority : 0.7 ,
} )
return name
}
2017-05-15 12:30:27 +00:00
// getIsolatedCIPDDeps returns the slice of Isolate_* tasks a given task needs.
// This allows us to save time on I/O bound bots, like the RPIs.
func getIsolatedCIPDDeps ( parts map [ string ] string ) [ ] string {
deps := [ ] string { }
2017-05-11 17:35:23 +00:00
// Only do this on the RPIs for now. Other, faster machines shouldn't see much
// benefit and we don't need the extra complexity, for now
2017-05-15 12:30:27 +00:00
rpiOS := [ ] string { "Android" , "ChromeOS" , "iOS" }
if o := parts [ "os" ] ; strings . Contains ( o , "Chromecast" ) {
// Chromecasts don't have enough disk space to fit all of the content,
// so we do a subset of the skps.
deps = append ( deps , ISOLATE_SKP_NAME )
} else if e := parts [ "extra_config" ] ; strings . Contains ( e , "Skpbench" ) {
// Skpbench only needs skps
deps = append ( deps , ISOLATE_SKP_NAME )
} else if util . In ( o , rpiOS ) {
deps = append ( deps , ISOLATE_SKP_NAME )
deps = append ( deps , ISOLATE_SVG_NAME )
deps = append ( deps , ISOLATE_SKIMAGE_NAME )
}
return deps
2017-05-11 17:35:23 +00:00
}
2016-09-30 19:53:12 +00:00
// compile generates a compile task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
2016-10-20 18:04:31 +00:00
func compile ( b * specs . TasksCfgBuilder , name string , parts map [ string ] string ) string {
2016-09-30 19:53:12 +00:00
// Collect the necessary CIPD packages.
pkgs := [ ] * specs . CipdPackage { }
2017-12-07 14:54:05 +00:00
deps := [ ] string { }
2016-09-30 19:53:12 +00:00
// Android bots require a toolchain.
if strings . Contains ( name , "Android" ) {
2018-02-01 18:38:13 +00:00
if parts [ "extra_config" ] == "Android_Framework" {
// Do not need a toolchain when building the
// Android Framework.
} else if strings . Contains ( name , "Mac" ) {
2016-10-20 18:04:31 +00:00
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "android_ndk_darwin" ) )
2016-11-02 17:13:16 +00:00
} else if strings . Contains ( name , "Win" ) {
2016-11-02 19:44:26 +00:00
pkg := b . MustGetCipdPackageFromAsset ( "android_ndk_windows" )
pkg . Path = "n"
pkgs = append ( pkgs , pkg )
2016-09-30 19:53:12 +00:00
} else {
2017-11-29 19:45:14 +00:00
deps = append ( deps , isolateCIPDAsset ( b , ISOLATE_NDK_LINUX_NAME ) )
2016-09-30 19:53:12 +00:00
}
2017-03-08 19:01:01 +00:00
} else if strings . Contains ( name , "Chromecast" ) {
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "cast_toolchain" ) )
2017-05-24 19:30:35 +00:00
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "chromebook_arm_gles" ) )
2017-04-05 11:32:45 +00:00
} else if strings . Contains ( name , "Chromebook" ) {
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "clang_linux" ) )
2017-11-02 13:34:08 +00:00
if parts [ "target_arch" ] == "x86_64" {
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "chromebook_x86_64_gles" ) )
} else if parts [ "target_arch" ] == "arm" {
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "armhf_sysroot" ) )
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "chromebook_arm_gles" ) )
}
2017-06-28 15:45:54 +00:00
} else if strings . Contains ( name , "Debian" ) {
2017-01-18 14:24:56 +00:00
if strings . Contains ( name , "Clang" ) {
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "clang_linux" ) )
}
if strings . Contains ( name , "Vulkan" ) {
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "linux_vulkan_sdk" ) )
}
2017-09-21 17:45:16 +00:00
if strings . Contains ( name , "EMCC" ) {
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "emscripten_sdk" ) )
}
2016-11-09 21:31:42 +00:00
} else if strings . Contains ( name , "Win" ) {
2017-11-29 19:45:14 +00:00
deps = append ( deps , isolateCIPDAsset ( b , ISOLATE_WIN_TOOLCHAIN_NAME ) )
2017-07-31 18:57:20 +00:00
if strings . Contains ( name , "Clang" ) {
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "clang_win" ) )
}
2016-09-30 19:53:12 +00:00
if strings . Contains ( name , "Vulkan" ) {
2017-11-29 19:45:14 +00:00
deps = append ( deps , isolateCIPDAsset ( b , ISOLATE_WIN_VULKAN_SDK_NAME ) )
2016-09-30 19:53:12 +00:00
}
}
2017-03-20 17:38:45 +00:00
dimensions := swarmDimensions ( parts )
2016-09-30 19:53:12 +00:00
// Add the task.
2016-10-20 18:04:31 +00:00
b . MustAddTask ( name , & specs . TaskSpec {
2016-09-30 19:53:12 +00:00
CipdPackages : pkgs ,
2017-12-07 14:54:05 +00:00
Dimensions : dimensions ,
Dependencies : deps ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "compile" ,
2016-11-04 18:37:26 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
2016-09-30 19:53:12 +00:00
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
2017-04-21 13:37:37 +00:00
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
2016-10-13 13:23:45 +00:00
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
2016-11-04 18:37:26 +00:00
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
2016-09-30 19:53:12 +00:00
} ,
2017-12-07 14:54:05 +00:00
Isolate : relpath ( "compile_skia.isolate" ) ,
Priority : 0.8 ,
2016-10-20 18:04:31 +00:00
} )
2016-12-12 19:30:12 +00:00
// All compile tasks are runnable as their own Job. Assert that the Job
// is listed in JOBS.
if ! util . In ( name , JOBS ) {
glog . Fatalf ( "Job %q is missing from the JOBS list!" , name )
}
2017-12-12 22:08:24 +00:00
// Upload the skiaserve binary only for Linux Android compile bots.
// See skbug.com/7399 for context.
if parts [ "configuration" ] == "Release" &&
parts [ "extra_config" ] == "Android" &&
! strings . Contains ( parts [ "os" ] , "Win" ) &&
! strings . Contains ( parts [ "os" ] , "Mac" ) {
uploadName := fmt . Sprintf ( "%s%s%s" , PREFIX_UPLOAD , jobNameSchema . Sep , name )
b . MustAddTask ( uploadName , & specs . TaskSpec {
Dependencies : [ ] string { name } ,
Dimensions : linuxGceDimensions ( ) ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "upload_skiaserve" ,
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
} ,
// We're using the same isolate as upload_dm_results
Isolate : relpath ( "upload_dm_results.isolate" ) ,
Priority : 0.8 ,
} )
return uploadName
}
2016-09-30 19:53:12 +00:00
return name
}
// recreateSKPs generates a RecreateSKPs task. Returns the name of the last
// task in the generated chain of tasks, which the Job should add as a
// dependency.
2017-12-07 14:54:05 +00:00
func recreateSKPs ( b * specs . TasksCfgBuilder , name string ) string {
2016-11-08 17:55:32 +00:00
b . MustAddTask ( name , & specs . TaskSpec {
2017-12-07 14:54:05 +00:00
CipdPackages : [ ] * specs . CipdPackage { b . MustGetCipdPackageFromAsset ( "go" ) } ,
Dimensions : linuxGceDimensions ( ) ,
ExecutionTimeout : 4 * time . Hour ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "recreate_skps" ,
2016-11-08 17:55:32 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
2017-04-21 13:37:37 +00:00
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
2016-11-08 17:55:32 +00:00
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
} ,
2017-12-07 14:54:05 +00:00
IoTimeout : 40 * time . Minute ,
Isolate : relpath ( "compile_skia.isolate" ) ,
Priority : 0.8 ,
2016-11-08 17:55:32 +00:00
} )
2016-09-30 19:53:12 +00:00
return name
}
2017-05-17 18:28:06 +00:00
// updateMetaConfig generates a UpdateMetaConfig task. Returns the name of the
// last task in the generated chain of tasks, which the Job should add as a
// dependency.
2017-12-07 14:54:05 +00:00
func updateMetaConfig ( b * specs . TasksCfgBuilder , name string ) string {
2017-05-17 18:28:06 +00:00
b . MustAddTask ( name , & specs . TaskSpec {
2017-05-19 17:08:19 +00:00
CipdPackages : [ ] * specs . CipdPackage { } ,
2017-12-07 14:54:05 +00:00
Dimensions : linuxGceDimensions ( ) ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "update_meta_config" ,
2017-05-17 18:28:06 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
} ,
2017-12-07 14:54:05 +00:00
Isolate : relpath ( "meta_config.isolate" ) ,
Priority : 0.8 ,
2017-05-17 18:28:06 +00:00
} )
return name
}
2016-09-30 19:53:12 +00:00
// ctSKPs generates a CT SKPs task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
2017-12-07 14:54:05 +00:00
func ctSKPs ( b * specs . TasksCfgBuilder , name string ) string {
2016-11-08 17:55:32 +00:00
b . MustAddTask ( name , & specs . TaskSpec {
2017-08-04 18:13:27 +00:00
CipdPackages : [ ] * specs . CipdPackage { } ,
2017-12-07 14:54:05 +00:00
Dimensions : [ ] string {
"pool:SkiaCT" ,
fmt . Sprintf ( "os:%s" , DEFAULT_OS_LINUX_GCE ) ,
} ,
ExecutionTimeout : 24 * time . Hour ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "ct_skps" ,
2016-11-08 17:55:32 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
2017-04-21 13:37:37 +00:00
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
2016-11-08 17:55:32 +00:00
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
} ,
2017-12-07 14:54:05 +00:00
IoTimeout : time . Hour ,
Isolate : relpath ( "ct_skps_skia.isolate" ) ,
Priority : 0.8 ,
2016-11-08 17:55:32 +00:00
} )
2016-09-30 19:53:12 +00:00
return name
}
2017-07-28 11:35:28 +00:00
// checkGeneratedFiles verifies that no generated SKSL files have been edited
// by hand.
2017-12-07 14:54:05 +00:00
func checkGeneratedFiles ( b * specs . TasksCfgBuilder , name string ) string {
2017-07-28 11:35:28 +00:00
b . MustAddTask ( name , & specs . TaskSpec {
CipdPackages : [ ] * specs . CipdPackage { } ,
2017-12-07 14:54:05 +00:00
Dimensions : linuxGceDimensions ( ) ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "check_generated_files" ,
2017-07-28 11:35:28 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
} ,
2017-12-07 14:54:05 +00:00
Isolate : relpath ( "compile_skia.isolate" ) ,
Priority : 0.8 ,
2017-07-28 11:35:28 +00:00
} )
return name
}
2016-09-30 19:53:12 +00:00
// housekeeper generates a Housekeeper task. Returns the name of the last task
// in the generated chain of tasks, which the Job should add as a dependency.
2017-12-07 14:54:05 +00:00
func housekeeper ( b * specs . TasksCfgBuilder , name , compileTaskName string ) string {
2016-11-08 17:55:32 +00:00
b . MustAddTask ( name , & specs . TaskSpec {
2016-12-02 16:01:33 +00:00
CipdPackages : [ ] * specs . CipdPackage { b . MustGetCipdPackageFromAsset ( "go" ) } ,
2017-12-07 14:54:05 +00:00
Dependencies : [ ] string { compileTaskName } ,
Dimensions : linuxGceDimensions ( ) ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "housekeeper" ,
2016-11-08 17:55:32 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
2017-04-21 13:37:37 +00:00
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
2016-11-08 17:55:32 +00:00
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
} ,
2017-12-07 14:54:05 +00:00
Isolate : relpath ( "housekeeper_skia.isolate" ) ,
Priority : 0.8 ,
2016-11-08 17:55:32 +00:00
} )
2016-09-30 19:53:12 +00:00
return name
}
2017-12-08 17:58:20 +00:00
// bookmaker generates a Bookmaker task. Returns the name of the last task
// in the generated chain of tasks, which the Job should add as a dependency.
func bookmaker(b *specs.TasksCfgBuilder, name, compileTaskName string) string {
	task := &specs.TaskSpec{
		CipdPackages: []*specs.CipdPackage{b.MustGetCipdPackageFromAsset("go")},
		Dependencies: []string{compileTaskName},
		Dimensions:   linuxGceDimensions(),
		// Bookmaker runs can be slow; allow two hours for execution and I/O.
		ExecutionTimeout: 2 * time.Hour,
		ExtraArgs: []string{
			"--workdir", "../../..", "bookmaker",
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildername=%s", name),
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		IoTimeout: 2 * time.Hour,
		Isolate:   relpath("compile_skia.isolate"),
		Priority:  0.8,
	}
	b.MustAddTask(name, task)
	return name
}
2018-02-01 18:38:13 +00:00
// androidFrameworkCompile generates an Android Framework Compile task. Returns
// the name of the last task in the generated chain of tasks, which the Job
// should add as a dependency.
func androidFrameworkCompile(b *specs.TasksCfgBuilder, name string) string {
	task := &specs.TaskSpec{
		Dimensions: linuxGceDimensions(),
		// Android Framework compiles run the android_compile recipe rather
		// than the normal compile recipe.
		ExtraArgs: []string{
			"--workdir", "../../..", "android_compile",
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildername=%s", name),
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		Isolate:  relpath("compile_skia.isolate"),
		Priority: 0.8,
	}
	b.MustAddTask(name, task)
	return name
}
2016-10-14 13:32:09 +00:00
// infra generates an infra_tests task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
2017-12-07 14:54:05 +00:00
func infra ( b * specs . TasksCfgBuilder , name string ) string {
2016-10-20 18:04:31 +00:00
b . MustAddTask ( name , & specs . TaskSpec {
2017-12-07 14:54:05 +00:00
CipdPackages : [ ] * specs . CipdPackage { b . MustGetCipdPackageFromAsset ( "go" ) } ,
Dimensions : linuxGceDimensions ( ) ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "infra" ,
2016-11-04 18:37:26 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
2016-10-14 13:32:09 +00:00
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
2017-04-21 13:37:37 +00:00
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
2016-10-14 13:32:09 +00:00
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
2016-11-04 18:37:26 +00:00
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
2016-10-14 13:32:09 +00:00
} ,
2017-12-07 14:54:05 +00:00
Isolate : relpath ( "infra_skia.isolate" ) ,
Priority : 0.8 ,
2016-10-20 18:04:31 +00:00
} )
2016-10-14 13:32:09 +00:00
return name
}
2018-01-05 16:13:43 +00:00
// getParentRevisionName returns the name of the ParentRevision variant of the
// given compile task. The suffix separator depends on whether the task name
// already has an extra_config part: "-" introduces one, "_" extends one.
func getParentRevisionName(compileTaskName string, parts map[string]string) string {
	sep := "_"
	if parts["extra_config"] == "" {
		sep = "-"
	}
	return compileTaskName + sep + "ParentRevision"
}
2017-10-16 16:24:43 +00:00
// calmbench generates a calmbench task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
2018-01-05 16:13:43 +00:00
func calmbench ( b * specs . TasksCfgBuilder , name string , parts map [ string ] string , compileTaskName string , compileParentName string ) string {
2017-10-16 16:24:43 +00:00
s := & specs . TaskSpec {
2018-01-05 16:13:43 +00:00
Dependencies : [ ] string { compileTaskName , compileParentName } ,
2017-10-16 16:24:43 +00:00
CipdPackages : [ ] * specs . CipdPackage { b . MustGetCipdPackageFromAsset ( "clang_linux" ) } ,
2017-12-07 14:54:05 +00:00
Dimensions : swarmDimensions ( parts ) ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "calmbench" ,
2017-10-16 16:24:43 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
} ,
2018-01-05 16:13:43 +00:00
Isolate : relpath ( "calmbench.isolate" ) ,
2017-12-07 14:54:05 +00:00
Priority : 0.8 ,
2017-10-16 16:24:43 +00:00
}
s . Dependencies = append ( s . Dependencies , ISOLATE_SKP_NAME , ISOLATE_SVG_NAME )
b . MustAddTask ( name , s )
2017-10-24 13:43:21 +00:00
// Upload results if necessary.
if strings . Contains ( name , "Release" ) && doUpload ( name ) {
uploadName := fmt . Sprintf ( "%s%s%s" , PREFIX_UPLOAD , jobNameSchema . Sep , name )
b . MustAddTask ( uploadName , & specs . TaskSpec {
2017-12-07 14:54:05 +00:00
Dependencies : [ ] string { name } ,
Dimensions : linuxGceDimensions ( ) ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "upload_calmbench_results" ,
2017-10-24 13:43:21 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
fmt . Sprintf ( "gs_bucket=%s" , CONFIG . GsBucketCalm ) ,
} ,
2017-12-07 14:54:05 +00:00
// We're using the same isolate as upload_nano_results
Isolate : relpath ( "upload_nano_results.isolate" ) ,
Priority : 0.8 ,
2017-10-24 13:43:21 +00:00
} )
return uploadName
}
2017-10-16 16:24:43 +00:00
return name
}
2016-09-30 19:53:12 +00:00
// doUpload indicates whether the given Job should upload its results.
func doUpload ( name string ) bool {
2017-02-01 20:56:55 +00:00
for _ , s := range CONFIG . NoUpload {
m , err := regexp . MatchString ( s , name )
if err != nil {
glog . Fatal ( err )
}
if m {
2016-09-30 19:53:12 +00:00
return false
}
}
return true
}
// test generates a Test task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
2016-10-20 18:04:31 +00:00
func test ( b * specs . TasksCfgBuilder , name string , parts map [ string ] string , compileTaskName string , pkgs [ ] * specs . CipdPackage ) string {
2018-01-12 19:31:48 +00:00
deps := [ ] string { compileTaskName }
if strings . Contains ( name , "Android_ASAN" ) {
deps = append ( deps , isolateCIPDAsset ( b , ISOLATE_NDK_LINUX_NAME ) )
}
2016-11-08 17:55:32 +00:00
s := & specs . TaskSpec {
2017-12-07 14:54:05 +00:00
CipdPackages : pkgs ,
2018-01-12 19:31:48 +00:00
Dependencies : deps ,
2017-12-07 14:54:05 +00:00
Dimensions : swarmDimensions ( parts ) ,
ExecutionTimeout : 4 * time . Hour ,
Expiration : 20 * time . Hour ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "test" ,
2016-11-04 18:37:26 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
2017-11-29 17:33:22 +00:00
fmt . Sprintf ( "buildbucket_build_id=%s" , specs . PLACEHOLDER_BUILDBUCKET_BUILD_ID ) ,
2016-09-30 19:53:12 +00:00
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
2017-04-21 13:37:37 +00:00
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
2016-10-13 13:23:45 +00:00
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
2016-11-04 18:37:26 +00:00
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
2016-09-30 19:53:12 +00:00
} ,
2017-12-07 14:54:05 +00:00
IoTimeout : 40 * time . Minute ,
Isolate : relpath ( "test_skia.isolate" ) ,
MaxAttempts : 1 ,
Priority : 0.8 ,
}
if useBundledRecipes ( parts ) {
s . Dependencies = append ( s . Dependencies , BUNDLE_RECIPES_NAME )
if strings . Contains ( parts [ "os" ] , "Win" ) {
s . Isolate = relpath ( "test_skia_bundled_win.isolate" )
} else {
s . Isolate = relpath ( "test_skia_bundled_unix.isolate" )
}
2017-04-04 13:06:16 +00:00
}
2017-05-15 12:30:27 +00:00
if deps := getIsolatedCIPDDeps ( parts ) ; len ( deps ) > 0 {
s . Dependencies = append ( s . Dependencies , deps ... )
2017-05-11 17:35:23 +00:00
}
2016-11-08 17:55:32 +00:00
if strings . Contains ( parts [ "extra_config" ] , "Valgrind" ) {
s . ExecutionTimeout = 9 * time . Hour
s . Expiration = 48 * time . Hour
s . IoTimeout = time . Hour
2017-05-31 19:09:10 +00:00
s . CipdPackages = append ( s . CipdPackages , b . MustGetCipdPackageFromAsset ( "valgrind" ) )
2017-12-04 15:20:23 +00:00
s . Dimensions = append ( s . Dimensions , "valgrind:1" )
2016-11-08 17:55:32 +00:00
} else if strings . Contains ( parts [ "extra_config" ] , "MSAN" ) {
s . ExecutionTimeout = 9 * time . Hour
2017-06-08 14:34:17 +00:00
} else if parts [ "arch" ] == "x86" && parts [ "configuration" ] == "Debug" {
// skia:6737
s . ExecutionTimeout = 6 * time . Hour
2016-11-08 17:55:32 +00:00
}
2017-09-15 14:35:44 +00:00
iid := internalHardwareLabel ( parts )
2017-09-15 12:35:31 +00:00
if iid != nil {
2017-09-15 18:09:07 +00:00
s . ExtraArgs = append ( s . ExtraArgs , fmt . Sprintf ( "internal_hardware_label=%d" , * iid ) )
2017-09-15 12:35:31 +00:00
}
2016-11-08 17:55:32 +00:00
b . MustAddTask ( name , s )
2017-10-09 19:26:19 +00:00
// Upload results if necessary. TODO(kjlubick): If we do coverage analysis at the same
// time as normal tests (which would be nice), cfg.json needs to have Coverage removed.
2016-09-30 19:53:12 +00:00
if doUpload ( name ) {
uploadName := fmt . Sprintf ( "%s%s%s" , PREFIX_UPLOAD , jobNameSchema . Sep , name )
2016-10-20 18:04:31 +00:00
b . MustAddTask ( uploadName , & specs . TaskSpec {
2017-12-07 14:54:05 +00:00
Dependencies : [ ] string { name } ,
Dimensions : linuxGceDimensions ( ) ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "upload_dm_results" ,
2016-11-04 18:37:26 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
2016-09-30 19:53:12 +00:00
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
2017-04-21 13:37:37 +00:00
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
2016-10-13 13:23:45 +00:00
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
2016-11-04 18:37:26 +00:00
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
2017-02-06 20:38:41 +00:00
fmt . Sprintf ( "gs_bucket=%s" , CONFIG . GsBucketGm ) ,
2016-09-30 19:53:12 +00:00
} ,
2017-12-07 14:54:05 +00:00
Isolate : relpath ( "upload_dm_results.isolate" ) ,
Priority : 0.8 ,
2016-10-20 18:04:31 +00:00
} )
2016-09-30 19:53:12 +00:00
return uploadName
2017-10-17 17:40:52 +00:00
}
return name
}
func coverage ( b * specs . TasksCfgBuilder , name string , parts map [ string ] string , compileTaskName string , pkgs [ ] * specs . CipdPackage ) string {
shards := 1
deps := [ ] string { }
tf := parts [ "test_filter" ]
if strings . Contains ( tf , "Shard" ) {
// Expected Shard_NN
shardstr := strings . Split ( tf , "_" ) [ 1 ]
var err error
shards , err = strconv . Atoi ( shardstr )
if err != nil {
glog . Fatalf ( "Expected int for number of shards %q in %s: %s" , shardstr , name , err )
}
}
for i := 0 ; i < shards ; i ++ {
n := strings . Replace ( name , tf , fmt . Sprintf ( "shard_%02d_%02d" , i , shards ) , 1 )
s := & specs . TaskSpec {
2017-12-07 14:54:05 +00:00
CipdPackages : pkgs ,
Dependencies : [ ] string { compileTaskName } ,
Dimensions : swarmDimensions ( parts ) ,
ExecutionTimeout : 4 * time . Hour ,
Expiration : 20 * time . Hour ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "test" ,
2017-10-09 19:26:19 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
2017-10-17 17:40:52 +00:00
fmt . Sprintf ( "buildername=%s" , n ) ,
2017-10-09 19:26:19 +00:00
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
} ,
2017-12-07 14:54:05 +00:00
IoTimeout : 40 * time . Minute ,
Isolate : relpath ( "test_skia.isolate" ) ,
MaxAttempts : 1 ,
Priority : 0.8 ,
}
if useBundledRecipes ( parts ) {
s . Dependencies = append ( s . Dependencies , BUNDLE_RECIPES_NAME )
if strings . Contains ( parts [ "os" ] , "Win" ) {
s . Isolate = relpath ( "test_skia_bundled_win.isolate" )
} else {
s . Isolate = relpath ( "test_skia_bundled_unix.isolate" )
}
2017-10-17 17:40:52 +00:00
}
if deps := getIsolatedCIPDDeps ( parts ) ; len ( deps ) > 0 {
s . Dependencies = append ( s . Dependencies , deps ... )
}
b . MustAddTask ( n , s )
deps = append ( deps , n )
2016-09-30 19:53:12 +00:00
}
2017-10-09 19:26:19 +00:00
2017-10-17 17:40:52 +00:00
uploadName := fmt . Sprintf ( "%s%s%s" , "Upload" , jobNameSchema . Sep , name )
// We need clang_linux to get access to the llvm-profdata and llvm-cov binaries
// which are used to deal with the raw coverage data output by the Test step.
pkgs = append ( [ ] * specs . CipdPackage { } , b . MustGetCipdPackageFromAsset ( "clang_linux" ) )
deps = append ( deps , compileTaskName )
b . MustAddTask ( uploadName , & specs . TaskSpec {
// A dependency on compileTaskName makes the TaskScheduler link the
// isolated output of the compile step to the input of the upload step,
// which gives us access to the instrumented binary. The binary is
// needed to figure out symbol names and line numbers.
Dependencies : deps ,
Dimensions : linuxGceDimensions ( ) ,
CipdPackages : pkgs ,
2017-12-07 14:54:05 +00:00
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "upload_coverage_results" ,
2017-10-17 17:40:52 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
fmt . Sprintf ( "gs_bucket=%s" , CONFIG . GsBucketCoverage ) ,
} ,
2017-12-07 14:54:05 +00:00
Isolate : relpath ( "upload_coverage_results.isolate" ) ,
2017-10-17 17:40:52 +00:00
Priority : 0.8 ,
} )
return uploadName
2016-09-30 19:53:12 +00:00
}
// perf generates a Perf task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
2016-10-20 18:04:31 +00:00
func perf ( b * specs . TasksCfgBuilder , name string , parts map [ string ] string , compileTaskName string , pkgs [ ] * specs . CipdPackage ) string {
2017-04-10 12:14:33 +00:00
recipe := "perf"
2017-12-07 14:54:05 +00:00
isolate := relpath ( "perf_skia.isolate" )
2016-11-14 18:42:27 +00:00
if strings . Contains ( parts [ "extra_config" ] , "Skpbench" ) {
2017-04-10 12:14:33 +00:00
recipe = "skpbench"
2017-12-07 14:54:05 +00:00
isolate = relpath ( "skpbench_skia.isolate" )
if useBundledRecipes ( parts ) {
if strings . Contains ( parts [ "os" ] , "Win" ) {
isolate = relpath ( "skpbench_skia_bundled_win.isolate" )
} else {
isolate = relpath ( "skpbench_skia_bundled_unix.isolate" )
}
}
} else if useBundledRecipes ( parts ) {
if strings . Contains ( parts [ "os" ] , "Win" ) {
isolate = relpath ( "perf_skia_bundled_win.isolate" )
} else {
isolate = relpath ( "perf_skia_bundled_unix.isolate" )
}
2016-11-14 18:42:27 +00:00
}
2016-11-08 17:55:32 +00:00
s := & specs . TaskSpec {
2017-12-07 14:54:05 +00:00
CipdPackages : pkgs ,
Dependencies : [ ] string { compileTaskName } ,
Dimensions : swarmDimensions ( parts ) ,
ExecutionTimeout : 4 * time . Hour ,
Expiration : 20 * time . Hour ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , recipe ,
2016-11-04 18:37:26 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
2016-09-30 19:53:12 +00:00
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
2017-04-21 13:37:37 +00:00
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
2016-10-13 13:23:45 +00:00
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
2016-11-04 18:37:26 +00:00
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
2016-09-30 19:53:12 +00:00
} ,
2017-12-07 14:54:05 +00:00
IoTimeout : 40 * time . Minute ,
Isolate : isolate ,
MaxAttempts : 1 ,
Priority : 0.8 ,
}
if useBundledRecipes ( parts ) {
s . Dependencies = append ( s . Dependencies , BUNDLE_RECIPES_NAME )
2017-04-04 13:06:16 +00:00
}
2017-05-15 12:30:27 +00:00
if deps := getIsolatedCIPDDeps ( parts ) ; len ( deps ) > 0 {
s . Dependencies = append ( s . Dependencies , deps ... )
}
2016-11-08 17:55:32 +00:00
if strings . Contains ( parts [ "extra_config" ] , "Valgrind" ) {
s . ExecutionTimeout = 9 * time . Hour
s . Expiration = 48 * time . Hour
s . IoTimeout = time . Hour
2017-06-01 11:13:33 +00:00
s . CipdPackages = append ( s . CipdPackages , b . MustGetCipdPackageFromAsset ( "valgrind" ) )
2017-12-04 15:20:23 +00:00
s . Dimensions = append ( s . Dimensions , "valgrind:1" )
2016-11-08 17:55:32 +00:00
} else if strings . Contains ( parts [ "extra_config" ] , "MSAN" ) {
s . ExecutionTimeout = 9 * time . Hour
2017-06-08 14:34:17 +00:00
} else if parts [ "arch" ] == "x86" && parts [ "configuration" ] == "Debug" {
// skia:6737
s . ExecutionTimeout = 6 * time . Hour
2016-11-08 17:55:32 +00:00
}
2017-09-15 14:35:44 +00:00
iid := internalHardwareLabel ( parts )
2017-09-15 12:35:31 +00:00
if iid != nil {
2017-09-15 18:09:07 +00:00
s . ExtraArgs = append ( s . ExtraArgs , fmt . Sprintf ( "internal_hardware_label=%d" , * iid ) )
2017-09-15 12:35:31 +00:00
}
2016-11-08 17:55:32 +00:00
b . MustAddTask ( name , s )
2016-09-30 19:53:12 +00:00
// Upload results if necessary.
if strings . Contains ( name , "Release" ) && doUpload ( name ) {
uploadName := fmt . Sprintf ( "%s%s%s" , PREFIX_UPLOAD , jobNameSchema . Sep , name )
2016-10-20 18:04:31 +00:00
b . MustAddTask ( uploadName , & specs . TaskSpec {
2017-12-07 14:54:05 +00:00
Dependencies : [ ] string { name } ,
Dimensions : linuxGceDimensions ( ) ,
ExtraArgs : [ ] string {
"--workdir" , "../../.." , "upload_nano_results" ,
2016-11-04 18:37:26 +00:00
fmt . Sprintf ( "repository=%s" , specs . PLACEHOLDER_REPO ) ,
2016-09-30 19:53:12 +00:00
fmt . Sprintf ( "buildername=%s" , name ) ,
fmt . Sprintf ( "swarm_out_dir=%s" , specs . PLACEHOLDER_ISOLATED_OUTDIR ) ,
fmt . Sprintf ( "revision=%s" , specs . PLACEHOLDER_REVISION ) ,
2017-04-21 13:37:37 +00:00
fmt . Sprintf ( "patch_repo=%s" , specs . PLACEHOLDER_PATCH_REPO ) ,
2016-10-13 13:23:45 +00:00
fmt . Sprintf ( "patch_storage=%s" , specs . PLACEHOLDER_PATCH_STORAGE ) ,
2016-11-04 18:37:26 +00:00
fmt . Sprintf ( "patch_issue=%s" , specs . PLACEHOLDER_ISSUE ) ,
fmt . Sprintf ( "patch_set=%s" , specs . PLACEHOLDER_PATCHSET ) ,
2017-02-06 20:38:41 +00:00
fmt . Sprintf ( "gs_bucket=%s" , CONFIG . GsBucketNano ) ,
2016-09-30 19:53:12 +00:00
} ,
2017-12-07 14:54:05 +00:00
Isolate : relpath ( "upload_nano_results.isolate" ) ,
Priority : 0.8 ,
2016-10-20 18:04:31 +00:00
} )
2016-09-30 19:53:12 +00:00
return uploadName
}
return name
}
// process generates tasks and jobs for the given job name.
2016-10-20 18:04:31 +00:00
func process ( b * specs . TasksCfgBuilder , name string ) {
2016-09-30 19:53:12 +00:00
deps := [ ] string { }
2017-04-04 13:06:16 +00:00
// Bundle Recipes.
if name == BUNDLE_RECIPES_NAME {
deps = append ( deps , bundleRecipes ( b ) )
}
2017-05-11 17:35:23 +00:00
// Isolate CIPD assets.
if _ , ok := ISOLATE_ASSET_MAPPING [ name ] ; ok {
deps = append ( deps , isolateCIPDAsset ( b , name ) )
}
2016-09-30 19:53:12 +00:00
parts , err := jobNameSchema . ParseJobName ( name )
if err != nil {
glog . Fatal ( err )
}
// RecreateSKPs.
if strings . Contains ( name , "RecreateSKPs" ) {
2017-12-07 14:54:05 +00:00
deps = append ( deps , recreateSKPs ( b , name ) )
2016-09-30 19:53:12 +00:00
}
2017-05-17 18:28:06 +00:00
// UpdateMetaConfig bot.
if strings . Contains ( name , "UpdateMetaConfig" ) {
2017-12-07 14:54:05 +00:00
deps = append ( deps , updateMetaConfig ( b , name ) )
2017-05-17 18:28:06 +00:00
}
2016-09-30 19:53:12 +00:00
// CT bots.
if strings . Contains ( name , "-CT_" ) {
2017-12-07 14:54:05 +00:00
deps = append ( deps , ctSKPs ( b , name ) )
2016-09-30 19:53:12 +00:00
}
2016-10-14 13:32:09 +00:00
// Infra tests.
if name == "Housekeeper-PerCommit-InfraTests" {
2017-12-07 14:54:05 +00:00
deps = append ( deps , infra ( b , name ) )
2016-10-14 13:32:09 +00:00
}
2016-09-30 19:53:12 +00:00
// Compile bots.
if parts [ "role" ] == "Build" {
2018-02-01 18:38:13 +00:00
if parts [ "extra_config" ] == "Android_Framework" {
// Android Framework compile tasks use a different recipe.
deps = append ( deps , androidFrameworkCompile ( b , name ) )
} else {
deps = append ( deps , compile ( b , name , parts ) )
}
2016-09-30 19:53:12 +00:00
}
2016-11-15 20:18:20 +00:00
// Most remaining bots need a compile task.
2016-09-30 19:53:12 +00:00
compileTaskName := deriveCompileTaskName ( name , parts )
2016-10-17 17:17:53 +00:00
compileTaskParts , err := jobNameSchema . ParseJobName ( compileTaskName )
if err != nil {
glog . Fatal ( err )
}
2018-01-05 16:13:43 +00:00
compileParentName := getParentRevisionName ( compileTaskName , compileTaskParts )
compileParentParts , err := jobNameSchema . ParseJobName ( compileParentName )
if err != nil {
glog . Fatal ( err )
}
2016-11-17 16:33:27 +00:00
// These bots do not need a compile task.
2018-01-05 16:13:43 +00:00
if parts [ "role" ] != "Build" &&
2017-04-10 15:00:09 +00:00
name != "Housekeeper-PerCommit-BundleRecipes" &&
2016-11-15 20:18:20 +00:00
name != "Housekeeper-PerCommit-InfraTests" &&
2017-07-28 11:35:28 +00:00
name != "Housekeeper-PerCommit-CheckGeneratedFiles" &&
2018-02-01 18:38:13 +00:00
! strings . Contains ( name , "Android_Framework" ) &&
2016-11-30 19:05:16 +00:00
! strings . Contains ( name , "RecreateSKPs" ) &&
2017-05-17 18:28:06 +00:00
! strings . Contains ( name , "UpdateMetaConfig" ) &&
2017-06-12 17:03:29 +00:00
! strings . Contains ( name , "-CT_" ) &&
! strings . Contains ( name , "Housekeeper-PerCommit-Isolate" ) {
2016-10-20 18:04:31 +00:00
compile ( b , compileTaskName , compileTaskParts )
2018-01-10 16:14:52 +00:00
if parts [ "role" ] == "Calmbench" {
2018-01-05 16:13:43 +00:00
compile ( b , compileParentName , compileParentParts )
}
2016-10-17 17:17:53 +00:00
}
2016-09-30 19:53:12 +00:00
2017-07-28 11:35:28 +00:00
// Housekeepers.
2016-12-02 16:01:33 +00:00
if name == "Housekeeper-PerCommit" {
2017-12-07 14:54:05 +00:00
deps = append ( deps , housekeeper ( b , name , compileTaskName ) )
2016-09-30 19:53:12 +00:00
}
2017-07-28 11:35:28 +00:00
if name == "Housekeeper-PerCommit-CheckGeneratedFiles" {
2017-12-07 14:54:05 +00:00
deps = append ( deps , checkGeneratedFiles ( b , name ) )
2017-07-28 11:35:28 +00:00
}
2018-01-02 19:54:43 +00:00
if strings . Contains ( name , "Bookmaker" ) {
2017-12-08 17:58:20 +00:00
deps = append ( deps , bookmaker ( b , name , compileTaskName ) )
}
2016-09-30 19:53:12 +00:00
// Common assets needed by the remaining bots.
2017-05-11 17:35:23 +00:00
pkgs := [ ] * specs . CipdPackage { }
2017-05-15 12:30:27 +00:00
if deps := getIsolatedCIPDDeps ( parts ) ; len ( deps ) == 0 {
2017-05-11 17:35:23 +00:00
pkgs = [ ] * specs . CipdPackage {
b . MustGetCipdPackageFromAsset ( "skimage" ) ,
b . MustGetCipdPackageFromAsset ( "skp" ) ,
b . MustGetCipdPackageFromAsset ( "svg" ) ,
}
2016-09-30 19:53:12 +00:00
}
2017-05-15 12:30:27 +00:00
2017-10-02 14:48:32 +00:00
if strings . Contains ( name , "Ubuntu" ) || strings . Contains ( name , "Debian" ) {
if strings . Contains ( name , "SAN" ) {
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "clang_linux" ) )
}
2017-02-17 15:25:34 +00:00
if strings . Contains ( name , "Vulkan" ) {
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "linux_vulkan_sdk" ) )
}
2017-10-02 14:48:32 +00:00
if strings . Contains ( name , "Intel" ) && strings . Contains ( name , "GPU" ) {
if strings . Contains ( name , "Release" ) {
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "linux_vulkan_intel_driver_release" ) )
} else {
pkgs = append ( pkgs , b . MustGetCipdPackageFromAsset ( "linux_vulkan_intel_driver_debug" ) )
}
2017-02-06 17:45:29 +00:00
}
}
2016-09-30 19:53:12 +00:00
// Test bots.
2017-10-17 17:40:52 +00:00
if parts [ "role" ] == "Test" {
if strings . Contains ( parts [ "extra_config" ] , "Coverage" ) {
deps = append ( deps , coverage ( b , name , parts , compileTaskName , pkgs ) )
} else if ! strings . Contains ( name , "-CT_" ) {
deps = append ( deps , test ( b , name , parts , compileTaskName , pkgs ) )
}
2016-09-30 19:53:12 +00:00
}
// Perf bots.
2016-11-30 19:05:16 +00:00
if parts [ "role" ] == "Perf" && ! strings . Contains ( name , "-CT_" ) {
2016-10-20 18:04:31 +00:00
deps = append ( deps , perf ( b , name , parts , compileTaskName , pkgs ) )
2016-09-30 19:53:12 +00:00
}
2018-01-05 16:13:43 +00:00
// Calmbench bots.
if parts [ "role" ] == "Calmbench" {
deps = append ( deps , calmbench ( b , name , parts , compileTaskName , compileParentName ) )
}
2016-09-30 19:53:12 +00:00
// Add the Job spec.
2016-11-15 20:18:20 +00:00
j := & specs . JobSpec {
2016-09-30 19:53:12 +00:00
Priority : 0.8 ,
TaskSpecs : deps ,
2017-07-31 14:41:15 +00:00
Trigger : specs . TRIGGER_ANY_BRANCH ,
}
if strings . Contains ( name , "-Nightly-" ) {
j . Trigger = specs . TRIGGER_NIGHTLY
2017-10-16 14:31:41 +00:00
} else if strings . Contains ( name , "-Weekly-" ) || strings . Contains ( name , "CT_DM_1m_SKPs" ) {
2017-07-31 14:41:15 +00:00
j . Trigger = specs . TRIGGER_WEEKLY
} else if strings . Contains ( name , "Flutter" ) || strings . Contains ( name , "PDFium" ) || strings . Contains ( name , "CommandBuffer" ) {
j . Trigger = specs . TRIGGER_MASTER_ONLY
2016-11-30 19:05:16 +00:00
}
2016-12-12 19:30:12 +00:00
b . MustAddJob ( name , j )
2016-09-30 19:53:12 +00:00
}
2017-02-01 20:56:55 +00:00
// loadJson reads the JSON file indicated by the given command-line flag value,
// falling back to defaultPath when the flag is unset, and unmarshals its
// contents into val. Fatal on any read or parse error.
//
// The first parameter was renamed from "flag" to avoid shadowing the imported
// flag package inside this function.
func loadJson(flagVal *string, defaultPath string, val interface{}) {
	if *flagVal == "" {
		*flagVal = defaultPath
	}
	b, err := ioutil.ReadFile(*flagVal)
	if err != nil {
		// Include the file path so a bad flag value is easy to diagnose.
		glog.Fatalf("Failed to read %s: %s", *flagVal, err)
	}
	if err := json.Unmarshal(b, val); err != nil {
		glog.Fatalf("Failed to parse %s: %s", *flagVal, err)
	}
}
2016-09-30 19:53:12 +00:00
// Regenerate the tasks.json file.
func main ( ) {
2016-10-20 18:04:31 +00:00
b := specs . MustNewTasksCfgBuilder ( )
2017-02-01 20:56:55 +00:00
b . SetAssetsDir ( * assetsDir )
infraBots := path . Join ( b . CheckoutRoot ( ) , "infra" , "bots" )
// Load the jobs from a JSON file.
loadJson ( jobsFile , path . Join ( infraBots , "jobs.json" ) , & JOBS )
// Load general config information from a JSON file.
loadJson ( cfgFile , path . Join ( infraBots , "cfg.json" ) , & CONFIG )
2016-09-30 19:53:12 +00:00
// Create the JobNameSchema.
2017-02-07 14:16:30 +00:00
if * builderNameSchemaFile == "" {
* builderNameSchemaFile = path . Join ( b . CheckoutRoot ( ) , "infra" , "bots" , "recipe_modules" , "builder_name_schema" , "builder_name_schema.json" )
}
schema , err := NewJobNameSchema ( * builderNameSchemaFile )
2016-09-30 19:53:12 +00:00
if err != nil {
glog . Fatal ( err )
}
jobNameSchema = schema
// Create Tasks and Jobs.
2016-10-20 18:04:31 +00:00
for _ , name := range JOBS {
process ( b , name )
2016-09-30 19:53:12 +00:00
}
2016-10-20 18:04:31 +00:00
b . MustFinish ( )
2016-09-30 19:53:12 +00:00
}
// TODO(borenet): The below really belongs in its own file, probably next to the
// builder_name_schema.json file.

// JobNameSchema is a struct used for (de)constructing Job names in a
// predictable format.
type JobNameSchema struct {
	// Schema maps a role (the first part of a job name) to the ordered list
	// of part names which follow it.
	Schema map[string][]string `json:"builder_name_schema"`
	// Sep is the separator string between the parts of a job name.
	Sep string `json:"builder_name_sep"`
}
// NewJobNameSchema returns a JobNameSchema instance based on the given JSON
// file.
func NewJobNameSchema(jsonFile string) (*JobNameSchema, error) {
	f, err := os.Open(jsonFile)
	if err != nil {
		return nil, err
	}
	defer util.Close(f)
	// Decode the schema directly from the open file.
	rv := new(JobNameSchema)
	if err := json.NewDecoder(f).Decode(rv); err != nil {
		return nil, err
	}
	return rv, nil
}
// ParseJobName splits the given Job name into its component parts, according
// to the schema. The returned map always contains "role", one entry per
// schema key for that role, and "extra_config" when present.
func (s *JobNameSchema) ParseJobName(n string) (map[string]string, error) {
	segments := strings.Split(n, s.Sep)
	if len(segments) < 2 {
		return nil, fmt.Errorf("Invalid job name: %q", n)
	}
	role, rest := segments[0], segments[1:]
	keys, ok := s.Schema[role]
	if !ok {
		return nil, fmt.Errorf("Invalid job name; %q is not a valid role.", role)
	}
	// A single trailing segment beyond the schema keys is the optional
	// extra_config.
	extraConfig := ""
	if len(rest) == len(keys)+1 {
		extraConfig = rest[len(rest)-1]
		rest = rest[:len(rest)-1]
	}
	if len(rest) != len(keys) {
		return nil, fmt.Errorf("Invalid job name; %q has incorrect number of parts.", n)
	}
	rv := make(map[string]string, len(keys)+2)
	rv["role"] = role
	if extraConfig != "" {
		rv["extra_config"] = extraConfig
	}
	for i, key := range keys {
		rv[key] = rest[i]
	}
	return rv, nil
}
// MakeJobName assembles the given parts of a Job name, according to the schema.
// It is the inverse of ParseJobName: parts must contain "role", one value per
// schema key for that role, and optionally "extra_config".
func (s *JobNameSchema) MakeJobName(parts map[string]string) (string, error) {
	role, ok := parts["role"]
	if !ok {
		return "", fmt.Errorf("Invalid job parts; jobs must have a role.")
	}
	keys, ok := s.Schema[role]
	if !ok {
		return "", fmt.Errorf("Invalid job parts; unknown role %q", role)
	}
	segments := make([]string, 0, len(parts))
	segments = append(segments, role)
	for _, key := range keys {
		val, found := parts[key]
		if !found {
			return "", fmt.Errorf("Invalid job parts; missing %q", key)
		}
		segments = append(segments, val)
	}
	// The optional extra_config goes last.
	if extra, found := parts["extra_config"]; found {
		segments = append(segments, extra)
	}
	return strings.Join(segments, s.Sep), nil
}