blob: 0751124368c73f4cda8ac29e18ee8c5eebb402f3 [file] [log] [blame]
borenetdb182c72016-09-30 12:53:12 -07001// Copyright 2016 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5package main
6
7/*
8 Generate the tasks.json file.
9*/
10
11import (
12 "bytes"
13 "encoding/json"
borenet60b0a2d2016-10-04 12:45:41 -070014 "flag"
borenetdb182c72016-09-30 12:53:12 -070015 "fmt"
16 "io/ioutil"
17 "os"
18 "path"
19 "path/filepath"
20 "sort"
21 "strings"
22
23 "github.com/skia-dev/glog"
24 "go.skia.org/infra/go/common"
25 "go.skia.org/infra/go/util"
26 "go.skia.org/infra/task_scheduler/go/specs"
27)
28
const (
	// DEFAULT_OS is the Swarming "os" dimension used when a job name does
	// not include an OS part.
	DEFAULT_OS = "Ubuntu"

	// Pool for Skia bots.
	POOL_SKIA = "Skia"

	// Name prefix for upload jobs.
	PREFIX_UPLOAD = "Upload"
)
38
var (
	// "Constants"

	// Top-level list of all jobs to run at each commit.
	JOBS = []string{
		"Build-Ubuntu-GCC-x86_64-Release-GN",
		"Perf-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-GN",
		"Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-GN",
	}

	// UPLOAD_DIMENSIONS are the Swarming dimensions for upload tasks.
	UPLOAD_DIMENSIONS = []string{
		"cpu:x86-64-avx2",
		"gpu:none",
		"os:Ubuntu",
		fmt.Sprintf("pool:%s", POOL_SKIA),
	}

	// Defines the structure of job names. Initialized in main() before any
	// jobs are processed.
	jobNameSchema *JobNameSchema

	// Caches CIPD package info so that we don't have to re-read VERSION
	// files. Keyed by asset name; populated lazily by getCipdPackage.
	cipdPackages = map[string]*specs.CipdPackage{}

	// Path to the infra/bots directory. Set in main() from the checkout root.
	infrabotsDir = ""

	// Flags.
	testing = flag.Bool("test", false, "Run in test mode: verify that the output hasn't changed.")
)
70
71// deriveCompileTaskName returns the name of a compile task based on the given
72// job name.
73func deriveCompileTaskName(jobName string, parts map[string]string) string {
74 if parts["role"] == "Housekeeper" {
75 return "Build-Ubuntu-GCC-x86_64-Release-Shared"
76 } else if parts["role"] == "Test" || parts["role"] == "Perf" {
77 task_os := parts["os"]
78 ec := parts["extra_config"]
79 if task_os == "Android" {
80 if ec == "Vulkan" {
81 ec = "Android_Vulkan"
82 } else if !strings.Contains(ec, "GN_Android") {
83 ec = task_os
84 }
85 task_os = "Android"
86 } else if task_os == "iOS" {
87 ec = task_os
88 task_os = "Mac"
89 } else if strings.Contains(task_os, "Win") {
90 task_os = "Win"
91 }
92 name, err := jobNameSchema.MakeJobName(map[string]string{
93 "role": "Build",
94 "os": task_os,
95 "compiler": parts["compiler"],
96 "target_arch": parts["arch"],
97 "configuration": parts["configuration"],
98 "extra_config": ec,
99 })
100 if err != nil {
101 glog.Fatal(err)
102 }
103 return name
104 } else {
105 return jobName
106 }
107}
108
// swarmDimensions generates swarming bot dimensions for the given task.
// The result is a sorted list of "key:value" strings suitable for a Swarming
// task request.
func swarmDimensions(parts map[string]string) []string {
	// SkiaCT bots only need the dedicated pool; no other dimensions apply.
	if parts["extra_config"] == "SkiaCT" {
		return []string{
			"pool:SkiaCT",
		}
	}
	d := map[string]string{
		"pool": POOL_SKIA,
	}
	if os, ok := parts["os"]; ok {
		d["os"] = os
	} else {
		d["os"] = DEFAULT_OS
	}
	// Collapse all Windows variants to a single "Windows" dimension; a more
	// specific value may be re-set below for the AVX2 workaround.
	if strings.Contains(d["os"], "Win") {
		d["os"] = "Windows"
	}
	if parts["role"] == "Test" || parts["role"] == "Perf" {
		if strings.Contains(parts["os"], "Android") {
			// For Android, the device type is a better dimension
			// than CPU or GPU.
			// NOTE(review): an unknown model yields "" here, producing an
			// empty "device_type:" dimension — confirm that is intended.
			d["device_type"] = map[string]string{
				"AndroidOne":    "sprout",
				"GalaxyS3":      "m0", // "smdk4x12", Detected incorrectly by swarming?
				"GalaxyS4":      "",   // TODO(borenet,kjlubick)
				"GalaxyS7":      "heroqlteatt",
				"NVIDIA_Shield": "foster",
				"Nexus10":       "manta",
				"Nexus5":        "hammerhead",
				"Nexus6":        "shamu",
				"Nexus6p":       "angler",
				"Nexus7":        "grouper",
				"Nexus7v2":      "flo",
				"Nexus9":        "flounder",
				"NexusPlayer":   "fugu",
			}[parts["model"]]
		} else if strings.Contains(parts["os"], "iOS") {
			d["device"] = map[string]string{
				"iPad4": "iPad4,1",
			}[parts["model"]]
			// TODO(borenet): Replace this hack with something
			// better.
			d["os"] = "iOS-9.2"
		} else if parts["cpu_or_gpu"] == "CPU" {
			// CPU tasks pin the CPU variant and explicitly exclude a GPU.
			d["gpu"] = "none"
			d["cpu"] = map[string]string{
				"AVX":  "x86-64",
				"AVX2": "x86-64-avx2",
				"SSE4": "x86-64",
			}[parts["cpu_or_gpu_value"]]
			if strings.Contains(parts["os"], "Win") && parts["cpu_or_gpu_value"] == "AVX2" {
				// AVX2 is not correctly detected on Windows. Fall back on other
				// dimensions to ensure that we correctly target machines which we know
				// have AVX2 support.
				d["cpu"] = "x86-64"
				d["os"] = "Windows-2008ServerR2-SP1"
			}
		} else {
			// GPU tasks target a specific PCI vendor:device ID.
			d["gpu"] = map[string]string{
				"GeForce320M": "10de:08a4",
				"GT610":       "10de:104a",
				"GTX550Ti":    "10de:1244",
				"GTX660":      "10de:11c0",
				"GTX960":      "10de:1401",
				"HD4000":      "8086:0a2e",
				"HD4600":      "8086:0412",
				"HD7770":      "1002:683d",
				"iHD530":      "8086:1912",
			}[parts["cpu_or_gpu_value"]]
		}
	} else {
		// Non-Test/Perf tasks (e.g. Build) do not need a GPU.
		d["gpu"] = "none"
	}
	// Render the dimension map as sorted "key:value" strings so the output
	// is deterministic.
	rv := make([]string, 0, len(d))
	for k, v := range d {
		rv = append(rv, fmt.Sprintf("%s:%s", k, v))
	}
	sort.Strings(rv)
	return rv
}
190
191// getCipdPackage finds and returns the given CIPD package and version.
192func getCipdPackage(assetName string) *specs.CipdPackage {
193 if pkg, ok := cipdPackages[assetName]; ok {
194 return pkg
195 }
196 versionFile := path.Join(infrabotsDir, "assets", assetName, "VERSION")
197 contents, err := ioutil.ReadFile(versionFile)
198 if err != nil {
199 glog.Fatal(err)
200 }
201 version := strings.TrimSpace(string(contents))
202 pkg := &specs.CipdPackage{
203 Name: fmt.Sprintf("skia/bots/%s", assetName),
204 Path: assetName,
205 Version: fmt.Sprintf("version:%s", version),
206 }
207 if assetName == "win_toolchain" {
208 pkg.Path = "t" // Workaround for path length limit on Windows.
209 }
210 cipdPackages[assetName] = pkg
211 return pkg
212}
213
214// compile generates a compile task. Returns the name of the last task in the
215// generated chain of tasks, which the Job should add as a dependency.
216func compile(cfg *specs.TasksCfg, name string, parts map[string]string) string {
217 // Collect the necessary CIPD packages.
218 pkgs := []*specs.CipdPackage{}
219
220 // Android bots require a toolchain.
221 if strings.Contains(name, "Android") {
222 pkgs = append(pkgs, getCipdPackage("android_sdk"))
223 if strings.Contains(name, "Mac") {
224 pkgs = append(pkgs, getCipdPackage("android_ndk_darwin"))
225 } else {
226 pkgs = append(pkgs, getCipdPackage("android_ndk_linux"))
227 }
228 }
229
230 // Clang on Linux.
231 if strings.Contains(name, "Ubuntu") && strings.Contains(name, "Clang") {
232 pkgs = append(pkgs, getCipdPackage("clang_linux"))
233 }
234
235 // Windows toolchain.
236 if strings.Contains(name, "Win") {
237 pkgs = append(pkgs, getCipdPackage("win_toolchain"))
238 if strings.Contains(name, "Vulkan") {
239 pkgs = append(pkgs, getCipdPackage("win_vulkan_sdk"))
240 }
241 }
242
243 // Add the task.
244 cfg.Tasks[name] = &specs.TaskSpec{
245 CipdPackages: pkgs,
246 Dimensions: swarmDimensions(parts),
247 ExtraArgs: []string{
248 "--workdir", "../../..", "swarm_compile",
249 fmt.Sprintf("buildername=%s", name),
250 "mastername=fake-master",
251 "buildnumber=2",
252 "slavename=fake-buildslave",
253 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
254 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
255 },
256 Isolate: "compile_skia.isolate",
257 Priority: 0.8,
258 }
259 return name
260}
261
// recreateSKPs generates a RecreateSKPs task. Returns the name of the last
// task in the generated chain of tasks, which the Job should add as a
// dependency.
func recreateSKPs(cfg *specs.TasksCfg, name string) string {
	// TODO
	// NOTE(review): not yet implemented — no TaskSpec is added to cfg, so
	// the returned name refers to a task that does not exist yet.
	return name
}
269
// ctSKPs generates a CT SKPs task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
func ctSKPs(cfg *specs.TasksCfg, name string) string {
	// TODO
	// NOTE(review): not yet implemented — no TaskSpec is added to cfg.
	return name
}
276
// housekeeper generates a Housekeeper task. Returns the name of the last task
// in the generated chain of tasks, which the Job should add as a dependency.
func housekeeper(cfg *specs.TasksCfg, name, compileTaskName string) string {
	// TODO
	// NOTE(review): not yet implemented — compileTaskName is currently
	// unused and no TaskSpec is added to cfg.
	return name
}
283
// doUpload indicates whether the given Job should upload its results.
// Sanitizer, coverage, and Valgrind jobs are excluded.
func doUpload(name string) bool {
	for _, skip := range []string{"ASAN", "Coverage", "MSAN", "TSAN", "UBSAN", "Valgrind"} {
		if strings.Contains(name, skip) {
			return false
		}
	}
	return true
}
301
302// test generates a Test task. Returns the name of the last task in the
303// generated chain of tasks, which the Job should add as a dependency.
304func test(cfg *specs.TasksCfg, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
305 cfg.Tasks[name] = &specs.TaskSpec{
306 CipdPackages: pkgs,
307 Dependencies: []string{compileTaskName},
308 Dimensions: swarmDimensions(parts),
309 ExtraArgs: []string{
310 "--workdir", "../../..", "swarm_test",
311 fmt.Sprintf("buildername=%s", name),
312 "mastername=fake-master",
313 "buildnumber=2",
314 "slavename=fake-buildslave",
315 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
316 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
317 },
318 Isolate: "test_skia.isolate",
319 Priority: 0.8,
320 }
321 // Upload results if necessary.
322 if doUpload(name) {
323 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
324 cfg.Tasks[uploadName] = &specs.TaskSpec{
325 Dependencies: []string{name},
326 Dimensions: UPLOAD_DIMENSIONS,
327 ExtraArgs: []string{
328 "--workdir", "../../..", "upload_dm_results",
329 fmt.Sprintf("buildername=%s", name),
330 "mastername=fake-master",
331 "buildnumber=2",
332 "slavename=fake-buildslave",
333 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
334 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
335 },
336 Isolate: "upload_dm_results.isolate",
337 Priority: 0.8,
338 }
339 return uploadName
340 }
341 return name
342}
343
344// perf generates a Perf task. Returns the name of the last task in the
345// generated chain of tasks, which the Job should add as a dependency.
346func perf(cfg *specs.TasksCfg, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
347 cfg.Tasks[name] = &specs.TaskSpec{
348 CipdPackages: pkgs,
349 Dependencies: []string{compileTaskName},
350 Dimensions: swarmDimensions(parts),
351 ExtraArgs: []string{
352 "--workdir", "../../..", "swarm_perf",
353 fmt.Sprintf("buildername=%s", name),
354 "mastername=fake-master",
355 "buildnumber=2",
356 "slavename=fake-buildslave",
357 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
358 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
359 },
360 Isolate: "perf_skia.isolate",
361 Priority: 0.8,
362 }
363 // Upload results if necessary.
364 if strings.Contains(name, "Release") && doUpload(name) {
365 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
366 cfg.Tasks[uploadName] = &specs.TaskSpec{
367 Dependencies: []string{name},
368 Dimensions: UPLOAD_DIMENSIONS,
369 ExtraArgs: []string{
370 "--workdir", "../../..", "upload_nano_results",
371 fmt.Sprintf("buildername=%s", name),
372 "mastername=fake-master",
373 "buildnumber=2",
374 "slavename=fake-buildslave",
375 fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
376 fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
377 },
378 Isolate: "upload_nano_results.isolate",
379 Priority: 0.8,
380 }
381 return uploadName
382 }
383 return name
384}
385
// process generates tasks and jobs for the given job name. The generated
// TaskSpecs are registered in cfg.Tasks and a JobSpec depending on them is
// added to cfg.Jobs under the given name. Logs fatally on a duplicate job
// name or a name which does not match the schema.
func process(cfg *specs.TasksCfg, name string) {
	if _, ok := cfg.Jobs[name]; ok {
		glog.Fatalf("Duplicate job %q", name)
	}
	deps := []string{}

	parts, err := jobNameSchema.ParseJobName(name)
	if err != nil {
		glog.Fatal(err)
	}

	// RecreateSKPs.
	if strings.Contains(name, "RecreateSKPs") {
		deps = append(deps, recreateSKPs(cfg, name))
	}

	// CT bots.
	if strings.Contains(name, "-CT_") {
		deps = append(deps, ctSKPs(cfg, name))
	}

	// Compile bots.
	if parts["role"] == "Build" {
		deps = append(deps, compile(cfg, name, parts))
	}

	// Any remaining bots need a compile task.
	compileTaskName := deriveCompileTaskName(name, parts)

	// Housekeeper.
	if parts["role"] == "Housekeeper" {
		deps = append(deps, housekeeper(cfg, name, compileTaskName))
	}

	// Common assets needed by the remaining bots.
	pkgs := []*specs.CipdPackage{
		getCipdPackage("skimage"),
		getCipdPackage("skp"),
		getCipdPackage("svg"),
	}

	// Test bots.
	if parts["role"] == "Test" {
		deps = append(deps, test(cfg, name, parts, compileTaskName, pkgs))
	}

	// Perf bots.
	if parts["role"] == "Perf" {
		deps = append(deps, perf(cfg, name, parts, compileTaskName, pkgs))
	}

	// Add the Job spec. The job depends on the last task in each generated
	// chain (e.g. the upload task when one was added).
	cfg.Jobs[name] = &specs.JobSpec{
		Priority:  0.8,
		TaskSpecs: deps,
	}
}
444
445// getCheckoutRoot returns the path of the root of the Skia checkout, or an
446// error if it cannot be found.
447func getCheckoutRoot() string {
448 cwd, err := os.Getwd()
449 if err != nil {
450 glog.Fatal(err)
451 }
452 for {
453 if _, err := os.Stat(cwd); err != nil {
454 glog.Fatal(err)
455 }
456 s, err := os.Stat(path.Join(cwd, ".git"))
457 if err == nil && s.IsDir() {
458 // TODO(borenet): Should we verify that this is a Skia
459 // checkout and not something else?
460 return cwd
461 }
462 cwd = filepath.Clean(path.Join(cwd, ".."))
463 }
464}
465
// Regenerate the tasks.json file.
// In normal mode the generated config is written to the checkout root; with
// --test the file is read back and compared instead, failing on any diff.
func main() {
	common.Init()
	defer common.LogPanic()

	// Where are we?
	root := getCheckoutRoot()
	infrabotsDir = path.Join(root, "infra", "bots")

	// Create the JobNameSchema.
	schema, err := NewJobNameSchema(path.Join(infrabotsDir, "recipe_modules", "builder_name_schema", "builder_name_schema.json"))
	if err != nil {
		glog.Fatal(err)
	}
	jobNameSchema = schema

	// Create the config.
	cfg := &specs.TasksCfg{
		Jobs:  map[string]*specs.JobSpec{},
		Tasks: map[string]*specs.TaskSpec{},
	}

	// Create Tasks and Jobs.
	for _, j := range JOBS {
		process(cfg, j)
	}

	// Validate the config.
	if err := cfg.Validate(); err != nil {
		glog.Fatal(err)
	}

	// Write the tasks.json file.
	b, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		glog.Fatal(err)
	}
	// The json package escapes HTML characters, which makes our output
	// much less readable. Replace the escape characters with the real
	// character.
	b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)

	outFile := path.Join(root, specs.TASKS_CFG_FILE)
	if *testing {
		// Don't write the file; read it and compare.
		expect, err := ioutil.ReadFile(outFile)
		if err != nil {
			glog.Fatal(err)
		}
		if !bytes.Equal(expect, b) {
			glog.Fatalf("Expected no changes, but changes were found!")
		}
	} else {
		if err := ioutil.WriteFile(outFile, b, os.ModePerm); err != nil {
			glog.Fatal(err)
		}
	}
}
524
525// TODO(borenet): The below really belongs in its own file, probably next to the
526// builder_name_schema.json file.
527
// JobNameSchema is a struct used for (de)constructing Job names in a
// predictable format.
type JobNameSchema struct {
	// Schema maps a role (the first part of a job name) to the ordered
	// list of part names which follow it.
	Schema map[string][]string `json:"builder_name_schema"`
	// Sep is the separator placed between parts of a job name.
	Sep string `json:"builder_name_sep"`
}
534
535// NewJobNameSchema returns a JobNameSchema instance based on the given JSON
536// file.
537func NewJobNameSchema(jsonFile string) (*JobNameSchema, error) {
538 var rv JobNameSchema
539 f, err := os.Open(jsonFile)
540 if err != nil {
541 return nil, err
542 }
543 defer util.Close(f)
544 if err := json.NewDecoder(f).Decode(&rv); err != nil {
545 return nil, err
546 }
547 return &rv, nil
548}
549
550// ParseJobName splits the given Job name into its component parts, according
551// to the schema.
552func (s *JobNameSchema) ParseJobName(n string) (map[string]string, error) {
553 split := strings.Split(n, s.Sep)
554 if len(split) < 2 {
555 return nil, fmt.Errorf("Invalid job name: %q", n)
556 }
557 role := split[0]
558 split = split[1:]
559 keys, ok := s.Schema[role]
560 if !ok {
561 return nil, fmt.Errorf("Invalid job name; %q is not a valid role.", role)
562 }
563 extraConfig := ""
564 if len(split) == len(keys)+1 {
565 extraConfig = split[len(split)-1]
566 split = split[:len(split)-1]
567 }
568 if len(split) != len(keys) {
569 return nil, fmt.Errorf("Invalid job name; %q has incorrect number of parts.", n)
570 }
571 rv := make(map[string]string, len(keys)+2)
572 rv["role"] = role
573 if extraConfig != "" {
574 rv["extra_config"] = extraConfig
575 }
576 for i, k := range keys {
577 rv[k] = split[i]
578 }
579 return rv, nil
580}
581
582// MakeJobName assembles the given parts of a Job name, according to the schema.
583func (s *JobNameSchema) MakeJobName(parts map[string]string) (string, error) {
584 role, ok := parts["role"]
585 if !ok {
586 return "", fmt.Errorf("Invalid job parts; jobs must have a role.")
587 }
588 keys, ok := s.Schema[role]
589 if !ok {
590 return "", fmt.Errorf("Invalid job parts; unknown role %q", role)
591 }
592 rvParts := make([]string, 0, len(parts))
593 rvParts = append(rvParts, role)
594 for _, k := range keys {
595 v, ok := parts[k]
596 if !ok {
597 return "", fmt.Errorf("Invalid job parts; missing %q", k)
598 }
599 rvParts = append(rvParts, v)
600 }
601 if _, ok := parts["extra_config"]; ok {
602 rvParts = append(rvParts, parts["extra_config"])
603 }
604 return strings.Join(rvParts, s.Sep), nil
605}