[mb] Copy MB from Chromium repo
This prepares deletion of MB on the Chromium side. This runs validation of the v8 configs as presubmit and removes some obsolete code that required chromium. BUG=chromium:616035 Review-Url: https://codereview.chromium.org/2299953002 Cr-Commit-Position: refs/heads/master@{#39082}
This commit is contained in:
parent
bbe5c5490c
commit
9c9d1e18c7
1
.gitignore
vendored
1
.gitignore
vendored
@ -81,7 +81,6 @@ shell_g
|
||||
/tools/luci-go/linux64/isolate
|
||||
/tools/luci-go/mac64/isolate
|
||||
/tools/luci-go/win64/isolate.exe
|
||||
/tools/mb
|
||||
/tools/oom_dump/oom_dump
|
||||
/tools/oom_dump/oom_dump.o
|
||||
/tools/swarming_client
|
||||
|
2
DEPS
2
DEPS
@ -25,8 +25,6 @@ deps = {
|
||||
Var("git_url") + "/chromium/src/third_party/jinja2.git" + "@" + "2222b31554f03e62600cd7e383376a7c187967a1",
|
||||
"v8/third_party/markupsafe":
|
||||
Var("git_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "484a5661041cac13bfc688a26ec5434b05d18961",
|
||||
"v8/tools/mb":
|
||||
Var('git_url') + '/chromium/src/tools/mb.git' + '@' + "2f9349ee2aec6dd7b167b26cbbcac2891a9649ab",
|
||||
"v8/tools/swarming_client":
|
||||
Var('git_url') + '/external/swarming.client.git' + '@' + "e4288c3040a32f2e7ad92f957668f2ee3d36e5a6",
|
||||
"v8/testing/gtest":
|
||||
|
3
tools/mb/OWNERS
Normal file
3
tools/mb/OWNERS
Normal file
@ -0,0 +1,3 @@
|
||||
brettw@chromium.org
|
||||
dpranke@chromium.org
|
||||
machenbach@chromium.org
|
41
tools/mb/PRESUBMIT.py
Normal file
41
tools/mb/PRESUBMIT.py
Normal file
@ -0,0 +1,41 @@
|
||||
# Copyright 2016 the V8 project authors. All rights reserved.
|
||||
# Copyright 2015 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
|
||||
def _CommonChecks(input_api, output_api):
|
||||
results = []
|
||||
|
||||
# Run Pylint over the files in the directory.
|
||||
pylint_checks = input_api.canned_checks.GetPylint(input_api, output_api)
|
||||
results.extend(input_api.RunTests(pylint_checks))
|
||||
|
||||
# Run the MB unittests.
|
||||
results.extend(input_api.canned_checks.RunUnitTestsInDirectory(
|
||||
input_api, output_api, '.', [ r'^.+_unittest\.py$']))
|
||||
|
||||
# Validate the format of the mb_config.pyl file.
|
||||
cmd = [input_api.python_executable, 'mb.py', 'validate']
|
||||
kwargs = {'cwd': input_api.PresubmitLocalPath()}
|
||||
results.extend(input_api.RunTests([
|
||||
input_api.Command(name='mb_validate',
|
||||
cmd=cmd, kwargs=kwargs,
|
||||
message=output_api.PresubmitError)]))
|
||||
|
||||
results.extend(
|
||||
input_api.canned_checks.CheckLongLines(
|
||||
input_api,
|
||||
output_api,
|
||||
maxlen=80,
|
||||
source_file_filter=lambda x: 'mb_config.pyl' in x.LocalPath()))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def CheckChangeOnUpload(input_api, output_api):
|
||||
return _CommonChecks(input_api, output_api)
|
||||
|
||||
|
||||
def CheckChangeOnCommit(input_api, output_api):
|
||||
return _CommonChecks(input_api, output_api)
|
22
tools/mb/README.md
Normal file
22
tools/mb/README.md
Normal file
@ -0,0 +1,22 @@
|
||||
# MB - The Meta-Build wrapper
|
||||
|
||||
MB is a simple wrapper intended to provide a uniform interface to either
|
||||
GYP or GN, such that users and bots can call one script and not need to
|
||||
worry about whether a given bot is meant to use GN or GYP.
|
||||
|
||||
It supports two main functions:
|
||||
|
||||
1. "gen" - the main `gyp_chromium` / `gn gen` invocation that generates the
|
||||
Ninja files needed for the build.
|
||||
|
||||
2. "analyze" - the step that takes a list of modified files and a list of
|
||||
desired targets and reports which targets will need to be rebuilt.
|
||||
|
||||
We also use MB as a forcing function to collect all of the different
|
||||
build configurations that we actually support for Chromium builds into
|
||||
one place, in `//tools/mb/mb_config.pyl`.
|
||||
|
||||
For more information, see:
|
||||
|
||||
* [The User Guide](docs/user_guide.md)
|
||||
* [The Design Spec](docs/design_spec.md)
|
4
tools/mb/docs/README.md
Normal file
4
tools/mb/docs/README.md
Normal file
@ -0,0 +1,4 @@
|
||||
# The MB (Meta-Build wrapper) documentation
|
||||
|
||||
* The [User Guide](user_guide.md)
|
||||
* The [Design Spec](design_spec.md)
|
426
tools/mb/docs/design_spec.md
Normal file
426
tools/mb/docs/design_spec.md
Normal file
@ -0,0 +1,426 @@
|
||||
# The MB (Meta-Build wrapper) design spec
|
||||
|
||||
[TOC]
|
||||
|
||||
## Intro
|
||||
|
||||
MB is intended to address two major aspects of the GYP -> GN transition
|
||||
for Chromium:
|
||||
|
||||
1. "bot toggling" - make it so that we can easily flip a given bot
|
||||
back and forth between GN and GYP.
|
||||
|
||||
2. "bot configuration" - provide a single source of truth for all of
|
||||
the different configurations (os/arch/`gyp_define` combinations) of
|
||||
Chromium that are supported.
|
||||
|
||||
MB must handle at least the `gen` and `analyze` steps on the bots, i.e.,
|
||||
we need to wrap both the `gyp_chromium` invocation to generate the
|
||||
Ninja files, and the `analyze` step that takes a list of modified files
|
||||
and a list of targets to build and returns which targets are affected by
|
||||
the files.
|
||||
|
||||
For more information on how to actually use MB, see
|
||||
[the user guide](user_guide.md).
|
||||
|
||||
## Design
|
||||
|
||||
MB is intended to be as simple as possible, and to defer as much work as
|
||||
possible to GN or GYP. It should live as a very simple Python wrapper
|
||||
that offers little in the way of surprises.
|
||||
|
||||
### Command line
|
||||
|
||||
It is structured as a single binary that supports a list of subcommands:
|
||||
|
||||
* `mb gen -c linux_rel_bot //out/Release`
|
||||
* `mb analyze -m tryserver.chromium.linux -b linux_rel /tmp/input.json /tmp/output.json`
|
||||
|
||||
### Configurations
|
||||
|
||||
`mb` will first look for a bot config file in a set of different locations
|
||||
(initially just in //ios/build/bots). Bot config files are JSON files that
|
||||
contain keys for 'GYP_DEFINES' (a list of strings that will be joined together
|
||||
with spaces and passed to GYP, or a dict that will be similarly converted),
|
||||
'gn_args' (a list of strings that will be joined together), and an
|
||||
'mb_type' field that says whether to use GN or GYP. Bot config files
|
||||
require the full list of settings to be given explicitly.
|
||||
|
||||
If no matching bot config file is found, `mb` looks in the
|
||||
`//tools/mb/mb_config.pyl` config file to determine whether to use GYP or GN
|
||||
for a particular build directory, and what set of flags (`GYP_DEFINES` or `gn
|
||||
args`) to use.
|
||||
|
||||
A config can either be specified directly (useful for testing) or by specifying
|
||||
the master name and builder name (useful on the bots so that they do not need
|
||||
to specify a config directly and can be hidden from the details).
|
||||
|
||||
See the [user guide](user_guide.md#mb_config.pyl) for details.
|
||||
|
||||
### Handling the analyze step
|
||||
|
||||
The interface to `mb analyze` is described in the
|
||||
[user\_guide](user_guide.md#mb_analyze).
|
||||
|
||||
The way analyze works can be subtle and complicated (see below).
|
||||
|
||||
Since the interface basically mirrors the way the "analyze" step on the bots
|
||||
invokes `gyp_chromium` today, when the config is found to be a gyp config,
|
||||
the arguments are passed straight through.
|
||||
|
||||
It implements the equivalent functionality in GN by calling `gn refs
|
||||
[list of files] --type=executable --all --as=output` and filtering the
|
||||
output to match the list of targets.
|
||||
|
||||
## Analyze
|
||||
|
||||
The goal of the `analyze` step is to speed up the cycle time of the try servers
|
||||
by only building and running the tests affected by the files in a patch, rather
|
||||
than everything that might be out of date. Doing this ends up being tricky.
|
||||
|
||||
We start with the following requirements and observations:
|
||||
|
||||
* In an ideal (un-resource-constrained) world, we would build and test
|
||||
everything that a patch affected on every patch. This does not
|
||||
necessarily mean that we would build 'all' on every patch (see below).
|
||||
|
||||
* In the real world, however, we do not have an infinite number of machines,
|
||||
and try jobs are not infinitely fast, so we need to balance the desire
|
||||
to get maximum test coverage against the desire to have reasonable cycle
|
||||
times, given the number of machines we have.
|
||||
|
||||
* Also, since we run most try jobs against tip-of-tree Chromium, by
|
||||
the time one job completes on the bot, new patches have probably landed,
|
||||
rendering the build out of date.
|
||||
|
||||
* This means that the next try job may have to do a build that is out of
|
||||
date due to a combination of files affected by a given patch, and files
|
||||
affected for unrelated reasons. We want to rebuild and test only the
|
||||
targets affected by the patch, so that we don't blame or punish the
|
||||
patch author for unrelated changes.
|
||||
|
||||
So:
|
||||
|
||||
1. We need a way to indicate which changed files we care about and which
|
||||
we don't (the affected files of a patch).
|
||||
|
||||
2. We need to know which tests we might potentially want to run, and how
|
||||
those are mapped onto build targets. For some kinds of tests (like
|
||||
GTest-based tests), the mapping is 1:1 - if you want to run base_unittests,
|
||||
you need to build base_unittests. For others (like the telemetry and
|
||||
layout tests), you might need to build several executables in order to
|
||||
run the tests, and that mapping might best be captured by a *meta*
|
||||
target (a GN group or a GYP 'none' target like `webkit_tests`) that
|
||||
depends on the right list of files. Because the GN and GYP files know
|
||||
nothing about test steps, we have to have some way of mapping back
|
||||
and forth between test steps and build targets. That mapping
|
||||
is *not* currently available to MB (or GN or GYP), and so we have to
|
||||
return enough information to make it possible for the caller to do the mapping.
|
||||
|
||||
3. We might also want to know when test targets are affected by data files
|
||||
that aren't compiled (python scripts, or the layout tests themselves).
|
||||
There's no good way to do this in GYP, but GN supports this.
|
||||
|
||||
4. We also want to ensure that particular targets still compile even if they
|
||||
are not actually tested; consider testing the installers themselves, or
|
||||
targets that don't yet have good test coverage. We might want to use meta
|
||||
targets for this purpose as well.
|
||||
|
||||
5. However, for some meta targets, we don't necessarily want to rebuild the
|
||||
meta target itself, perhaps just the dependencies of the meta target that
|
||||
are affected by the patch. For example, if you have a meta target like
|
||||
`blink_tests` that might depend on ten different test binaries. If a patch
|
||||
only affects one of them (say `wtf_unittests`), you don't want to
|
||||
build `blink_tests`, because that might actually also build the other nine
|
||||
targets. In other words, some meta targets are *prunable*.
|
||||
|
||||
6. As noted above, in the ideal case we actually have enough resources and
|
||||
things are fast enough that we can afford to build everything affected by a
|
||||
patch, but listing every possible target explicitly would be painful. The
|
||||
GYP and GN Ninja generators provide an 'all' target that captures (nearly,
|
||||
see [crbug.com/503241](https://crbug.com/503241)) everything, but unfortunately
|
||||
neither GN nor GYP actually represents 'all' as a meta target in the build
|
||||
graph, so we will need to write code to handle that specially.
|
||||
|
||||
7. In some cases, we will not be able to correctly analyze the build graph to
|
||||
determine the impact of a patch, and need to bail out (e.g., if you change a
|
||||
build file itself, it may not be easy to tell how that affects the graph).
|
||||
In that case we should simply build and run everything.
|
||||
|
||||
The interaction between 2) and 5) means that we need to treat meta targets
|
||||
two different ways, and so we need to know which targets should be
|
||||
pruned in the sense of 5) and which targets should be returned unchanged
|
||||
so that we can map them back to the appropriate tests.
|
||||
|
||||
So, we need three things as input:
|
||||
|
||||
* `files`: the list of files in the patch
|
||||
* `test_targets`: the list of ninja targets which, if affected by a patch,
|
||||
should be reported back so that we can map them back to the appropriate
|
||||
tests to run. Any meta targets in this list should *not* be pruned.
|
||||
* `additional_compile_targets`: the list of ninja targets we wish to compile
|
||||
*in addition to* the list in `test_targets`. Any meta targets
|
||||
present in this list should be pruned (we don't need to return the
|
||||
meta targets because they aren't mapped back to tests, and we don't want
|
||||
to build them because we might build too much).
|
||||
|
||||
We can then return two lists as output:
|
||||
|
||||
* `compile_targets`, which is a list of pruned targets to be
|
||||
passed to Ninja to build. It is acceptable to replace a list of
|
||||
pruned targets by a meta target if it turns out that all of the
|
||||
dependencies of the target are affected by the patch (i.e.,
|
||||
all ten binaries that blink_tests depends on), but doing so is
|
||||
not required.
|
||||
* `test_targets`, which is a list of unpruned targets to be mapped
|
||||
back to determine which tests to run.
|
||||
|
||||
There may be substantial overlap between the two lists, but there is
|
||||
no guarantee that one is a subset of the other and the two cannot be
|
||||
used interchangeably or merged together without losing information and
|
||||
causing the wrong thing to happen.
|
||||
|
||||
The implementation is responsible for recognizing 'all' as a magic string
|
||||
and mapping it onto the list of all root nodes in the build graph.
|
||||
|
||||
There may be files listed in the input that don't actually exist in the build
|
||||
graph: this could be either the result of an error (the file should be in the
|
||||
build graph, but isn't), or perfectly fine (the file doesn't affect the build
|
||||
graph at all). We can't tell these two apart, so we should ignore missing
|
||||
files.
|
||||
|
||||
There may be targets listed in the input that don't exist in the build
|
||||
graph; unlike missing files, this can only indicate a configuration error,
|
||||
and so we should return which targets are missing so the caller can
|
||||
treat this as an error, if so desired.
|
||||
|
||||
Any of the three inputs may be an empty list:
|
||||
|
||||
* It normally doesn't make sense to call analyze at all if no files
|
||||
were modified, but in rare cases we can hit a race where we try to
|
||||
test a patch after it has already been committed, in which case
|
||||
the list of modified files is empty. We should return 'no dependency'
|
||||
in that case.
|
||||
|
||||
* Passing an empty list for one or the other of test_targets and
|
||||
additional_compile_targets is perfectly sensible: in the former case,
|
||||
it can indicate that you don't want to run any tests, and in the latter,
|
||||
it can indicate that you don't want to build anything else in
|
||||
addition to the test targets.
|
||||
|
||||
* It doesn't make sense to call analyze if you don't want to compile
|
||||
anything at all, so passing [] for both test_targets and
|
||||
additional_compile_targets should probably return an error.
|
||||
|
||||
In the output case, an empty list indicates that there was nothing to
|
||||
build, or that there were no affected test targets as appropriate.
|
||||
|
||||
Note that passing no arguments to Ninja is equivalent to passing
|
||||
`all` to Ninja (at least given how GN and GYP work); however, we
|
||||
don't want to take advantage of this in most cases because we don't
|
||||
actually want to build every out of date target, only the targets
|
||||
potentially affected by the files. One could try to indicate
|
||||
to analyze that we wanted to use no arguments instead of an empty
|
||||
list, but using the existing fields for this seems fragile and/or
|
||||
confusing, and adding a new field for this seems unwarranted at this time.
|
||||
|
||||
There is an "error" field in case something goes wrong (like the
|
||||
empty file list case, above, or an internal error in MB/GYP/GN). The
|
||||
analyze code should also return an error code to the shell if appropriate
|
||||
to indicate that the command failed.
|
||||
|
||||
In the case where build files themselves are modified and analyze may
|
||||
not be able to determine a correct answer (point 7 above, where we return
|
||||
"Found dependency (all)"), we should also return the `test_targets` unmodified
|
||||
and return the union of `test_targets` and `additional_compile_targets` for
|
||||
`compile_targets`, to avoid confusion.
|
||||
|
||||
### Examples
|
||||
|
||||
Continuing the example given above, suppose we have the following build
|
||||
graph:
|
||||
|
||||
* `blink_tests` is a meta target that depends on `webkit_unit_tests`,
|
||||
`wtf_unittests`, and `webkit_tests` and represents all of the targets
|
||||
needed to fully test Blink. Each of those is a separate test step.
|
||||
* `webkit_tests` is also a meta target; it depends on `content_shell`
|
||||
and `image_diff`.
|
||||
* `base_unittests` is a separate test binary.
|
||||
* `wtf_unittests` depends on `Assertions.cpp` and `AssertionsTest.cpp`.
|
||||
* `webkit_unit_tests` depends on `WebNode.cpp` and `WebNodeTest.cpp`.
|
||||
* `content_shell` depends on `WebNode.cpp` and `Assertions.cpp`.
|
||||
* `base_unittests` depends on `logging.cc` and `logging_unittest.cc`.
|
||||
|
||||
#### Example 1
|
||||
|
||||
We wish to run 'wtf_unittests' and 'webkit_tests' on a bot, but not
|
||||
compile any additional targets.
|
||||
|
||||
If a patch touches WebNode.cpp, then analyze gets as input:
|
||||
|
||||
{
|
||||
"files": ["WebNode.cpp"],
|
||||
"test_targets": ["wtf_unittests", "webkit_tests"],
|
||||
"additional_compile_targets": []
|
||||
}
|
||||
|
||||
and should return as output:
|
||||
|
||||
{
|
||||
"status": "Found dependency",
|
||||
"compile_targets": ["webkit_unit_tests"],
|
||||
"test_targets": ["webkit_tests"]
|
||||
}
|
||||
|
||||
Note how `webkit_tests` was pruned in compile_targets but not in test_targets.
|
||||
|
||||
#### Example 2
|
||||
|
||||
Using the same patch as Example 1, assume we wish to run only `wtf_unittests`,
|
||||
but additionally build everything needed to test Blink (`blink_tests`):
|
||||
|
||||
We pass as input:
|
||||
|
||||
{
|
||||
"files": ["WebNode.cpp"],
|
||||
"test_targets": ["wtf_unittests"],
|
||||
"additional_compile_targets": ["blink_tests"]
|
||||
}
|
||||
|
||||
And should get as output:
|
||||
|
||||
{
|
||||
"status": "Found dependency",
|
||||
"compile_targets": ["webkit_unit_tests"],
|
||||
"test_targets": []
|
||||
}
|
||||
|
||||
Here `blink_tests` was pruned in the output compile_targets, and
|
||||
test_targets was empty, since blink_tests was not listed in the input
|
||||
test_targets.
|
||||
|
||||
#### Example 3
|
||||
|
||||
Build everything, but do not run any tests.
|
||||
|
||||
Input:
|
||||
|
||||
{
|
||||
"files": ["WebNode.cpp"],
|
||||
"test_targets": [],
|
||||
"additional_compile_targets": ["all"]
|
||||
}
|
||||
|
||||
Output:
|
||||
|
||||
{
|
||||
"status": "Found dependency",
|
||||
"compile_targets": ["webkit_unit_tests", "content_shell"],
|
||||
"test_targets": []
|
||||
}
|
||||
|
||||
#### Example 4
|
||||
|
||||
Same as Example 2, but a build file was modified instead of a source file.
|
||||
|
||||
Input:
|
||||
|
||||
{
|
||||
"files": ["BUILD.gn"],
|
||||
"test_targets": ["wtf_unittests"],
|
||||
"additional_compile_targets": ["blink_tests"]
|
||||
}
|
||||
|
||||
Output:
|
||||
|
||||
{
|
||||
"status": "Found dependency (all)",
|
||||
"compile_targets": ["webkit_unit_tests", "wtf_unittests"],
|
||||
"test_targets": ["wtf_unittests"]
|
||||
}
|
||||
|
||||
test_targets was returned unchanged, compile_targets was pruned.
|
||||
|
||||
## Random Requirements and Rationale
|
||||
|
||||
This section is collection of semi-organized notes on why MB is the way
|
||||
it is ...
|
||||
|
||||
### in-tree or out-of-tree
|
||||
|
||||
The first issue is whether or not this should exist as a script in
|
||||
Chromium at all; an alternative would be to simply change the bot
|
||||
configurations to know whether to use GYP or GN, and which flags to
|
||||
pass.
|
||||
|
||||
That would certainly work, but experience over the past two years
|
||||
suggests a few things:
|
||||
|
||||
* we should push as much logic as we can into the source repositories
|
||||
so that they can be versioned and changed atomically with changes to
|
||||
the product code; having to coordinate changes between src/ and
|
||||
build/ is at best annoying and can lead to weird errors.
|
||||
* the infra team would really like to move to providing
|
||||
product-independent services (i.e., not have to do one thing for
|
||||
Chromium, another for NaCl, a third for V8, etc.).
|
||||
* we found that during the SVN->GIT migration the ability to flip bot
|
||||
configurations between the two via changes to a file in chromium
|
||||
was very useful.
|
||||
|
||||
All of this suggests that the interface between bots and Chromium should
|
||||
be a simple one, hiding as much of the chromium logic as possible.
|
||||
|
||||
### Why not have MB be smarter about de-duping flags?
|
||||
|
||||
This just adds complexity to the MB implementation, and duplicates logic
|
||||
that GYP and GN already have to support anyway; in particular, it might
|
||||
require MB to know how to parse GYP and GN values. The belief is that
|
||||
if MB does *not* do this, it will lead to fewer surprises.
|
||||
|
||||
It will not be hard to change this if need be.
|
||||
|
||||
### Integration w/ gclient runhooks
|
||||
|
||||
On the bots, we will disable `gyp_chromium` as part of runhooks (using
|
||||
`GYP_CHROMIUM_NO_ACTION=1`), so that mb shows up as a separate step.
|
||||
|
||||
At the moment, we expect most developers to either continue to use
|
||||
`gyp_chromium` in runhooks or to disable it as above if they have no
|
||||
use for GYP at all. We may revisit how this works once we encourage more
|
||||
people to use GN full-time (i.e., we might take `gyp_chromium` out of
|
||||
runhooks altogether).
|
||||
|
||||
### Config per flag set or config per (os/arch/flag set)?
|
||||
|
||||
Currently, mb_config.pyl does not specify the host_os, target_os, host_cpu, or
|
||||
target_cpu values for every config that Chromium runs on, it only specifies
|
||||
them for when the values need to be explicitly set on the command line.
|
||||
|
||||
Instead, we have one config per unique combination of flags only.
|
||||
|
||||
In other words, rather than having `linux_rel_bot`, `win_rel_bot`, and
|
||||
`mac_rel_bot`, we just have `rel_bot`.
|
||||
|
||||
This design allows us to determine easily all of the different sets
|
||||
of flags that we need to support, but *not* which flags are used on which
|
||||
host/target combinations.
|
||||
|
||||
It may be that we should really track the latter. Doing so is just a
|
||||
config file change, however.
|
||||
|
||||
### Non-goals
|
||||
|
||||
* MB is not intended to replace direct invocation of GN or GYP for
|
||||
complicated build scenarios (aka ChromeOS), where multiple flags need
|
||||
to be set to user-defined paths for specific toolchains (e.g., where
|
||||
ChromeOS needs to specify specific board types and compilers).
|
||||
|
||||
* MB is not intended at this time to be something developers use frequently,
|
||||
or to add a lot of features to. We hope to be able to get rid of it once
|
||||
the GYP->GN migration is done, and so we should not add things for
|
||||
developers that can't easily be added to GN itself.
|
||||
|
||||
* MB is not intended to replace the
|
||||
[CR tool](https://code.google.com/p/chromium/wiki/CRUserManual). Not
|
||||
only is it only intended to replace the gyp\_chromium part of `'gclient
|
||||
runhooks'`, it is not really meant as a developer-facing tool.
|
297
tools/mb/docs/user_guide.md
Normal file
297
tools/mb/docs/user_guide.md
Normal file
@ -0,0 +1,297 @@
|
||||
# The MB (Meta-Build wrapper) user guide
|
||||
|
||||
[TOC]
|
||||
|
||||
## Introduction
|
||||
|
||||
`mb` is a simple python wrapper around the GYP and GN meta-build tools to
|
||||
be used as part of the GYP->GN migration.
|
||||
|
||||
It is intended to be used by bots to make it easier to manage the configuration
|
||||
each bot builds (i.e., the configurations can be changed from chromium
|
||||
commits), and to consolidate the list of all of the various configurations
|
||||
that Chromium is built in.
|
||||
|
||||
Ideally this tool will no longer be needed after the migration is complete.
|
||||
|
||||
For more discussion of MB, see also [the design spec](design_spec.md).
|
||||
|
||||
## MB subcommands
|
||||
|
||||
### `mb analyze`
|
||||
|
||||
`mb analyze` is responsible for determining what targets are affected by
|
||||
a list of files (e.g., the list of files in a patch on a trybot):
|
||||
|
||||
```
|
||||
mb analyze -c chromium_linux_rel //out/Release input.json output.json
|
||||
```
|
||||
|
||||
Either the `-c/--config` flag or the `-m/--master` and `-b/--builder` flags
|
||||
must be specified so that `mb` can figure out which config to use.
|
||||
|
||||
The first positional argument must be a GN-style "source-absolute" path
|
||||
to the build directory.
|
||||
|
||||
The second positional argument is a (normal) path to a JSON file containing
|
||||
a single object with the following fields:
|
||||
|
||||
* `files`: an array of the modified filenames to check (as paths relative to
|
||||
the checkout root).
|
||||
* `test_targets`: an array of (ninja) build targets that are needed to run the
|
||||
tests we wish to run. An empty array will be treated as if there are
|
||||
no tests that will be run.
|
||||
* `additional_compile_targets`: an array of (ninja) build targets that
|
||||
reflect the stuff we might want to build *in addition to* the list
|
||||
passed in `test_targets`. Targets in this list will be treated
|
||||
specially, in the following way: if a given target is a "meta"
|
||||
(GN: group, GYP: none) target like 'blink_tests' or
|
||||
'chromium_builder_tests', or even the ninja-specific 'all' target,
|
||||
then only the *dependencies* of the target that are affected by
|
||||
the modified files will be rebuilt (not the target itself, which
|
||||
might also cause unaffected dependencies to be rebuilt). An empty
|
||||
list will be treated as if there are no additional targets to build.
|
||||
Empty lists for both `test_targets` and `additional_compile_targets`
|
||||
would cause no work to be done, so will result in an error.
|
||||
* `targets`: a legacy field that resembled a union of `compile_targets`
|
||||
and `test_targets`. Support for this field will be removed once the
|
||||
bots have been updated to use compile_targets and test_targets instead.
|
||||
|
||||
The third positional argument is a (normal) path to where mb will write
|
||||
the result, also as a JSON object. This object may contain the following
|
||||
fields:
|
||||
|
||||
* `error`: this should only be present if something failed.
|
||||
* `compile_targets`: the list of ninja targets that should be passed
|
||||
directly to the corresponding ninja / compile.py invocation. This
|
||||
list may contain entries that are *not* listed in the input (see
|
||||
the description of `additional_compile_targets` above and
|
||||
[the design spec](design_spec.md) for how this works).
|
||||
* `invalid_targets`: a list of any targets that were passed in
|
||||
either of the input lists that weren't actually found in the graph.
|
||||
* `test_targets`: the subset of the input `test_targets` that are
|
||||
potentially out of date, indicating that the matching test steps
|
||||
should be re-run.
|
||||
* `targets`: a legacy field that indicates the subset of the input `targets`
|
||||
that depend on the input `files`.
|
||||
* `build_targets`: a legacy field that indicates the minimal subset of
|
||||
targets needed to build all of `targets` that were affected.
|
||||
* `status`: a field containing one of three strings:
|
||||
|
||||
* `"Found dependency"` (build the `compile_targets`)
|
||||
* `"No dependency"` (i.e., no build needed)
|
||||
* `"Found dependency (all)"` (`test_targets` is returned as-is;
|
||||
`compile_targets` should contain the union of `test_targets` and
|
||||
`additional_compile_targets`. In this case the targets do not
|
||||
need to be pruned).
|
||||
|
||||
See [the design spec](design_spec.md) for more details and examples; the
|
||||
differences can be subtle. We won't even go into how the `targets` and
|
||||
`build_targets` differ from each other or from `compile_targets` and
|
||||
`test_targets`.
|
||||
|
||||
The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--master`,
|
||||
`-q/--quiet`, and `-v/--verbose` flags work as documented for `mb gen`.
|
||||
|
||||
### `mb audit`
|
||||
|
||||
`mb audit` is used to track the progress of the GYP->GN migration. You can
|
||||
use it to check a single master, or all the masters we care about. See
|
||||
`mb help audit` for more details (most people are not expected to care about
|
||||
this).
|
||||
|
||||
### `mb gen`
|
||||
|
||||
`mb gen` is responsible for generating the Ninja files by invoking either GYP
|
||||
or GN as appropriate. It takes arguments to specify a build config and
|
||||
a directory, then runs GYP or GN as appropriate:
|
||||
|
||||
```
|
||||
% mb gen -m tryserver.chromium.linux -b linux_rel //out/Release
|
||||
% mb gen -c linux_rel_trybot //out/Release
|
||||
```
|
||||
|
||||
Either the `-c/--config` flag or the `-m/--master` and `-b/--builder` flags
|
||||
must be specified so that `mb` can figure out which config to use. The
|
||||
`--phase` flag must also be used with builders that have multiple
|
||||
build/compile steps (and only with those builders).
|
||||
|
||||
By default, MB will look for a bot config file under `//ios/build/bots` (see
|
||||
[design_spec.md](the design spec) for details of how the bot config files
|
||||
work). If no matching one is found, will then look in
|
||||
`//tools/mb/mb_config.pyl` to look up the config information, but you can
|
||||
specify a custom config file using the `-f/--config-file` flag.
|
||||
|
||||
The path must be a GN-style "source-absolute" path (as above).
|
||||
|
||||
You can pass the `-n/--dryrun` flag to mb gen to see what will happen without
|
||||
actually writing anything.
|
||||
|
||||
You can pass the `-q/--quiet` flag to get mb to be silent unless there is an
|
||||
error, and pass the `-v/--verbose` flag to get mb to log all of the files
|
||||
that are read and written, and all the commands that are run.
|
||||
|
||||
If the build config will use the Goma distributed-build system, you can pass
|
||||
the path to your Goma client in the `-g/--goma-dir` flag, and it will be
|
||||
incorporated into the appropriate flags for GYP or GN as needed.
|
||||
|
||||
If gen ends up using GYP, the path must have a valid GYP configuration as the
|
||||
last component of the path (i.e., specify `//out/Release_x64`, not `//out`).
|
||||
The gyp script defaults to `//build/gyp_chromium`, but can be overridden with
|
||||
the `--gyp-script` flag, e.g. `--gyp-script=gypfiles/gyp_v8`.
|
||||
|
||||
### `mb help`
|
||||
|
||||
Produces help output on the other subcommands
|
||||
|
||||
### `mb lookup`
|
||||
|
||||
Prints what command will be run by `mb gen` (like `mb gen -n` but does
|
||||
not require you to specify a path).
|
||||
|
||||
The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--master`,
|
||||
`--phase`, `-q/--quiet`, and `-v/--verbose` flags work as documented for
|
||||
`mb gen`.
|
||||
|
||||
### `mb validate`
|
||||
|
||||
Does internal checking to make sure the config file is syntactically
|
||||
valid and that all of the entries are used properly. It does not validate
|
||||
that the flags make sense, or that the builder names are legal or
|
||||
comprehensive, but it does complain about configs and mixins that aren't
|
||||
used.
|
||||
|
||||
The `-f/--config-file` and `-q/--quiet` flags work as documented for
|
||||
`mb gen`.
|
||||
|
||||
This is mostly useful as a presubmit check and for verifying changes to
|
||||
the config file.
|
||||
|
||||
## Isolates and Swarming
|
||||
|
||||
`mb gen` is also responsible for generating the `.isolate` and
|
||||
`.isolated.gen.json` files needed to run test executables through swarming
|
||||
in a GN build (in a GYP build, this is done as part of the compile step).
|
||||
|
||||
If you wish to generate the isolate files, pass `mb gen` the
|
||||
`--swarming-targets-file` command line argument; that arg should be a path
|
||||
to a file containing a list of ninja build targets to compute the runtime
|
||||
dependencies for (on Windows, use the ninja target name, not the file, so
|
||||
`base_unittests`, not `base_unittests.exe`).
|
||||
|
||||
MB will take this file, translate each build target to the matching GN
|
||||
label (e.g., `base_unittests` -> `//base:base_unittests`, write that list
|
||||
to a file called `runtime_deps` in the build directory, and pass that to
|
||||
`gn gen $BUILD ... --runtime-deps-list-file=$BUILD/runtime_deps`.
|
||||
|
||||
Once GN has computed the lists of runtime dependencies, MB will then
|
||||
look up the command line for each target (currently this is hard-coded
|
||||
in [mb.py](https://code.google.com/p/chromium/codesearch?q=mb.py#chromium/src/tools/mb/mb.py&q=mb.py%20GetIsolateCommand&sq=package:chromium&type=cs)), and write out the
|
||||
matching `.isolate` and `.isolated.gen.json` files.
|
||||
|
||||
## The `mb_config.pyl` config file
|
||||
|
||||
The `mb_config.pyl` config file is intended to enumerate all of the
|
||||
supported build configurations for Chromium. Generally speaking, you
|
||||
should never need to (or want to) build a configuration that isn't
|
||||
listed here, and so by using the configs in this file you can avoid
|
||||
having to juggle long lists of GYP_DEFINES and gn args by hand.
|
||||
|
||||
`mb_config.pyl` is structured as a file containing a single PYthon Literal
|
||||
expression: a dictionary with three main keys, `masters`, `configs` and
|
||||
`mixins`.
|
||||
|
||||
The `masters` key contains a nested series of dicts containing mappings
|
||||
of master -> builder -> config . This allows us to isolate the buildbot
|
||||
recipes from the actual details of the configs. The config should either
|
||||
be a single string value representing a key in the `configs` dictionary,
|
||||
or a list of strings, each of which is a key in the `configs` dictionary;
|
||||
the latter case is for builders that do multiple compiles with different
|
||||
arguments in a single build, and must *only* be used for such builders
|
||||
(where a --phase argument must be supplied in each lookup or gen call).
|
||||
|
||||
The `configs` key points to a dictionary of named build configurations.
|
||||
|
||||
There should be an key in this dict for every supported configuration
|
||||
of Chromium, meaning every configuration we have a bot for, and every
|
||||
configuration commonly used by develpers but that we may not have a bot
|
||||
for.
|
||||
|
||||
The value of each key is a list of "mixins" that will define what that
|
||||
build_config does. Each item in the list must be an entry in the dictionary
|
||||
value of the `mixins` key.
|
||||
|
||||
Each mixin value is itself a dictionary that contains one or more of the
|
||||
following keys:
|
||||
|
||||
* `gyp_crosscompile`: a boolean; if true, GYP_CROSSCOMPILE=1 is set in
|
||||
the environment and passed to GYP.
|
||||
* `gyp_defines`: a string containing a list of GYP_DEFINES.
|
||||
* `gn_args`: a string containing a list of values passed to gn --args.
|
||||
* `mixins`: a list of other mixins that should be included.
|
||||
* `type`: a string with either the value `gyp` or `gn`;
|
||||
setting this indicates which meta-build tool to use.
|
||||
|
||||
When `mb gen` or `mb analyze` executes, it takes a config name, looks it
|
||||
up in the 'configs' dict, and then does a left-to-right expansion of the
|
||||
mixins; gyp_defines and gn_args values are concatenated, and the type values
|
||||
override each other.
|
||||
|
||||
For example, if you had:
|
||||
|
||||
```
|
||||
{
|
||||
'configs`: {
|
||||
'linux_release_trybot': ['gyp_release', 'trybot'],
|
||||
'gn_shared_debug': None,
|
||||
}
|
||||
'mixins': {
|
||||
'bot': {
|
||||
'gyp_defines': 'use_goma=1 dcheck_always_on=0',
|
||||
'gn_args': 'use_goma=true dcheck_always_on=false',
|
||||
},
|
||||
'debug': {
|
||||
'gn_args': 'is_debug=true',
|
||||
},
|
||||
'gn': {'type': 'gn'},
|
||||
'gyp_release': {
|
||||
'mixins': ['release'],
|
||||
'type': 'gyp',
|
||||
},
|
||||
'release': {
|
||||
'gn_args': 'is_debug=false',
|
||||
}
|
||||
'shared': {
|
||||
'gn_args': 'is_component_build=true',
|
||||
'gyp_defines': 'component=shared_library',
|
||||
},
|
||||
'trybot': {
|
||||
'gyp_defines': 'dcheck_always_on=1',
|
||||
'gn_args': 'dcheck_always_on=true',
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
and you ran `mb gen -c linux_release_trybot //out/Release`, it would
|
||||
translate into a call to `gyp_chromium -G Release` with `GYP_DEFINES` set to
|
||||
`"use_goma=true dcheck_always_on=false dcheck_always_on=true"`.
|
||||
|
||||
(From that you can see that mb is intentionally dumb and does not
|
||||
attempt to de-dup the flags, it lets gyp do that).
|
||||
|
||||
## Debugging MB
|
||||
|
||||
By design, MB should be simple enough that very little can go wrong.
|
||||
|
||||
The most obvious issue is that you might see different commands being
|
||||
run than you expect; running `'mb -v'` will print what it's doing and
|
||||
run the commands; `'mb -n'` will print what it will do but *not* run
|
||||
the commands.
|
||||
|
||||
If you hit weirder things than that, add some print statements to the
|
||||
python script, send a question to gn-dev@chromium.org, or
|
||||
[file a bug](https://crbug.com/new) with the label
|
||||
'mb' and cc: dpranke@chromium.org.
|
||||
|
||||
|
8
tools/mb/mb
Executable file
8
tools/mb/mb
Executable file
@ -0,0 +1,8 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright 2015 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
base_dir=$(dirname "$0")
|
||||
|
||||
PYTHONDONTWRITEBYTECODE=1 exec python "$base_dir/mb.py" "$@"
|
6
tools/mb/mb.bat
Executable file
6
tools/mb/mb.bat
Executable file
@ -0,0 +1,6 @@
|
||||
@echo off
|
||||
setlocal
|
||||
:: This is required with cygwin only.
|
||||
PATH=%~dp0;%PATH%
|
||||
set PYTHONDONTWRITEBYTECODE=1
|
||||
call python "%~dp0mb.py" %*
|
1500
tools/mb/mb.py
Executable file
1500
tools/mb/mb.py
Executable file
File diff suppressed because it is too large
Load Diff
572
tools/mb/mb_unittest.py
Executable file
572
tools/mb/mb_unittest.py
Executable file
@ -0,0 +1,572 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright 2016 the V8 project authors. All rights reserved.
|
||||
# Copyright 2015 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Tests for mb.py."""
|
||||
|
||||
import json
|
||||
import StringIO
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
|
||||
import mb
|
||||
|
||||
|
||||
class FakeMBW(mb.MetaBuildWrapper):
|
||||
def __init__(self, win32=False):
|
||||
super(FakeMBW, self).__init__()
|
||||
|
||||
# Override vars for test portability.
|
||||
if win32:
|
||||
self.chromium_src_dir = 'c:\\fake_src'
|
||||
self.default_config = 'c:\\fake_src\\tools\\mb\\mb_config.pyl'
|
||||
self.platform = 'win32'
|
||||
self.executable = 'c:\\python\\python.exe'
|
||||
self.sep = '\\'
|
||||
else:
|
||||
self.chromium_src_dir = '/fake_src'
|
||||
self.default_config = '/fake_src/tools/mb/mb_config.pyl'
|
||||
self.executable = '/usr/bin/python'
|
||||
self.platform = 'linux2'
|
||||
self.sep = '/'
|
||||
|
||||
self.files = {}
|
||||
self.calls = []
|
||||
self.cmds = []
|
||||
self.cross_compile = None
|
||||
self.out = ''
|
||||
self.err = ''
|
||||
self.rmdirs = []
|
||||
|
||||
def ExpandUser(self, path):
|
||||
return '$HOME/%s' % path
|
||||
|
||||
def Exists(self, path):
|
||||
return self.files.get(path) is not None
|
||||
|
||||
def MaybeMakeDirectory(self, path):
|
||||
self.files[path] = True
|
||||
|
||||
def PathJoin(self, *comps):
|
||||
return self.sep.join(comps)
|
||||
|
||||
def ReadFile(self, path):
|
||||
return self.files[path]
|
||||
|
||||
def WriteFile(self, path, contents, force_verbose=False):
|
||||
if self.args.dryrun or self.args.verbose or force_verbose:
|
||||
self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
|
||||
self.files[path] = contents
|
||||
|
||||
def Call(self, cmd, env=None, buffer_output=True):
|
||||
if env:
|
||||
self.cross_compile = env.get('GYP_CROSSCOMPILE')
|
||||
self.calls.append(cmd)
|
||||
if self.cmds:
|
||||
return self.cmds.pop(0)
|
||||
return 0, '', ''
|
||||
|
||||
def Print(self, *args, **kwargs):
|
||||
sep = kwargs.get('sep', ' ')
|
||||
end = kwargs.get('end', '\n')
|
||||
f = kwargs.get('file', sys.stdout)
|
||||
if f == sys.stderr:
|
||||
self.err += sep.join(args) + end
|
||||
else:
|
||||
self.out += sep.join(args) + end
|
||||
|
||||
def TempFile(self, mode='w'):
|
||||
return FakeFile(self.files)
|
||||
|
||||
def RemoveFile(self, path):
|
||||
del self.files[path]
|
||||
|
||||
def RemoveDirectory(self, path):
|
||||
self.rmdirs.append(path)
|
||||
files_to_delete = [f for f in self.files if f.startswith(path)]
|
||||
for f in files_to_delete:
|
||||
self.files[f] = None
|
||||
|
||||
|
||||
class FakeFile(object):
|
||||
def __init__(self, files):
|
||||
self.name = '/tmp/file'
|
||||
self.buf = ''
|
||||
self.files = files
|
||||
|
||||
def write(self, contents):
|
||||
self.buf += contents
|
||||
|
||||
def close(self):
|
||||
self.files[self.name] = self.buf
|
||||
|
||||
|
||||
TEST_CONFIG = """\
|
||||
{
|
||||
'masters': {
|
||||
'chromium': {},
|
||||
'fake_master': {
|
||||
'fake_builder': 'gyp_rel_bot',
|
||||
'fake_gn_builder': 'gn_rel_bot',
|
||||
'fake_gyp_crosscompile_builder': 'gyp_crosscompile',
|
||||
'fake_gn_debug_builder': 'gn_debug_goma',
|
||||
'fake_gyp_builder': 'gyp_debug',
|
||||
'fake_gn_args_bot': '//build/args/bots/fake_master/fake_gn_args_bot.gn',
|
||||
'fake_multi_phase': ['gn_phase_1', 'gn_phase_2'],
|
||||
},
|
||||
},
|
||||
'configs': {
|
||||
'gyp_rel_bot': ['gyp', 'rel', 'goma'],
|
||||
'gn_debug_goma': ['gn', 'debug', 'goma'],
|
||||
'gyp_debug': ['gyp', 'debug', 'fake_feature1'],
|
||||
'gn_rel_bot': ['gn', 'rel', 'goma'],
|
||||
'gyp_crosscompile': ['gyp', 'crosscompile'],
|
||||
'gn_phase_1': ['gn', 'phase_1'],
|
||||
'gn_phase_2': ['gn', 'phase_2'],
|
||||
},
|
||||
'mixins': {
|
||||
'crosscompile': {
|
||||
'gyp_crosscompile': True,
|
||||
},
|
||||
'fake_feature1': {
|
||||
'gn_args': 'enable_doom_melon=true',
|
||||
'gyp_defines': 'doom_melon=1',
|
||||
},
|
||||
'gyp': {'type': 'gyp'},
|
||||
'gn': {'type': 'gn'},
|
||||
'goma': {
|
||||
'gn_args': 'use_goma=true',
|
||||
'gyp_defines': 'goma=1',
|
||||
},
|
||||
'phase_1': {
|
||||
'gn_args': 'phase=1',
|
||||
'gyp_args': 'phase=1',
|
||||
},
|
||||
'phase_2': {
|
||||
'gn_args': 'phase=2',
|
||||
'gyp_args': 'phase=2',
|
||||
},
|
||||
'rel': {
|
||||
'gn_args': 'is_debug=false',
|
||||
},
|
||||
'debug': {
|
||||
'gn_args': 'is_debug=true',
|
||||
},
|
||||
},
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
TEST_BAD_CONFIG = """\
|
||||
{
|
||||
'configs': {
|
||||
'gn_rel_bot_1': ['gn', 'rel', 'chrome_with_codecs'],
|
||||
'gn_rel_bot_2': ['gn', 'rel', 'bad_nested_config'],
|
||||
},
|
||||
'masters': {
|
||||
'chromium': {
|
||||
'a': 'gn_rel_bot_1',
|
||||
'b': 'gn_rel_bot_2',
|
||||
},
|
||||
},
|
||||
'mixins': {
|
||||
'gn': {'type': 'gn'},
|
||||
'chrome_with_codecs': {
|
||||
'gn_args': 'proprietary_codecs=true',
|
||||
},
|
||||
'bad_nested_config': {
|
||||
'mixins': ['chrome_with_codecs'],
|
||||
},
|
||||
'rel': {
|
||||
'gn_args': 'is_debug=false',
|
||||
},
|
||||
},
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
GYP_HACKS_CONFIG = """\
|
||||
{
|
||||
'masters': {
|
||||
'chromium': {},
|
||||
'fake_master': {
|
||||
'fake_builder': 'fake_config',
|
||||
},
|
||||
},
|
||||
'configs': {
|
||||
'fake_config': ['fake_mixin'],
|
||||
},
|
||||
'mixins': {
|
||||
'fake_mixin': {
|
||||
'type': 'gyp',
|
||||
'gn_args': '',
|
||||
'gyp_defines':
|
||||
('foo=bar llvm_force_head_revision=1 '
|
||||
'gyp_link_concurrency=1 baz=1'),
|
||||
},
|
||||
},
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
class UnitTest(unittest.TestCase):
|
||||
def fake_mbw(self, files=None, win32=False):
|
||||
mbw = FakeMBW(win32=win32)
|
||||
mbw.files.setdefault(mbw.default_config, TEST_CONFIG)
|
||||
mbw.files.setdefault(
|
||||
mbw.ToAbsPath('//build/args/bots/fake_master/fake_gn_args_bot.gn'),
|
||||
'is_debug = false\n')
|
||||
if files:
|
||||
for path, contents in files.items():
|
||||
mbw.files[path] = contents
|
||||
return mbw
|
||||
|
||||
def check(self, args, mbw=None, files=None, out=None, err=None, ret=None):
|
||||
if not mbw:
|
||||
mbw = self.fake_mbw(files)
|
||||
|
||||
actual_ret = mbw.Main(args)
|
||||
|
||||
self.assertEqual(actual_ret, ret)
|
||||
if out is not None:
|
||||
self.assertEqual(mbw.out, out)
|
||||
if err is not None:
|
||||
self.assertEqual(mbw.err, err)
|
||||
return mbw
|
||||
|
||||
def test_clobber(self):
|
||||
files = {
|
||||
'/fake_src/out/Debug': None,
|
||||
'/fake_src/out/Debug/mb_type': None,
|
||||
}
|
||||
mbw = self.fake_mbw(files)
|
||||
|
||||
# The first time we run this, the build dir doesn't exist, so no clobber.
|
||||
self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0)
|
||||
self.assertEqual(mbw.rmdirs, [])
|
||||
self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
|
||||
|
||||
# The second time we run this, the build dir exists and matches, so no
|
||||
# clobber.
|
||||
self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0)
|
||||
self.assertEqual(mbw.rmdirs, [])
|
||||
self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
|
||||
|
||||
# Now we switch build types; this should result in a clobber.
|
||||
self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
|
||||
self.assertEqual(mbw.rmdirs, ['/fake_src/out/Debug'])
|
||||
self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
|
||||
|
||||
# Now we delete mb_type; this checks the case where the build dir
|
||||
# exists but wasn't populated by mb; this should also result in a clobber.
|
||||
del mbw.files['/fake_src/out/Debug/mb_type']
|
||||
self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
|
||||
self.assertEqual(mbw.rmdirs,
|
||||
['/fake_src/out/Debug', '/fake_src/out/Debug'])
|
||||
self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
|
||||
|
||||
def test_gn_analyze(self):
|
||||
files = {'/tmp/in.json': """{\
|
||||
"files": ["foo/foo_unittest.cc"],
|
||||
"test_targets": ["foo_unittests", "bar_unittests"],
|
||||
"additional_compile_targets": []
|
||||
}"""}
|
||||
|
||||
mbw = self.fake_mbw(files)
|
||||
mbw.Call = lambda cmd, env=None, buffer_output=True: (
|
||||
0, 'out/Default/foo_unittests\n', '')
|
||||
|
||||
self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
|
||||
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
|
||||
out = json.loads(mbw.files['/tmp/out.json'])
|
||||
self.assertEqual(out, {
|
||||
'status': 'Found dependency',
|
||||
'compile_targets': ['foo_unittests'],
|
||||
'test_targets': ['foo_unittests']
|
||||
})
|
||||
|
||||
def test_gn_analyze_fails(self):
|
||||
files = {'/tmp/in.json': """{\
|
||||
"files": ["foo/foo_unittest.cc"],
|
||||
"test_targets": ["foo_unittests", "bar_unittests"],
|
||||
"additional_compile_targets": []
|
||||
}"""}
|
||||
|
||||
mbw = self.fake_mbw(files)
|
||||
mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
|
||||
|
||||
self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
|
||||
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=1)
|
||||
|
||||
def test_gn_analyze_all(self):
|
||||
files = {'/tmp/in.json': """{\
|
||||
"files": ["foo/foo_unittest.cc"],
|
||||
"test_targets": ["bar_unittests"],
|
||||
"additional_compile_targets": ["all"]
|
||||
}"""}
|
||||
mbw = self.fake_mbw(files)
|
||||
mbw.Call = lambda cmd, env=None, buffer_output=True: (
|
||||
0, 'out/Default/foo_unittests\n', '')
|
||||
self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
|
||||
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
|
||||
out = json.loads(mbw.files['/tmp/out.json'])
|
||||
self.assertEqual(out, {
|
||||
'status': 'Found dependency (all)',
|
||||
'compile_targets': ['all', 'bar_unittests'],
|
||||
'test_targets': ['bar_unittests'],
|
||||
})
|
||||
|
||||
def test_gn_analyze_missing_file(self):
|
||||
files = {'/tmp/in.json': """{\
|
||||
"files": ["foo/foo_unittest.cc"],
|
||||
"test_targets": ["bar_unittests"],
|
||||
"additional_compile_targets": []
|
||||
}"""}
|
||||
mbw = self.fake_mbw(files)
|
||||
mbw.cmds = [
|
||||
(0, '', ''),
|
||||
(1, 'The input matches no targets, configs, or files\n', ''),
|
||||
(1, 'The input matches no targets, configs, or files\n', ''),
|
||||
]
|
||||
|
||||
self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
|
||||
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
|
||||
out = json.loads(mbw.files['/tmp/out.json'])
|
||||
self.assertEqual(out, {
|
||||
'status': 'No dependency',
|
||||
'compile_targets': [],
|
||||
'test_targets': [],
|
||||
})
|
||||
|
||||
def test_gn_gen(self):
|
||||
mbw = self.fake_mbw()
|
||||
self.check(['gen', '-c', 'gn_debug_goma', '//out/Default', '-g', '/goma'],
|
||||
mbw=mbw, ret=0)
|
||||
self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'],
|
||||
('goma_dir = "/goma"\n'
|
||||
'is_debug = true\n'
|
||||
'use_goma = true\n'))
|
||||
|
||||
# Make sure we log both what is written to args.gn and the command line.
|
||||
self.assertIn('Writing """', mbw.out)
|
||||
self.assertIn('/fake_src/buildtools/linux64/gn gen //out/Default --check',
|
||||
mbw.out)
|
||||
|
||||
mbw = self.fake_mbw(win32=True)
|
||||
self.check(['gen', '-c', 'gn_debug_goma', '-g', 'c:\\goma', '//out/Debug'],
|
||||
mbw=mbw, ret=0)
|
||||
self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'],
|
||||
('goma_dir = "c:\\\\goma"\n'
|
||||
'is_debug = true\n'
|
||||
'use_goma = true\n'))
|
||||
self.assertIn('c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug '
|
||||
'--check\n', mbw.out)
|
||||
|
||||
mbw = self.fake_mbw()
|
||||
self.check(['gen', '-m', 'fake_master', '-b', 'fake_gn_args_bot',
|
||||
'//out/Debug'],
|
||||
mbw=mbw, ret=0)
|
||||
self.assertEqual(
|
||||
mbw.files['/fake_src/out/Debug/args.gn'],
|
||||
'import("//build/args/bots/fake_master/fake_gn_args_bot.gn")\n')
|
||||
|
||||
|
||||
def test_gn_gen_fails(self):
|
||||
mbw = self.fake_mbw()
|
||||
mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
|
||||
self.check(['gen', '-c', 'gn_debug_goma', '//out/Default'], mbw=mbw, ret=1)
|
||||
|
||||
def test_gn_gen_swarming(self):
|
||||
files = {
|
||||
'/tmp/swarming_targets': 'base_unittests\n',
|
||||
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
|
||||
"{'base_unittests': {"
|
||||
" 'label': '//base:base_unittests',"
|
||||
" 'type': 'raw',"
|
||||
" 'args': [],"
|
||||
"}}\n"
|
||||
),
|
||||
'/fake_src/out/Default/base_unittests.runtime_deps': (
|
||||
"base_unittests\n"
|
||||
),
|
||||
}
|
||||
mbw = self.fake_mbw(files)
|
||||
self.check(['gen',
|
||||
'-c', 'gn_debug_goma',
|
||||
'--swarming-targets-file', '/tmp/swarming_targets',
|
||||
'//out/Default'], mbw=mbw, ret=0)
|
||||
self.assertIn('/fake_src/out/Default/base_unittests.isolate',
|
||||
mbw.files)
|
||||
self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json',
|
||||
mbw.files)
|
||||
|
||||
def test_gn_isolate(self):
|
||||
files = {
|
||||
'/fake_src/out/Default/toolchain.ninja': "",
|
||||
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
|
||||
"{'base_unittests': {"
|
||||
" 'label': '//base:base_unittests',"
|
||||
" 'type': 'raw',"
|
||||
" 'args': [],"
|
||||
"}}\n"
|
||||
),
|
||||
'/fake_src/out/Default/base_unittests.runtime_deps': (
|
||||
"base_unittests\n"
|
||||
),
|
||||
}
|
||||
self.check(['isolate', '-c', 'gn_debug_goma', '//out/Default',
|
||||
'base_unittests'], files=files, ret=0)
|
||||
|
||||
# test running isolate on an existing build_dir
|
||||
files['/fake_src/out/Default/args.gn'] = 'is_debug = True\n'
|
||||
self.check(['isolate', '//out/Default', 'base_unittests'],
|
||||
files=files, ret=0)
|
||||
|
||||
files['/fake_src/out/Default/mb_type'] = 'gn\n'
|
||||
self.check(['isolate', '//out/Default', 'base_unittests'],
|
||||
files=files, ret=0)
|
||||
|
||||
def test_gn_run(self):
|
||||
files = {
|
||||
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
|
||||
"{'base_unittests': {"
|
||||
" 'label': '//base:base_unittests',"
|
||||
" 'type': 'raw',"
|
||||
" 'args': [],"
|
||||
"}}\n"
|
||||
),
|
||||
'/fake_src/out/Default/base_unittests.runtime_deps': (
|
||||
"base_unittests\n"
|
||||
),
|
||||
}
|
||||
self.check(['run', '-c', 'gn_debug_goma', '//out/Default',
|
||||
'base_unittests'], files=files, ret=0)
|
||||
|
||||
def test_gn_lookup(self):
|
||||
self.check(['lookup', '-c', 'gn_debug_goma'], ret=0)
|
||||
|
||||
def test_gn_lookup_goma_dir_expansion(self):
|
||||
self.check(['lookup', '-c', 'gn_rel_bot', '-g', '/foo'], ret=0,
|
||||
out=('\n'
|
||||
'Writing """\\\n'
|
||||
'goma_dir = "/foo"\n'
|
||||
'is_debug = false\n'
|
||||
'use_goma = true\n'
|
||||
'""" to _path_/args.gn.\n\n'
|
||||
'/fake_src/buildtools/linux64/gn gen _path_\n'))
|
||||
|
||||
def test_gyp_analyze(self):
|
||||
mbw = self.check(['analyze', '-c', 'gyp_rel_bot', '//out/Release',
|
||||
'/tmp/in.json', '/tmp/out.json'], ret=0)
|
||||
self.assertIn('analyzer', mbw.calls[0])
|
||||
|
||||
def test_gyp_crosscompile(self):
|
||||
mbw = self.fake_mbw()
|
||||
self.check(['gen', '-c', 'gyp_crosscompile', '//out/Release'],
|
||||
mbw=mbw, ret=0)
|
||||
self.assertTrue(mbw.cross_compile)
|
||||
|
||||
def test_gyp_gen(self):
|
||||
self.check(['gen', '-c', 'gyp_rel_bot', '-g', '/goma', '//out/Release'],
|
||||
ret=0,
|
||||
out=("GYP_DEFINES='goma=1 gomadir=/goma'\n"
|
||||
"python build/gyp_chromium -G output_dir=out\n"))
|
||||
|
||||
mbw = self.fake_mbw(win32=True)
|
||||
self.check(['gen', '-c', 'gyp_rel_bot', '-g', 'c:\\goma', '//out/Release'],
|
||||
mbw=mbw, ret=0,
|
||||
out=("set GYP_DEFINES=goma=1 gomadir='c:\\goma'\n"
|
||||
"python build\\gyp_chromium -G output_dir=out\n"))
|
||||
|
||||
def test_gyp_gen_fails(self):
|
||||
mbw = self.fake_mbw()
|
||||
mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
|
||||
self.check(['gen', '-c', 'gyp_rel_bot', '//out/Release'], mbw=mbw, ret=1)
|
||||
|
||||
def test_gyp_lookup_goma_dir_expansion(self):
|
||||
self.check(['lookup', '-c', 'gyp_rel_bot', '-g', '/foo'], ret=0,
|
||||
out=("GYP_DEFINES='goma=1 gomadir=/foo'\n"
|
||||
"python build/gyp_chromium -G output_dir=_path_\n"))
|
||||
|
||||
def test_help(self):
|
||||
orig_stdout = sys.stdout
|
||||
try:
|
||||
sys.stdout = StringIO.StringIO()
|
||||
self.assertRaises(SystemExit, self.check, ['-h'])
|
||||
self.assertRaises(SystemExit, self.check, ['help'])
|
||||
self.assertRaises(SystemExit, self.check, ['help', 'gen'])
|
||||
finally:
|
||||
sys.stdout = orig_stdout
|
||||
|
||||
def test_multiple_phases(self):
|
||||
# Check that not passing a --phase to a multi-phase builder fails.
|
||||
mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase'],
|
||||
ret=1)
|
||||
self.assertIn('Must specify a build --phase', mbw.out)
|
||||
|
||||
# Check that passing a --phase to a single-phase builder fails.
|
||||
mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_gn_builder',
|
||||
'--phase', '1'],
|
||||
ret=1)
|
||||
self.assertIn('Must not specify a build --phase', mbw.out)
|
||||
|
||||
# Check different ranges; 0 and 3 are out of bounds, 1 and 2 should work.
|
||||
mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
|
||||
'--phase', '0'], ret=1)
|
||||
self.assertIn('Phase 0 out of bounds', mbw.out)
|
||||
|
||||
mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
|
||||
'--phase', '1'], ret=0)
|
||||
self.assertIn('phase = 1', mbw.out)
|
||||
|
||||
mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
|
||||
'--phase', '2'], ret=0)
|
||||
self.assertIn('phase = 2', mbw.out)
|
||||
|
||||
mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
|
||||
'--phase', '3'], ret=1)
|
||||
self.assertIn('Phase 3 out of bounds', mbw.out)
|
||||
|
||||
def test_validate(self):
|
||||
mbw = self.fake_mbw()
|
||||
self.check(['validate'], mbw=mbw, ret=0)
|
||||
|
||||
def test_gyp_env_hacks(self):
|
||||
mbw = self.fake_mbw()
|
||||
mbw.files[mbw.default_config] = GYP_HACKS_CONFIG
|
||||
self.check(['lookup', '-c', 'fake_config'], mbw=mbw,
|
||||
ret=0,
|
||||
out=("GYP_DEFINES='foo=bar baz=1'\n"
|
||||
"GYP_LINK_CONCURRENCY=1\n"
|
||||
"LLVM_FORCE_HEAD_REVISION=1\n"
|
||||
"python build/gyp_chromium -G output_dir=_path_\n"))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
def test_validate(self):
|
||||
mbw = self.fake_mbw()
|
||||
self.check(['validate'], mbw=mbw, ret=0)
|
||||
|
||||
def test_bad_validate(self):
|
||||
mbw = self.fake_mbw()
|
||||
mbw.files[mbw.default_config] = TEST_BAD_CONFIG
|
||||
self.check(['validate'], mbw=mbw, ret=1)
|
||||
|
||||
def test_gyp_env_hacks(self):
|
||||
mbw = self.fake_mbw()
|
||||
mbw.files[mbw.default_config] = GYP_HACKS_CONFIG
|
||||
self.check(['lookup', '-c', 'fake_config'], mbw=mbw,
|
||||
ret=0,
|
||||
out=("GYP_DEFINES='foo=bar baz=1'\n"
|
||||
"GYP_LINK_CONCURRENCY=1\n"
|
||||
"LLVM_FORCE_HEAD_REVISION=1\n"
|
||||
"python build/gyp_chromium -G output_dir=_path_\n"))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
Loading…
Reference in New Issue
Block a user