Revert "Preparing v8 to use with python3 /test"

This reverts commit f8962ae1a2.

Reason for revert: breaks Arm bots, e.g. https://ci.chromium.org/p/v8/builders/luci.v8.ci/V8%20Arm/9655, task: https://chromium-swarm.appspot.com/task?d=true&id=431dfa503db16d10

Original change's description:
> Preparing v8 to use with python3 /test
> 
> There are now fewer than 400 days until the end of life
> of Python 2 (aka _legacy_ Python): https://pythonclock.org/.
> The code was checked for Python 2/Python 3 compatibility
> with the following tools: futurize and flake8.
> The reports are available here: https://travis-ci.com/bmsdave/v8/builds
> 
> This CL was uploaded by git cl split.
> 
> Bug: v8:8594
> Change-Id: Idbf467daf629a4e808345a6a88036c2a3f259138
> Reviewed-on: https://chromium-review.googlesource.com/c/1470121
> Commit-Queue: Michael Achenbach <machenbach@chromium.org>
> Reviewed-by: Michael Achenbach <machenbach@chromium.org>
> Reviewed-by: Sergiy Belozorov <sergiyb@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#59679}

TBR=machenbach@chromium.org,sergiyb@chromium.org,herhut@chromium.org,bmsdave@gmail.com

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: v8:8594
Change-Id: I17a0a7b203fa2c0ab0f965240ee1415b7513e1cf
Reviewed-on: https://chromium-review.googlesource.com/c/1478692
Reviewed-by: Sergiy Belozorov <sergiyb@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Sergiy Belozorov <sergiyb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59725}
Authored by Sergiy Belozorov on 2019-02-20 09:48:12 +00:00; committed by Commit Bot
parent 18772548b6
commit 6956c02c8e
9 changed files with 43 additions and 67 deletions
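
The hunks below all undo one family of Python 2/3 compatibility changes: print-function syntax, range() instead of xrange(), and exec() instead of execfile(). As a rough, illustrative sketch (the names here are invented, not taken from the diff), the forward-compatible style the original CL had introduced looks like this:

  # for py2/py3 compatibility
  from __future__ import print_function

  def report_score(name, score):
    # print() as a function works the same on Python 2 and Python 3
    print("%s scored %.1f" % (name, score))

  for i in range(3):  # range() exists on both; xrange() is Python-2-only
    report_score("octane", 100.0 + i)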

View File

@@ -8,9 +8,6 @@ Local benchmark runner.
The -c option is mandatory.
'''
# for py2/py3 compatibility
from __future__ import print_function
import math
from optparse import OptionParser
import os
@@ -99,7 +96,7 @@ class BenchmarkSuite(object):
elif self.name in self.kGeometricScoreSuites:
self.ComputeScoreV8Octane(self.name)
else:
print("Don't know how to compute score for suite: '%s'" % self.name)
print "Don't know how to compute score for suite: '%s'" % self.name
def IsBetterThan(self, other):
if self.name in self.kClassicScoreSuites:
@@ -107,7 +104,7 @@ class BenchmarkSuite(object):
elif self.name in self.kGeometricScoreSuites:
return self.score > other.score
else:
print("Don't know how to compare score for suite: '%s'" % self.name)
print "Don't know how to compare score for suite: '%s'" % self.name
class BenchmarkRunner(object):
@@ -143,7 +140,7 @@ class BenchmarkRunner(object):
outfile = "%s/out.%d.txt" % (self.outdir, i)
if os.path.exists(outfile) and not self.opts.force:
continue
print("run #%d" % i)
print "run #%d" % i
cmdline = "%s > %s" % (self.opts.command, outfile)
subprocess.call(cmdline, shell=True)
time.sleep(self.opts.sleep)
@@ -182,11 +179,11 @@ class BenchmarkRunner(object):
suite.ProcessResults(self.opts)
suite.ComputeScore()
print(("%s,%.1f,%.2f,%d " %
(suite.name, suite.score, suite.sigma, suite.num)), end=' ')
print ("%s,%.1f,%.2f,%d " %
(suite.name, suite.score, suite.sigma, suite.num)),
if self.opts.verbose:
print("")
print("")
print ""
print ""
if __name__ == '__main__':
@@ -209,14 +206,14 @@ if __name__ == '__main__':
opts.sleep = int(opts.sleep)
if not opts.command:
print("You must specify the command to run (-c). Aborting.")
print "You must specify the command to run (-c). Aborting."
sys.exit(1)
cachedir = os.path.abspath(os.getcwd())
if not opts.cachedir:
opts.cachedir = cachedir
if not os.path.exists(opts.cachedir):
print("Directory " + opts.cachedir + " is not valid. Aborting.")
print "Directory " + opts.cachedir + " is not valid. Aborting."
sys.exit(1)
br = BenchmarkRunner(args, os.getcwd(), opts)

View File

@@ -10,9 +10,6 @@ runner directly into this script or specify the results file with
the -f option.
'''
# for py2/py3 compatibility
from __future__ import print_function
import csv
import math
from optparse import OptionParser

View File

@@ -31,9 +31,6 @@ You can run from any place:
../../somewhere-strange/csuite.py sunspider compare ./d8-better
'''
# for py2/py3 compatibility
from __future__ import print_function
import os
from optparse import OptionParser
import subprocess
@@ -50,48 +47,48 @@ if __name__ == '__main__':
(opts, args) = parser.parse_args()
if len(args) < 3:
print('not enough arguments')
print 'not enough arguments'
sys.exit(1)
suite = args[0]
mode = args[1]
if suite not in ['octane', 'sunspider', 'kraken']:
print('Suite must be octane, sunspider or kraken. Aborting.')
print 'Suite must be octane, sunspider or kraken. Aborting.'
sys.exit(1)
if mode != 'baseline' and mode != 'compare':
print('mode must be baseline or compare. Aborting.')
print 'mode must be baseline or compare. Aborting.'
sys.exit(1)
# Set up paths.
d8_path = os.path.abspath(args[2])
if not os.path.exists(d8_path):
print(d8_path + " is not valid.")
print d8_path + " is not valid."
sys.exit(1)
csuite_path = os.path.dirname(os.path.abspath(__file__))
if not os.path.exists(csuite_path):
print("The csuite directory is invalid.")
print "The csuite directory is invalid."
sys.exit(1)
benchmark_py_path = os.path.join(csuite_path, "benchmark.py")
if not os.path.exists(benchmark_py_path):
print("Unable to find benchmark.py in " + csuite_path \
+ ". Aborting.")
print "Unable to find benchmark.py in " + output_path_base \
+ ". Aborting."
sys.exit(1)
compare_baseline_py_path = os.path.join(csuite_path,
"compare-baseline.py")
if not os.path.exists(compare_baseline_py_path):
print("Unable to find compare-baseline.py in " + csuite_path \
+ ". Aborting.")
print "Unable to find compare-baseline.py in " + output_path_base \
+ ". Aborting."
sys.exit(1)
benchmark_path = os.path.abspath(os.path.join(csuite_path, "../data"))
if not os.path.exists(benchmark_path):
print("I can't find the benchmark data directory. Aborting.")
print "I can't find the benchmark data directory. Aborting."
sys.exit(1)
# Gather the remaining arguments into a string of extra args for d8.
@@ -114,12 +111,12 @@ if __name__ == '__main__':
if opts.runs:
if (float(opts.runs) / runs) < 0.6:
print("Normally, %s requires %d runs to get stable results." \
% (suite, runs))
print "Normally, %s requires %d runs to get stable results." \
% (suite, runs)
runs = int(opts.runs)
if opts.verbose:
print("Running and averaging %s %d times." % (suite, runs))
print "Running and averaging %s %d times." % (suite, runs)
# Ensure output directory is setup
output_path_base = os.path.abspath(os.getcwd())
@@ -127,16 +124,16 @@ if __name__ == '__main__':
output_file = os.path.join(output_path, "master")
if not os.path.exists(output_path):
if opts.verbose:
print("Creating directory %s." % output_path)
print "Creating directory %s." % output_path
os.mkdir(output_path)
if opts.verbose:
print("Working directory for runs is %s." % suite_path)
print "Working directory for runs is %s." % suite_path
inner_command = " -c \"%s --expose-gc %s %s \"" \
% (d8_path, extra_args, cmd)
if opts.verbose:
print("calling d8 like so: %s." % inner_command)
print "calling d8 like so: %s." % inner_command
cmdline_base = "python %s %s -fv -r %d -d %s" \
% (benchmark_py_path, inner_command, runs, output_path_base)
@@ -148,10 +145,10 @@ if __name__ == '__main__':
% (cmdline_base, compare_baseline_py_path, output_file)
if opts.verbose:
print("Spawning subprocess: %s." % cmdline)
print "Spawning subprocess: %s." % cmdline
return_code = subprocess.call(cmdline, shell=True, cwd=suite_path)
if return_code < 0:
print("Error return code: %d." % return_code)
print "Error return code: %d." % return_code
if mode == "baseline":
print("Wrote %s." % output_file)
print("Run %s again with compare mode to see results." % suite)
print "Wrote %s." % output_file
print "Run %s again with compare mode to see results." % suite

View File

@@ -25,9 +25,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import os
import shutil
@@ -51,9 +48,9 @@ class TestLoader(testsuite.TestLoader):
output = cmd.execute()
# TODO make errors visible (see duplicated code in 'unittests')
if output.exit_code != 0:
print(cmd)
print(output.stdout)
print(output.stderr)
print cmd
print output.stdout
print output.stderr
return []
return sorted(output.stdout.strip().split())

View File

@@ -35,11 +35,6 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
from testrunner.outproc import base as outproc
try:
basestring # Python 2
except NameError: # Python 3
basestring = str
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
ENV_PATTERN = re.compile(r"//\s+Environment Variables:(.*)")
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
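
The shim removed here lets isinstance() checks accept both str and unicode on Python 2 while still running on Python 3, where only str exists. A minimal sketch of the kind of check it supports (the function and variable names are illustrative, not from this file):

  try:
    basestring          # Python 2: common base class of str and unicode
  except NameError:     # Python 3: only str exists
    basestring = str

  def is_text(value):
    return isinstance(value, basestring)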

View File

@@ -25,8 +25,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from functools import reduce
import os
@@ -93,7 +91,7 @@ class TestCase(testcase.D8TestCase):
files = [os.path.join(self.suite.root, "mozilla-shell-emulation.js")]
testfilename = self.path + ".js"
testfilepath = testfilename.split(os.path.sep)
for i in range(len(testfilepath)):
for i in xrange(len(testfilepath)):
script = os.path.join(self.suite.test_root,
reduce(os.path.join, testfilepath[:i], ""),
"shell.js")

View File

@@ -25,6 +25,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from testrunner.local import testsuite
@@ -65,8 +66,7 @@ class TestSuite(testsuite.TestSuite):
testsource = testsource.replace("$" + key, replacement[key]);
Test(testname, testsource, expectation)
return MkTest
with open(pathname) as in_file:
exec(in_file.read(), {"Test": Test, "Template": Template})
execfile(pathname, {"Test": Test, "Template": Template})
def ListTests(self):
result = []
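
execfile() no longer exists on Python 3; the open()/exec() form removed above was the portable replacement. If matching execfile's traceback behavior matters, a slightly closer substitute is this hypothetical helper (an assumption, not code from the CL):

  def execfile_compat(path, global_vars):
    # compile() keeps the real file name, so tracebacks point at 'path'
    with open(path) as in_file:
      exec(compile(in_file.read(), path, "exec"), global_vars)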

View File

@@ -25,8 +25,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import imp
import itertools

View File

@@ -2,9 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import os
from testrunner.local import command
@@ -26,7 +23,7 @@ class TestLoader(testsuite.TestLoader):
shell += ".exe"
output = None
for i in range(3): # Try 3 times in case of errors.
for i in xrange(3): # Try 3 times in case of errors.
cmd = command.Command(
cmd_prefix=self.test_config.command_prefix,
shell=shell,
@@ -35,13 +32,13 @@ class TestLoader(testsuite.TestLoader):
if output.exit_code == 0:
break
print("Test executable failed to list the tests (try %d).\n\nCmd:" % i)
print(cmd)
print("\nStdout:")
print(output.stdout)
print("\nStderr:")
print(output.stderr)
print("\nExit code: %d" % output.exit_code)
print "Test executable failed to list the tests (try %d).\n\nCmd:" % i
print cmd
print "\nStdout:"
print output.stdout
print "\nStderr:"
print output.stderr
print "\nExit code: %d" % output.exit_code
else:
raise Exception("Test executable failed to list the tests.")
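
The else clause on the retry loop above runs only when the loop completes without a break, i.e. when all three attempts to list the tests fail. A standalone sketch of the idiom, with a made-up probe function standing in for the real shell invocation:

  def list_tests_once(attempt):
    # placeholder for running the test executable; pretend the last retry works
    return attempt == 2

  for attempt in range(3):  # try 3 times in case of errors
    if list_tests_once(attempt):
      break
  else:
    raise Exception("Test executable failed to list the tests.")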