Fix benchmark by making sure we use Python 3 (#9170)
The benchmark runs have been failing since we started requiring Python 3, so this change fixes the benchmarks by ensuring we always use Python 3.
parent 26603a1466 · commit 6a9cf18cba
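The failure mode is simple: on current CI images a bare `python` is either Python 2 or missing entirely, so everything the benchmark tooling launched that way broke once the repository started requiring Python 3. A purely illustrative guard (hypothetical, not part of this commit) that would surface the problem with a clear message instead of a confusing traceback:

    import sys

    # Hypothetical fail-fast check for py_benchmark.py and friends.
    if sys.version_info < (3,):
        sys.exit("This script requires Python 3; invoke it with python3.")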
@@ -165,7 +165,7 @@ python_add_init: protoc_middleman protoc_middleman2
 	done \
 	done

-python_cpp_pkg_flags = `pkg-config --cflags --libs python`
+python_cpp_pkg_flags = `pkg-config --cflags --libs python3`

 lib_LTLIBRARIES = libbenchmark_messages.la
 libbenchmark_messages_la_SOURCES = python/python_benchmark_messages.cc
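The first Makefile hunk switches the pkg-config module used to locate the Python headers and libraries for the C++ extension: on most current distributions a plain `python` module is either Python 2 or absent, while `python3` resolves to the interpreter the benchmarks now require. A small sketch (assuming pkg-config and a python3 .pc file are installed) of what `python_cpp_pkg_flags` ends up holding:

    import subprocess

    # Equivalent of the Makefile's `pkg-config --cflags --libs python3` backticks.
    flags = subprocess.run(
        ["pkg-config", "--cflags", "--libs", "python3"],
        check=True, capture_output=True, text=True,
    ).stdout.split()
    print(flags)  # e.g. ['-I/usr/include/python3.8', ...]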
@@ -186,7 +186,7 @@ python-pure-python-benchmark: python_add_init
 	@echo export DYLD_LIBRARY_PATH=$(top_srcdir)/src/.libs >> python-pure-python-benchmark
 	@echo export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=\'python\' >> python-pure-python-benchmark
 	@echo cp $(srcdir)/python/py_benchmark.py tmp >> python-pure-python-benchmark
-	@echo python tmp/py_benchmark.py '$$@' >> python-pure-python-benchmark
+	@echo python3 tmp/py_benchmark.py '$$@' >> python-pure-python-benchmark
 	@chmod +x python-pure-python-benchmark

 python-cpp-reflection-benchmark: python_add_init
@@ -196,7 +196,7 @@ python-cpp-reflection-benchmark: python_add_init
 	@echo export DYLD_LIBRARY_PATH=$(top_srcdir)/src/.libs >> python-cpp-reflection-benchmark
 	@echo export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=\'cpp\' >> python-cpp-reflection-benchmark
 	@echo cp $(srcdir)/python/py_benchmark.py tmp >> python-cpp-reflection-benchmark
-	@echo python tmp/py_benchmark.py '$$@' >> python-cpp-reflection-benchmark
+	@echo python3 tmp/py_benchmark.py '$$@' >> python-cpp-reflection-benchmark
 	@chmod +x python-cpp-reflection-benchmark

 python-cpp-generated-code-benchmark: python_add_init libbenchmark_messages.la
@@ -206,7 +206,7 @@ python-cpp-generated-code-benchmark: python_add_init libbenchmark_messages.la
 	@echo export DYLD_LIBRARY_PATH=$(top_srcdir)/src/.libs >> python-cpp-generated-code-benchmark
 	@echo export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=\'cpp\' >> python-cpp-generated-code-benchmark
 	@echo cp $(srcdir)/python/py_benchmark.py tmp >> python-cpp-generated-code-benchmark
-	@echo python tmp/py_benchmark.py --cpp_generated '$$@' >> python-cpp-generated-code-benchmark
+	@echo python3 tmp/py_benchmark.py --cpp_generated '$$@' >> python-cpp-generated-code-benchmark
 	@chmod +x python-cpp-generated-code-benchmark

 python-pure-python: python-pure-python-benchmark
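The remaining Makefile hunks touch the three generated wrapper scripts (python-pure-python-benchmark, python-cpp-reflection-benchmark, python-cpp-generated-code-benchmark); each one exports PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION and then runs py_benchmark.py, so the only functional change is invoking it with python3. A quick way to confirm which protobuf backend a wrapper actually selected, using protobuf's api_implementation helper (an internal module, so subject to change between releases):

    import os

    from google.protobuf.internal import api_implementation

    # protobuf reads PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION at import time;
    # the wrapper scripts above set it to 'python' or 'cpp' before running
    # python3 tmp/py_benchmark.py.
    print("requested:", os.environ.get("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION", "<unset>"))
    print("active:", api_implementation.Type())  # 'python' or 'cpp'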
@@ -7,13 +7,19 @@
 #include "datasets/google_message3/benchmark_message3.pb.h"
 #include "datasets/google_message4/benchmark_message4.pb.h"

 static PyMethodDef python_benchmark_methods[] = {
   {NULL, NULL, 0, NULL} /* Sentinel */
 };

+static struct PyModuleDef _module = {PyModuleDef_HEAD_INIT,
+                                     "libbenchmark_messages",
+                                     "Benchmark messages Python module",
+                                     -1,
+                                     NULL,
+                                     NULL,
+                                     NULL,
+                                     NULL,
+                                     NULL};
+
 extern "C" {
 PyMODINIT_FUNC
-initlibbenchmark_messages() {
+PyInit_libbenchmark_messages() {
   benchmarks::BenchmarkDataset().descriptor();
   benchmarks::proto3::GoogleMessage1().descriptor();
   benchmarks::proto2::GoogleMessage1().descriptor();
@@ -21,9 +27,6 @@ initlibbenchmark_messages() {
   benchmarks::google_message3::GoogleMessage3().descriptor();
   benchmarks::google_message4::GoogleMessage4().descriptor();

-  PyObject *m;
-
-  m = Py_InitModule("libbenchmark_messages", python_benchmark_methods);
-  if (m == NULL)
-    return;
+  return PyModule_Create(&_module);
 }
 }
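These two hunks port python/python_benchmark_messages.cc (the source file listed for libbenchmark_messages.la above) from the Python 2 extension-module protocol, an init&lt;name&gt;() function that calls Py_InitModule(), to the Python 3 protocol, a PyModuleDef plus a PyInit_&lt;name&gt;() function that returns PyModule_Create(). With the old entry point the module simply cannot be imported under Python 3, so nothing downstream runs; a hedged sketch of what that looks like from the Python side:

    # Sketch only: an extension that still exports initlibbenchmark_messages
    # (the Python 2 entry point) is rejected by the Python 3 import machinery,
    # which looks for PyInit_libbenchmark_messages instead.
    try:
        import libbenchmark_messages  # registers the benchmark message descriptors
    except ImportError as err:
        # Typical message: "dynamic module does not define module export
        # function (PyInit_libbenchmark_messages)"
        print("extension not importable under this interpreter:", err)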
@@ -61,7 +61,7 @@ def __parse_cpp_result(filename):
     return
   if filename[0] != '/':
     filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
-  with open(filename, "rb") as f:
+  with open(filename, encoding="utf-8") as f:
     results = json.loads(f.read())
     for benchmark in results["benchmarks"]:
       data_filename = "".join(
@@ -96,7 +96,7 @@ def __parse_synthetic_result(filename):
     return
   if filename[0] != "/":
     filename = os.path.dirname(os.path.abspath(__file__)) + "/" + filename
-  with open(filename, "rb") as f:
+  with open(filename, encoding="utf-8") as f:
     results = json.loads(f.read())
     for benchmark in results["benchmarks"]:
       __results.append({
@@ -126,7 +126,7 @@ def __parse_python_result(filename):
     return
   if filename[0] != '/':
     filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
-  with open(filename, "rb") as f:
+  with open(filename, encoding="utf-8") as f:
     results_list = json.loads(f.read())
     for results in results_list:
       for result in results:
@@ -176,7 +176,7 @@ def __parse_java_result(filename):
     return
   if filename[0] != '/':
     filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
-  with open(filename, "rb") as f:
+  with open(filename, encoding="utf-8") as f:
     results = json.loads(f.read())
     for result in results:
       total_weight = 0
@@ -212,7 +212,7 @@ def __parse_go_result(filename):
     return
   if filename[0] != '/':
     filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
-  with open(filename, "rb") as f:
+  with open(filename, encoding="utf-8") as f:
     for line in f:
       result_list = re.split(r"[\ \t]+", line)
       if result_list[0][:9] != "Benchmark":
@@ -252,7 +252,7 @@ def __parse_custom_result(filename, language):
     return
   if filename[0] != '/':
     filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
-  with open(filename, "rb") as f:
+  with open(filename, encoding="utf-8") as f:
     results = json.loads(f.read())
     for result in results:
       _, avg_size = __get_data_size(result["filename"])
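All six parser hunks (in util/result_parser.py, judging by the python3 -m util.result_parser invocation below) make the same change: result files are opened in text mode with an explicit UTF-8 encoding instead of binary mode. Under Python 2 the distinction barely mattered, but on Python 3 a binary handle yields bytes, and json.loads only accepts bytes from 3.6 onward, so reading text keeps the parser working on every Python 3 version. A small self-contained sketch of the difference, using a throwaway file:

    import json
    import tempfile

    # Throwaway example file; the real parser reads e.g. tmp/cpp_result.json.
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
        json.dump({"benchmarks": []}, tmp)
        path = tmp.name

    with open(path, "rb") as f:
        raw = f.read()  # bytes; json.loads(raw) fails on Python < 3.6

    with open(path, encoding="utf-8") as f:
        results = json.loads(f.read())  # str; works on any Python 3

    print(type(raw), type(results))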
@@ -23,8 +23,10 @@ popd
 ./configure CXXFLAGS="-fPIC -O2"
 make -j8
 pushd python
-python setup.py build --cpp_implementation
-pip install . --user
+virtualenv -p python3 env
+source env/bin/activate
+python3 setup.py build --cpp_implementation
+pip3 install --install-option="--cpp_implementation" .
 popd

 # build and run Python benchmark
@@ -91,7 +93,7 @@ cat tmp/python_result.json
 # print the postprocessed results to the build job log
 # TODO(jtattermusch): re-enable uploading results to bigquery (it is currently broken)
 make python_add_init
-env LD_LIBRARY_PATH="${repo_root}/src/.libs" python -m util.result_parser \
+env LD_LIBRARY_PATH="${repo_root}/src/.libs" python3 -m util.result_parser \
   -cpp="../tmp/cpp_result.json" -java="../tmp/java_result.json" -python="../tmp/python_result.json"
 popd

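The build-script hunks (apparently the CI benchmark script) stop depending on whatever python and pip happen to be first on PATH: the C++-accelerated protobuf package is built and installed inside a dedicated virtualenv created with -p python3, and the result parser is invoked explicitly with python3. Once the virtualenv is activated, both python and python3 should resolve to the environment's interpreter, which is easy to verify from inside the benchmark process:

    import sys

    # After `source env/bin/activate`, sys.executable points into env/ and the
    # interpreter is guaranteed to be Python 3.
    print(sys.executable)        # e.g. .../python/env/bin/python3
    print(sys.version_info[:2])  # e.g. (3, 8)
    assert sys.version_info >= (3,), "benchmark tooling expects Python 3"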