Work around odd deadlock in test_shaders.py in --parallel mode.

Not exactly sure what is going on; the internet suggests there's a pipe that
fills up. Calling close() before we start waiting for results seems to
do the trick.
Hans-Kristian Arntzen 2020-04-27 16:44:14 +02:00
parent d7d630a0b7
commit 35a9b793d6


@@ -721,20 +721,21 @@ def test_shaders_helper(stats, backend, args):
     # The child processes in parallel execution mode don't have the proper state for the global args variable, so
     # at this point we need to switch to explicit arguments
     if args.parallel:
-        pool = multiprocessing.Pool(multiprocessing.cpu_count())
-
-        results = []
-        for f in all_files:
-            results.append(pool.apply_async(test_shader_file,
-                args = (f, stats, args, backend)))
-
-        for res in results:
-            error = res.get()
-            if error is not None:
-                pool.close()
-                pool.join()
-                print('Error:', error)
-                sys.exit(1)
+        with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
+            results = []
+            for f in all_files:
+                results.append(pool.apply_async(test_shader_file,
+                    args = (f, stats, args, backend)))
+
+            pool.close()
+            pool.join()
+            results_completed = [res.get() for res in results]
+
+            for error in results_completed:
+                if error is not None:
+                    print('Error:', error)
+                    sys.exit(1)
     else:
         for i in all_files:
            e = test_shader_file(i, stats, args, backend)
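
Below is a minimal, self-contained sketch of the pattern this change applies, not code from the repository: the pool is closed and joined before any result is fetched with get(), so the worker processes can finish writing to their result pipes before the parent starts blocking. The worker function check_file and the example file list are hypothetical stand-ins for test_shader_file and all_files.

import multiprocessing
import sys

def check_file(path):
    # Hypothetical worker: return None on success, or an error string on failure.
    if path.endswith('.bad'):
        return 'failed: ' + path
    return None

def run_parallel(all_files):
    with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
        # Submit all work first; apply_async returns AsyncResult handles.
        results = [pool.apply_async(check_file, args=(f,)) for f in all_files]

        # Stop accepting new work and wait for the workers to exit before
        # blocking on individual results, mirroring the workaround above.
        pool.close()
        pool.join()

        for res in results:
            error = res.get()
            if error is not None:
                print('Error:', error)
                sys.exit(1)

if __name__ == '__main__':
    run_parallel(['a.shader', 'b.shader'])

Draining the results only after close()/join() keeps the parent from blocking on one slow task while other workers are stuck waiting for their output to be read.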