simplebench: move results_to_text() into separate file

Let's keep the view part separate: this way it will be easier to improve it
in the following commits.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20201021145859.11201-18-vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
This commit is contained in:
Vladimir Sementsov-Ogievskiy 2020-10-21 17:58:55 +03:00 committed by Max Reitz
parent bfccfa62ac
commit 8e979febb0
4 changed files with 52 additions and 33 deletions

View file

@ -19,6 +19,7 @@
#
import simplebench
from results_to_text import results_to_text
from bench_block_job import bench_block_copy, drv_file, drv_nbd
@ -77,4 +78,4 @@ test_envs = [
]
result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
print(simplebench.results_to_text(result))
print(results_to_text(result))

View file

@ -26,6 +26,7 @@ import sys
import os
import subprocess
import simplebench
from results_to_text import results_to_text
def bench_func(env, case):
@ -167,4 +168,4 @@ if __name__ == '__main__':
result = simplebench.bench(bench_func, test_envs, test_cases, count=3,
initial_run=False)
print(simplebench.results_to_text(result))
print(results_to_text(result))

View file

@ -0,0 +1,48 @@
# Simple benchmarking framework
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def result_to_text(result):
    """Return text representation of bench_one() returned dict."""
    # No 'average' key means the benchmark produced no usable numbers.
    if 'average' not in result:
        return 'FAILED'

    text = f"{result['average']:.2f} +- {result['stdev']:.2f}"
    if 'n-failed' in result:
        text += f"\n({result['n-failed']} failed)"
    return text
def results_to_text(results):
    """Return text representation of bench() returned dict."""
    # Imported lazily: tabulate is only needed when results are rendered.
    from tabulate import tabulate

    dimension = None
    # Header row: empty corner cell followed by one column per environment.
    table = [[""] + [c['id'] for c in results['envs']]]

    for case in results['cases']:
        line = [case['id']]
        for env in results['envs']:
            cell = results['tab'][case['id']][env['id']]
            # Every cell must report the same unit; the first one fixes it.
            if dimension is None:
                dimension = cell['dimension']
            else:
                assert dimension == cell['dimension']
            line.append(result_to_text(cell))
        table.append(line)

    return f'All results are in {dimension}\n\n' + tabulate(table)

View file

@ -79,17 +79,6 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
return result
def result_to_text(result):
    """Return text representation of bench_one() returned dict."""
    # A dict with an 'average' key is formatted as "avg +- stdev";
    # otherwise the run is reported as FAILED.
    if 'average' in result:
        s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
        # 'n-failed', when present, is appended on a second line.
        if 'n-failed' in result:
            s += '\n({} failed)'.format(result['n-failed'])
        return s
    else:
        return 'FAILED'
def bench(test_func, test_envs, test_cases, *args, **vargs):
"""Fill benchmark table
@ -125,23 +114,3 @@ def bench(test_func, test_envs, test_cases, *args, **vargs):
print('Done')
return results
def results_to_text(results):
    """Return text representation of bench() returned dict."""
    # Imported lazily: tabulate is only needed when results are rendered.
    from tabulate import tabulate
    dim = None
    # Header row: empty corner cell followed by one column per environment.
    tab = [[""] + [c['id'] for c in results['envs']]]
    for case in results['cases']:
        row = [case['id']]
        for env in results['envs']:
            res = results['tab'][case['id']][env['id']]
            # Every cell must report the same unit ('dimension');
            # the first cell fixes it, the rest are asserted against it.
            if dim is None:
                dim = res['dimension']
            else:
                assert dim == res['dimension']
            row.append(result_to_text(res))
        tab.append(row)
    return f'All results are in {dim}\n\n' + tabulate(tab)