load_operf_data.py (forked from ocaml-bench/ocaml_bench_scripts)
#!/usr/bin/env python3

import argparse
import datetime
import glob
import json
import os
import subprocess
import sys

import yaml

from codespeed_upload import post_data_to_server
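
# Example invocation (a sketch only; the result-directory path is hypothetical):
#
#   ./load_operf_data.py /path/to/operf_results --dry_run --verbose
#   ./load_operf_data.py /path/to/operf_results --codespeed_url http://localhost:8000/
#
# The directory is expected to hold build_context.conf, run_context.conf and
# one or more operf-micro *.summary files (see get_context and parse_results below).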
GLOB_PATTERN = '*.summary'
CODESPEED_URL = 'http://localhost:8000/'

parser = argparse.ArgumentParser(description='Load operf-micro summary files into codespeed')
parser.add_argument('resultdir', type=str, help='directory of results')
parser.add_argument('--codespeed_url', type=str, help='url of codespeed server', default=CODESPEED_URL)
parser.add_argument('--glob_pattern', type=str, help='glob pattern for summary files', default=GLOB_PATTERN)
parser.add_argument('--halt_on_bad_parse', action='store_true', default=False)
parser.add_argument('--dry_run', action='store_true', default=False)
parser.add_argument('-v', '--verbose', action='store_true', default=False)

args = parser.parse_args()
def get_bench_dict(name, context, results):
    return {
        'commitid': context['commitid'],
        'project': context['project'],
        'branch': context['branch'],
        'executable': context['executable'],
        'environment': context['environment'],
        'benchmark': name,
        'units': 'cycles',
        'units_title': 'Time',
        'result_value': results['mean'],
        'min': results['min'],
        'max': results['max'],
        'std_dev': results['standard_error'],
    }
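
# For illustration only: the dict returned above, for a single benchmark, looks
# roughly like the following (all values are invented; the field names are the
# ones passed on to codespeed_upload.post_data_to_server):
#
#   {'commitid': 'abc123', 'project': 'ocaml', 'branch': 'trunk',
#    'executable': 'vanilla', 'environment': 'bench-host',
#    'benchmark': 'suite_name/bench_name', 'units': 'cycles',
#    'units_title': 'Time', 'result_value': 1.23e8, 'min': 1.20e8,
#    'max': 1.30e8, 'std_dev': 4.5e5}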
def parse_results(fname, context):
    bench_data = []
    with open(fname) as f:
        dat = yaml.safe_load(f)
    # the single top-level key in dat is the bench run timestamp
    benchmarks = dat[list(dat.keys())[0]]
    # each key under the timestamp names a bench suite
    for k1 in benchmarks.keys():
        # under a suite, each key is either a benchmark name or a 'group <name>' subgroup
        for k2 in benchmarks[k1].keys():
            # handle groups
            if k2.startswith('group '):
                for k3 in benchmarks[k1][k2].keys():
                    k2_out = k2.replace('group ', '')
                    bench_data.append(get_bench_dict('%s/%s/%s' % (k1, k2_out, k3), context, benchmarks[k1][k2][k3]))
            else:
                bench_data.append(get_bench_dict('%s/%s' % (k1, k2), context, benchmarks[k1][k2]))
    return bench_data
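
# parse_results assumes a summary file shaped roughly like the hypothetical
# YAML below: a single timestamp key at the top, then suite -> benchmark
# (or suite -> 'group <name>' -> benchmark) -> statistics carrying the keys
# read by get_bench_dict:
#
#   2019-01-01_12:00:00:
#     suite_name:
#       bench_name:
#         mean: 1.0e6
#         min: 9.0e5
#         max: 1.1e6
#         standard_error: 1.0e3
#       group sub_name:
#         grouped_bench:
#           mean: 2.0e6
#           min: 1.9e6
#           max: 2.1e6
#           standard_error: 2.0e3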
def get_context(dir, verbose=args.verbose):
    def ld(x):
        fname = os.path.join(dir, x)
        if verbose:
            print('loading context info from %s' % fname)
        with open(fname) as f:
            return yaml.safe_load(f)

    context = {}
    context.update(ld('build_context.conf'))
    context.update(ld('run_context.conf'))
    return context
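
# A minimal sketch of the two context files that get_context merges (the key
# names are the ones get_bench_dict reads; how they are split between the two
# files is an assumption, and the values are placeholders):
#
#   build_context.conf:
#     project: ocaml
#     branch: trunk
#     commitid: abc123
#     executable: vanilla
#
#   run_context.conf:
#     environment: bench-host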
# load context information
context = get_context(args.resultdir)
if args.verbose:
    print('got context: \n%s' % yaml.dump(context, default_flow_style=False))

# get file list
glob_str = '%s/%s' % (args.resultdir, args.glob_pattern)
if args.verbose:
    print('taking result files of the form: %s' % glob_str)

for f in sorted(glob.glob(glob_str)):
    if args.verbose:
        print('processing %s' % f)
    try:
        results = parse_results(f, context)
    except Exception:
        print('ERROR: failed to parse results in %s' % f)
        if args.halt_on_bad_parse:
            sys.exit(1)
        continue  # skip this file rather than posting stale or undefined results
    if args.verbose:
        print('loaded: \n%s' % yaml.dump(results, default_flow_style=False))
    post_data_to_server(args.codespeed_url, results, dry_run=args.dry_run, verbose=args.verbose)