# SPDX-License-Identifier: GPL-2.0
default_keys = [ 'iops', 'io_kbytes', 'bw' ]
latency_keys = [ 'lat_ns_min', 'lat_ns_max' ]
main_job_keys = [ 'sys_cpu', 'elapsed' ]
io_ops = [ 'read', 'write', 'trim' ]
def _fuzzy_compare(a, b, fuzzy):
    """Return the percent change from a to b, or 0 if the change is inside
    the fuzz band."""
    if a == b:
        return 0
    if a == 0:
        return 100
    a = float(a)
    b = float(b)
    fuzzy = float(fuzzy)
    val = ((b - a) / a) * 100
    if val > fuzzy or val < -fuzzy:
        return val
    return 0
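# A quick worked example (hypothetical numbers): with fuzzy=5,
# _fuzzy_compare(100, 103, 5) returns 0 because the 3% change is inside the
# fuzz band, while _fuzzy_compare(100, 110, 5) returns 10.0.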
def _compare_jobs(ijob, njob, latency, fuzz, failures_only):
    failed = 0
    for k in default_keys:
        for io in io_ops:
            key = "{}_{}".format(io, k)
            comp = _fuzzy_compare(ijob[key], njob[key], fuzz)
            # For throughput keys, a drop in the new run is a regression.
            if comp < 0:
                print("  {} regressed: old {} new {} {}%".format(key,
                      ijob[key], njob[key], comp))
                failed += 1
            elif not failures_only and comp > 0:
                print("  {} improved: old {} new {} {}%".format(key,
                      ijob[key], njob[key], comp))
            elif not failures_only:
                print("{} is a-ok {} {}".format(key, ijob[key], njob[key]))
    for k in latency_keys:
        # Only check latency keys when the caller asked for it.
        if not latency:
            break
        for io in io_ops:
            key = "{}_{}".format(io, k)
            comp = _fuzzy_compare(ijob[key], njob[key], fuzz)
            # For latency keys, an increase in the new run is a regression.
            if comp > 0:
                print("  {} regressed: old {} new {} {}%".format(key,
                      ijob[key], njob[key], comp))
                failed += 1
            elif not failures_only and comp < 0:
                print("  {} improved: old {} new {} {}%".format(key,
                      ijob[key], njob[key], comp))
            elif not failures_only:
                print("{} is a-ok {} {}".format(key, ijob[key], njob[key]))
    for k in main_job_keys:
        comp = _fuzzy_compare(ijob[k], njob[k], fuzz)
        # sys_cpu and elapsed also regress when they grow.
        if comp > 0:
            print("  {} regressed: old {} new {} {}%".format(k, ijob[k],
                  njob[k], comp))
            failed += 1
        elif not failures_only and comp < 0:
            print("  {} improved: old {} new {} {}%".format(k, ijob[k],
                  njob[k], comp))
        elif not failures_only:
            print("{} is a-ok {} {}".format(k, ijob[k], njob[k]))
    return failed
def compare_individual_jobs(initial, data, latency, fuzz, failures_only):
    failed = 0
    initial_jobs = initial['jobs'][:]
    for njob in data['jobs']:
        for ijob in initial_jobs:
            if njob['jobname'] == ijob['jobname']:
                print("  Checking results for {}".format(njob['jobname']))
                failed += _compare_jobs(ijob, njob, latency, fuzz,
                                        failures_only)
                initial_jobs.remove(ijob)
                break
    return failed
def default_merge(data):
    '''Default merge function for multiple jobs in one run

    For runs that include multiple threads we will have a lot of variation
    between the different threads, which makes comparing them to each other
    across multiple runs less than useful. Instead merge the jobs into a
    single job. This function does that by adding up 'iops', 'io_kbytes',
    and 'bw' for read/write/trim in the merged job, and then taking the
    maximal values of the latency numbers.
    '''
    merge_job = {}
    for job in data['jobs']:
        for k in main_job_keys:
            if k not in merge_job:
                merge_job[k] = job[k]
            else:
                merge_job[k] += job[k]
        for io in io_ops:
            for k in default_keys:
                key = "{}_{}".format(io, k)
                if key not in merge_job:
                    merge_job[key] = job[key]
                else:
                    merge_job[key] += job[key]
            for k in latency_keys:
                key = "{}_{}".format(io, k)
                if key not in merge_job:
                    merge_job[key] = job[key]
                elif merge_job[key] < job[key]:
                    merge_job[key] = job[key]
    return merge_job
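# Merge illustration (hypothetical numbers): two jobs reporting
# read_iops=1000 and read_iops=1200 merge to read_iops=2200 (summed),
# while read_lat_ns_max values of 800 and 950 merge to 950 (maximum).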
def compare_fiodata(initial, data, latency, merge_func=default_merge, fuzz=5,
                    failures_only=True):
    if merge_func is None:
        return compare_individual_jobs(initial, data, latency, fuzz,
                                       failures_only)
    ijob = merge_func(initial)
    njob = merge_func(data)
    return _compare_jobs(ijob, njob, latency, fuzz, failures_only)
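
# Minimal usage sketch, not part of the module proper: the job dicts are
# assumed to carry flattened per-op keys like 'read_iops' and
# 'read_lat_ns_max' alongside 'sys_cpu' and 'elapsed', matching the keys
# the comparisons above look up. All names and values below are made up
# purely for illustration.
if __name__ == '__main__':
    def _sample_job(name, scale):
        # Populate every key _compare_jobs reads so the demo is runnable.
        job = {'jobname': name, 'sys_cpu': 10.0 * scale, 'elapsed': 60}
        for io in io_ops:
            for k in default_keys:
                job["{}_{}".format(io, k)] = 1000 * scale
            for k in latency_keys:
                job["{}_{}".format(io, k)] = 500 / scale
        return job

    old_run = {'jobs': [_sample_job('demo', 1.0)]}
    new_run = {'jobs': [_sample_job('demo', 0.9)]}  # ~10% slower run
    failed = compare_fiodata(old_run, new_run, True, failures_only=False)
    print("{} metrics regressed".format(failed))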