#! /usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2021 Intel Corporation

"""
Script to automate running crypto performance tests for a range of test
cases as configured in the JSON file specified by the user.
The results are processed and output into various graphs in PDF files.
Currently, throughput and latency tests are supported.
"""
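
# Example invocation (illustrative; the paths and option values below are
# hypothetical):
#     ./dpdk-graph-crypto-perf.py configs/crypto-perf-aesni-mb.json \
#         -t throughput latency -o /tmp/crypto-perf-results
#
# The JSON config maps suite names to their test cases. Every suite needs a
# 'default' case holding 'eal' and 'app' option dicts, with 'ptest' set in
# 'app'; the remaining entries are individual test cases whose options are
# appended after the defaults. A minimal sketch (option values made up):
#     {
#         "throughput": {
#             "default": {
#                 "eal": {"l": "1,2"},
#                 "app": {"ptest": "throughput", "devtype": "crypto_aesni_mb"}
#             },
#             "aes-cbc-128-sha1-hmac": {"cipher-algo": "aes-cbc"}
#         }
#     }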

import glob
import json
import os
import shutil
import subprocess
from argparse import ArgumentParser
from argparse import ArgumentDefaultsHelpFormatter
import img2pdf
import pandas as pd
import plotly.express as px

SCRIPT_PATH = os.path.dirname(__file__) + "/"
GRAPH_DIR = "temp_graphs"


class Grapher:
    """Grapher object containing all graphing functions."""
    def __init__(self, config, suite, graph_path):
        self.graph_num = 0
        self.graph_path = graph_path
        self.suite = suite
        self.config = config
        self.test = ""
        self.ptest = ""
        self.data = pd.DataFrame()

    def save_graph(self, fig, subdir):
        """
        Update figure layout to increase readability, output to JPG file.
        """
        path = os.path.join(self.graph_path, subdir, "")
        if not os.path.exists(path):
            os.makedirs(path)
        fig.update_layout(font_size=30, title_x=0.5, title_font={"size": 25},
                          margin={'t': 300, 'l': 150, 'r': 150, 'b': 150})
        fig.write_image(path + "%d.jpg" % self.graph_num)

    def boxplot_graph(self, x_axis_label, burst, buffer):
        """Plot a boxplot graph for the given parameters."""
        fig = px.box(self.data, x=x_axis_label,
                     title="Config: " + self.config + "<br>Test Suite: " +
                     self.suite + "<br>" + self.test +
                     "<br>(Outliers Included)<br>Burst Size: " + burst +
                     ", Buffer Size: " + buffer,
                     height=1400, width=2400)
        self.save_graph(fig, x_axis_label.replace(' ', '_'))
        self.graph_num += 1

    def grouped_graph(self, y_axis_label, x_axis_label, color_label):
        """Plot a grouped bar chart using the given parameters."""
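        # Skip the graph entirely when every value of this metric is zero.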
        if (self.data[y_axis_label] == 0).all():
            return
        fig = px.bar(self.data, x=x_axis_label, color=color_label,
                     y=y_axis_label,
                     title="Config: " + self.config + "<br>Test Suite: " +
                     self.suite + "<br>" + self.test + "<br>"
                     + y_axis_label + " for each " + x_axis_label +
                     "/" + color_label, barmode="group", height=1400,
                     width=2400)
        fig.update_xaxes(type='category')
        self.save_graph(fig, y_axis_label.replace(' ', '_'))
        self.graph_num += 1

    def histogram_graph(self, x_axis_label, burst, buffer):
        """Plot a histogram graph using the given parameters."""
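        # Trim outliers outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] before plotting,
        # then spread the remaining range over a fixed 200 bins.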
        quart1 = self.data[x_axis_label].quantile(0.25)
        quart3 = self.data[x_axis_label].quantile(0.75)
        inter_quart_range = quart3 - quart1
        data_out = self.data[~((self.data[x_axis_label] <
                                (quart1 - 1.5 * inter_quart_range)) |
                               (self.data[x_axis_label] >
                                (quart3 + 1.5 * inter_quart_range)))]
        fig = px.histogram(data_out, x=x_axis_label,
                           title="Config: " + self.config + "<br>Test Suite: "
                           + self.suite + "<br>" + self.test
                           + "<br>(Outliers removed using Interquartile Range)"
                           + "<br>Burst Size: " + burst + ", Buffer Size: " +
                           buffer, height=1400, width=2400)
        max_val = data_out[x_axis_label].max()
        min_val = data_out[x_axis_label].min()
        fig.update_traces(xbins=dict(
            start=min_val,
            end=max_val,
            size=(max_val - min_val) / 200
        ))
        self.save_graph(fig, x_axis_label.replace(' ', '_'))
        self.graph_num += 1


def cleanup_throughput_datatypes(data):
    """Clean up data types of the throughput test results dataframe."""
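    # Replace '/' with a space and strip whitespace in the column headers so
    # they match the labels used by the graphing code, then cast burst/buffer
    # sizes to categories and the measured metrics to numeric types.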
    data.columns = data.columns.str.replace('/', ' ')
    data.columns = data.columns.str.strip()
    data['Burst Size'] = data['Burst Size'].astype('category')
    data['Buffer Size(B)'] = data['Buffer Size(B)'].astype('category')
    data['Failed Enq'] = data['Failed Enq'].astype('int')
    data['Throughput(Gbps)'] = data['Throughput(Gbps)'].astype('float')
    data['Ops(Millions)'] = data['Ops(Millions)'].astype('float')
    data['Cycles Buf'] = data['Cycles Buf'].astype('float')
    return data


def cleanup_latency_datatypes(data):
    """Clean up data types of the latency test results dataframe."""
    data.columns = data.columns.str.strip()
    data = data[['Burst Size', 'Buffer Size', 'time (us)']].copy()
    data['Burst Size'] = data['Burst Size'].astype('category')
    data['Buffer Size'] = data['Buffer Size'].astype('category')
    data['time (us)'] = data['time (us)'].astype('float')
    return data


def process_test_results(grapher, data):
    """
    Process results from the test case,
    calling graph functions to output graph images.
    """
    if grapher.ptest == "throughput":
        grapher.data = cleanup_throughput_datatypes(data)
        for y_label in ["Throughput(Gbps)", "Ops(Millions)",
                        "Cycles Buf", "Failed Enq"]:
            grapher.grouped_graph(y_label, "Buffer Size(B)",
                                  "Burst Size")
    elif grapher.ptest == "latency":
        clean_data = cleanup_latency_datatypes(data)
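        # Latency results are graphed per (burst size, buffer size) pair:
        # one histogram and one boxplot for each combination.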
        for (burst, buffer), group in clean_data.groupby(['Burst Size',
                                                          'Buffer Size']):
            grapher.data = group
            grapher.histogram_graph("time (us)", burst, buffer)
            grapher.boxplot_graph("time (us)", burst, buffer)
    else:
        print("Invalid ptest")
        return


def create_results_pdf(graph_path, pdf_path):
    """Output results graphs to PDFs."""
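    # Each subdirectory of graph_path holds the JPGs for one metric; sort them
    # numerically by graph number so the PDF pages follow the test order.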
    if not os.path.exists(pdf_path):
        os.makedirs(pdf_path)
    for _, dirs, _ in os.walk(graph_path):
        for sub in dirs:
            graphs = sorted(glob.glob(os.path.join(graph_path, sub, "*.jpg")),
                            key=(lambda x: int((x.rsplit('/', 1)[1])
                                               .split('.')[0])))
            if graphs:
                with open(pdf_path + "%s_results.pdf" % sub, "wb") as pdf_file:
                    pdf_file.write(img2pdf.convert(graphs))


def run_test(test_cmd, test, grapher, params, verbose):
    """Run performance test app for the given test case parameters."""
    process = subprocess.Popen(["stdbuf", "-oL", test_cmd] + params,
                               universal_newlines=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    columns = []
    rows = []
    if verbose:
        print("\n\tOutput for " + test + ":")
    while process.poll() is None:
        line = process.stdout.readline().strip()
        if not line:
            continue
        if verbose:
            print("\t\t>>" + line)

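        # The line beginning with '#lcore' is the CSV header: strip the
        # leading '#' and use it for the column names. Numeric lines are data
        # rows (';' is normalised to ',' before splitting).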
        if line.replace(' ', '').startswith('#lcore'):
            columns = line[1:].split(',')
        elif line[0].isdigit():
            line = line.replace(';', ',')
            rows.append(line.split(','))
        else:
            continue

    if process.poll() != 0 or not columns or not rows:
        print("\n\t" + test + ": FAIL")
        return
    data = pd.DataFrame(rows, columns=columns)
    grapher.test = test
    process_test_results(grapher, data)
    print("\n\t" + test + ": OK")
    return


def parse_parameters(config_parameters):
    """Convert the JSON config options to a list of command-line strings."""
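    # Booleans become bare '--flag' switches when true, single-character keys
    # short options with a separate value, and everything else '--key=value'.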
    params = []
    for (key, val) in config_parameters:
        if isinstance(val, bool):
            if val:
                params.append("--" + key)
        elif len(key) == 1:
            params.append("-" + key)
            params.append(val)
        else:
            params.append("--" + key + "=" + val)
    return params


def run_test_suite(test_cmd, suite_config, verbose):
    """Parse test cases for the test suite and run each test."""
    print("\nRunning Test Suite: " + suite_config['suite'])
    graph_path = os.path.join(suite_config['output_path'], GRAPH_DIR,
                              suite_config['suite'], "")
    grapher = Grapher(suite_config['config_name'], suite_config['suite'],
                      graph_path)
    test_cases = suite_config['test_cases']
    if 'default' not in test_cases:
        print("Test Suite must contain default case, skipping")
        return

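    # Build the shared command line: EAL options first, then the '--'
    # separator, then the app options from the default case.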
    default_params = parse_parameters(test_cases['default']['eal'].items())
    default_params.append("--")
    default_params += parse_parameters(test_cases['default']['app'].items())

    if 'ptest' not in test_cases['default']['app']:
        print("Test Suite must contain default ptest value, skipping")
        return
    grapher.ptest = test_cases['default']['app']['ptest']

    for (test, params) in {k: v for (k, v) in test_cases.items() if
                           k != "default"}.items():
        extra_params = parse_parameters(params.items())
        run_test(test_cmd, test, grapher, default_params + extra_params,
                 verbose)

    create_results_pdf(graph_path, os.path.join(suite_config['output_path'],
                                                suite_config['suite'], ""))


def parse_args():
    """Parse the command-line arguments passed to the script."""
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('config_path', type=str,
                        help="Path to JSON configuration file")
    parser.add_argument('-t', '--test-suites', nargs='+', default=["all"],
                        help="List of test suites to run")
    parser.add_argument('-v', '--verbose', action='store_true',
                        help="""Display perf test app output.
                        Not recommended for latency tests.""")
    parser.add_argument('-f', '--file-path',
                        default=shutil.which('dpdk-test-crypto-perf'),
                        help="Path for perf test app")
    parser.add_argument('-o', '--output-path', default=SCRIPT_PATH,
                        help="Path to store output directories")
    args = parser.parse_args()
    return (args.file_path, args.test_suites, args.config_path,
            args.output_path, args.verbose)


def main():
    """
    Load JSON config and call relevant functions to run chosen test suites.
    """
    test_cmd, test_suites, config_file, output_path, verbose = parse_args()
    if test_cmd is None or not os.path.isfile(test_cmd):
        print("Invalid filepath for perf test app!")
        return
    try:
        with open(config_file) as conf:
            test_suite_ops = json.load(conf)
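            # Name the output directories after the config file's base name
            # (without extension).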
            config_name = os.path.splitext(config_file)[0]
            if '/' in config_name:
                config_name = config_name.rsplit('/', 1)[1]
            output_path = os.path.join(output_path, config_name, "")
            print("Using config: " + config_file)
    except OSError as err:
        print("Error with JSON file path: " + err.strerror)
        return
    except json.decoder.JSONDecodeError as err:
        print("Error loading JSON config: " + err.msg)
        return

    if test_suites != ["all"]:
        suite_list = []
        for (suite, test_cases) in {k: v for (k, v) in test_suite_ops.items()
                                    if k in test_suites}.items():
            suite_list.append(suite)
            suite_config = {'config_name': config_name, 'suite': suite,
                            'test_cases': test_cases,
                            'output_path': output_path}
            run_test_suite(test_cmd, suite_config, verbose)
        if not suite_list:
            print("No valid test suites chosen!")
            return
    else:
        for (suite, test_cases) in test_suite_ops.items():
            suite_config = {'config_name': config_name, 'suite': suite,
                            'test_cases': test_cases,
                            'output_path': output_path}
            run_test_suite(test_cmd, suite_config, verbose)

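    # The intermediate JPG directory is only needed while building the PDFs.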
    graph_path = os.path.join(output_path, GRAPH_DIR, "")
    if os.path.exists(graph_path):
        shutil.rmtree(graph_path)


if __name__ == "__main__":
    main()