xref: /spdk/scripts/perf/nvmf/run_nvmf.py (revision f869197b76ff6981e901b6d9a05789e1b993494a)
1#!/usr/bin/env python3
2
3from json.decoder import JSONDecodeError
4import os
5import re
6import sys
7import argparse
8import json
9import zipfile
10import threading
11import subprocess
12import itertools
13import configparser
14import time
15import uuid
16from collections import OrderedDict
17
18import paramiko
19import pandas as pd
20from common import *
21
22sys.path.append(os.path.dirname(__file__) + '/../../../python')
23
24import spdk.rpc as rpc  # noqa
25import spdk.rpc.client as rpc_client  # noqa
26
27
class Server:
    """Base class for a host taking part in an NVMe-oF benchmark run.

    Implements the system-configuration steps shared by target and initiator
    machines (kernel drivers, systemd services, sysctl, tuned, CPU governor,
    IRQ affinity and optional ADQ setup) and records the pre-test state of
    each setting so it can be restored after the test finishes.
    """

    def __init__(self, name, general_config, server_config):
        self.name = name
        self.username = general_config["username"]
        self.password = general_config["password"]
        self.transport = general_config["transport"].lower()
        self.nic_ips = server_config["nic_ips"]
        self.mode = server_config["mode"]

        # Location of the Mellanox OFED helper scripts (set_irq_affinity.sh);
        # may be overridden per server in the configuration file.
        self.irq_scripts_dir = "/usr/src/local/mlnx-tools/ofed_scripts"
        if "irq_scripts_dir" in server_config and server_config["irq_scripts_dir"]:
            self.irq_scripts_dir = server_config["irq_scripts_dir"]

        self.local_nic_info = []
        self._nics_json_obj = {}
        # *_restore_* members remember pre-test settings for later rollback.
        self.svc_restore_dict = {}
        self.sysctl_restore_dict = {}
        self.tuned_restore_dict = {}
        self.governor_restore = ""
        self.tuned_profile = ""

        self.enable_adq = False
        self.adq_priority = None
        if "adq_enable" in server_config and server_config["adq_enable"]:
            self.enable_adq = server_config["adq_enable"]
            self.adq_priority = 1

        if "tuned_profile" in server_config:
            self.tuned_profile = server_config["tuned_profile"]

        # The server name ends up in file names and identifiers, so keep it
        # strictly alphanumeric.
        if not re.match("^[A-Za-z0-9]*$", name):
            self.log_print("Please use a name which contains only letters or numbers")
            sys.exit(1)

    def log_print(self, msg):
        """Print *msg* prefixed with this server's name, flushing immediately."""
        print("[%s] %s" % (self.name, msg), flush=True)

    @staticmethod
    def get_uncommented_lines(lines):
        """Return *lines* with empty lines and '#'-comment lines filtered out."""
        return [line for line in lines if line and not line.startswith('#')]

    def get_nic_name_by_ip(self, ip):
        """Return the name of the network interface holding address *ip*.

        Interface information is fetched lazily from ``ip -j address show``
        and cached. Returns None if no interface owns the address.
        """
        if not self._nics_json_obj:
            nics_json_obj = self.exec_cmd(["ip", "-j", "address", "show"])
            self._nics_json_obj = list(filter(lambda x: x["addr_info"], json.loads(nics_json_obj)))
        for nic in self._nics_json_obj:
            for addr in nic["addr_info"]:
                # BUGFIX: compare for equality instead of substring match;
                # previously "10.0.0.1" would also match an interface that
                # holds "10.0.0.10".
                if ip == addr["local"]:
                    return nic["ifname"]

    def set_local_nic_info_helper(self):
        """Fetch raw hardware description; overridden by subclasses."""
        pass

    def set_local_nic_info(self, pci_info):
        """Collect all network-class devices from *pci_info* (lshw JSON tree)."""
        def extract_network_elements(json_obj):
            # Recursively walk nested lists/dicts, gathering nodes whose
            # "class" field contains "network".
            nic_list = []
            if isinstance(json_obj, list):
                for x in json_obj:
                    nic_list.extend(extract_network_elements(x))
            elif isinstance(json_obj, dict):
                if "children" in json_obj:
                    nic_list.extend(extract_network_elements(json_obj["children"]))
                if "class" in json_obj.keys() and "network" in json_obj["class"]:
                    nic_list.append(json_obj)
            return nic_list

        self.local_nic_info = extract_network_elements(pci_info)

    # pylint: disable=R0201
    def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
        """Execute *cmd* and return its output; overridden by subclasses."""
        return ""

    def configure_system(self):
        """Run every system-level tuning step required for the benchmark."""
        self.load_drivers()
        self.configure_services()
        self.configure_sysctl()
        self.configure_tuned()
        self.configure_cpu_governor()
        self.configure_irq_affinity()

    def load_drivers(self):
        """Load NVMe-oF host and target kernel modules for the transport."""
        self.log_print("Loading drivers")
        self.exec_cmd(["sudo", "modprobe", "-a",
                       "nvme-%s" % self.transport,
                       "nvmet-%s" % self.transport])
        # Kernel-mode targets may back subsystems with null_blk devices.
        if self.mode == "kernel" and hasattr(self, "null_block") and self.null_block:
            self.exec_cmd(["sudo", "modprobe", "null_blk",
                           "nr_devices=%s" % self.null_block])

    def configure_adq(self):
        """Enable ADQ: load required modules and configure the NIC ports."""
        if self.mode == "kernel":
            self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
            return
        self.adq_load_modules()
        self.adq_configure_nic()

    def adq_load_modules(self):
        """Load the traffic-control kernel modules that ADQ depends on."""
        self.log_print("Modprobing ADQ-related Linux modules...")
        adq_module_deps = ["sch_mqprio", "act_mirred", "cls_flower"]
        for module in adq_module_deps:
            try:
                self.exec_cmd(["sudo", "modprobe", module])
                self.log_print("%s loaded!" % module)
            except CalledProcessError as e:
                self.log_print("ERROR: failed to load module %s" % module)
                self.log_print("%s resulted in error: %s" % (e.cmd, e.output))

    def adq_configure_tc(self):
        """Create ADQ traffic classes, qdiscs and flower filters on each NIC."""
        self.log_print("Configuring ADQ Traffic classes and filters...")

        if self.mode == "kernel":
            self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
            return

        num_queues_tc0 = 2  # 2 is minimum number of queues for TC0
        num_queues_tc1 = self.num_cores
        # Targets classify by destination port, initiators by source port.
        port_param = "dst_port" if isinstance(self, Target) else "src_port"
        port = "4420"
        xps_script_path = os.path.join(self.spdk_dir, "scripts", "perf", "nvmf", "set_xps_rxqs")

        for nic_ip in self.nic_ips:
            nic_name = self.get_nic_name_by_ip(nic_ip)
            # Two traffic classes: TC0 (default) and TC1 (NVMe-oF traffic).
            tc_qdisc_map_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name,
                                "root", "mqprio", "num_tc", "2", "map", "0", "1",
                                "queues", "%s@0" % num_queues_tc0,
                                "%s@%s" % (num_queues_tc1, num_queues_tc0),
                                "hw", "1", "mode", "channel"]
            self.log_print(" ".join(tc_qdisc_map_cmd))
            self.exec_cmd(tc_qdisc_map_cmd)

            # Give the qdisc change time to settle before adding more rules.
            time.sleep(5)
            tc_qdisc_ingress_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name, "ingress"]
            self.log_print(" ".join(tc_qdisc_ingress_cmd))
            self.exec_cmd(tc_qdisc_ingress_cmd)

            # Steer NVMe-oF TCP traffic for this IP/port into TC1 in hardware.
            tc_filter_cmd = ["sudo", "tc", "filter", "add", "dev", nic_name,
                             "protocol", "ip", "ingress", "prio", "1", "flower",
                             "dst_ip", "%s/32" % nic_ip, "ip_proto", "tcp", port_param, port,
                             "skip_sw", "hw_tc", "1"]
            self.log_print(" ".join(tc_filter_cmd))
            self.exec_cmd(tc_filter_cmd)

            # show tc configuration
            self.log_print("Show tc configuration for %s NIC..." % nic_name)
            tc_disk_out = self.exec_cmd(["sudo", "tc", "qdisc", "show", "dev", nic_name])
            tc_filter_out = self.exec_cmd(["sudo", "tc", "filter", "show", "dev", nic_name, "ingress"])
            self.log_print("%s" % tc_disk_out)
            self.log_print("%s" % tc_filter_out)

            # Ethtool coalesce settings must be applied after configuring traffic classes
            self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-rx", "off", "rx-usecs", "0"])
            self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-tx", "off", "tx-usecs", "500"])

            self.log_print("Running set_xps_rxqs script for %s NIC..." % nic_name)
            xps_cmd = ["sudo", xps_script_path, nic_name]
            self.log_print(xps_cmd)
            self.exec_cmd(xps_cmd)

    def reload_driver(self, driver):
        """Remove and re-insert kernel module *driver* to reset its state."""
        try:
            self.exec_cmd(["sudo", "rmmod", driver])
            self.exec_cmd(["sudo", "modprobe", driver])
        except CalledProcessError as e:
            self.log_print("ERROR: failed to reload %s module!" % driver)
            self.log_print("%s resulted in error: %s" % (e.cmd, e.output))

    def adq_configure_nic(self):
        """Set the ethtool features/priv-flags required for ADQ on all NICs."""
        self.log_print("Configuring NIC port settings for ADQ testing...")

        # Reload the driver first, to make sure any previous settings are re-set.
        self.reload_driver("ice")

        nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
        for nic in nic_names:
            self.log_print(nic)
            try:
                self.exec_cmd(["sudo", "ethtool", "-K", nic,
                               "hw-tc-offload", "on"])  # Enable hardware TC offload
                self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
                               "channel-inline-flow-director", "on"])  # Enable Intel Flow Director
                self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic, "fw-lldp-agent", "off"])  # Disable LLDP
                # As temporary workaround for ADQ, channel packet inspection optimization is turned on during connection establishment.
                # Then turned off before fio ramp_up expires in ethtool_after_fio_ramp().
                self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
                               "channel-pkt-inspect-optimize", "on"])
            except CalledProcessError as e:
                self.log_print("ERROR: failed to configure NIC port using ethtool!")
                self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
                self.log_print("Please update your NIC driver and firmware versions and try again.")
            self.log_print(self.exec_cmd(["sudo", "ethtool", "-k", nic]))
            self.log_print(self.exec_cmd(["sudo", "ethtool", "--show-priv-flags", nic]))

    def configure_services(self):
        """Stop services that disturb benchmarks; remember their prior state."""
        self.log_print("Configuring active services...")
        svc_config = configparser.ConfigParser(strict=False)

        # Below list is valid only for RHEL / Fedora systems and might not
        # contain valid names for other distributions.
        svc_target_state = {
            "firewalld": "inactive",
            "irqbalance": "inactive",
            "lldpad.service": "inactive",
            "lldpad.socket": "inactive"
        }

        for service in svc_target_state:
            # `systemctl show` emits key=value pairs; wrap them in a section
            # header so configparser can digest the output.
            out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
            out = "\n".join(["[%s]" % service, out])
            svc_config.read_string(out)

            if "LoadError" in svc_config[service] and "not found" in svc_config[service]["LoadError"]:
                continue

            service_state = svc_config[service]["ActiveState"]
            self.log_print("Current state of %s service is %s" % (service, service_state))
            self.svc_restore_dict.update({service: service_state})
            if service_state != "inactive":
                self.log_print("Disabling %s. It will be restored after the test has finished." % service)
                self.exec_cmd(["sudo", "systemctl", "stop", service])

    def configure_sysctl(self):
        """Apply network/memory sysctl tuning; remember prior values."""
        self.log_print("Tuning sysctl settings...")

        # busy_read polling only benefits ADQ-enabled SPDK runs.
        busy_read = 0
        if self.enable_adq and self.mode == "spdk":
            busy_read = 1

        sysctl_opts = {
            "net.core.busy_poll": 0,
            "net.core.busy_read": busy_read,
            "net.core.somaxconn": 4096,
            "net.core.netdev_max_backlog": 8192,
            "net.ipv4.tcp_max_syn_backlog": 16384,
            "net.core.rmem_max": 268435456,
            "net.core.wmem_max": 268435456,
            "net.ipv4.tcp_mem": "268435456 268435456 268435456",
            "net.ipv4.tcp_rmem": "8192 1048576 33554432",
            "net.ipv4.tcp_wmem": "8192 1048576 33554432",
            "net.ipv4.route.flush": 1,
            "vm.overcommit_memory": 1,
        }

        for opt, value in sysctl_opts.items():
            self.sysctl_restore_dict.update({opt: self.exec_cmd(["sysctl", "-n", opt]).strip()})
            self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())

    def configure_tuned(self):
        """Activate the configured tuned-adm profile; remember prior profile."""
        if not self.tuned_profile:
            self.log_print("WARNING: Tuned profile not set in configuration file. Skipping configuration.")
            return

        self.log_print("Configuring tuned-adm profile to %s." % self.tuned_profile)
        service = "tuned"
        tuned_config = configparser.ConfigParser(strict=False)

        out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
        out = "\n".join(["[%s]" % service, out])
        tuned_config.read_string(out)
        tuned_state = tuned_config[service]["ActiveState"]
        self.svc_restore_dict.update({service: tuned_state})

        if tuned_state != "inactive":
            profile = self.exec_cmd(["cat", "/etc/tuned/active_profile"]).strip()
            profile_mode = self.exec_cmd(["cat", "/etc/tuned/profile_mode"]).strip()

            self.tuned_restore_dict = {
                "profile": profile,
                "mode": profile_mode
            }

        self.exec_cmd(["sudo", "systemctl", "start", service])
        self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_profile])
        self.log_print("Tuned profile set to %s." % self.exec_cmd(["cat", "/etc/tuned/active_profile"]))

    def configure_cpu_governor(self):
        """Switch CPU frequency scaling to 'performance'; remember prior one."""
        self.log_print("Setting CPU governor to performance...")

        # This assumes that there is the same CPU scaling governor on each CPU
        self.governor_restore = self.exec_cmd(["cat", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"]).strip()
        self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "performance"])

    def configure_irq_affinity(self):
        """Run set_irq_affinity.sh for every benchmark NIC."""
        self.log_print("Setting NIC irq affinity for NICs...")

        irq_script_path = os.path.join(self.irq_scripts_dir, "set_irq_affinity.sh")
        nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
        for nic in nic_names:
            irq_cmd = ["sudo", irq_script_path, nic]
            self.log_print(irq_cmd)
            self.exec_cmd(irq_cmd, change_dir=self.irq_scripts_dir)

    def restore_services(self):
        """Restart/stop services to the state recorded before the test."""
        self.log_print("Restoring services...")
        for service, state in self.svc_restore_dict.items():
            cmd = "stop" if state == "inactive" else "start"
            self.exec_cmd(["sudo", "systemctl", cmd, service])

    def restore_sysctl(self):
        """Write back the original sysctl values recorded earlier."""
        self.log_print("Restoring sysctl settings...")
        for opt, value in self.sysctl_restore_dict.items():
            self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())

    def restore_tuned(self):
        """Re-activate the tuned profile that was active before the test."""
        self.log_print("Restoring tuned-adm settings...")

        if not self.tuned_restore_dict:
            return

        if self.tuned_restore_dict["mode"] == "auto":
            self.exec_cmd(["sudo", "tuned-adm", "auto_profile"])
            self.log_print("Reverted tuned-adm to auto_profile.")
        else:
            self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_restore_dict["profile"]])
            self.log_print("Reverted tuned-adm to %s profile." % self.tuned_restore_dict["profile"])

    def restore_governor(self):
        """Re-apply the CPU governor that was active before the test."""
        self.log_print("Restoring CPU governor setting...")
        if self.governor_restore:
            self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", self.governor_restore])
            self.log_print("Reverted CPU governor to %s." % self.governor_restore)
348
349
350class Target(Server):
    def __init__(self, name, general_config, target_config):
        """Initialize the NVMe-oF target host.

        Reads measurement and scheduler settings from *target_config*,
        discovers local NICs, optionally zips the SPDK sources for transfer
        to initiators, and applies full system configuration (including ADQ
        when enabled). Note: runs external commands via exec_cmd().
        """
        super().__init__(name, general_config, target_config)

        # Defaults
        self.enable_sar = False
        self.sar_delay = 0
        self.sar_interval = 0
        self.sar_count = 0
        self.enable_pcm = False
        self.pcm_dir = ""
        self.pcm_delay = 0
        self.pcm_interval = 0
        self.pcm_count = 0
        self.enable_bandwidth = 0
        self.bandwidth_count = 0
        self.enable_dpdk_memory = False
        self.dpdk_wait_time = 0
        self.enable_zcopy = False
        self.scheduler_name = "static"
        self.null_block = 0
        # Cache local interface info up front (unlike the lazy base class).
        self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
        self.subsystem_info_list = []
        self.initiator_info = []

        # Optional per-test settings; each overrides the default above.
        if "null_block_devices" in target_config:
            self.null_block = target_config["null_block_devices"]
        if "sar_settings" in target_config:
            self.enable_sar, self.sar_delay, self.sar_interval, self.sar_count = target_config["sar_settings"]
        if "pcm_settings" in target_config:
            self.enable_pcm = True
            self.pcm_dir, self.pcm_delay, self.pcm_interval, self.pcm_count = target_config["pcm_settings"]
        if "enable_bandwidth" in target_config:
            self.enable_bandwidth, self.bandwidth_count = target_config["enable_bandwidth"]
        if "enable_dpdk_memory" in target_config:
            self.enable_dpdk_memory, self.dpdk_wait_time = target_config["enable_dpdk_memory"]
        if "scheduler_settings" in target_config:
            self.scheduler_name = target_config["scheduler_settings"]
        if "zcopy_settings" in target_config:
            self.enable_zcopy = target_config["zcopy_settings"]
        if "results_dir" in target_config:
            self.results_dir = target_config["results_dir"]

        # SPDK tree is assumed to be three levels above this script.
        self.script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
        self.spdk_dir = os.path.abspath(os.path.join(self.script_dir, "../../../"))
        self.set_local_nic_info(self.set_local_nic_info_helper())

        # Initiators fetch SPDK sources from this zip unless told to skip.
        if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
            self.zip_spdk_sources(self.spdk_dir, "/tmp/spdk.zip")

        self.configure_system()
        if self.enable_adq:
            self.configure_adq()
        self.sys_config()
404
405    def set_local_nic_info_helper(self):
406        return json.loads(self.exec_cmd(["lshw", "-json"]))
407
408    def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
409        stderr_opt = None
410        if stderr_redirect:
411            stderr_opt = subprocess.STDOUT
412        if change_dir:
413            old_cwd = os.getcwd()
414            os.chdir(change_dir)
415            self.log_print("Changing directory to %s" % change_dir)
416
417        out = check_output(cmd, stderr=stderr_opt).decode(encoding="utf-8")
418
419        if change_dir:
420            os.chdir(old_cwd)
421            self.log_print("Changing directory to %s" % old_cwd)
422        return out
423
424    def zip_spdk_sources(self, spdk_dir, dest_file):
425        self.log_print("Zipping SPDK source directory")
426        fh = zipfile.ZipFile(dest_file, "w", zipfile.ZIP_DEFLATED)
427        for root, _directories, files in os.walk(spdk_dir, followlinks=True):
428            for file in files:
429                fh.write(os.path.relpath(os.path.join(root, file)))
430        fh.close()
431        self.log_print("Done zipping")
432
433    @staticmethod
434    def _chunks(input_list, chunks_no):
435        div, rem = divmod(len(input_list), chunks_no)
436        for i in range(chunks_no):
437            si = (div + 1) * (i if i < rem else rem) + div * (0 if i < rem else i - rem)
438            yield input_list[si:si + (div + 1 if i < rem else div)]
439
440    def spread_bdevs(self, req_disks):
441        # Spread available block devices indexes:
442        # - evenly across available initiator systems
443        # - evenly across available NIC interfaces for
444        #   each initiator
445        # Not NUMA aware.
446        ip_bdev_map = []
447        initiator_chunks = self._chunks(range(0, req_disks), len(self.initiator_info))
448
449        for i, (init, init_chunk) in enumerate(zip(self.initiator_info, initiator_chunks)):
450            self.initiator_info[i]["bdev_range"] = init_chunk
451            init_chunks_list = list(self._chunks(init_chunk, len(init["target_nic_ips"])))
452            for ip, nic_chunk in zip(self.initiator_info[i]["target_nic_ips"], init_chunks_list):
453                for c in nic_chunk:
454                    ip_bdev_map.append((ip, c))
455        return ip_bdev_map
456
457    @staticmethod
458    def read_json_stats(file):
459        with open(file, "r") as json_data:
460            data = json.load(json_data)
461            job_pos = 0  # job_post = 0 because using aggregated results
462
463            # Check if latency is in nano or microseconds to choose correct dict key
464            def get_lat_unit(key_prefix, dict_section):
465                # key prefix - lat, clat or slat.
466                # dict section - portion of json containing latency bucket in question
467                # Return dict key to access the bucket and unit as string
468                for k, _ in dict_section.items():
469                    if k.startswith(key_prefix):
470                        return k, k.split("_")[1]
471
472            def get_clat_percentiles(clat_dict_leaf):
473                if "percentile" in clat_dict_leaf:
474                    p99_lat = float(clat_dict_leaf["percentile"]["99.000000"])
475                    p99_9_lat = float(clat_dict_leaf["percentile"]["99.900000"])
476                    p99_99_lat = float(clat_dict_leaf["percentile"]["99.990000"])
477                    p99_999_lat = float(clat_dict_leaf["percentile"]["99.999000"])
478
479                    return [p99_lat, p99_9_lat, p99_99_lat, p99_999_lat]
480                else:
481                    # Latest fio versions do not provide "percentile" results if no
482                    # measurements were done, so just return zeroes
483                    return [0, 0, 0, 0]
484
485            read_iops = float(data["jobs"][job_pos]["read"]["iops"])
486            read_bw = float(data["jobs"][job_pos]["read"]["bw"])
487            lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["read"])
488            read_avg_lat = float(data["jobs"][job_pos]["read"][lat_key]["mean"])
489            read_min_lat = float(data["jobs"][job_pos]["read"][lat_key]["min"])
490            read_max_lat = float(data["jobs"][job_pos]["read"][lat_key]["max"])
491            clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["read"])
492            read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat = get_clat_percentiles(
493                data["jobs"][job_pos]["read"][clat_key])
494
495            if "ns" in lat_unit:
496                read_avg_lat, read_min_lat, read_max_lat = [x / 1000 for x in [read_avg_lat, read_min_lat, read_max_lat]]
497            if "ns" in clat_unit:
498                read_p99_lat = read_p99_lat / 1000
499                read_p99_9_lat = read_p99_9_lat / 1000
500                read_p99_99_lat = read_p99_99_lat / 1000
501                read_p99_999_lat = read_p99_999_lat / 1000
502
503            write_iops = float(data["jobs"][job_pos]["write"]["iops"])
504            write_bw = float(data["jobs"][job_pos]["write"]["bw"])
505            lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["write"])
506            write_avg_lat = float(data["jobs"][job_pos]["write"][lat_key]["mean"])
507            write_min_lat = float(data["jobs"][job_pos]["write"][lat_key]["min"])
508            write_max_lat = float(data["jobs"][job_pos]["write"][lat_key]["max"])
509            clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["write"])
510            write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat = get_clat_percentiles(
511                data["jobs"][job_pos]["write"][clat_key])
512
513            if "ns" in lat_unit:
514                write_avg_lat, write_min_lat, write_max_lat = [x / 1000 for x in [write_avg_lat, write_min_lat, write_max_lat]]
515            if "ns" in clat_unit:
516                write_p99_lat = write_p99_lat / 1000
517                write_p99_9_lat = write_p99_9_lat / 1000
518                write_p99_99_lat = write_p99_99_lat / 1000
519                write_p99_999_lat = write_p99_999_lat / 1000
520
521        return [read_iops, read_bw, read_avg_lat, read_min_lat, read_max_lat,
522                read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat,
523                write_iops, write_bw, write_avg_lat, write_min_lat, write_max_lat,
524                write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat]
525
    def parse_results(self, results_dir, csv_file):
        """Aggregate per-initiator fio JSON results into CSV summaries.

        For every .fio config in *results_dir*: average the repeated runs of
        each initiator (written to a per-initiator CSV), then sum IOPS/BW and
        average latencies across initiators, finally collapsing read/write
        columns into combined values weighted by the job's rwmixread ratio.
        The aggregate rows are appended to *csv_file* in *results_dir*.
        """
        files = os.listdir(results_dir)
        fio_files = filter(lambda x: ".fio" in x, files)
        json_files = [x for x in files if ".json" in x]

        headers = ["read_iops", "read_bw", "read_avg_lat_us", "read_min_lat_us", "read_max_lat_us",
                   "read_p99_lat_us", "read_p99.9_lat_us", "read_p99.99_lat_us", "read_p99.999_lat_us",
                   "write_iops", "write_bw", "write_avg_lat_us", "write_min_lat_us", "write_max_lat_us",
                   "write_p99_lat_us", "write_p99.9_lat_us", "write_p99.99_lat_us", "write_p99.999_lat_us"]

        aggr_headers = ["iops", "bw", "avg_lat_us", "min_lat_us", "max_lat_us",
                        "p99_lat_us", "p99.9_lat_us", "p99.99_lat_us", "p99.999_lat_us"]

        header_line = ",".join(["Name", *headers])
        aggr_header_line = ",".join(["Name", *aggr_headers])

        # Create empty results file
        with open(os.path.join(results_dir, csv_file), "w") as fh:
            fh.write(aggr_header_line + "\n")
        rows = set()

        for fio_config in fio_files:
            self.log_print("Getting FIO stats for %s" % fio_config)
            job_name, _ = os.path.splitext(fio_config)

            # Look in the filename for rwmixread value. Function arguments do
            # not have that information.
            # TODO: Improve this function by directly using workload params instead
            # of regexing through filenames.
            if "read" in job_name:
                rw_mixread = 1
            elif "write" in job_name:
                rw_mixread = 0
            else:
                # Mixed workload file names carry the read percentage as "m_NN".
                rw_mixread = float(re.search(r"m_(\d+)", job_name).group(1)) / 100

            # If "_CPU" exists in name - ignore it
            # Initiators for the same job could have different num_cores parameter
            job_name = re.sub(r"_\d+CPU", "", job_name)
            job_result_files = [x for x in json_files if x.startswith(job_name)]
            self.log_print("Matching result files for current fio config:")
            for j in job_result_files:
                self.log_print("\t %s" % j)

            # There may have been more than 1 initiator used in test, need to check that
            # Result files are created so that string after last "_" separator is server name
            inits_names = set([os.path.splitext(x)[0].split("_")[-1] for x in job_result_files])
            inits_avg_results = []
            for i in inits_names:
                self.log_print("\tGetting stats for initiator %s" % i)
                # There may have been more than 1 test run for this job, calculate average results for initiator
                i_results = [x for x in job_result_files if i in x]
                # Per-initiator CSV name: drop the "run_N_" prefix, swap extension.
                i_results_filename = re.sub(r"run_\d+_", "", i_results[0].replace("json", "csv"))

                separate_stats = []
                for r in i_results:
                    try:
                        stats = self.read_json_stats(os.path.join(results_dir, r))
                        separate_stats.append(stats)
                        self.log_print(stats)
                    except JSONDecodeError:
                        # Keep going; averages below are computed over whatever parsed.
                        self.log_print("ERROR: Failed to parse %s results! Results might be incomplete!" % r)

                # Element-wise average across this initiator's repeated runs.
                init_results = [sum(x) for x in zip(*separate_stats)]
                init_results = [x / len(separate_stats) for x in init_results]
                inits_avg_results.append(init_results)

                self.log_print("\tAverage results for initiator %s" % i)
                self.log_print(init_results)
                with open(os.path.join(results_dir, i_results_filename), "w") as fh:
                    fh.write(header_line + "\n")
                    fh.write(",".join([job_name, *["{0:.3f}".format(x) for x in init_results]]) + "\n")

            # Sum results of all initiators running this FIO job.
            # Latency results are an average of latencies from across all initiators.
            inits_avg_results = [sum(x) for x in zip(*inits_avg_results)]
            inits_avg_results = OrderedDict(zip(headers, inits_avg_results))
            for key in inits_avg_results:
                if "lat" in key:
                    inits_avg_results[key] /= len(inits_names)

            # Aggregate separate read/write values into common labels
            # Take rw_mixread into consideration for mixed read/write workloads.
            aggregate_results = OrderedDict()
            for h in aggr_headers:
                # Exactly two columns match each aggregate header: read_* and write_*.
                read_stat, write_stat = [float(value) for key, value in inits_avg_results.items() if h in key]
                if "lat" in h:
                    # Latencies: weighted average by read/write mix.
                    _ = rw_mixread * read_stat + (1 - rw_mixread) * write_stat
                else:
                    # Throughput metrics (iops, bw): plain sum.
                    _ = read_stat + write_stat
                aggregate_results[h] = "{0:.3f}".format(_)

            rows.add(",".join([job_name, *aggregate_results.values()]))

        # Save results to file
        for row in rows:
            with open(os.path.join(results_dir, csv_file), "a") as fh:
                fh.write(row + "\n")
        self.log_print("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))
625
626    def measure_sar(self, results_dir, sar_file_prefix):
627        cpu_number = os.cpu_count()
628        sar_idle_sum = 0
629        sar_output_file = os.path.join(results_dir, sar_file_prefix + ".txt")
630        sar_cpu_util_file = os.path.join(results_dir, ".".join([sar_file_prefix + "cpu_util", "txt"]))
631
632        self.log_print("Waiting %d seconds for ramp-up to finish before measuring SAR stats" % self.sar_delay)
633        time.sleep(self.sar_delay)
634        self.log_print("Starting SAR measurements")
635
636        out = self.exec_cmd(["sar", "-P", "ALL", "%s" % self.sar_interval, "%s" % self.sar_count])
637        with open(os.path.join(results_dir, sar_output_file), "w") as fh:
638            for line in out.split("\n"):
639                if "Average" in line:
640                    if "CPU" in line:
641                        self.log_print("Summary CPU utilization from SAR:")
642                        self.log_print(line)
643                    elif "all" in line:
644                        self.log_print(line)
645                    else:
646                        sar_idle_sum += float(line.split()[7])
647            fh.write(out)
648        sar_cpu_usage = cpu_number * 100 - sar_idle_sum
649
650        with open(os.path.join(results_dir, sar_cpu_util_file), "w") as f:
651            f.write("%0.2f" % sar_cpu_usage)
652
653    def ethtool_after_fio_ramp(self, fio_ramp_time):
654        time.sleep(fio_ramp_time//2)
655        nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
656        for nic in nic_names:
657            self.log_print(nic)
658            self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
659                           "channel-pkt-inspect-optimize", "off"])  # Disable channel packet inspection optimization
660
661    def measure_pcm_memory(self, results_dir, pcm_file_name):
662        time.sleep(self.pcm_delay)
663        cmd = ["%s/build/bin/pcm-memory" % self.pcm_dir, "%s" % self.pcm_interval, "-csv=%s/%s" % (results_dir, pcm_file_name)]
664        pcm_memory = subprocess.Popen(cmd)
665        time.sleep(self.pcm_count)
666        pcm_memory.terminate()
667
668    def measure_pcm(self, results_dir, pcm_file_name):
669        time.sleep(self.pcm_delay)
670        cmd = ["%s/build/bin/pcm" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count,
671               "-csv=%s/%s" % (results_dir, pcm_file_name)]
672        subprocess.run(cmd)
673        df = pd.read_csv(os.path.join(results_dir, pcm_file_name), header=[0, 1])
674        df = df.rename(columns=lambda x: re.sub(r'Unnamed:[\w\s]*$', '', x))
675        skt = df.loc[:, df.columns.get_level_values(1).isin({'UPI0', 'UPI1', 'UPI2'})]
676        skt_pcm_file_name = "_".join(["skt", pcm_file_name])
677        skt.to_csv(os.path.join(results_dir, skt_pcm_file_name), index=False)
678
679    def measure_pcm_power(self, results_dir, pcm_power_file_name):
680        time.sleep(self.pcm_delay)
681        out = self.exec_cmd(["%s/build/bin/pcm-power" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count])
682        with open(os.path.join(results_dir, pcm_power_file_name), "w") as fh:
683            fh.write(out)
684
685    def measure_network_bandwidth(self, results_dir, bandwidth_file_name):
686        self.log_print("INFO: starting network bandwidth measure")
687        self.exec_cmd(["bwm-ng", "-o", "csv", "-F", "%s/%s" % (results_dir, bandwidth_file_name),
688                       "-a", "1", "-t", "1000", "-c", str(self.bandwidth_count)])
689
690    def measure_dpdk_memory(self, results_dir):
691        self.log_print("INFO: waiting to generate DPDK memory usage")
692        time.sleep(self.dpdk_wait_time)
693        self.log_print("INFO: generating DPDK memory usage")
694        rpc.env.env_dpdk_get_mem_stats
695        os.rename("/tmp/spdk_mem_dump.txt", "%s/spdk_mem_dump.txt" % (results_dir))
696
697    def sys_config(self):
698        self.log_print("====Kernel release:====")
699        self.log_print(os.uname().release)
700        self.log_print("====Kernel command line:====")
701        with open('/proc/cmdline') as f:
702            cmdline = f.readlines()
703            self.log_print('\n'.join(self.get_uncommented_lines(cmdline)))
704        self.log_print("====sysctl conf:====")
705        with open('/etc/sysctl.conf') as f:
706            sysctl = f.readlines()
707            self.log_print('\n'.join(self.get_uncommented_lines(sysctl)))
708        self.log_print("====Cpu power info:====")
709        self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
710        self.log_print("====zcopy settings:====")
711        self.log_print("zcopy enabled: %s" % (self.enable_zcopy))
712        self.log_print("====Scheduler settings:====")
713        self.log_print("SPDK scheduler: %s" % (self.scheduler_name))
714
715
class Initiator(Server):
    """Remote FIO traffic generator, driven over SSH.

    Establishes a paramiko SSH session on construction, prepares the
    remote SPDK working directory, optionally deploys SPDK sources and
    applies system tuning. Kernel-nvme vs SPDK-bdev specifics live in
    the KernelInitiator / SPDKInitiator subclasses.
    """

    def __init__(self, name, general_config, initiator_config):
        super().__init__(name, general_config, initiator_config)

        # Required fields
        self.ip = initiator_config["ip"]
        self.target_nic_ips = initiator_config["target_nic_ips"]

        # Defaults
        self.cpus_allowed = None
        self.cpus_allowed_policy = "shared"
        self.spdk_dir = "/tmp/spdk"
        self.fio_bin = "/usr/src/fio/fio"
        self.nvmecli_bin = "nvme"
        self.cpu_frequency = None
        self.subsystem_info_list = []

        if "spdk_dir" in initiator_config:
            self.spdk_dir = initiator_config["spdk_dir"]
        if "fio_bin" in initiator_config:
            self.fio_bin = initiator_config["fio_bin"]
        if "nvmecli_bin" in initiator_config:
            self.nvmecli_bin = initiator_config["nvmecli_bin"]
        if "cpus_allowed" in initiator_config:
            self.cpus_allowed = initiator_config["cpus_allowed"]
        if "cpus_allowed_policy" in initiator_config:
            self.cpus_allowed_policy = initiator_config["cpus_allowed_policy"]
        if "cpu_frequency" in initiator_config:
            self.cpu_frequency = initiator_config["cpu_frequency"]
        # Environment variable takes precedence over the config file.
        if os.getenv('SPDK_WORKSPACE'):
            self.spdk_dir = os.getenv('SPDK_WORKSPACE')

        self.ssh_connection = paramiko.SSHClient()
        self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
        self.exec_cmd(["sudo", "rm", "-rf", "%s/nvmf_perf" % self.spdk_dir])
        self.exec_cmd(["mkdir", "-p", "%s" % self.spdk_dir])
        self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))

        if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
            self.copy_spdk("/tmp/spdk.zip")
        self.set_local_nic_info(self.set_local_nic_info_helper())
        self.set_cpu_frequency()
        self.configure_system()
        if self.enable_adq:
            self.configure_adq()
        self.sys_config()

    def set_local_nic_info_helper(self):
        """Return hardware info (lshw JSON) gathered on the remote host."""
        return json.loads(self.exec_cmd(["lshw", "-json"]))

    def stop(self):
        """Close the SSH session to the initiator."""
        self.ssh_connection.close()

    def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
        """Run a command on the remote host over SSH and return its stdout.

        :param cmd: command as a list of arguments
        :param stderr_redirect: merge stderr into stdout (uses a PTY)
        :param change_dir: optional directory to cd into before running
        :raises CalledProcessError: when the remote command exits non-zero
        """
        if change_dir:
            cmd = ["cd", change_dir, ";", *cmd]

        # In case one of the command elements contains whitespace and is not
        # already quoted (e.g. when calling sysctl), quote it again to prevent
        # expansion when sending to remote system.
        for i, c in enumerate(cmd):
            if (" " in c or "\t" in c) and not (c.startswith("'") and c.endswith("'")):
                cmd[i] = '"%s"' % c
        cmd = " ".join(cmd)

        # Redirect stderr to stdout thanks using get_pty option if needed
        _, stdout, _ = self.ssh_connection.exec_command(cmd, get_pty=stderr_redirect)
        out = stdout.read().decode(encoding="utf-8")

        # Check the return code
        rc = stdout.channel.recv_exit_status()
        if rc:
            raise CalledProcessError(int(rc), cmd, out)

        return out

    def put_file(self, local, remote_dest):
        """Upload a local file to the initiator via SFTP."""
        ftp = self.ssh_connection.open_sftp()
        ftp.put(local, remote_dest)
        ftp.close()

    def get_file(self, remote, local_dest):
        """Download a file from the initiator via SFTP."""
        ftp = self.ssh_connection.open_sftp()
        ftp.get(remote, local_dest)
        ftp.close()

    def copy_spdk(self, local_spdk_zip):
        """Copy the zipped SPDK sources to the initiator and unpack them
        into self.spdk_dir."""
        self.log_print("Copying SPDK sources to initiator %s" % self.name)
        self.put_file(local_spdk_zip, "/tmp/spdk_drop.zip")
        self.log_print("Copied sources zip from target")
        self.exec_cmd(["unzip", "-qo", "/tmp/spdk_drop.zip", "-d", self.spdk_dir])
        self.log_print("Sources unpacked")

    def copy_result_files(self, dest_dir):
        """Fetch every file from the remote nvmf_perf directory into the
        local dest_dir (created if missing)."""
        self.log_print("Copying results")

        if not os.path.exists(dest_dir):
            os.mkdir(dest_dir)

        # Get list of result files from initiator and copy them back to target
        file_list = self.exec_cmd(["ls", "%s/nvmf_perf" % self.spdk_dir]).strip().split("\n")

        for file in file_list:
            self.get_file(os.path.join(self.spdk_dir, "nvmf_perf", file),
                          os.path.join(dest_dir, file))
        self.log_print("Done copying results")

    def discover_subsystems(self, address_list, subsys_no):
        """Probe target ports with "nvme discover" and record subsystems.

        Scans ports 4420..4420+subsys_no-1 on every address in
        address_list; failed discoveries are expected (not every port
        exists) and ignored. Stores sorted, de-duplicated
        (trsvcid, nqn, traddr) tuples in self.subsystem_info_list.
        """
        num_nvmes = range(0, subsys_no)
        nvme_discover_output = ""
        for ip, subsys_no in itertools.product(address_list, num_nvmes):
            self.log_print("Trying to discover: %s:%s" % (ip, 4420 + subsys_no))
            nvme_discover_cmd = ["sudo",
                                 "%s" % self.nvmecli_bin,
                                 "discover", "-t", "%s" % self.transport,
                                 "-s", "%s" % (4420 + subsys_no),
                                 "-a", "%s" % ip]

            try:
                stdout = self.exec_cmd(nvme_discover_cmd)
                if stdout:
                    nvme_discover_output = nvme_discover_output + stdout
            except CalledProcessError:
                # Do nothing. In case of discovering remote subsystems of kernel target
                # we expect "nvme discover" to fail a bunch of times because we basically
                # scan ports.
                pass

        subsystems = re.findall(r'trsvcid:\s(\d+)\s+'  # get svcid number
                                r'subnqn:\s+([a-zA-Z0-9\.\-\:]+)\s+'  # get NQN id
                                r'traddr:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',  # get IP address
                                nvme_discover_output)  # from nvme discovery output
        subsystems = filter(lambda x: x[-1] in address_list, subsystems)
        subsystems = filter(lambda x: "discovery" not in x[1], subsystems)
        subsystems = list(set(subsystems))
        subsystems.sort(key=lambda x: x[1])
        self.log_print("Found matching subsystems on target side:")
        for s in subsystems:
            self.log_print(s)
        self.subsystem_info_list = subsystems

    def gen_fio_filename_conf(self, *args, **kwargs):
        # Logic implemented in SPDKInitiator and KernelInitiator classes
        pass

    def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no, num_jobs=None, ramp_time=0, run_time=10, rate_iops=0):
        """Generate an FIO job file on the initiator and return its path.

        Builds the [global] section from the workload parameters and the
        ioengine/filename sections from the connected subsystems, then
        writes the file to <spdk_dir>/nvmf_perf/ on the remote host.
        """
        fio_conf_template = """
[global]
ioengine={ioengine}
{spdk_conf}
thread=1
group_reporting=1
direct=1
percentile_list=50:90:99:99.5:99.9:99.99:99.999

norandommap=1
rw={rw}
rwmixread={rwmixread}
bs={block_size}
time_based=1
ramp_time={ramp_time}
runtime={run_time}
rate_iops={rate_iops}
"""
        if "spdk" in self.mode:
            bdev_conf = self.gen_spdk_bdev_conf(self.subsystem_info_list)
            self.exec_cmd(["echo", "'%s'" % bdev_conf, ">", "%s/bdev.conf" % self.spdk_dir])
            ioengine = "%s/build/fio/spdk_bdev" % self.spdk_dir
            spdk_conf = "spdk_json_conf=%s/bdev.conf" % self.spdk_dir
        else:
            ioengine = self.ioengine
            spdk_conf = ""
            out = self.exec_cmd(["sudo", "nvme", "list", "|", "grep", "-E", "'SPDK|Linux'",
                                 "|", "awk", "'{print $1}'"])
            subsystems = [x for x in out.split("\n") if "nvme" in x]

        if self.cpus_allowed is not None:
            self.log_print("Limiting FIO workload execution on specific cores %s" % self.cpus_allowed)
            cpus_num = 0
            cpus = self.cpus_allowed.split(",")
            for cpu in cpus:
                if "-" in cpu:
                    a, b = cpu.split("-")
                    a = int(a)
                    b = int(b)
                    # NOTE(review): len(range(a, b)) counts an "a-b" range
                    # exclusively (e.g. "0-3" -> 3 cores), while
                    # SPDKTarget.get_num_cores treats ranges inclusively —
                    # confirm which is intended.
                    cpus_num += len(range(a, b))
                else:
                    cpus_num += 1
            self.num_cores = cpus_num
            threads = range(0, self.num_cores)
        elif hasattr(self, 'num_cores'):
            self.log_print("Limiting FIO workload execution to %s cores" % self.num_cores)
            threads = range(0, int(self.num_cores))
        else:
            self.num_cores = len(subsystems)
            threads = range(0, len(subsystems))

        if "spdk" in self.mode:
            filename_section = self.gen_fio_filename_conf(self.subsystem_info_list, threads, io_depth, num_jobs)
        else:
            filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs)

        fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
                                              rw=rw, rwmixread=rwmixread, block_size=block_size,
                                              ramp_time=ramp_time, run_time=run_time, rate_iops=rate_iops)

        # TODO: hipri disabled for now, as it causes fio errors:
        # io_u error on file /dev/nvme2n1: Operation not supported
        # See comment in KernelInitiator class, kernel_init_connect() function
        if hasattr(self, "ioengine") and "io_uring" in self.ioengine:
            fio_config = fio_config + """
fixedbufs=1
registerfiles=1
#hipri=1
"""
        if num_jobs:
            fio_config = fio_config + "numjobs=%s \n" % num_jobs
        if self.cpus_allowed is not None:
            fio_config = fio_config + "cpus_allowed=%s \n" % self.cpus_allowed
            fio_config = fio_config + "cpus_allowed_policy=%s \n" % self.cpus_allowed_policy
        fio_config = fio_config + filename_section

        fio_config_filename = "%s_%s_%s_m_%s" % (block_size, io_depth, rw, rwmixread)
        if hasattr(self, "num_cores"):
            fio_config_filename += "_%sCPU" % self.num_cores
        fio_config_filename += ".fio"

        self.exec_cmd(["mkdir", "-p", "%s/nvmf_perf" % self.spdk_dir])
        self.exec_cmd(["echo", "'%s'" % fio_config, ">", "%s/nvmf_perf/%s" % (self.spdk_dir, fio_config_filename)])
        self.log_print("Created FIO Config:")
        self.log_print(fio_config)

        return os.path.join(self.spdk_dir, "nvmf_perf", fio_config_filename)

    def set_cpu_frequency(self):
        """Pin the CPU frequency with cpupower when cpu_frequency is set;
        exits the script if the userspace governor is unavailable
        (e.g. intel_pstate enabled)."""
        if self.cpu_frequency is not None:
            try:
                self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "userspace"], True)
                self.exec_cmd(["sudo", "cpupower", "frequency-set", "-f", "%s" % self.cpu_frequency], True)
                self.log_print(self.exec_cmd(["sudo", "cpupower", "frequency-info"]))
            except Exception:
                self.log_print("ERROR: cpu_frequency will not work when intel_pstate is enabled!")
                sys.exit()
        else:
            self.log_print("WARNING: you have disabled intel_pstate and using default cpu governance.")

    def run_fio(self, fio_config_file, run_num=None):
        """Execute FIO on the initiator with the given job file.

        When run_num is given, repeats the job that many times and writes
        one JSON result file per run; otherwise runs once.
        """
        job_name, _ = os.path.splitext(fio_config_file)
        self.log_print("Starting FIO run for job: %s" % job_name)
        self.log_print("Using FIO: %s" % self.fio_bin)

        if run_num:
            for i in range(1, run_num + 1):
                output_filename = job_name + "_run_" + str(i) + "_" + self.name + ".json"
                try:
                    output = self.exec_cmd(["sudo", self.fio_bin, fio_config_file, "--output-format=json",
                                            "--output=%s" % output_filename, "--eta=never"], True)
                    self.log_print(output)
                except subprocess.CalledProcessError as e:
                    self.log_print("ERROR: Fio process failed!")
                    self.log_print(e.stdout)
        else:
            output_filename = job_name + "_" + self.name + ".json"
            # BUGFIX: was '"--output" % output_filename', which raises
            # TypeError (no conversion specifier) and never produced an
            # --output=<file> argument. Use the same form as the
            # run_num branch above.
            output = self.exec_cmd(["sudo", self.fio_bin,
                                    fio_config_file, "--output-format=json",
                                    "--output=%s" % output_filename], True)
            self.log_print(output)
        self.log_print("FIO run finished. Results in: %s" % output_filename)

    def sys_config(self):
        """Log the initiator's system configuration (kernel release,
        cmdline, sysctl settings, CPU power info) gathered over SSH."""
        self.log_print("====Kernel release:====")
        self.log_print(self.exec_cmd(["uname", "-r"]))
        self.log_print("====Kernel command line:====")
        cmdline = self.exec_cmd(["cat", "/proc/cmdline"])
        self.log_print('\n'.join(self.get_uncommented_lines(cmdline.splitlines())))
        self.log_print("====sysctl conf:====")
        sysctl = self.exec_cmd(["cat", "/etc/sysctl.conf"])
        self.log_print('\n'.join(self.get_uncommented_lines(sysctl.splitlines())))
        self.log_print("====Cpu power info:====")
        self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
997
998
class KernelTarget(Target):
    """NVMe-oF target backed by the Linux kernel nvmet driver, configured
    by generating a JSON config file and restoring it with nvmetcli."""

    def __init__(self, name, general_config, target_config):
        super().__init__(name, general_config, target_config)
        # Defaults
        self.nvmet_bin = "nvmetcli"  # path/name of the nvmetcli binary

        if "nvmet_bin" in target_config:
            self.nvmet_bin = target_config["nvmet_bin"]

    def stop(self):
        """Tear down the kernel target by clearing the nvmet configuration."""
        nvmet_command(self.nvmet_bin, "clear")

    def kernel_tgt_gen_subsystem_conf(self, nvme_list):
        """Write an nvmetcli-compatible JSON config to ./kernel.conf.

        Creates one subsystem and one port per backing device in
        nvme_list, spreading them over target NICs via self.spread_bdevs().
        Also records [port, nqn, ip] entries in self.subsystem_info_list
        for later use by the initiators.
        """

        nvmet_cfg = {
            "ports": [],
            "hosts": [],
            "subsystems": [],
        }

        for ip, bdev_num in self.spread_bdevs(len(nvme_list)):
            # One unique listener port per bdev, starting at 4420.
            port = str(4420 + bdev_num)
            nqn = "nqn.2018-09.io.spdk:cnode%s" % bdev_num
            serial = "SPDK00%s" % bdev_num
            bdev_name = nvme_list[bdev_num]

            nvmet_cfg["subsystems"].append({
                "allowed_hosts": [],
                "attr": {
                    "allow_any_host": "1",
                    "serial": serial,
                    "version": "1.3"
                },
                "namespaces": [
                    {
                        "device": {
                            "path": bdev_name,
                            "uuid": "%s" % uuid.uuid4()
                        },
                        "enable": 1,
                        # NOTE(review): nsid is set to the port number
                        # (4420+bdev_num) rather than a small namespace
                        # index — confirm this is intended.
                        "nsid": port
                    }
                ],
                "nqn": nqn
            })

            nvmet_cfg["ports"].append({
                "addr": {
                    "adrfam": "ipv4",
                    "traddr": ip,
                    "trsvcid": port,
                    "trtype": self.transport
                },
                "portid": bdev_num,
                "referrals": [],
                "subsystems": [nqn]
            })

            self.subsystem_info_list.append([port, nqn, ip])
        self.subsys_no = len(self.subsystem_info_list)

        with open("kernel.conf", "w") as fh:
            fh.write(json.dumps(nvmet_cfg, indent=2))

    def tgt_start(self):
        """Configure and start the kernel NVMe-oF target.

        Chooses backing devices (null_blk or real NVMe drives), generates
        kernel.conf, loads it with "nvmetcli restore", and applies ADQ TC
        settings when enabled.
        """
        self.log_print("Configuring kernel NVMeOF Target")

        if self.null_block:
            print("Configuring with null block device.")
            nvme_list = ["/dev/nullb{}".format(x) for x in range(self.null_block)]
        else:
            print("Configuring with NVMe drives.")
            nvme_list = get_nvme_devices()

        self.kernel_tgt_gen_subsystem_conf(nvme_list)
        # NOTE(review): this overwrites the subsys_no computed inside
        # kernel_tgt_gen_subsystem_conf; the two values should normally
        # agree (one subsystem per device).
        self.subsys_no = len(nvme_list)

        # Start from a clean slate, then load the generated config.
        nvmet_command(self.nvmet_bin, "clear")
        nvmet_command(self.nvmet_bin, "restore kernel.conf")

        if self.enable_adq:
            self.adq_configure_tc()

        self.log_print("Done configuring kernel NVMeOF Target")
1083
1084
class SPDKTarget(Target):
    """NVMe-oF target running SPDK's nvmf_tgt application, configured at
    runtime over its JSON-RPC socket (/var/tmp/spdk.sock)."""

    def __init__(self, name, general_config, target_config):
        """Read SPDK-specific options from target_config.

        Required: core_mask. Optional keys override the defaults set
        below (shared buffers, queue depth, DIF handling, BPF trace
        scripts, DSA offload, scheduler core limit).
        """
        super().__init__(name, general_config, target_config)

        # Required fields
        self.core_mask = target_config["core_mask"]
        self.num_cores = self.get_num_cores(self.core_mask)

        # Defaults
        self.dif_insert_strip = False
        self.null_block_dif_type = 0
        self.num_shared_buffers = 4096
        self.max_queue_depth = 128
        self.bpf_proc = None           # handle of the running bpftrace wrapper, if any
        self.bpf_scripts = []
        self.enable_dsa = False
        self.scheduler_core_limit = None

        if "num_shared_buffers" in target_config:
            self.num_shared_buffers = target_config["num_shared_buffers"]
        if "max_queue_depth" in target_config:
            self.max_queue_depth = target_config["max_queue_depth"]
        if "null_block_dif_type" in target_config:
            self.null_block_dif_type = target_config["null_block_dif_type"]
        if "dif_insert_strip" in target_config:
            self.dif_insert_strip = target_config["dif_insert_strip"]
        if "bpf_scripts" in target_config:
            self.bpf_scripts = target_config["bpf_scripts"]
        if "dsa_settings" in target_config:
            self.enable_dsa = target_config["dsa_settings"]
        if "scheduler_core_limit" in target_config:
            self.scheduler_core_limit = target_config["scheduler_core_limit"]

        self.log_print("====DSA settings:====")
        self.log_print("DSA enabled: %s" % (self.enable_dsa))

    @staticmethod
    def get_num_cores(core_mask):
        """Count the CPU cores selected by an SPDK core mask.

        Accepts either a hex mask ("0xF0") or a bracketed list/range
        string ("[0,2,4-7]").
        """
        if "0x" in core_mask:
            # Hex mask: the number of set bits equals the number of cores.
            return bin(int(core_mask, 16)).count("1")
        else:
            num_cores = 0
            core_mask = core_mask.replace("[", "")
            core_mask = core_mask.replace("]", "")
            for i in core_mask.split(","):
                if "-" in i:
                    x, y = i.split("-")
                    # Inclusive range: "4-7" selects 4 cores.
                    num_cores += len(range(int(x), int(y))) + 1
                else:
                    num_cores += 1
            return num_cores

    def spdk_tgt_configure(self):
        """Configure the already-running nvmf_tgt over RPC: create the
        transport, then add bdevs and subsystems."""
        self.log_print("Configuring SPDK NVMeOF target via RPC")

        if self.enable_adq:
            self.adq_configure_tc()

        # Create transport layer
        rpc.nvmf.nvmf_create_transport(self.client, trtype=self.transport,
                                       num_shared_buffers=self.num_shared_buffers,
                                       max_queue_depth=self.max_queue_depth,
                                       dif_insert_or_strip=self.dif_insert_strip,
                                       sock_priority=self.adq_priority)
        self.log_print("SPDK NVMeOF transport layer:")
        rpc_client.print_dict(rpc.nvmf.nvmf_get_transports(self.client))

        if self.null_block:
            self.spdk_tgt_add_nullblock(self.null_block)
            self.spdk_tgt_add_subsystem_conf(self.nic_ips, self.null_block)
        else:
            self.spdk_tgt_add_nvme_conf()
            self.spdk_tgt_add_subsystem_conf(self.nic_ips)

        self.log_print("Done configuring SPDK NVMeOF Target")

    def spdk_tgt_add_nullblock(self, null_block_count):
        """Create null_block_count null bdevs named NvmeXn1, optionally
        with DIF/DIX metadata when null_block_dif_type != 0."""
        md_size = 0
        block_size = 4096
        if self.null_block_dif_type != 0:
            # 128B of per-block metadata to hold the protection info.
            md_size = 128

        self.log_print("Adding null block bdevices to config via RPC")
        for i in range(null_block_count):
            self.log_print("Setting bdev protection to :%s" % self.null_block_dif_type)
            rpc.bdev.bdev_null_create(self.client, 102400, block_size + md_size, "Nvme{}n1".format(i),
                                      dif_type=self.null_block_dif_type, md_size=md_size)
        self.log_print("SPDK Bdevs configuration:")
        rpc_client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))

    def spdk_tgt_add_nvme_conf(self, req_num_disks=None):
        """Attach local PCIe NVMe drives as SPDK bdevs.

        :param req_num_disks: optional cap on how many drives to attach;
            exits if more are requested than present.
        """
        self.log_print("Adding NVMe bdevs to config via RPC")

        bdfs = get_nvme_devices_bdf()
        # RPC traddr format uses dots, not colons (0000:5e:00.0 -> 0000.5e.00.0).
        bdfs = [b.replace(":", ".") for b in bdfs]

        if req_num_disks:
            if req_num_disks > len(bdfs):
                self.log_print("ERROR: Requested number of disks is more than available %s" % len(bdfs))
                sys.exit(1)
            else:
                bdfs = bdfs[0:req_num_disks]

        for i, bdf in enumerate(bdfs):
            rpc.bdev.bdev_nvme_attach_controller(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)

        self.log_print("SPDK Bdevs configuration:")
        rpc_client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))

    def spdk_tgt_add_subsystem_conf(self, ips=None, req_num_disks=None):
        """Create one NVMe-oF subsystem + listener per bdev.

        Spreads subsystems over the given IPs via self.spread_bdevs() and
        records [port, nqn, ip] entries in self.subsystem_info_list.
        """
        self.log_print("Adding subsystems to config")
        if not req_num_disks:
            req_num_disks = get_nvme_devices_count()

        for ip, bdev_num in self.spread_bdevs(req_num_disks):
            # One unique listener port per bdev, starting at 4420.
            port = str(4420 + bdev_num)
            nqn = "nqn.2018-09.io.spdk:cnode%s" % bdev_num
            serial = "SPDK00%s" % bdev_num
            bdev_name = "Nvme%sn1" % bdev_num

            rpc.nvmf.nvmf_create_subsystem(self.client, nqn, serial,
                                           allow_any_host=True, max_namespaces=8)
            rpc.nvmf.nvmf_subsystem_add_ns(self.client, nqn, bdev_name)
            # Listen on both the subsystem NQN and the discovery service.
            for nqn_name in [nqn, "discovery"]:
                rpc.nvmf.nvmf_subsystem_add_listener(self.client,
                                                     nqn=nqn_name,
                                                     trtype=self.transport,
                                                     traddr=ip,
                                                     trsvcid=port,
                                                     adrfam="ipv4")
            self.subsystem_info_list.append([port, nqn, ip])
        self.subsys_no = len(self.subsystem_info_list)

        self.log_print("SPDK NVMeOF subsystem configuration:")
        rpc_client.print_dict(rpc.nvmf.nvmf_get_subsystems(self.client))

    def bpf_start(self):
        """Launch the bpftrace wrapper script attached to the running
        nvmf_tgt PID; trace output is collected in bpf_traces.txt under
        self.results_dir."""
        self.log_print("Starting BPF Trace scripts: %s" % self.bpf_scripts)
        bpf_script = os.path.join(self.spdk_dir, "scripts/bpftrace.sh")
        bpf_traces = [os.path.join(self.spdk_dir, "scripts/bpf", trace) for trace in self.bpf_scripts]
        results_path = os.path.join(self.results_dir, "bpf_traces.txt")

        # self.pid is the path of the pid file written by tgt_start().
        with open(self.pid, "r") as fh:
            nvmf_pid = str(fh.readline())

        cmd = [bpf_script, nvmf_pid, *bpf_traces]
        self.log_print(cmd)
        self.bpf_proc = subprocess.Popen(cmd, env={"BPF_OUTFILE": results_path})

    def tgt_start(self):
        """Start nvmf_tgt and bring it to a fully configured state.

        The app is launched with --wait-for-rpc so that socket options,
        the scheduler and accel engines can be set over RPC before
        framework initialization, then spdk_tgt_configure() creates the
        transport and subsystems.
        """
        if self.null_block:
            self.subsys_no = 1
        else:
            self.subsys_no = get_nvme_devices_count()
        self.log_print("Starting SPDK NVMeOF Target process")
        nvmf_app_path = os.path.join(self.spdk_dir, "build/bin/nvmf_tgt")
        proc = subprocess.Popen([nvmf_app_path, "--wait-for-rpc", "-m", self.core_mask])
        self.pid = os.path.join(self.spdk_dir, "nvmf.pid")

        with open(self.pid, "w") as fh:
            fh.write(str(proc.pid))
        self.nvmf_proc = proc
        self.log_print("SPDK NVMeOF Target PID=%s" % self.pid)
        self.log_print("Waiting for spdk to initialize...")
        # Busy-wait until the RPC socket appears, i.e. the app is ready.
        while True:
            if os.path.exists("/var/tmp/spdk.sock"):
                break
            time.sleep(1)
        self.client = rpc_client.JSONRPCClient("/var/tmp/spdk.sock")

        rpc.sock.sock_set_default_impl(self.client, impl_name="posix")

        if self.enable_zcopy:
            rpc.sock.sock_impl_set_options(self.client, impl_name="posix",
                                           enable_zerocopy_send_server=True)
            self.log_print("Target socket options:")
            rpc_client.print_dict(rpc.sock.sock_impl_get_options(self.client, impl_name="posix"))

        if self.enable_adq:
            rpc.sock.sock_impl_set_options(self.client, impl_name="posix", enable_placement_id=1)
            rpc.bdev.bdev_nvme_set_options(self.client, timeout_us=0, action_on_timeout=None,
                                           nvme_adminq_poll_period_us=100000, retry_count=4)

        if self.enable_dsa:
            rpc.dsa.dsa_scan_accel_engine(self.client, config_kernel_mode=None)
            self.log_print("Target DSA accel engine enabled")

        rpc.app.framework_set_scheduler(self.client, name=self.scheduler_name, core_limit=self.scheduler_core_limit)
        # Leave the --wait-for-rpc pre-init state; subsystems initialize now.
        rpc.framework_start_init(self.client)

        if self.bpf_scripts:
            self.bpf_start()

        self.spdk_tgt_configure()

    def stop(self):
        """Stop BPF tracing (if running) and terminate nvmf_tgt, escalating
        to SIGKILL and cleaning up stale RPC socket files on failure."""
        if self.bpf_proc:
            self.log_print("Stopping BPF Trace script")
            self.bpf_proc.terminate()
            self.bpf_proc.wait()

        if hasattr(self, "nvmf_proc"):
            try:
                self.nvmf_proc.terminate()
                self.nvmf_proc.wait(timeout=30)
            except Exception as e:
                self.log_print("Failed to terminate SPDK Target process. Sending SIGKILL.")
                self.log_print(e)
                self.nvmf_proc.kill()
                self.nvmf_proc.communicate()
                # Try to clean up RPC socket files if they were not removed
                # because of using 'kill'
                try:
                    os.remove("/var/tmp/spdk.sock")
                    os.remove("/var/tmp/spdk.sock.lock")
                except FileNotFoundError:
                    pass
1302
1303
class KernelInitiator(Initiator):
    """Initiator host that connects to target subsystems using the Linux
    kernel NVMe-oF driver (via nvme-cli) and runs fio against the
    resulting /dev/nvmeXnY block devices.
    """

    def __init__(self, name, general_config, initiator_config):
        super().__init__(name, general_config, initiator_config)

        # Defaults
        self.extra_params = ""
        self.ioengine = "libaio"

        if "extra_params" in initiator_config:
            self.extra_params = initiator_config["extra_params"]

        if "kernel_engine" in initiator_config:
            self.ioengine = initiator_config["kernel_engine"]
            if "io_uring" in self.ioengine:
                # io_uring needs poll queues on the kernel initiator side;
                # this overrides any user-provided extra_params.
                self.extra_params = "--nr-poll-queues=8"

    def get_connected_nvme_list(self):
        """Return device basenames (e.g. "nvme0n1") of connected NVMe-oF
        devices, filtering out local physical drives by model number."""
        json_obj = json.loads(self.exec_cmd(["sudo", "nvme", "list", "-o", "json"]))
        nvme_list = [os.path.basename(x["DevicePath"]) for x in json_obj["Devices"]
                     if "SPDK" in x["ModelNumber"] or "Linux" in x["ModelNumber"]]
        return nvme_list

    def kernel_init_connect(self):
        """Connect to every discovered subsystem with nvme-cli and, for
        io_uring, tune block-layer sysfs knobs on the new devices."""
        self.log_print("Below connection attempts may result in error messages, this is expected!")
        for subsystem in self.subsystem_info_list:
            self.log_print("Trying to connect %s %s %s" % subsystem)
            # subsystem is a (port, nqn, address) tuple
            self.exec_cmd(["sudo", self.nvmecli_bin, "connect", "-t", self.transport,
                           "-s", subsystem[0], "-n", subsystem[1], "-a", subsystem[2], self.extra_params])
            time.sleep(2)

        if "io_uring" in self.ioengine:
            self.log_print("Setting block layer settings for io_uring.")

            # TODO: io_poll=1 and io_poll_delay=-1 params not set here, because
            #       apparently it's not possible for connected subsystems.
            #       Results in "error: Invalid argument"
            block_sysfs_settings = {
                "iostats": "0",
                "rq_affinity": "0",
                "nomerges": "2"
            }

            for disk in self.get_connected_nvme_list():
                sysfs = os.path.join("/sys/block", disk, "queue")
                for k, v in block_sysfs_settings.items():
                    sysfs_opt_path = os.path.join(sysfs, k)
                    try:
                        self.exec_cmd(["sudo", "bash", "-c", "echo %s > %s" % (v, sysfs_opt_path)], stderr_redirect=True)
                    except subprocess.CalledProcessError as e:
                        self.log_print("Warning: command %s failed due to error %s. %s was not set!" % (e.cmd, e.output, v))
                    finally:
                        # Always log the effective value, whether or not the write succeeded.
                        _ = self.exec_cmd(["sudo", "cat", "%s" % (sysfs_opt_path)])
                        self.log_print("%s=%s" % (sysfs_opt_path, _))

    def kernel_init_disconnect(self):
        """Disconnect all previously connected subsystems by NQN."""
        for subsystem in self.subsystem_info_list:
            self.exec_cmd(["sudo", self.nvmecli_bin, "disconnect", "-n", subsystem[1]])
            time.sleep(1)

    def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1):
        """Generate fio "[filenameN]" job sections, spreading the connected
        kernel NVMe devices as evenly as possible across the thread list.

        The per-section iodepth is scaled by the number of disks in the
        section divided by num_jobs (minimum 1).
        """
        nvme_list = [os.path.join("/dev", nvme) for nvme in self.get_connected_nvme_list()]

        filename_section = ""
        nvme_per_split = int(len(nvme_list) / len(threads))
        remainder = len(nvme_list) % len(threads)
        iterator = iter(nvme_list)
        result = []
        for i in range(len(threads)):
            result.append([])
            for _ in range(nvme_per_split):
                result[i].append(next(iterator))
            # Bug fix: hand out at most ONE remainder device per thread
            # section, consistent with SPDKInitiator.gen_fio_filename_conf.
            # Previously this check was nested inside the inner loop, which
            # piled all remainder devices onto the first sections (and
            # assigned nothing at all when nvme_per_split was 0).
            if remainder:
                result[i].append(next(iterator))
                remainder -= 1
        for i, r in enumerate(result):
            header = "[filename%s]" % i
            disks = "\n".join(["filename=%s" % x for x in r])
            job_section_qd = round((io_depth * len(r)) / num_jobs)
            if job_section_qd == 0:
                job_section_qd = 1
            iodepth = "iodepth=%s" % job_section_qd
            filename_section = "\n".join([filename_section, header, disks, iodepth])

        return filename_section
1388
1389
class SPDKInitiator(Initiator):
    """Initiator host that runs fio through the SPDK fio plugin, attaching
    remote NVMe-oF subsystems as SPDK bdevs."""

    def __init__(self, name, general_config, initiator_config):
        super().__init__(name, general_config, initiator_config)

        # Build SPDK unless the config explicitly sets skip_spdk_install.
        if general_config.get("skip_spdk_install", False) is False:
            self.install_spdk()

        # Required fields
        self.num_cores = initiator_config["num_cores"]

        # Optional fields
        self.enable_data_digest = initiator_config.get("enable_data_digest", False)

    def install_spdk(self):
        """Configure and build SPDK (with the fio plugin) on the initiator,
        then run setup.sh to bind NVMe devices."""
        self.log_print("Using fio binary %s" % self.fio_bin)
        build_steps = [
            ["git", "-C", self.spdk_dir, "submodule", "update", "--init"],
            ["git", "-C", self.spdk_dir, "clean", "-ffdx"],
            ["cd", self.spdk_dir, "&&", "./configure", "--with-rdma",
             "--with-fio=%s" % os.path.dirname(self.fio_bin)],
            ["make", "-C", self.spdk_dir, "clean"],
            ["make", "-C", self.spdk_dir, "-j$(($(nproc)*2))"],
        ]
        for step in build_steps:
            self.exec_cmd(step)

        self.log_print("SPDK built")
        self.exec_cmd(["sudo", "%s/scripts/setup.sh" % self.spdk_dir])

    def gen_spdk_bdev_conf(self, remote_subsystem_list):
        """Return a JSON bdev subsystem config string that attaches one
        NVMe-oF controller per remote subsystem tuple (port, nqn, address)."""
        controllers = []
        for idx, subsys in enumerate(remote_subsystem_list):
            sub_port, sub_nqn, sub_addr = (str(field) for field in subsys)
            params = {
                "name": "Nvme{}".format(idx),
                "trtype": self.transport,
                "traddr": sub_addr,
                "trsvcid": sub_port,
                "subnqn": sub_nqn,
                "adrfam": "IPv4"
            }

            if self.enable_adq:
                params["priority"] = "1"

            if self.enable_data_digest:
                params["ddgst"] = self.enable_data_digest

            controllers.append({
                "method": "bdev_nvme_attach_controller",
                "params": params
            })

        bdev_cfg_section = {
            "subsystems": [
                {
                    "subsystem": "bdev",
                    "config": controllers
                }
            ]
        }
        return json.dumps(bdev_cfg_section, indent=2)

    def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1):
        """Generate fio "[filenameN]" job sections, distributing SPDK bdev
        names (NvmeXn1) as evenly as possible across the thread list."""
        filename_section = ""
        # Never create more sections than there are subsystems.
        if len(threads) >= len(subsystems):
            threads = range(0, len(subsystems))
        bdev_names = ["Nvme%sn1" % x for x in range(0, len(subsystems))]

        per_section, leftover = divmod(len(subsystems), len(threads))
        bdev_iter = iter(bdev_names)
        buckets = []
        for _ in range(len(threads)):
            bucket = [next(bdev_iter) for _ in range(per_section)]
            # Hand out at most one leftover bdev per section.
            if leftover:
                bucket.append(next(bdev_iter))
                leftover -= 1
            buckets.append(bucket)

        for idx, bucket in enumerate(buckets):
            header = "[filename%s]" % idx
            disks = "\n".join(["filename=%s" % name for name in bucket])
            # Scale queue depth by disks per section; never drop below 1.
            section_qd = round((io_depth * len(bucket)) / num_jobs)
            if section_qd == 0:
                section_qd = 1
            iodepth = "iodepth=%s" % section_qd
            filename_section = "\n".join([filename_section, header, disks, iodepth])

        return filename_section
1476
1477
if __name__ == "__main__":
    # Entry point: parse CLI args, build target/initiator objects from the
    # JSON config, run the fio workload matrix with optional target-side
    # measurements, then restore system settings and collect results.
    script_full_dir = os.path.dirname(os.path.realpath(__file__))
    default_config_file_path = os.path.relpath(os.path.join(script_full_dir, "config.json"))

    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', type=str, default=default_config_file_path,
                        help='Configuration file.')
    parser.add_argument('-r', '--results', type=str, default='/tmp/results',
                        help='Results directory.')
    parser.add_argument('-s', '--csv-filename', type=str, default='nvmf_results.csv',
                        help='CSV results filename.')

    args = parser.parse_args()

    print("Using config file: %s" % args.config)
    with open(args.config, "r") as config:
        data = json.load(config)

    initiators = []

    # Read mandatory sections up front so a malformed config fails early.
    general_config = data["general"]
    target_config = data["target"]

    for k, v in data.items():
        if "target" in k:
            v.update({"results_dir": args.results})
            if data[k]["mode"] == "spdk":
                target_obj = SPDKTarget(k, data["general"], v)
            elif data[k]["mode"] == "kernel":
                target_obj = KernelTarget(k, data["general"], v)
        elif "initiator" in k:
            if data[k]["mode"] == "spdk":
                init_obj = SPDKInitiator(k, data["general"], v)
            elif data[k]["mode"] == "kernel":
                init_obj = KernelInitiator(k, data["general"], v)
            initiators.append(init_obj)
        elif "fio" in k:
            # Cartesian product of block sizes, queue depths and rw modes.
            fio_workloads = itertools.product(data[k]["bs"],
                                              data[k]["qd"],
                                              data[k]["rw"])

            fio_run_time = data[k]["run_time"]
            fio_ramp_time = data[k]["ramp_time"]
            fio_rw_mix_read = data[k]["rwmixread"]
            fio_run_num = data[k].get("run_num")
            fio_num_jobs = data[k].get("num_jobs")
            fio_rate_iops = data[k].get("rate_iops", 0)

    # exist_ok also covers re-runs; makedirs creates missing parents too.
    os.makedirs(args.results, exist_ok=True)

    for i in initiators:
        target_obj.initiator_info.append(
            {"name": i.name, "target_nic_ips": i.target_nic_ips, "initiator_nic_ips": i.nic_ips}
        )

    # TODO: This try block is definitely too large. Need to break this up into separate
    # logical blocks to reduce size.
    try:
        target_obj.tgt_start()

        for i in initiators:
            i.discover_subsystems(i.target_nic_ips, target_obj.subsys_no)
            if i.enable_adq:
                i.adq_configure_tc()

        # Poor mans threading
        # Run FIO tests
        for block_size, io_depth, rw in fio_workloads:
            threads = []
            configs = []
            for i in initiators:
                if i.mode == "kernel":
                    i.kernel_init_connect()

                cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
                                       fio_num_jobs, fio_ramp_time, fio_run_time, fio_rate_iops)
                configs.append(cfg)

            for i, cfg in zip(initiators, configs):
                t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
                threads.append(t)
            if target_obj.enable_sar:
                sar_file_prefix = "%s_%s_%s_sar" % (block_size, rw, io_depth)
                t = threading.Thread(target=target_obj.measure_sar, args=(args.results, sar_file_prefix))
                threads.append(t)

            if target_obj.enable_pcm:
                pcm_fnames = ["%s_%s_%s_%s.csv" % (block_size, rw, io_depth, x) for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]

                pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm, args=(args.results, pcm_fnames[0],))
                pcm_mem_t = threading.Thread(target=target_obj.measure_pcm_memory, args=(args.results, pcm_fnames[1],))
                pcm_pow_t = threading.Thread(target=target_obj.measure_pcm_power, args=(args.results, pcm_fnames[2],))

                threads.append(pcm_cpu_t)
                threads.append(pcm_mem_t)
                threads.append(pcm_pow_t)

            if target_obj.enable_bandwidth:
                bandwidth_file_name = "_".join(["bandwidth", str(block_size), str(rw), str(io_depth)])
                bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
                t = threading.Thread(target=target_obj.measure_network_bandwidth, args=(args.results, bandwidth_file_name,))
                threads.append(t)

            if target_obj.enable_dpdk_memory:
                # Bug fix: "args" must be a tuple. "(args.results)" is just a
                # parenthesized string, which Thread would iterate, passing each
                # character of the path as a separate positional argument.
                t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(args.results,))
                threads.append(t)

            if target_obj.enable_adq:
                ethtool_thread = threading.Thread(target=target_obj.ethtool_after_fio_ramp, args=(fio_ramp_time,))
                threads.append(ethtool_thread)

            for t in threads:
                t.start()
            for t in threads:
                t.join()

            for i in initiators:
                if i.mode == "kernel":
                    i.kernel_init_disconnect()
                i.copy_result_files(args.results)

        # Restore pre-test system settings on the target and all initiators.
        target_obj.restore_governor()
        target_obj.restore_tuned()
        target_obj.restore_services()
        target_obj.restore_sysctl()
        if target_obj.enable_adq:
            target_obj.reload_driver("ice")
        for i in initiators:
            i.restore_governor()
            i.restore_tuned()
            i.restore_services()
            i.restore_sysctl()
            if i.enable_adq:
                i.reload_driver("ice")
        target_obj.parse_results(args.results, args.csv_filename)
    finally:
        for i in initiators:
            try:
                i.stop()
            except Exception as err:
                # Best-effort cleanup: report the failure instead of silently
                # swallowing it, but keep stopping the remaining initiators
                # and the target.
                print("Error while stopping initiator %s: %s" % (i.name, err))
        target_obj.stop()
1630