#!/usr/bin/env python3

from json.decoder import JSONDecodeError
import os
import re
import sys
import argparse
import json
import zipfile
import threading
import subprocess
import itertools
import configparser
import time
import uuid
from collections import OrderedDict

import paramiko
import pandas as pd
from common import *

sys.path.append(os.path.dirname(__file__) + '/../../../python')

import spdk.rpc as rpc  # noqa
import spdk.rpc.client as rpc_client  # noqa
26
27
28class Server:
29    def __init__(self, name, general_config, server_config):
30        self.name = name
31        self.username = general_config["username"]
32        self.password = general_config["password"]
33        self.transport = general_config["transport"].lower()
34        self.nic_ips = server_config["nic_ips"]
35        self.mode = server_config["mode"]
36
37        self.irq_scripts_dir = "/usr/src/local/mlnx-tools/ofed_scripts"
38        if "irq_scripts_dir" in server_config and server_config["irq_scripts_dir"]:
39            self.irq_scripts_dir = server_config["irq_scripts_dir"]
40
41        self.local_nic_info = []
42        self._nics_json_obj = {}
43        self.svc_restore_dict = {}
44        self.sysctl_restore_dict = {}
45        self.tuned_restore_dict = {}
46        self.governor_restore = ""
47        self.tuned_profile = ""
48
49        self.enable_adq = False
50        self.adq_priority = None
51        if "adq_enable" in server_config and server_config["adq_enable"]:
52            self.enable_adq = server_config["adq_enable"]
53            self.adq_priority = 1
54
55        if "tuned_profile" in server_config:
56            self.tuned_profile = server_config["tuned_profile"]
57
58        if not re.match("^[A-Za-z0-9]*$", name):
59            self.log_print("Please use a name which contains only letters or numbers")
60            sys.exit(1)
61
62    def log_print(self, msg):
63        print("[%s] %s" % (self.name, msg), flush=True)
64
65    @staticmethod
66    def get_uncommented_lines(lines):
67        return [line for line in lines if line and not line.startswith('#')]
68
69    def get_nic_name_by_ip(self, ip):
70        if not self._nics_json_obj:
71            nics_json_obj = self.exec_cmd(["ip", "-j", "address", "show"])
72            self._nics_json_obj = list(filter(lambda x: x["addr_info"], json.loads(nics_json_obj)))
73        for nic in self._nics_json_obj:
74            for addr in nic["addr_info"]:
75                if ip in addr["local"]:
76                    return nic["ifname"]
77
78    def set_local_nic_info_helper(self):
79        pass
80
81    def set_local_nic_info(self, pci_info):
82        def extract_network_elements(json_obj):
83            nic_list = []
84            if isinstance(json_obj, list):
85                for x in json_obj:
86                    nic_list.extend(extract_network_elements(x))
87            elif isinstance(json_obj, dict):
88                if "children" in json_obj:
89                    nic_list.extend(extract_network_elements(json_obj["children"]))
90                if "class" in json_obj.keys() and "network" in json_obj["class"]:
91                    nic_list.append(json_obj)
92            return nic_list
93
94        self.local_nic_info = extract_network_elements(pci_info)
95
96    # pylint: disable=R0201
97    def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
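        # Stub implementation. Target overrides this to run commands locally,
        # Initiator overrides it to run commands on the remote host over SSH.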
98        return ""
99
100    def configure_system(self):
101        self.load_drivers()
102        self.configure_services()
103        self.configure_sysctl()
104        self.configure_tuned()
105        self.configure_cpu_governor()
106        self.configure_irq_affinity()
107
108    def load_drivers(self):
109        self.log_print("Loading drivers")
110        self.exec_cmd(["sudo", "modprobe", "-a",
111                       "nvme-%s" % self.transport,
112                       "nvmet-%s" % self.transport])
113        if self.mode == "kernel" and hasattr(self, "null_block") and self.null_block:
114            self.exec_cmd(["sudo", "modprobe", "null_blk",
115                           "nr_devices=%s" % self.null_block])
116
117    def configure_adq(self):
118        if self.mode == "kernel":
119            self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
120            return
121        self.adq_load_modules()
122        self.adq_configure_nic()
123
124    def adq_load_modules(self):
125        self.log_print("Modprobing ADQ-related Linux modules...")
126        adq_module_deps = ["sch_mqprio", "act_mirred", "cls_flower"]
127        for module in adq_module_deps:
128            try:
129                self.exec_cmd(["sudo", "modprobe", module])
130                self.log_print("%s loaded!" % module)
            except subprocess.CalledProcessError as e:
132                self.log_print("ERROR: failed to load module %s" % module)
133                self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
134
135    def adq_configure_tc(self):
136        self.log_print("Configuring ADQ Traffic classes and filters...")
137
138        if self.mode == "kernel":
139            self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
140            return
141
142        num_queues_tc0 = 2  # 2 is minimum number of queues for TC0
143        num_queues_tc1 = self.num_cores
144        port_param = "dst_port" if isinstance(self, Target) else "src_port"
145        port = "4420"
146        xps_script_path = os.path.join(self.spdk_dir, "scripts", "perf", "nvmf", "set_xps_rxqs")
147
148        for nic_ip in self.nic_ips:
149            nic_name = self.get_nic_name_by_ip(nic_ip)
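            # Split NIC queues into two ADQ traffic classes: TC0 keeps num_queues_tc0 (2)
            # queues for generic traffic, TC1 gets num_queues_tc1 (self.num_cores) queues
            # for NVMe-oF traffic. Illustrative resulting command for an 8-core target:
            #   tc qdisc add dev <nic> root mqprio num_tc 2 map 0 1 queues 2@0 8@2 hw 1 mode channel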
150            tc_qdisc_map_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name,
151                                "root", "mqprio", "num_tc", "2", "map", "0", "1",
152                                "queues", "%s@0" % num_queues_tc0,
153                                "%s@%s" % (num_queues_tc1, num_queues_tc0),
154                                "hw", "1", "mode", "channel"]
155            self.log_print(" ".join(tc_qdisc_map_cmd))
156            self.exec_cmd(tc_qdisc_map_cmd)
157
158            time.sleep(5)
159            tc_qdisc_ingress_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name, "ingress"]
160            self.log_print(" ".join(tc_qdisc_ingress_cmd))
161            self.exec_cmd(tc_qdisc_ingress_cmd)
162
163            tc_filter_cmd = ["sudo", "tc", "filter", "add", "dev", nic_name,
164                             "protocol", "ip", "ingress", "prio", "1", "flower",
165                             "dst_ip", "%s/32" % nic_ip, "ip_proto", "tcp", port_param, port,
166                             "skip_sw", "hw_tc", "1"]
167            self.log_print(" ".join(tc_filter_cmd))
168            self.exec_cmd(tc_filter_cmd)
169
170            # show tc configuration
171            self.log_print("Show tc configuration for %s NIC..." % nic_name)
172            tc_disk_out = self.exec_cmd(["sudo", "tc", "qdisc", "show", "dev", nic_name])
173            tc_filter_out = self.exec_cmd(["sudo", "tc", "filter", "show", "dev", nic_name, "ingress"])
174            self.log_print("%s" % tc_disk_out)
175            self.log_print("%s" % tc_filter_out)
176
177            # Ethtool coalesce settings must be applied after configuring traffic classes
178            self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-rx", "off", "rx-usecs", "0"])
179            self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-tx", "off", "tx-usecs", "500"])
180
181            self.log_print("Running set_xps_rxqs script for %s NIC..." % nic_name)
182            xps_cmd = ["sudo", xps_script_path, nic_name]
183            self.log_print(xps_cmd)
184            self.exec_cmd(xps_cmd)
185
186    def reload_driver(self, driver):
187        try:
188            self.exec_cmd(["sudo", "rmmod", driver])
189            self.exec_cmd(["sudo", "modprobe", driver])
        except subprocess.CalledProcessError as e:
191            self.log_print("ERROR: failed to reload %s module!" % driver)
192            self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
193
194    def adq_configure_nic(self):
195        self.log_print("Configuring NIC port settings for ADQ testing...")
196
197        # Reload the driver first, to make sure any previous settings are re-set.
198        self.reload_driver("ice")
199
200        nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
201        for nic in nic_names:
202            self.log_print(nic)
203            try:
204                self.exec_cmd(["sudo", "ethtool", "-K", nic,
205                               "hw-tc-offload", "on"])  # Enable hardware TC offload
206                self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
207                               "channel-inline-flow-director", "on"])  # Enable Intel Flow Director
208                self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic, "fw-lldp-agent", "off"])  # Disable LLDP
                # As a temporary workaround for ADQ, channel packet inspection optimization is
                # turned on during connection establishment and turned off again before the fio
                # ramp_time expires, in ethtool_after_fio_ramp().
211                self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
212                               "channel-pkt-inspect-optimize", "on"])
            except subprocess.CalledProcessError as e:
214                self.log_print("ERROR: failed to configure NIC port using ethtool!")
215                self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
216                self.log_print("Please update your NIC driver and firmware versions and try again.")
217            self.log_print(self.exec_cmd(["sudo", "ethtool", "-k", nic]))
218            self.log_print(self.exec_cmd(["sudo", "ethtool", "--show-priv-flags", nic]))
219
220    def configure_services(self):
221        self.log_print("Configuring active services...")
222        svc_config = configparser.ConfigParser(strict=False)
223
224        # Below list is valid only for RHEL / Fedora systems and might not
225        # contain valid names for other distributions.
226        svc_target_state = {
227            "firewalld": "inactive",
228            "irqbalance": "inactive",
229            "lldpad.service": "inactive",
230            "lldpad.socket": "inactive"
231        }
232
233        for service in svc_target_state:
234            out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
235            out = "\n".join(["[%s]" % service, out])
236            svc_config.read_string(out)
237
238            if "LoadError" in svc_config[service] and "not found" in svc_config[service]["LoadError"]:
239                continue
240
241            service_state = svc_config[service]["ActiveState"]
242            self.log_print("Current state of %s service is %s" % (service, service_state))
243            self.svc_restore_dict.update({service: service_state})
244            if service_state != "inactive":
245                self.log_print("Disabling %s. It will be restored after the test has finished." % service)
246                self.exec_cmd(["sudo", "systemctl", "stop", service])
247
248    def configure_sysctl(self):
249        self.log_print("Tuning sysctl settings...")
250
251        busy_read = 0
252        if self.enable_adq and self.mode == "spdk":
253            busy_read = 1
254
255        sysctl_opts = {
256            "net.core.busy_poll": 0,
257            "net.core.busy_read": busy_read,
258            "net.core.somaxconn": 4096,
259            "net.core.netdev_max_backlog": 8192,
260            "net.ipv4.tcp_max_syn_backlog": 16384,
261            "net.core.rmem_max": 268435456,
262            "net.core.wmem_max": 268435456,
263            "net.ipv4.tcp_mem": "268435456 268435456 268435456",
264            "net.ipv4.tcp_rmem": "8192 1048576 33554432",
265            "net.ipv4.tcp_wmem": "8192 1048576 33554432",
266            "net.ipv4.route.flush": 1,
267            "vm.overcommit_memory": 1,
268        }
269
270        for opt, value in sysctl_opts.items():
271            self.sysctl_restore_dict.update({opt: self.exec_cmd(["sysctl", "-n", opt]).strip()})
272            self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())
273
274    def configure_tuned(self):
275        if not self.tuned_profile:
276            self.log_print("WARNING: Tuned profile not set in configuration file. Skipping configuration.")
277            return
278
279        self.log_print("Configuring tuned-adm profile to %s." % self.tuned_profile)
280        service = "tuned"
281        tuned_config = configparser.ConfigParser(strict=False)
282
283        out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
284        out = "\n".join(["[%s]" % service, out])
285        tuned_config.read_string(out)
286        tuned_state = tuned_config[service]["ActiveState"]
287        self.svc_restore_dict.update({service: tuned_state})
288
289        if tuned_state != "inactive":
290            profile = self.exec_cmd(["cat", "/etc/tuned/active_profile"]).strip()
291            profile_mode = self.exec_cmd(["cat", "/etc/tuned/profile_mode"]).strip()
292
293            self.tuned_restore_dict = {
294                "profile": profile,
295                "mode": profile_mode
296            }
297
298        self.exec_cmd(["sudo", "systemctl", "start", service])
299        self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_profile])
300        self.log_print("Tuned profile set to %s." % self.exec_cmd(["cat", "/etc/tuned/active_profile"]))
301
302    def configure_cpu_governor(self):
303        self.log_print("Setting CPU governor to performance...")
304
        # This assumes that all CPUs use the same scaling governor
306        self.governor_restore = self.exec_cmd(["cat", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"]).strip()
307        self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "performance"])
308
309    def configure_irq_affinity(self):
        self.log_print("Setting IRQ affinity for NICs...")
311
312        irq_script_path = os.path.join(self.irq_scripts_dir, "set_irq_affinity.sh")
313        nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
314        for nic in nic_names:
315            irq_cmd = ["sudo", irq_script_path, nic]
316            self.log_print(irq_cmd)
317            self.exec_cmd(irq_cmd, change_dir=self.irq_scripts_dir)
318
319    def restore_services(self):
320        self.log_print("Restoring services...")
321        for service, state in self.svc_restore_dict.items():
322            cmd = "stop" if state == "inactive" else "start"
323            self.exec_cmd(["sudo", "systemctl", cmd, service])
324
325    def restore_sysctl(self):
326        self.log_print("Restoring sysctl settings...")
327        for opt, value in self.sysctl_restore_dict.items():
328            self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())
329
330    def restore_tuned(self):
331        self.log_print("Restoring tuned-adm settings...")
332
333        if not self.tuned_restore_dict:
334            return
335
336        if self.tuned_restore_dict["mode"] == "auto":
337            self.exec_cmd(["sudo", "tuned-adm", "auto_profile"])
338            self.log_print("Reverted tuned-adm to auto_profile.")
339        else:
340            self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_restore_dict["profile"]])
341            self.log_print("Reverted tuned-adm to %s profile." % self.tuned_restore_dict["profile"])
342
343    def restore_governor(self):
344        self.log_print("Restoring CPU governor setting...")
345        if self.governor_restore:
346            self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", self.governor_restore])
347            self.log_print("Reverted CPU governor to %s." % self.governor_restore)
348
349
350class Target(Server):
351    def __init__(self, name, general_config, target_config):
352        super().__init__(name, general_config, target_config)
353
354        # Defaults
355        self.enable_sar = False
356        self.sar_delay = 0
357        self.sar_interval = 0
358        self.sar_count = 0
359        self.enable_pcm = False
360        self.pcm_dir = ""
361        self.pcm_delay = 0
362        self.pcm_interval = 0
363        self.pcm_count = 0
364        self.enable_bandwidth = 0
365        self.bandwidth_count = 0
366        self.enable_dpdk_memory = False
367        self.dpdk_wait_time = 0
368        self.enable_zcopy = False
369        self.scheduler_name = "static"
370        self.null_block = 0
371        self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
372        self.subsystem_info_list = []
373        self.initiator_info = []
374        self.enable_pm = True
375
376        if "null_block_devices" in target_config:
377            self.null_block = target_config["null_block_devices"]
378        if "sar_settings" in target_config:
379            self.enable_sar, self.sar_delay, self.sar_interval, self.sar_count = target_config["sar_settings"]
380        if "pcm_settings" in target_config:
381            self.enable_pcm = True
382            self.pcm_dir, self.pcm_delay, self.pcm_interval, self.pcm_count = target_config["pcm_settings"]
383        if "enable_bandwidth" in target_config:
384            self.enable_bandwidth, self.bandwidth_count = target_config["enable_bandwidth"]
385        if "enable_dpdk_memory" in target_config:
386            self.enable_dpdk_memory, self.dpdk_wait_time = target_config["enable_dpdk_memory"]
387        if "scheduler_settings" in target_config:
388            self.scheduler_name = target_config["scheduler_settings"]
389        if "zcopy_settings" in target_config:
390            self.enable_zcopy = target_config["zcopy_settings"]
391        if "results_dir" in target_config:
392            self.results_dir = target_config["results_dir"]
393        if "enable_pm" in target_config:
394            self.enable_pm = target_config["enable_pm"]
395
396        self.script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
397        self.spdk_dir = os.path.abspath(os.path.join(self.script_dir, "../../../"))
398        self.set_local_nic_info(self.set_local_nic_info_helper())
399
400        if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
401            self.zip_spdk_sources(self.spdk_dir, "/tmp/spdk.zip")
402
403        self.configure_system()
404        if self.enable_adq:
405            self.configure_adq()
406        self.sys_config()
407
408    def set_local_nic_info_helper(self):
409        return json.loads(self.exec_cmd(["lshw", "-json"]))
410
411    def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
412        stderr_opt = None
413        if stderr_redirect:
414            stderr_opt = subprocess.STDOUT
415        if change_dir:
416            old_cwd = os.getcwd()
417            os.chdir(change_dir)
418            self.log_print("Changing directory to %s" % change_dir)
419
420        out = check_output(cmd, stderr=stderr_opt).decode(encoding="utf-8")
421
422        if change_dir:
423            os.chdir(old_cwd)
424            self.log_print("Changing directory to %s" % old_cwd)
425        return out
426
427    def zip_spdk_sources(self, spdk_dir, dest_file):
428        self.log_print("Zipping SPDK source directory")
429        fh = zipfile.ZipFile(dest_file, "w", zipfile.ZIP_DEFLATED)
430        for root, _directories, files in os.walk(spdk_dir, followlinks=True):
431            for file in files:
432                fh.write(os.path.relpath(os.path.join(root, file)))
433        fh.close()
434        self.log_print("Done zipping")
435
436    @staticmethod
437    def _chunks(input_list, chunks_no):
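        # Split input_list into chunks_no contiguous chunks whose lengths differ by at
        # most one element, e.g. _chunks(list(range(10)), 3) yields [0..3], [4..6], [7..9].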
438        div, rem = divmod(len(input_list), chunks_no)
439        for i in range(chunks_no):
440            si = (div + 1) * (i if i < rem else rem) + div * (0 if i < rem else i - rem)
441            yield input_list[si:si + (div + 1 if i < rem else div)]
442
443    def spread_bdevs(self, req_disks):
444        # Spread available block devices indexes:
445        # - evenly across available initiator systems
446        # - evenly across available NIC interfaces for
447        #   each initiator
448        # Not NUMA aware.
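        # Illustrative example: with 8 requested disks and 2 initiators, each with
        # 2 target NIC IPs, initiator 0 gets bdevs 0-3 and initiator 1 gets bdevs 4-7,
        # with 2 bdevs mapped to each NIC IP.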
449        ip_bdev_map = []
450        initiator_chunks = self._chunks(range(0, req_disks), len(self.initiator_info))
451
452        for i, (init, init_chunk) in enumerate(zip(self.initiator_info, initiator_chunks)):
453            self.initiator_info[i]["bdev_range"] = init_chunk
454            init_chunks_list = list(self._chunks(init_chunk, len(init["target_nic_ips"])))
455            for ip, nic_chunk in zip(self.initiator_info[i]["target_nic_ips"], init_chunks_list):
456                for c in nic_chunk:
457                    ip_bdev_map.append((ip, c))
458        return ip_bdev_map
459
460    @staticmethod
461    def read_json_stats(file):
462        with open(file, "r") as json_data:
463            data = json.load(json_data)
            job_pos = 0  # job_pos is 0 because aggregated job results are used
465
466            # Check if latency is in nano or microseconds to choose correct dict key
467            def get_lat_unit(key_prefix, dict_section):
468                # key prefix - lat, clat or slat.
469                # dict section - portion of json containing latency bucket in question
470                # Return dict key to access the bucket and unit as string
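                # e.g. a fio section containing "lat_ns" yields ("lat_ns", "ns")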
471                for k, _ in dict_section.items():
472                    if k.startswith(key_prefix):
473                        return k, k.split("_")[1]
474
475            def get_clat_percentiles(clat_dict_leaf):
476                if "percentile" in clat_dict_leaf:
477                    p99_lat = float(clat_dict_leaf["percentile"]["99.000000"])
478                    p99_9_lat = float(clat_dict_leaf["percentile"]["99.900000"])
479                    p99_99_lat = float(clat_dict_leaf["percentile"]["99.990000"])
480                    p99_999_lat = float(clat_dict_leaf["percentile"]["99.999000"])
481
482                    return [p99_lat, p99_9_lat, p99_99_lat, p99_999_lat]
483                else:
484                    # Latest fio versions do not provide "percentile" results if no
485                    # measurements were done, so just return zeroes
486                    return [0, 0, 0, 0]
487
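            # The order of values returned below must match the "headers" list in parse_results()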
488            read_iops = float(data["jobs"][job_pos]["read"]["iops"])
489            read_bw = float(data["jobs"][job_pos]["read"]["bw"])
490            lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["read"])
491            read_avg_lat = float(data["jobs"][job_pos]["read"][lat_key]["mean"])
492            read_min_lat = float(data["jobs"][job_pos]["read"][lat_key]["min"])
493            read_max_lat = float(data["jobs"][job_pos]["read"][lat_key]["max"])
494            clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["read"])
495            read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat = get_clat_percentiles(
496                data["jobs"][job_pos]["read"][clat_key])
497
498            if "ns" in lat_unit:
499                read_avg_lat, read_min_lat, read_max_lat = [x / 1000 for x in [read_avg_lat, read_min_lat, read_max_lat]]
500            if "ns" in clat_unit:
501                read_p99_lat = read_p99_lat / 1000
502                read_p99_9_lat = read_p99_9_lat / 1000
503                read_p99_99_lat = read_p99_99_lat / 1000
504                read_p99_999_lat = read_p99_999_lat / 1000
505
506            write_iops = float(data["jobs"][job_pos]["write"]["iops"])
507            write_bw = float(data["jobs"][job_pos]["write"]["bw"])
508            lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["write"])
509            write_avg_lat = float(data["jobs"][job_pos]["write"][lat_key]["mean"])
510            write_min_lat = float(data["jobs"][job_pos]["write"][lat_key]["min"])
511            write_max_lat = float(data["jobs"][job_pos]["write"][lat_key]["max"])
512            clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["write"])
513            write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat = get_clat_percentiles(
514                data["jobs"][job_pos]["write"][clat_key])
515
516            if "ns" in lat_unit:
517                write_avg_lat, write_min_lat, write_max_lat = [x / 1000 for x in [write_avg_lat, write_min_lat, write_max_lat]]
518            if "ns" in clat_unit:
519                write_p99_lat = write_p99_lat / 1000
520                write_p99_9_lat = write_p99_9_lat / 1000
521                write_p99_99_lat = write_p99_99_lat / 1000
522                write_p99_999_lat = write_p99_999_lat / 1000
523
524        return [read_iops, read_bw, read_avg_lat, read_min_lat, read_max_lat,
525                read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat,
526                write_iops, write_bw, write_avg_lat, write_min_lat, write_max_lat,
527                write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat]
528
529    def parse_results(self, results_dir, csv_file):
530        files = os.listdir(results_dir)
531        fio_files = filter(lambda x: ".fio" in x, files)
532        json_files = [x for x in files if ".json" in x]
533
534        headers = ["read_iops", "read_bw", "read_avg_lat_us", "read_min_lat_us", "read_max_lat_us",
535                   "read_p99_lat_us", "read_p99.9_lat_us", "read_p99.99_lat_us", "read_p99.999_lat_us",
536                   "write_iops", "write_bw", "write_avg_lat_us", "write_min_lat_us", "write_max_lat_us",
537                   "write_p99_lat_us", "write_p99.9_lat_us", "write_p99.99_lat_us", "write_p99.999_lat_us"]
538
539        aggr_headers = ["iops", "bw", "avg_lat_us", "min_lat_us", "max_lat_us",
540                        "p99_lat_us", "p99.9_lat_us", "p99.99_lat_us", "p99.999_lat_us"]
541
542        header_line = ",".join(["Name", *headers])
543        aggr_header_line = ",".join(["Name", *aggr_headers])
544
545        # Create empty results file
546        with open(os.path.join(results_dir, csv_file), "w") as fh:
547            fh.write(aggr_header_line + "\n")
548        rows = set()
549
550        for fio_config in fio_files:
551            self.log_print("Getting FIO stats for %s" % fio_config)
552            job_name, _ = os.path.splitext(fio_config)
553
554            # Look in the filename for rwmixread value. Function arguments do
555            # not have that information.
556            # TODO: Improve this function by directly using workload params instead
557            # of regexing through filenames.
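            # Illustrative example: "4k_128_randrw_m_70.fio" -> rw_mixread = 0.70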
558            if "read" in job_name:
559                rw_mixread = 1
560            elif "write" in job_name:
561                rw_mixread = 0
562            else:
563                rw_mixread = float(re.search(r"m_(\d+)", job_name).group(1)) / 100
564
            # Strip the "_<N>CPU" part of the name, if present.
            # Initiators running the same job may use different num_cores parameters.
567            job_name = re.sub(r"_\d+CPU", "", job_name)
568            job_result_files = [x for x in json_files if x.startswith(job_name)]
569            self.log_print("Matching result files for current fio config:")
570            for j in job_result_files:
571                self.log_print("\t %s" % j)
572
            # More than one initiator may have been used in the test; account for that.
            # Result files are named so that the string after the last "_" separator is the initiator name.
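            # Illustrative example: "4k_128_randrw_m_70_2CPU_run_1_init01.json" -> initiator name "init01"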
575            inits_names = set([os.path.splitext(x)[0].split("_")[-1] for x in job_result_files])
576            inits_avg_results = []
577            for i in inits_names:
578                self.log_print("\tGetting stats for initiator %s" % i)
579                # There may have been more than 1 test run for this job, calculate average results for initiator
580                i_results = [x for x in job_result_files if i in x]
581                i_results_filename = re.sub(r"run_\d+_", "", i_results[0].replace("json", "csv"))
582
583                separate_stats = []
584                for r in i_results:
585                    try:
586                        stats = self.read_json_stats(os.path.join(results_dir, r))
587                        separate_stats.append(stats)
588                        self.log_print(stats)
589                    except JSONDecodeError:
590                        self.log_print("ERROR: Failed to parse %s results! Results might be incomplete!" % r)
591
592                init_results = [sum(x) for x in zip(*separate_stats)]
593                init_results = [x / len(separate_stats) for x in init_results]
594                inits_avg_results.append(init_results)
595
596                self.log_print("\tAverage results for initiator %s" % i)
597                self.log_print(init_results)
598                with open(os.path.join(results_dir, i_results_filename), "w") as fh:
599                    fh.write(header_line + "\n")
600                    fh.write(",".join([job_name, *["{0:.3f}".format(x) for x in init_results]]) + "\n")
601
            # Sum results of all initiators running this FIO job.
            # Latency results are an average of latencies across all initiators.
604            inits_avg_results = [sum(x) for x in zip(*inits_avg_results)]
605            inits_avg_results = OrderedDict(zip(headers, inits_avg_results))
606            for key in inits_avg_results:
607                if "lat" in key:
608                    inits_avg_results[key] /= len(inits_names)
609
610            # Aggregate separate read/write values into common labels
611            # Take rw_mixread into consideration for mixed read/write workloads.
612            aggregate_results = OrderedDict()
613            for h in aggr_headers:
614                read_stat, write_stat = [float(value) for key, value in inits_avg_results.items() if h in key]
                if "lat" in h:
                    aggr_value = rw_mixread * read_stat + (1 - rw_mixread) * write_stat
                else:
                    aggr_value = read_stat + write_stat
                aggregate_results[h] = "{0:.3f}".format(aggr_value)
620
621            rows.add(",".join([job_name, *aggregate_results.values()]))
622
623        # Save results to file
624        for row in rows:
625            with open(os.path.join(results_dir, csv_file), "a") as fh:
626                fh.write(row + "\n")
627        self.log_print("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))
628
629    def measure_sar(self, results_dir, sar_file_prefix):
630        cpu_number = os.cpu_count()
631        sar_idle_sum = 0
632        sar_output_file = os.path.join(results_dir, sar_file_prefix + ".txt")
633        sar_cpu_util_file = os.path.join(results_dir, ".".join([sar_file_prefix + "cpu_util", "txt"]))
634
635        self.log_print("Waiting %d seconds for ramp-up to finish before measuring SAR stats" % self.sar_delay)
636        time.sleep(self.sar_delay)
637        self.log_print("Starting SAR measurements")
638
639        out = self.exec_cmd(["sar", "-P", "ALL", "%s" % self.sar_interval, "%s" % self.sar_count])
640        with open(os.path.join(results_dir, sar_output_file), "w") as fh:
641            for line in out.split("\n"):
642                if "Average" in line:
643                    if "CPU" in line:
644                        self.log_print("Summary CPU utilization from SAR:")
645                        self.log_print(line)
646                    elif "all" in line:
647                        self.log_print(line)
648                    else:
649                        sar_idle_sum += float(line.split()[7])
650            fh.write(out)
651        sar_cpu_usage = cpu_number * 100 - sar_idle_sum
652
653        with open(os.path.join(results_dir, sar_cpu_util_file), "w") as f:
654            f.write("%0.2f" % sar_cpu_usage)
655
656    def measure_power(self, results_dir, prefix, script_full_dir):
657        return subprocess.Popen(["%s/../pm/collect-bmc-pm" % script_full_dir, "-d", results_dir, "-l", "-p", prefix, "-x"])
658
659    def ethtool_after_fio_ramp(self, fio_ramp_time):
660        time.sleep(fio_ramp_time//2)
661        nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
662        for nic in nic_names:
663            self.log_print(nic)
664            self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
665                           "channel-pkt-inspect-optimize", "off"])  # Disable channel packet inspection optimization
666
667    def measure_pcm_memory(self, results_dir, pcm_file_name):
668        time.sleep(self.pcm_delay)
669        cmd = ["%s/build/bin/pcm-memory" % self.pcm_dir, "%s" % self.pcm_interval, "-csv=%s/%s" % (results_dir, pcm_file_name)]
670        pcm_memory = subprocess.Popen(cmd)
671        time.sleep(self.pcm_count)
672        pcm_memory.terminate()
673
674    def measure_pcm(self, results_dir, pcm_file_name):
675        time.sleep(self.pcm_delay)
676        cmd = ["%s/build/bin/pcm" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count,
677               "-csv=%s/%s" % (results_dir, pcm_file_name)]
678        subprocess.run(cmd)
679        df = pd.read_csv(os.path.join(results_dir, pcm_file_name), header=[0, 1])
680        df = df.rename(columns=lambda x: re.sub(r'Unnamed:[\w\s]*$', '', x))
681        skt = df.loc[:, df.columns.get_level_values(1).isin({'UPI0', 'UPI1', 'UPI2'})]
682        skt_pcm_file_name = "_".join(["skt", pcm_file_name])
683        skt.to_csv(os.path.join(results_dir, skt_pcm_file_name), index=False)
684
685    def measure_pcm_power(self, results_dir, pcm_power_file_name):
686        time.sleep(self.pcm_delay)
687        out = self.exec_cmd(["%s/build/bin/pcm-power" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count])
688        with open(os.path.join(results_dir, pcm_power_file_name), "w") as fh:
689            fh.write(out)
690
691    def measure_network_bandwidth(self, results_dir, bandwidth_file_name):
692        self.log_print("INFO: starting network bandwidth measure")
693        self.exec_cmd(["bwm-ng", "-o", "csv", "-F", "%s/%s" % (results_dir, bandwidth_file_name),
694                       "-a", "1", "-t", "1000", "-c", str(self.bandwidth_count)])
695
696    def measure_dpdk_memory(self, results_dir):
697        self.log_print("INFO: waiting to generate DPDK memory usage")
698        time.sleep(self.dpdk_wait_time)
699        self.log_print("INFO: generating DPDK memory usage")
        rpc.env_dpdk.env_dpdk_get_mem_stats(self.client)  # dumps stats to /tmp/spdk_mem_dump.txt by default
701        os.rename("/tmp/spdk_mem_dump.txt", "%s/spdk_mem_dump.txt" % (results_dir))
702
703    def sys_config(self):
704        self.log_print("====Kernel release:====")
705        self.log_print(os.uname().release)
706        self.log_print("====Kernel command line:====")
707        with open('/proc/cmdline') as f:
708            cmdline = f.readlines()
709            self.log_print('\n'.join(self.get_uncommented_lines(cmdline)))
710        self.log_print("====sysctl conf:====")
711        with open('/etc/sysctl.conf') as f:
712            sysctl = f.readlines()
713            self.log_print('\n'.join(self.get_uncommented_lines(sysctl)))
714        self.log_print("====Cpu power info:====")
715        self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
716        self.log_print("====zcopy settings:====")
717        self.log_print("zcopy enabled: %s" % (self.enable_zcopy))
718        self.log_print("====Scheduler settings:====")
719        self.log_print("SPDK scheduler: %s" % (self.scheduler_name))
720
721
722class Initiator(Server):
723    def __init__(self, name, general_config, initiator_config):
724        super().__init__(name, general_config, initiator_config)
725
726        # Required fields
727        self.ip = initiator_config["ip"]
728        self.target_nic_ips = initiator_config["target_nic_ips"]
729
730        # Defaults
731        self.cpus_allowed = None
732        self.cpus_allowed_policy = "shared"
733        self.spdk_dir = "/tmp/spdk"
734        self.fio_bin = "/usr/src/fio/fio"
735        self.nvmecli_bin = "nvme"
736        self.cpu_frequency = None
737        self.subsystem_info_list = []
738
739        if "spdk_dir" in initiator_config:
740            self.spdk_dir = initiator_config["spdk_dir"]
741        if "fio_bin" in initiator_config:
742            self.fio_bin = initiator_config["fio_bin"]
743        if "nvmecli_bin" in initiator_config:
744            self.nvmecli_bin = initiator_config["nvmecli_bin"]
745        if "cpus_allowed" in initiator_config:
746            self.cpus_allowed = initiator_config["cpus_allowed"]
747        if "cpus_allowed_policy" in initiator_config:
748            self.cpus_allowed_policy = initiator_config["cpus_allowed_policy"]
749        if "cpu_frequency" in initiator_config:
750            self.cpu_frequency = initiator_config["cpu_frequency"]
751        if os.getenv('SPDK_WORKSPACE'):
752            self.spdk_dir = os.getenv('SPDK_WORKSPACE')
753
754        self.ssh_connection = paramiko.SSHClient()
755        self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
756        self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
757        self.exec_cmd(["sudo", "rm", "-rf", "%s/nvmf_perf" % self.spdk_dir])
758        self.exec_cmd(["mkdir", "-p", "%s" % self.spdk_dir])
759        self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
760
761        if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
762            self.copy_spdk("/tmp/spdk.zip")
763        self.set_local_nic_info(self.set_local_nic_info_helper())
764        self.set_cpu_frequency()
765        self.configure_system()
766        if self.enable_adq:
767            self.configure_adq()
768        self.sys_config()
769
770    def set_local_nic_info_helper(self):
771        return json.loads(self.exec_cmd(["lshw", "-json"]))
772
773    def stop(self):
774        self.ssh_connection.close()
775
776    def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
777        if change_dir:
778            cmd = ["cd", change_dir, ";", *cmd]
779
        # If one of the command elements contains whitespace and is not already
        # quoted (e.g. when calling sysctl), quote it to prevent expansion when
        # sending the command to the remote system.
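        # Illustrative example: 'net.ipv4.tcp_rmem=8192 1048576 33554432' is sent as
        # "net.ipv4.tcp_rmem=8192 1048576 33554432" so the remote shell treats it as
        # a single argument.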
783        for i, c in enumerate(cmd):
784            if (" " in c or "\t" in c) and not (c.startswith("'") and c.endswith("'")):
785                cmd[i] = '"%s"' % c
786        cmd = " ".join(cmd)
787
        # Redirect stderr to stdout by using the get_pty option, if needed
789        _, stdout, _ = self.ssh_connection.exec_command(cmd, get_pty=stderr_redirect)
790        out = stdout.read().decode(encoding="utf-8")
791
792        # Check the return code
793        rc = stdout.channel.recv_exit_status()
794        if rc:
            raise subprocess.CalledProcessError(int(rc), cmd, out)
796
797        return out
798
799    def put_file(self, local, remote_dest):
800        ftp = self.ssh_connection.open_sftp()
801        ftp.put(local, remote_dest)
802        ftp.close()
803
804    def get_file(self, remote, local_dest):
805        ftp = self.ssh_connection.open_sftp()
806        ftp.get(remote, local_dest)
807        ftp.close()
808
809    def copy_spdk(self, local_spdk_zip):
810        self.log_print("Copying SPDK sources to initiator %s" % self.name)
811        self.put_file(local_spdk_zip, "/tmp/spdk_drop.zip")
812        self.log_print("Copied sources zip from target")
813        self.exec_cmd(["unzip", "-qo", "/tmp/spdk_drop.zip", "-d", self.spdk_dir])
814        self.log_print("Sources unpacked")
815
816    def copy_result_files(self, dest_dir):
817        self.log_print("Copying results")
818
819        if not os.path.exists(dest_dir):
820            os.mkdir(dest_dir)
821
822        # Get list of result files from initiator and copy them back to target
823        file_list = self.exec_cmd(["ls", "%s/nvmf_perf" % self.spdk_dir]).strip().split("\n")
824
825        for file in file_list:
826            self.get_file(os.path.join(self.spdk_dir, "nvmf_perf", file),
827                          os.path.join(dest_dir, file))
828        self.log_print("Done copying results")
829
830    def discover_subsystems(self, address_list, subsys_no):
831        num_nvmes = range(0, subsys_no)
832        nvme_discover_output = ""
833        for ip, subsys_no in itertools.product(address_list, num_nvmes):
834            self.log_print("Trying to discover: %s:%s" % (ip, 4420 + subsys_no))
835            nvme_discover_cmd = ["sudo",
836                                 "%s" % self.nvmecli_bin,
837                                 "discover", "-t", "%s" % self.transport,
838                                 "-s", "%s" % (4420 + subsys_no),
839                                 "-a", "%s" % ip]
840
841            try:
842                stdout = self.exec_cmd(nvme_discover_cmd)
843                if stdout:
844                    nvme_discover_output = nvme_discover_output + stdout
            except subprocess.CalledProcessError:
846                # Do nothing. In case of discovering remote subsystems of kernel target
847                # we expect "nvme discover" to fail a bunch of times because we basically
848                # scan ports.
849                pass
850
851        subsystems = re.findall(r'trsvcid:\s(\d+)\s+'  # get svcid number
852                                r'subnqn:\s+([a-zA-Z0-9\.\-\:]+)\s+'  # get NQN id
853                                r'traddr:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',  # get IP address
854                                nvme_discover_output)  # from nvme discovery output
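        # Illustrative "nvme discover" log entry matched by the regex above:
        #   trsvcid: 4420
        #   subnqn:  nqn.2018-09.io.spdk:cnode1
        #   traddr:  10.0.0.1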
855        subsystems = filter(lambda x: x[-1] in address_list, subsystems)
856        subsystems = filter(lambda x: "discovery" not in x[1], subsystems)
857        subsystems = list(set(subsystems))
858        subsystems.sort(key=lambda x: x[1])
859        self.log_print("Found matching subsystems on target side:")
860        for s in subsystems:
861            self.log_print(s)
862        self.subsystem_info_list = subsystems
863
864    def gen_fio_filename_conf(self, *args, **kwargs):
865        # Logic implemented in SPDKInitiator and KernelInitiator classes
866        pass
867
868    def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no, num_jobs=None, ramp_time=0, run_time=10, rate_iops=0):
869        fio_conf_template = """
870[global]
871ioengine={ioengine}
872{spdk_conf}
873thread=1
874group_reporting=1
875direct=1
876percentile_list=50:90:99:99.5:99.9:99.99:99.999
877
878norandommap=1
879rw={rw}
880rwmixread={rwmixread}
881bs={block_size}
882time_based=1
883ramp_time={ramp_time}
884runtime={run_time}
885rate_iops={rate_iops}
886"""
887        if "spdk" in self.mode:
888            bdev_conf = self.gen_spdk_bdev_conf(self.subsystem_info_list)
889            self.exec_cmd(["echo", "'%s'" % bdev_conf, ">", "%s/bdev.conf" % self.spdk_dir])
890            ioengine = "%s/build/fio/spdk_bdev" % self.spdk_dir
891            spdk_conf = "spdk_json_conf=%s/bdev.conf" % self.spdk_dir
892        else:
893            ioengine = self.ioengine
894            spdk_conf = ""
895            out = self.exec_cmd(["sudo", "nvme", "list", "|", "grep", "-E", "'SPDK|Linux'",
896                                 "|", "awk", "'{print $1}'"])
897            subsystems = [x for x in out.split("\n") if "nvme" in x]
898
899        if self.cpus_allowed is not None:
            self.log_print("Limiting FIO workload execution to specific cores %s" % self.cpus_allowed)
            cpus_num = 0
            cpus = self.cpus_allowed.split(",")
            for cpu in cpus:
                if "-" in cpu:
                    a, b = cpu.split("-")
                    a = int(a)
                    b = int(b)
                    # Ranges in cpus_allowed are inclusive, e.g. "0-3" means 4 CPUs
                    cpus_num += len(range(a, b + 1))
                else:
                    cpus_num += 1
911            self.num_cores = cpus_num
912            threads = range(0, self.num_cores)
913        elif hasattr(self, 'num_cores'):
914            self.log_print("Limiting FIO workload execution to %s cores" % self.num_cores)
915            threads = range(0, int(self.num_cores))
916        else:
917            self.num_cores = len(subsystems)
918            threads = range(0, len(subsystems))
919
920        if "spdk" in self.mode:
921            filename_section = self.gen_fio_filename_conf(self.subsystem_info_list, threads, io_depth, num_jobs)
922        else:
923            filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs)
924
925        fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
926                                              rw=rw, rwmixread=rwmixread, block_size=block_size,
927                                              ramp_time=ramp_time, run_time=run_time, rate_iops=rate_iops)
928
929        # TODO: hipri disabled for now, as it causes fio errors:
930        # io_u error on file /dev/nvme2n1: Operation not supported
931        # See comment in KernelInitiator class, kernel_init_connect() function
932        if hasattr(self, "ioengine") and "io_uring" in self.ioengine:
933            fio_config = fio_config + """
934fixedbufs=1
935registerfiles=1
936#hipri=1
937"""
938        if num_jobs:
939            fio_config = fio_config + "numjobs=%s \n" % num_jobs
940        if self.cpus_allowed is not None:
941            fio_config = fio_config + "cpus_allowed=%s \n" % self.cpus_allowed
942            fio_config = fio_config + "cpus_allowed_policy=%s \n" % self.cpus_allowed_policy
943        fio_config = fio_config + filename_section
944
945        fio_config_filename = "%s_%s_%s_m_%s" % (block_size, io_depth, rw, rwmixread)
946        if hasattr(self, "num_cores"):
947            fio_config_filename += "_%sCPU" % self.num_cores
948        fio_config_filename += ".fio"
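        # e.g. "4k_128_randrw_m_70_8CPU.fio" (illustrative)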
949
950        self.exec_cmd(["mkdir", "-p", "%s/nvmf_perf" % self.spdk_dir])
951        self.exec_cmd(["echo", "'%s'" % fio_config, ">", "%s/nvmf_perf/%s" % (self.spdk_dir, fio_config_filename)])
952        self.log_print("Created FIO Config:")
953        self.log_print(fio_config)
954
955        return os.path.join(self.spdk_dir, "nvmf_perf", fio_config_filename)
956
957    def set_cpu_frequency(self):
958        if self.cpu_frequency is not None:
959            try:
960                self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "userspace"], True)
961                self.exec_cmd(["sudo", "cpupower", "frequency-set", "-f", "%s" % self.cpu_frequency], True)
962                self.log_print(self.exec_cmd(["sudo", "cpupower", "frequency-info"]))
963            except Exception:
964                self.log_print("ERROR: cpu_frequency will not work when intel_pstate is enabled!")
965                sys.exit()
966        else:
            self.log_print("WARNING: you have disabled intel_pstate and are using the default CPU frequency governor.")
968
969    def run_fio(self, fio_config_file, run_num=None):
970        job_name, _ = os.path.splitext(fio_config_file)
971        self.log_print("Starting FIO run for job: %s" % job_name)
972        self.log_print("Using FIO: %s" % self.fio_bin)
973
974        if run_num:
975            for i in range(1, run_num + 1):
976                output_filename = job_name + "_run_" + str(i) + "_" + self.name + ".json"
977                try:
978                    output = self.exec_cmd(["sudo", self.fio_bin, fio_config_file, "--output-format=json",
979                                            "--output=%s" % output_filename, "--eta=never"], True)
980                    self.log_print(output)
981                except subprocess.CalledProcessError as e:
982                    self.log_print("ERROR: Fio process failed!")
983                    self.log_print(e.stdout)
984        else:
985            output_filename = job_name + "_" + self.name + ".json"
986            output = self.exec_cmd(["sudo", self.fio_bin,
987                                    fio_config_file, "--output-format=json",
                                    "--output=%s" % output_filename], True)
989            self.log_print(output)
990        self.log_print("FIO run finished. Results in: %s" % output_filename)
991
992    def sys_config(self):
993        self.log_print("====Kernel release:====")
994        self.log_print(self.exec_cmd(["uname", "-r"]))
995        self.log_print("====Kernel command line:====")
996        cmdline = self.exec_cmd(["cat", "/proc/cmdline"])
997        self.log_print('\n'.join(self.get_uncommented_lines(cmdline.splitlines())))
998        self.log_print("====sysctl conf:====")
999        sysctl = self.exec_cmd(["cat", "/etc/sysctl.conf"])
1000        self.log_print('\n'.join(self.get_uncommented_lines(sysctl.splitlines())))
1001        self.log_print("====Cpu power info:====")
1002        self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
1003
1004
1005class KernelTarget(Target):
1006    def __init__(self, name, general_config, target_config):
1007        super().__init__(name, general_config, target_config)
1008        # Defaults
1009        self.nvmet_bin = "nvmetcli"
1010
1011        if "nvmet_bin" in target_config:
1012            self.nvmet_bin = target_config["nvmet_bin"]
1013
1014    def stop(self):
1015        nvmet_command(self.nvmet_bin, "clear")
1016
1017    def kernel_tgt_gen_subsystem_conf(self, nvme_list):
1018
1019        nvmet_cfg = {
1020            "ports": [],
1021            "hosts": [],
1022            "subsystems": [],
1023        }
1024
1025        for ip, bdev_num in self.spread_bdevs(len(nvme_list)):
1026            port = str(4420 + bdev_num)
1027            nqn = "nqn.2018-09.io.spdk:cnode%s" % bdev_num
1028            serial = "SPDK00%s" % bdev_num
1029            bdev_name = nvme_list[bdev_num]
1030
1031            nvmet_cfg["subsystems"].append({
1032                "allowed_hosts": [],
1033                "attr": {
1034                    "allow_any_host": "1",
1035                    "serial": serial,
1036                    "version": "1.3"
1037                },
1038                "namespaces": [
1039                    {
1040                        "device": {
1041                            "path": bdev_name,
1042                            "uuid": "%s" % uuid.uuid4()
1043                        },
1044                        "enable": 1,
1045                        "nsid": port
1046                    }
1047                ],
1048                "nqn": nqn
1049            })
1050
1051            nvmet_cfg["ports"].append({
1052                "addr": {
1053                    "adrfam": "ipv4",
1054                    "traddr": ip,
1055                    "trsvcid": port,
1056                    "trtype": self.transport
1057                },
1058                "portid": bdev_num,
1059                "referrals": [],
1060                "subsystems": [nqn]
1061            })
1062
1063            self.subsystem_info_list.append([port, nqn, ip])
1064        self.subsys_no = len(self.subsystem_info_list)
1065
1066        with open("kernel.conf", "w") as fh:
1067            fh.write(json.dumps(nvmet_cfg, indent=2))
1068
1069    def tgt_start(self):
1070        self.log_print("Configuring kernel NVMeOF Target")
1071
1072        if self.null_block:
1073            print("Configuring with null block device.")
1074            nvme_list = ["/dev/nullb{}".format(x) for x in range(self.null_block)]
1075        else:
1076            print("Configuring with NVMe drives.")
1077            nvme_list = get_nvme_devices()
1078
1079        self.kernel_tgt_gen_subsystem_conf(nvme_list)
1080        self.subsys_no = len(nvme_list)
1081
1082        nvmet_command(self.nvmet_bin, "clear")
1083        nvmet_command(self.nvmet_bin, "restore kernel.conf")
1084
1085        if self.enable_adq:
1086            self.adq_configure_tc()
1087
1088        self.log_print("Done configuring kernel NVMeOF Target")
1089
1090
1091class SPDKTarget(Target):
1092    def __init__(self, name, general_config, target_config):
1093        super().__init__(name, general_config, target_config)
1094
1095        # Required fields
1096        self.core_mask = target_config["core_mask"]
1097        self.num_cores = self.get_num_cores(self.core_mask)
1098
1099        # Defaults
1100        self.dif_insert_strip = False
1101        self.null_block_dif_type = 0
1102        self.num_shared_buffers = 4096
1103        self.max_queue_depth = 128
1104        self.bpf_proc = None
1105        self.bpf_scripts = []
1106        self.enable_dsa = False
1107        self.scheduler_core_limit = None
1108
1109        if "num_shared_buffers" in target_config:
1110            self.num_shared_buffers = target_config["num_shared_buffers"]
1111        if "max_queue_depth" in target_config:
1112            self.max_queue_depth = target_config["max_queue_depth"]
1113        if "null_block_dif_type" in target_config:
1114            self.null_block_dif_type = target_config["null_block_dif_type"]
1115        if "dif_insert_strip" in target_config:
1116            self.dif_insert_strip = target_config["dif_insert_strip"]
1117        if "bpf_scripts" in target_config:
1118            self.bpf_scripts = target_config["bpf_scripts"]
1119        if "dsa_settings" in target_config:
1120            self.enable_dsa = target_config["dsa_settings"]
1121        if "scheduler_core_limit" in target_config:
1122            self.scheduler_core_limit = target_config["scheduler_core_limit"]
1123
1124        self.log_print("====DSA settings:====")
1125        self.log_print("DSA enabled: %s" % (self.enable_dsa))
1126
1127    @staticmethod
1128    def get_num_cores(core_mask):
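        # core_mask is either a hex mask, e.g. "0xF" -> 4 cores, or a bracketed
        # core list, e.g. "[0,1,10-15]" -> 8 cores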
1129        if "0x" in core_mask:
1130            return bin(int(core_mask, 16)).count("1")
1131        else:
1132            num_cores = 0
1133            core_mask = core_mask.replace("[", "")
1134            core_mask = core_mask.replace("]", "")
1135            for i in core_mask.split(","):
1136                if "-" in i:
1137                    x, y = i.split("-")
1138                    num_cores += len(range(int(x), int(y))) + 1
1139                else:
1140                    num_cores += 1
1141            return num_cores
1142
1143    def spdk_tgt_configure(self):
1144        self.log_print("Configuring SPDK NVMeOF target via RPC")
1145
1146        if self.enable_adq:
1147            self.adq_configure_tc()
1148
1149        # Create transport layer
1150        rpc.nvmf.nvmf_create_transport(self.client, trtype=self.transport,
1151                                       num_shared_buffers=self.num_shared_buffers,
1152                                       max_queue_depth=self.max_queue_depth,
1153                                       dif_insert_or_strip=self.dif_insert_strip,
1154                                       sock_priority=self.adq_priority)
1155        self.log_print("SPDK NVMeOF transport layer:")
1156        rpc_client.print_dict(rpc.nvmf.nvmf_get_transports(self.client))
1157
1158        if self.null_block:
1159            self.spdk_tgt_add_nullblock(self.null_block)
1160            self.spdk_tgt_add_subsystem_conf(self.nic_ips, self.null_block)
1161        else:
1162            self.spdk_tgt_add_nvme_conf()
1163            self.spdk_tgt_add_subsystem_conf(self.nic_ips)
1164
1165        self.log_print("Done configuring SPDK NVMeOF Target")
1166
1167    def spdk_tgt_add_nullblock(self, null_block_count):
1168        md_size = 0
1169        block_size = 4096
1170        if self.null_block_dif_type != 0:
1171            md_size = 128
1172
        self.log_print("Adding null block bdevs to config via RPC")
        for i in range(null_block_count):
            self.log_print("Setting bdev protection to: %s" % self.null_block_dif_type)
1176            rpc.bdev.bdev_null_create(self.client, 102400, block_size + md_size, "Nvme{}n1".format(i),
1177                                      dif_type=self.null_block_dif_type, md_size=md_size)
1178        self.log_print("SPDK Bdevs configuration:")
1179        rpc_client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
1180
1181    def spdk_tgt_add_nvme_conf(self, req_num_disks=None):
1182        self.log_print("Adding NVMe bdevs to config via RPC")
1183
1184        bdfs = get_nvme_devices_bdf()
1185        bdfs = [b.replace(":", ".") for b in bdfs]
1186
1187        if req_num_disks:
1188            if req_num_disks > len(bdfs):
1189                self.log_print("ERROR: Requested %s disks, but only %s are available" % (req_num_disks, len(bdfs)))
1190                sys.exit(1)
1191            else:
1192                bdfs = bdfs[0:req_num_disks]
1193
1194        for i, bdf in enumerate(bdfs):
1195            rpc.bdev.bdev_nvme_attach_controller(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)
1196
1197        self.log_print("SPDK Bdevs configuration:")
1198        rpc_client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
1199
1200    def spdk_tgt_add_subsystem_conf(self, ips=None, req_num_disks=None):
1201        self.log_print("Adding subsystems to config")
1202        if not req_num_disks:
1203            req_num_disks = get_nvme_devices_count()
1204
1205        for ip, bdev_num in self.spread_bdevs(req_num_disks):
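                # One subsystem per bdev: unique NQN, serial number and listener port (4420 + bdev index);
                # a listener is also registered on the discovery subsystem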
1206            port = str(4420 + bdev_num)
1207            nqn = "nqn.2018-09.io.spdk:cnode%s" % bdev_num
1208            serial = "SPDK00%s" % bdev_num
1209            bdev_name = "Nvme%sn1" % bdev_num
1210
1211            rpc.nvmf.nvmf_create_subsystem(self.client, nqn, serial,
1212                                           allow_any_host=True, max_namespaces=8)
1213            rpc.nvmf.nvmf_subsystem_add_ns(self.client, nqn, bdev_name)
1214            for nqn_name in [nqn, "discovery"]:
1215                rpc.nvmf.nvmf_subsystem_add_listener(self.client,
1216                                                     nqn=nqn_name,
1217                                                     trtype=self.transport,
1218                                                     traddr=ip,
1219                                                     trsvcid=port,
1220                                                     adrfam="ipv4")
1221            self.subsystem_info_list.append([port, nqn, ip])
1222        self.subsys_no = len(self.subsystem_info_list)
1223
1224        self.log_print("SPDK NVMeOF subsystem configuration:")
1225        rpc_client.print_dict(rpc.nvmf.nvmf_get_subsystems(self.client))
1226
1227    def bpf_start(self):
1228        self.log_print("Starting BPF Trace scripts: %s" % self.bpf_scripts)
1229        bpf_script = os.path.join(self.spdk_dir, "scripts/bpftrace.sh")
1230        bpf_traces = [os.path.join(self.spdk_dir, "scripts/bpf", trace) for trace in self.bpf_scripts]
1231        results_path = os.path.join(self.results_dir, "bpf_traces.txt")
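            # scripts/bpftrace.sh attaches the requested trace scripts to the running
            # nvmf_tgt process and writes their output to BPF_OUTFILE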
1232
1233        with open(self.pid, "r") as fh:
1234            nvmf_pid = str(fh.readline())
1235
1236        cmd = [bpf_script, nvmf_pid, *bpf_traces]
1237        self.log_print(cmd)
1238        self.bpf_proc = subprocess.Popen(cmd, env={"BPF_OUTFILE": results_path})
1239
1240    def tgt_start(self):
1241        if self.null_block:
1242            self.subsys_no = 1
1243        else:
1244            self.subsys_no = get_nvme_devices_count()
1245        self.log_print("Starting SPDK NVMeOF Target process")
1246        nvmf_app_path = os.path.join(self.spdk_dir, "build/bin/nvmf_tgt")
1247        proc = subprocess.Popen([nvmf_app_path, "--wait-for-rpc", "-m", self.core_mask])
1248        self.pid = os.path.join(self.spdk_dir, "nvmf.pid")
1249
1250        with open(self.pid, "w") as fh:
1251            fh.write(str(proc.pid))
1252        self.nvmf_proc = proc
1253        self.log_print("SPDK NVMeOF Target PID=%s" % proc.pid)
1254        self.log_print("Waiting for spdk to initialize...")
1255        while True:
1256            if os.path.exists("/var/tmp/spdk.sock"):
1257                break
1258            time.sleep(1)
1259        self.client = rpc_client.JSONRPCClient("/var/tmp/spdk.sock")
1260
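            # nvmf_tgt was started with --wait-for-rpc, so socket and scheduler options
            # below must be applied before framework_start_init() completes initialization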
1261        rpc.sock.sock_set_default_impl(self.client, impl_name="posix")
1262
1263        if self.enable_zcopy:
1264            rpc.sock.sock_impl_set_options(self.client, impl_name="posix",
1265                                           enable_zerocopy_send_server=True)
1266            self.log_print("Target socket options:")
1267            rpc_client.print_dict(rpc.sock.sock_impl_get_options(self.client, impl_name="posix"))
1268
1269        if self.enable_adq:
1270            rpc.sock.sock_impl_set_options(self.client, impl_name="posix", enable_placement_id=1)
1271            rpc.bdev.bdev_nvme_set_options(self.client, timeout_us=0, action_on_timeout=None,
1272                                           nvme_adminq_poll_period_us=100000, retry_count=4)
1273
1274        if self.enable_dsa:
1275            rpc.dsa.dsa_scan_accel_engine(self.client, config_kernel_mode=None)
1276            self.log_print("Target DSA accel engine enabled")
1277
1278        rpc.app.framework_set_scheduler(self.client, name=self.scheduler_name, core_limit=self.scheduler_core_limit)
1279        rpc.framework_start_init(self.client)
1280
1281        if self.bpf_scripts:
1282            self.bpf_start()
1283
1284        self.spdk_tgt_configure()
1285
1286    def stop(self):
1287        if self.bpf_proc:
1288            self.log_print("Stopping BPF Trace script")
1289            self.bpf_proc.terminate()
1290            self.bpf_proc.wait()
1291
1292        if hasattr(self, "nvmf_proc"):
1293            try:
1294                self.nvmf_proc.terminate()
1295                self.nvmf_proc.wait(timeout=30)
1296            except Exception as e:
1297                self.log_print("Failed to terminate SPDK Target process. Sending SIGKILL.")
1298                self.log_print(e)
1299                self.nvmf_proc.kill()
1300                self.nvmf_proc.communicate()
1301                # Try to clean up RPC socket files if they were not removed
1302                # because of using 'kill'
1303                try:
1304                    os.remove("/var/tmp/spdk.sock")
1305                    os.remove("/var/tmp/spdk.sock.lock")
1306                except FileNotFoundError:
1307                    pass
1308
1309
1310class KernelInitiator(Initiator):
1311    def __init__(self, name, general_config, initiator_config):
1312        super().__init__(name, general_config, initiator_config)
1313
1314        # Defaults
1315        self.extra_params = ""
1316        self.ioengine = "libaio"
1317
1318        if "extra_params" in initiator_config:
1319            self.extra_params = initiator_config["extra_params"]
1320
1321        if "kernel_engine" in initiator_config:
1322            self.ioengine = initiator_config["kernel_engine"]
1323            if "io_uring" in self.ioengine:
1324                self.extra_params = "--nr-poll-queues=8"
1325
1326    def get_connected_nvme_list(self):
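            # Keep only NVMe-oF attached namespaces: SPDK targets report an "SPDK"
            # model number, Linux kernel targets report "Linux"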
1327        json_obj = json.loads(self.exec_cmd(["sudo", "nvme", "list", "-o", "json"]))
1328        nvme_list = [os.path.basename(x["DevicePath"]) for x in json_obj["Devices"]
1329                     if "SPDK" in x["ModelNumber"] or "Linux" in x["ModelNumber"]]
1330        return nvme_list
1331
1332    def kernel_init_connect(self):
1333        self.log_print("The connection attempts below may produce error messages; this is expected!")
1334        for subsystem in self.subsystem_info_list:
1335            self.log_print("Trying to connect %s %s %s" % tuple(subsystem))
1336            self.exec_cmd(["sudo", self.nvmecli_bin, "connect", "-t", self.transport,
1337                           "-s", subsystem[0], "-n", subsystem[1], "-a", subsystem[2], self.extra_params])
1338            time.sleep(2)
1339
1340        if "io_uring" in self.ioengine:
1341            self.log_print("Setting block layer settings for io_uring.")
1342
1343            # TODO: io_poll=1 and io_poll_delay=-1 params not set here, because
1344            #       apparently it's not possible for connected subsystems.
1345            #       Results in "error: Invalid argument"
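                # iostats=0 disables per-queue I/O statistics, rq_affinity=0 skips steering
                # completions back to the submitting CPU, nomerges=2 disables request merging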
1346            block_sysfs_settings = {
1347                "iostats": "0",
1348                "rq_affinity": "0",
1349                "nomerges": "2"
1350            }
1351
1352            for disk in self.get_connected_nvme_list():
1353                sysfs = os.path.join("/sys/block", disk, "queue")
1354                for k, v in block_sysfs_settings.items():
1355                    sysfs_opt_path = os.path.join(sysfs, k)
1356                    try:
1357                        self.exec_cmd(["sudo", "bash", "-c", "echo %s > %s" % (v, sysfs_opt_path)], stderr_redirect=True)
1358                    except subprocess.CalledProcessError as e:
1359                        self.log_print("Warning: command %s failed with error %s; %s was not set to %s!" % (e.cmd, e.output, sysfs_opt_path, v))
1360                    finally:
1361                        _ = self.exec_cmd(["sudo", "cat", "%s" % (sysfs_opt_path)])
1362                        self.log_print("%s=%s" % (sysfs_opt_path, _))
1363
1364    def kernel_init_disconnect(self):
1365        for subsystem in self.subsystem_info_list:
1366            self.exec_cmd(["sudo", self.nvmecli_bin, "disconnect", "-n", subsystem[1]])
1367            time.sleep(1)
1368
1369    def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1):
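            # Spread the connected NVMe-oF block devices across the fio job sections as
            # evenly as possible and scale each section's iodepth by the devices it received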
1370        nvme_list = [os.path.join("/dev", nvme) for nvme in self.get_connected_nvme_list()]
1371
1372        filename_section = ""
1373        nvme_per_split = int(len(nvme_list) / len(threads))
1374        remainder = len(nvme_list) % len(threads)
1375        iterator = iter(nvme_list)
1376        result = []
1377        for i in range(len(threads)):
1378            result.append([])
1379            for _ in range(nvme_per_split):
1380                result[i].append(next(iterator))
1381            if remainder:
1382                result[i].append(next(iterator))
1383                remainder -= 1
1384        for i, r in enumerate(result):
1385            header = "[filename%s]" % i
1386            disks = "\n".join(["filename=%s" % x for x in r])
1387            job_section_qd = round((io_depth * len(r)) / num_jobs)
1388            if job_section_qd == 0:
1389                job_section_qd = 1
1390            iodepth = "iodepth=%s" % job_section_qd
1391            filename_section = "\n".join([filename_section, header, disks, iodepth])
1392
1393        return filename_section
1394
1395
1396class SPDKInitiator(Initiator):
1397    def __init__(self, name, general_config, initiator_config):
1398        super().__init__(name, general_config, initiator_config)
1399
1400        if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
1401            self.install_spdk()
1402
1403        # Required fields
1404        self.num_cores = initiator_config["num_cores"]
1405
1406        # Optional fields
1407        self.enable_data_digest = False
1408        if "enable_data_digest" in initiator_config:
1409            self.enable_data_digest = initiator_config["enable_data_digest"]
1410
1411    def install_spdk(self):
1412        self.log_print("Using fio binary %s" % self.fio_bin)
1413        self.exec_cmd(["git", "-C", self.spdk_dir, "submodule", "update", "--init"])
1414        self.exec_cmd(["git", "-C", self.spdk_dir, "clean", "-ffdx"])
1415        self.exec_cmd(["cd", self.spdk_dir, "&&", "./configure", "--with-rdma", "--with-fio=%s" % os.path.dirname(self.fio_bin)])
1416        self.exec_cmd(["make", "-C", self.spdk_dir, "clean"])
1417        self.exec_cmd(["make", "-C", self.spdk_dir, "-j$(($(nproc)*2))"])
1418
1419        self.log_print("SPDK built")
1420        self.exec_cmd(["sudo", "%s/scripts/setup.sh" % self.spdk_dir])
1421
1422    def gen_spdk_bdev_conf(self, remote_subsystem_list):
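            # Build an SPDK bdev subsystem JSON config with one bdev_nvme_attach_controller
            # entry per discovered remote subsystem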
1423        bdev_cfg_section = {
1424            "subsystems": [
1425                {
1426                    "subsystem": "bdev",
1427                    "config": []
1428                }
1429            ]
1430        }
1431
1432        for i, subsys in enumerate(remote_subsystem_list):
1433            sub_port, sub_nqn, sub_addr = map(str, subsys)
1434            nvme_ctrl = {
1435                "method": "bdev_nvme_attach_controller",
1436                "params": {
1437                    "name": "Nvme{}".format(i),
1438                    "trtype": self.transport,
1439                    "traddr": sub_addr,
1440                    "trsvcid": sub_port,
1441                    "subnqn": sub_nqn,
1442                    "adrfam": "IPv4"
1443                }
1444            }
1445
1446            if self.enable_adq:
1447                nvme_ctrl["params"].update({"priority": "1"})
1448
1449            if self.enable_data_digest:
1450                nvme_ctrl["params"].update({"ddgst": self.enable_data_digest})
1451
1452            bdev_cfg_section["subsystems"][0]["config"].append(nvme_ctrl)
1453
1454        return json.dumps(bdev_cfg_section, indent=2)
1455
1456    def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1):
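            # Same device-spreading logic as the kernel initiator, except fio filenames
            # are SPDK bdev names (NvmeXn1) rather than /dev nodes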
1457        filename_section = ""
1458        if len(threads) >= len(subsystems):
1459            threads = range(0, len(subsystems))
1460        filenames = ["Nvme%sn1" % x for x in range(0, len(subsystems))]
1461        nvme_per_split = int(len(subsystems) / len(threads))
1462        remainder = len(subsystems) % len(threads)
1463        iterator = iter(filenames)
1464        result = []
1465        for i in range(len(threads)):
1466            result.append([])
1467            for _ in range(nvme_per_split):
1468                result[i].append(next(iterator))
1469            if remainder:
1470                result[i].append(next(iterator))
1471                remainder -= 1
1472        for i, r in enumerate(result):
1473            header = "[filename%s]" % i
1474            disks = "\n".join(["filename=%s" % x for x in r])
1475            job_section_qd = round((io_depth * len(r)) / num_jobs)
1476            if job_section_qd == 0:
1477                job_section_qd = 1
1478            iodepth = "iodepth=%s" % job_section_qd
1479            filename_section = "\n".join([filename_section, header, disks, iodepth])
1480
1481        return filename_section
1482
1483
1484if __name__ == "__main__":
1485    script_full_dir = os.path.dirname(os.path.realpath(__file__))
1486    default_config_file_path = os.path.relpath(os.path.join(script_full_dir, "config.json"))
1487
1488    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
1489    parser.add_argument('-c', '--config', type=str, default=default_config_file_path,
1490                        help='Configuration file.')
1491    parser.add_argument('-r', '--results', type=str, default='/tmp/results',
1492                        help='Results directory.')
1493    parser.add_argument('-s', '--csv-filename', type=str, default='nvmf_results.csv',
1494                        help='CSV results filename.')
1495
1496    args = parser.parse_args()
1497
1498    print("Using config file: %s" % args.config)
1499    with open(args.config, "r") as config:
1500        data = json.load(config)
1501
1502    initiators = []
1503    fio_cases = []
1504
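        # Expected config.json layout (illustrative sketch only - key sets vary with the
        # chosen modes and the values below are placeholders):
        # {
        #     "general": {...},
        #     "target": {"mode": "spdk", "nic_ips": [...], ...},
        #     "initiator1": {"mode": "kernel", "nic_ips": [...], "num_cores": ...},
        #     "fio": {"bs": ["4k"], "qd": [32], "rw": ["randread"],
        #             "run_time": 30, "ramp_time": 10, "rwmixread": 70}
        # }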
1505    general_config = data["general"]
1506    target_config = data["target"]
1507    initiator_configs = [data[x] for x in data.keys() if "initiator" in x]
1508
1509    for k, v in data.items():
1510        if "target" in k:
1511            v.update({"results_dir": args.results})
1512            if data[k]["mode"] == "spdk":
1513                target_obj = SPDKTarget(k, data["general"], v)
1514            elif data[k]["mode"] == "kernel":
1515                target_obj = KernelTarget(k, data["general"], v)
1516        elif "initiator" in k:
1517            if data[k]["mode"] == "spdk":
1518                init_obj = SPDKInitiator(k, data["general"], v)
1519            elif data[k]["mode"] == "kernel":
1520                init_obj = KernelInitiator(k, data["general"], v)
1521            initiators.append(init_obj)
1522        elif "fio" in k:
1523            fio_workloads = itertools.product(data[k]["bs"],
1524                                              data[k]["qd"],
1525                                              data[k]["rw"])
1526
1527            fio_run_time = data[k]["run_time"]
1528            fio_ramp_time = data[k]["ramp_time"]
1529            fio_rw_mix_read = data[k]["rwmixread"]
1530            fio_run_num = data[k]["run_num"] if "run_num" in data[k] else None
1531            fio_num_jobs = data[k]["num_jobs"] if "num_jobs" in data[k] else None
1532
1533            fio_rate_iops = 0
1534            if "rate_iops" in data[k]:
1535                fio_rate_iops = data[k]["rate_iops"]
1536        else:
1537            continue
1538
1539    try:
1540        os.mkdir(args.results)
1541    except FileExistsError:
1542        pass
1543
1544    for i in initiators:
1545        target_obj.initiator_info.append(
1546            {"name": i.name, "target_nic_ips": i.target_nic_ips, "initiator_nic_ips": i.nic_ips}
1547        )
1548
1549    # TODO: This try block is definitely too large. Need to break this up into separate
1550    # logical blocks to reduce size.
1551    try:
1552        target_obj.tgt_start()
1553
1554        for i in initiators:
1555            i.discover_subsystems(i.target_nic_ips, target_obj.subsys_no)
1556            if i.enable_adq:
1557                i.adq_configure_tc()
1558
1559        # Poor man's threading
1560        # Run FIO tests
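            # For each (block size, queue depth, rw mode) combination: connect kernel initiators,
            # generate per-initiator fio configs, run fio and any enabled monitoring tools in
            # parallel threads, then collect the result files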
1561        for block_size, io_depth, rw in fio_workloads:
1562            threads = []
1563            configs = []
1564            power_daemon = None
1565            for i in initiators:
1566                if i.mode == "kernel":
1567                    i.kernel_init_connect()
1568
1569                cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
1570                                       fio_num_jobs, fio_ramp_time, fio_run_time, fio_rate_iops)
1571                configs.append(cfg)
1572
1573            for i, cfg in zip(initiators, configs):
1574                t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
1575                threads.append(t)
1576            if target_obj.enable_sar:
1577                sar_file_prefix = "%s_%s_%s_sar" % (block_size, rw, io_depth)
1578                t = threading.Thread(target=target_obj.measure_sar, args=(args.results, sar_file_prefix))
1579                threads.append(t)
1580
1581            if target_obj.enable_pcm:
1582                pcm_fnames = ["%s_%s_%s_%s.csv" % (block_size, rw, io_depth, x) for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]
1583
1584                pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm, args=(args.results, pcm_fnames[0],))
1585                pcm_mem_t = threading.Thread(target=target_obj.measure_pcm_memory, args=(args.results, pcm_fnames[1],))
1586                pcm_pow_t = threading.Thread(target=target_obj.measure_pcm_power, args=(args.results, pcm_fnames[2],))
1587
1588                threads.append(pcm_cpu_t)
1589                threads.append(pcm_mem_t)
1590                threads.append(pcm_pow_t)
1591
1592            if target_obj.enable_bandwidth:
1593                bandwidth_file_name = "_".join(["bandwidth", str(block_size), str(rw), str(io_depth)])
1594                bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
1595                t = threading.Thread(target=target_obj.measure_network_bandwidth, args=(args.results, bandwidth_file_name,))
1596                threads.append(t)
1597
1598            if target_obj.enable_dpdk_memory:
1599                t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(args.results,))
1600                threads.append(t)
1601
1602            if target_obj.enable_adq:
1603                ethtool_thread = threading.Thread(target=target_obj.ethtool_after_fio_ramp, args=(fio_ramp_time,))
1604                threads.append(ethtool_thread)
1605
1606            if target_obj.enable_pm:
1607                power_daemon = target_obj.measure_power(args.results, "%s_%s_%s" % (block_size, rw, io_depth), script_full_dir)
1608
1609            for t in threads:
1610                t.start()
1611            for t in threads:
1612                t.join()
1613
1614            for i in initiators:
1615                if i.mode == "kernel":
1616                    i.kernel_init_disconnect()
1617                i.copy_result_files(args.results)
1618
1619            if power_daemon:
1620                power_daemon.terminate()
1621
1622        target_obj.restore_governor()
1623        target_obj.restore_tuned()
1624        target_obj.restore_services()
1625        target_obj.restore_sysctl()
1626        if target_obj.enable_adq:
1627            target_obj.reload_driver("ice")
1628        for i in initiators:
1629            i.restore_governor()
1630            i.restore_tuned()
1631            i.restore_services()
1632            i.restore_sysctl()
1633            if i.enable_adq:
1634                i.reload_driver("ice")
1635        target_obj.parse_results(args.results, args.csv_filename)
1636    finally:
1637        for i in initiators:
1638            try:
1639                i.stop()
1640            except Exception as err:
1641                i.log_print("Failed to stop the initiator: %s" % err)
1642        target_obj.stop()
1643