#!/usr/bin/env bash
#  SPDX-License-Identifier: BSD-3-Clause
#  Copyright (C) 2018 Intel Corporation
#  All rights reserved.
#
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/vhost/common.sh

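# Default test parameters; most can be overridden with the command line options parsed below.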
vhost_num="0"
vm_memory=2048
vm_sar_enable=false
host_sar_enable=false
sar_delay="0"
sar_interval="1"
sar_count="10"
vm_throttle=""
bpf_traces=()
ctrl_type="spdk_vhost_scsi"
use_split=false
run_precondition=false
lvol_stores=()
lvol_bdevs=()
split_bdevs=()
used_vms=""
wwpn_prefix="naa.5001405bc6498"
packed_ring=false

fio_iterations=1
fio_gtod=""
precond_fio_bin=$CONFIG_FIO_SOURCE_DIR/fio
disk_map=""
enable_irq=0
irqs_pids=()
enable_perf=0
perf_cpus=""

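# Parsed columns of the --disk-map file; one array per column.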
disk_cfg_bdfs=()
disk_cfg_spdk_names=()
disk_cfg_splits=()
disk_cfg_vms=()
disk_cfg_kernel_names=()

function usage() {
	[[ -n $2 ]] && (
		echo "$2"
		echo ""
	)
	echo "Shortcut script for running automated vhost performance tests"
	echo "Usage: $(basename $1) [OPTIONS]"
	echo
	echo "-h, --help                  Print help and exit"
	echo "    --fio-bin=PATH          Path to FIO binary on host."
	echo "                            Binary will be copied to the VMs; static compilation"
	echo "                            of the binary is recommended."
	echo "    --fio-jobs=PATH         Comma separated list of fio config files to use for test."
	echo "    --fio-iterations=INT    Number of times to run specified workload."
	echo "    --fio-gtod-reduce       Enable fio gtod_reduce option in test."
	echo "    --vm-memory=INT         Amount of RAM memory (in MB) to pass to a single VM."
	echo "                            Default: 2048 MB"
	echo "    --vm-image=PATH         OS image to use for running the VMs."
	echo "                            Default: \$DEPENDENCY_DIR/spdk_test_image.qcow2"
	echo "    --vm-sar-enable         Measure CPU utilization in guest VMs using sar."
	echo "    --host-sar-enable       Measure CPU utilization on host using sar."
	echo "    --sar-delay=INT         Wait for X seconds before starting SAR measurement. Default: 0."
	echo "    --sar-interval=INT      Interval (seconds) argument for SAR. Default: 1s."
	echo "    --sar-count=INT         Count argument for SAR. Default: 10."
	echo "    --bpf-traces=LIST       Comma delimited list of .bt scripts for enabling BPF traces."
	echo "                            List of .bt scripts available in scripts/bpf"
	echo "    --vm-throttle-iops=INT  I/Os throttle rate in IOPS for each device on the VMs."
	echo "    --ctrl-type=TYPE        Controller type to use for test:"
	echo "                            spdk_vhost_scsi - use spdk vhost scsi"
	echo "                            spdk_vhost_blk - use spdk vhost block"
	echo "                            kernel_vhost - use kernel vhost scsi"
	echo "                            vfio_user - use vfio-user transport layer"
	echo "                            Default: spdk_vhost_scsi"
	echo "    --packed-ring           Use packed ring support. Requires Qemu 4.2.0 or greater. Default: disabled."
	echo "    --use-split             Use split vbdevs instead of Logical Volumes"
	echo "    --run-precondition      Precondition NVMe disks before running the test. Default: false."
	echo "    --precond-fio-bin       FIO binary used for SPDK fio plugin precondition. Default: $CONFIG_FIO_SOURCE_DIR/fio."
	echo "    --custom-cpu-cfg=PATH   Custom CPU config for test."
	echo "                            Default: spdk/test/vhost/common/autotest.config"
	echo "    --disk-map              Disk map for given test. Specify which disks to use, their SPDK name,"
	echo "                            how many times to split them and which VMs should be attached to created bdevs."
	echo "                            Example:"
	echo "                            NVME PCI BDF,Spdk Bdev Name,Split Count,VM List"
	echo "                            0000:1a:00.0,Nvme0,2,0 1"
	echo "                            0000:1b:00.0,Nvme1,2,2 3"
	echo "    --iobuf-small-pool-count=INT   Number of small buffers in the global pool."
	echo "    --iobuf-large-pool-count=INT   Number of large buffers in the global pool."
	echo "-x                          Enable 'set -x' for script debugging."
	echo "-i                          Collect IRQ stats from each VM"
	echo "-p                          Enable perf report collection hooked to vhost CPUs"
	exit 0
}
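
# Example invocation (hypothetical paths, shown for illustration only):
#   ./vhost_perf.sh --ctrl-type=spdk_vhost_blk --fio-jobs=/path/to/job.fio \
#     --disk-map=/path/to/disk_map.csv --run-precondition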

function cleanup_lvol_cfg() {
	notice "Removing lvol bdevs"
	for lvol_bdev in "${lvol_bdevs[@]}"; do
		$rpc_py bdev_lvol_delete $lvol_bdev
		notice "lvol bdev $lvol_bdev removed"
	done

	notice "Removing lvol stores"
	for lvol_store in "${lvol_stores[@]}"; do
		$rpc_py bdev_lvol_delete_lvstore -u $lvol_store
		notice "lvol store $lvol_store removed"
	done
}

function cleanup_split_cfg() {
	notice "Removing split vbdevs"
	for disk in "${disk_cfg_spdk_names[@]}"; do
		$rpc_py bdev_split_delete ${disk}n1
	done
}

function cleanup_parted_config() {
	notice "Removing parted disk configuration"
	for disk in "${disk_cfg_kernel_names[@]}"; do
		wipefs --all "/dev/${disk}n1"
	done
}

function cleanup_kernel_vhost() {
	notice "Cleaning kernel vhost configuration"
	targetcli clearconfig confirm=True
	cleanup_parted_config
}

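# Configure and boot a single VM; the attached disk name depends on the controller type.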
function create_vm() {
	vm_num=$1
	setup_cmd="vm_setup --disk-type=$ctrl_type --force=$vm_num --memory=$vm_memory --os=$VM_IMAGE"
	if [[ "$ctrl_type" == "kernel_vhost" ]]; then
		x=$(printf %03d $vm_num)
		setup_cmd+=" --disks=${wwpn_prefix}${x}"
	elif [[ "$ctrl_type" == "vfio_user" ]]; then
		setup_cmd+=" --disks=$vm_num"
	else
		setup_cmd+=" --disks=0"
	fi

	if $packed_ring; then
		setup_cmd+=" --packed"
	fi

	$setup_cmd
	used_vms+=" $vm_num"
	echo "Added to used vms"
	echo $used_vms
}

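# Create a vhost controller (or a vfio-user NVMe-oF subsystem) for a VM
# and attach the given bdev as its storage.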
function create_spdk_controller() {
	vm_num=$1
	bdev=$2

	if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
		$rpc_py vhost_create_scsi_controller naa.0.$vm_num
		notice "Created vhost scsi controller naa.0.$vm_num"
		$rpc_py vhost_scsi_controller_add_target naa.0.$vm_num 0 $bdev
		notice "Added LUN 0/$bdev to controller naa.0.$vm_num"
	elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
		p_opt=""
		if $packed_ring; then
			p_opt="-p"
		fi

		$rpc_py vhost_create_blk_controller naa.0.$vm_num $bdev $p_opt
		notice "Created vhost blk controller naa.0.$vm_num $bdev"
	elif [[ "$ctrl_type" == "vfio_user" ]]; then
		vm_muser_dir="$VM_DIR/$vm_num/muser"
		rm -rf $vm_muser_dir
		mkdir -p $vm_muser_dir/domain/muser$vm_num/$vm_num

		$rpc_py nvmf_create_subsystem ${nqn_prefix}${vm_num} -s SPDK00$vm_num -a
		$rpc_py nvmf_subsystem_add_ns ${nqn_prefix}${vm_num} $bdev
		$rpc_py nvmf_subsystem_add_listener ${nqn_prefix}${vm_num} -t VFIOUSER -a $vm_muser_dir/domain/muser$vm_num/$vm_num -s 0
	fi
}

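# Parse command line options; long options are handled through the '-' getopts case.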
while getopts 'xhip-:' optchar; do
	case "$optchar" in
		-)
			case "$OPTARG" in
				help) usage $0 ;;
				fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
				fio-jobs=*) fio_jobs="${OPTARG#*=}" ;;
				fio-iterations=*) fio_iterations="${OPTARG#*=}" ;;
				fio-gtod-reduce) fio_gtod="--gtod-reduce" ;;
				vm-memory=*) vm_memory="${OPTARG#*=}" ;;
				vm-image=*) VM_IMAGE="${OPTARG#*=}" ;;
				vm-sar-enable) vm_sar_enable=true ;;
				host-sar-enable) host_sar_enable=true ;;
				sar-delay=*) sar_delay="${OPTARG#*=}" ;;
				sar-interval=*) sar_interval="${OPTARG#*=}" ;;
				sar-count=*) sar_count="${OPTARG#*=}" ;;
				bpf-traces=*) IFS="," read -r -a bpf_traces <<< "${OPTARG#*=}" ;;
				vm-throttle-iops=*) vm_throttle="${OPTARG#*=}" ;;
				ctrl-type=*) ctrl_type="${OPTARG#*=}" ;;
				packed-ring) packed_ring=true ;;
				use-split) use_split=true ;;
				run-precondition) run_precondition=true ;;
				precond-fio-bin=*) precond_fio_bin="${OPTARG#*=}" ;;
				custom-cpu-cfg=*) custom_cpu_cfg="${OPTARG#*=}" ;;
				disk-map=*) disk_map="${OPTARG#*=}" ;;
				iobuf-small-pool-count=*) iobuf_small_count="${OPTARG#*=}" ;;
				iobuf-large-pool-count=*) iobuf_large_count="${OPTARG#*=}" ;;
				*) usage $0 "Invalid argument '$OPTARG'" ;;
			esac
			;;
		h) usage $0 ;;
		x)
			set -x
			x="-x"
			;;
		i) enable_irq=1 ;;
		p) enable_perf=1 ;;
		*) usage $0 "Invalid argument '$OPTARG'" ;;
	esac
done

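# All SPDK RPCs in this script go through the rpc.sock of target instance 0.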
rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"

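# Custom CPU configs define per-instance variables (e.g. vhost_0_reactor_mask);
# resolve them here with bash indirect expansion.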
if [[ -n $custom_cpu_cfg ]]; then
	source $custom_cpu_cfg
	vhost_reactor_mask="vhost_${vhost_num}_reactor_mask"
	vhost_reactor_mask="${!vhost_reactor_mask}"
	vhost_main_core="vhost_${vhost_num}_main_core"
	vhost_main_core="${!vhost_main_core}"
fi

if [[ -z $fio_jobs ]]; then
	error "No FIO job specified!"
fi

if [[ $ctrl_type == "vfio_user" ]]; then
	vhost_bin_opt="-b nvmf_tgt"
fi

trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR

if [[ ! -f $disk_map ]]; then
	fail "No disk map provided for test. Exiting."
fi

# ===== Enable "performance" cpu governor =====
if hash cpupower; then
	cpupower frequency-set -g performance
else
	echo "WARNING: cpupower not found; CPU governor was not changed. Please install cpupower."
fi
current_governor=$(cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor)
echo "INFO: Using $current_governor cpu governor for test."

# ===== Precondition NVMes if specified =====
if [[ $run_precondition == true ]]; then
	# The same precondition routine can be used for lvols thanks to the
	# --clear-method option; lvols should not UNMAP on creation.
	json_cfg=$rootdir/nvme.json
	$rootdir/scripts/gen_nvme.sh --json-with-subsystems > "$json_cfg"
	mapfile -t nvmes < <(grep -oP "Nvme\d+" "$json_cfg")
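	# Build fio's colon-separated filename list (e.g. Nvme0n1:Nvme1n1) and drop
	# the leading ':'. Preconditioning is best-effort ('|| true'), so a failure
	# here does not abort the test.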
	fio_filename=$(printf ":%sn1" "${nvmes[@]}")
	fio_filename=${fio_filename:1}
	$precond_fio_bin --name="precondition" \
		--ioengine="${rootdir}/build/fio/spdk_bdev" \
		--rw="write" --spdk_json_conf="$json_cfg" --thread="1" \
		--group_reporting --direct="1" --size="100%" --loops="2" --bs="256k" \
		--iodepth=32 --filename="${fio_filename}" || true
fi

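# Parse the disk map file. Each non-comment line has the format:
#   PCI BDF,SPDK bdev name,split count,space-separated VM list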
while IFS="," read -r bdf spdk_name split vms; do
	[[ $bdf == "#"* ]] && continue
	disk_cfg_bdfs+=("$bdf")
	disk_cfg_spdk_names+=("$spdk_name")
	disk_cfg_splits+=("$split")
	disk_cfg_vms+=("$vms")
	# Find kernel nvme names
	if [[ "$ctrl_type" == "kernel_vhost" ]]; then
		disk_cfg_kernel_names+=("/sys/bus/pci/devices/$bdf/nvme/nvme"*)
	fi
done < "$disk_map"

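# Strip the /sys path prefix, keeping only the kernel controller names (e.g. nvme0).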
disk_cfg_kernel_names=("${disk_cfg_kernel_names[@]##*/}")

if [[ "$ctrl_type" == "kernel_vhost" ]]; then
	notice "Configuring kernel vhost..."
	trap 'vm_kill_all; sleep 1; cleanup_kernel_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR

	# Split disks using parted for kernel vhost
	newline=$'\n'
	backstores=()
	for ((i = 0; i < ${#disk_cfg_kernel_names[@]}; i++)); do
		nvme=${disk_cfg_kernel_names[$i]}
		splits=${disk_cfg_splits[$i]}
		notice "  Creating partition table (GPT) on /dev/${nvme}n1"
		parted -s /dev/${nvme}n1 mklabel gpt

		part_size=$((100 / ${disk_cfg_splits[$i]})) # Split 100% of disk into roughly even parts
		echo "  Creating ${splits} partitions of relative disk size ${part_size}"
		for p in $(seq 0 $((splits - 1))); do
			p_start=$((p * part_size))
			p_end=$((p_start + part_size))
			parted -s "/dev/${nvme}n1" mkpart "part$p" ${p_start}% ${p_end}%
		done

		sleep 3

		# Prepare kernel vhost configuration
		partitions=("/dev/${nvme}n1p"*)
		# Create block backstores for vhost kernel process
		for p in "${partitions[@]}"; do
			backstore_name=$(basename $p)
			backstores+=("$backstore_name")
			targetcli backstores/block create $backstore_name $p
		done

		# Create kernel vhost controllers and add LUNs
		# Setup VM configurations
		vms_to_run=(${disk_cfg_vms[i]})
		for ((j = 0; j < ${#vms_to_run[@]}; j++)); do
			# The WWPN prefix is 3 characters short; complete it with the
			# zero-padded VM number
			x=$(printf %03d ${vms_to_run[$j]})
			wwpn="${wwpn_prefix}${x}"
			targetcli vhost/ create $wwpn
			targetcli vhost/$wwpn/tpg1/luns create /backstores/block/$(basename ${partitions[$j]})
			create_vm ${vms_to_run[j]}
			sleep 1
		done
	done
	targetcli ls
else
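	# SPDK target setup: vhost for spdk_vhost_* types, NVMe-oF for vfio_user.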
	notice "Configuring SPDK vhost..."
	vhost_run -n "${vhost_num}" -g ${vhost_bin_opt} -- -p "${vhost_main_core}" -m "${vhost_reactor_mask}"
	notice "..."
	if [[ ${#bpf_traces[@]} -gt 0 ]]; then
		notice "Enabling BPF traces: ${bpf_traces[*]}"
		vhost_dir="$(get_vhost_dir 0)"
		vhost_pid="$(cat $vhost_dir/vhost.pid)"

		bpf_cmd=("$rootdir/scripts/bpftrace.sh")
		bpf_cmd+=("$vhost_pid")
		for trace in "${bpf_traces[@]}"; do
			bpf_cmd+=("$rootdir/scripts/bpf/$trace")
		done

		BPF_OUTFILE="$VHOST_DIR/bpftraces.txt" "${bpf_cmd[@]}" &
		bpf_script_pid=$!

		# Wait a bit for trace capture to start
		sleep 3
	fi

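	# For vfio-user, the target runs as nvmf_tgt; create the VFIOUSER transport
	# that the per-VM subsystems will listen on.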
	if [[ "$ctrl_type" == "vfio_user" ]]; then
		rm -rf /dev/shm/muser
		$rpc_py nvmf_create_transport --trtype VFIOUSER
		nqn_prefix="nqn.2021-02.io.spdk:cnode"
	fi

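	# Expose each NVMe disk to its VMs either as split vbdevs or as lvol bdevs,
	# one bdev (and one controller) per VM.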
	if [[ $use_split == true ]]; then
		notice "Configuring split bdevs configuration..."
		trap 'cleanup_split_cfg; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
		for ((i = 0; i < ${#disk_cfg_bdfs[@]}; i++)); do
			nvme_bdev=$($rpc_py bdev_nvme_attach_controller -b ${disk_cfg_spdk_names[$i]} -t pcie -a ${disk_cfg_bdfs[$i]})
			notice "Created NVMe Bdev: $nvme_bdev with BDF ${disk_cfg_bdfs[$i]}"

			splits=$($rpc_py bdev_split_create $nvme_bdev ${disk_cfg_splits[$i]})
			splits=($splits)
			notice "Created splits: ${splits[*]} on Bdev ${nvme_bdev}"
			split_bdevs+=("${splits[@]}")

			vms_to_run=(${disk_cfg_vms[i]})
			for ((j = 0; j < ${#vms_to_run[@]}; j++)); do
				notice "Setting up VM ${vms_to_run[j]}"
				create_spdk_controller "${vms_to_run[j]}" ${splits[j]}
				create_vm ${vms_to_run[j]}
			done
			echo " "
		done
		bdevs=("${split_bdevs[@]}")
	else
		notice "Configuring LVOLs..."
		trap 'cleanup_lvol_cfg; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
		for ((i = 0; i < ${#disk_cfg_bdfs[@]}; i++)); do
			nvme_bdev=$($rpc_py bdev_nvme_attach_controller -b ${disk_cfg_spdk_names[$i]} -t pcie -a ${disk_cfg_bdfs[$i]})
			notice "Created NVMe Bdev: $nvme_bdev with BDF ${disk_cfg_bdfs[$i]}"

			ls_guid=$($rpc_py bdev_lvol_create_lvstore $nvme_bdev lvs_$i --clear-method none)
			lvol_stores+=("$ls_guid")
			notice "Created Lvol Store: $ls_guid on Bdev $nvme_bdev"

			vms_to_run=(${disk_cfg_vms[i]})
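			# Carve the lvstore into even-sized lvols; recomputing free space on
			# each pass spreads any rounding remainder across the remaining bdevs.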
			for ((j = 0; j < ${disk_cfg_splits[$i]}; j++)); do
				free_mb=$(get_lvs_free_mb "$ls_guid")
				size=$((free_mb / (disk_cfg_splits[i] - j)))
				lb_name=$($rpc_py bdev_lvol_create -u $ls_guid lbd_$j $size --clear-method none)
				lvol_bdevs+=("$lb_name")
				notice "Created LVOL Bdev $lb_name on Lvol Store $ls_guid on Bdev $nvme_bdev"

				notice "Setting up VM ${vms_to_run[j]}"
				create_spdk_controller "${vms_to_run[j]}" ${lb_name}
				create_vm ${vms_to_run[j]}
			done
			echo " "
		done
		$rpc_py bdev_lvol_get_lvstores
	fi
	$rpc_py bdev_get_bdevs
	if [[ "$ctrl_type" =~ "vhost" ]]; then
		$rpc_py vhost_get_controllers
	elif [[ "$ctrl_type" =~ "vfio" ]]; then
		$rpc_py nvmf_get_subsystems
	fi
fi

# Start VMs and wait for them to boot
vm_run $used_vms
vm_wait_for_boot 300 $used_vms

# Prepare VMs for FIO
fio_disks=""

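# Per-VM setup: start the FIO server, locate the test disks, switch them to the
# 'none' I/O scheduler and apply optional IOPS throttling.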
for vm_num in $used_vms; do
	host_name="VM-$vm_num"
	vm_exec $vm_num "hostname $host_name"
	vm_start_fio_server $fio_bin $vm_num

	if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
		vm_check_scsi_location $vm_num
	elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
		vm_check_blk_location $vm_num
	elif [[ "$ctrl_type" == "kernel_vhost" ]]; then
		vm_check_scsi_location $vm_num
	elif [[ "$ctrl_type" == "vfio_user" ]]; then
		vm_check_nvme_location $vm_num
	fi

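	# Note: $SCSI_DISK is collapsed into a single block device name here, which
	# assumes one test disk per VM.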
	block=$(printf '%s' $SCSI_DISK)
	vm_exec "$vm_num" "echo none > /sys/class/block/$block/queue/scheduler"

	if [[ -n "$vm_throttle" ]]; then
		# Check whether cgroups v1 or v2 is used on the guest system.
		# A simple, naive & quick approach that should do the trick for the
		# plain VMs used in performance tests.
		c_gr_ver=2
		if vm_exec "$vm_num" "grep '^cgroup ' /proc/mounts"; then
			c_gr_ver=1
		fi
		major_minor=$(vm_exec "$vm_num" "cat /sys/block/$block/dev")

		if [[ $c_gr_ver == 1 ]]; then
			vm_exec "$vm_num" "echo \"$major_minor $vm_throttle\" > /sys/fs/cgroup/blkio/blkio.throttle.read_iops_device"
			vm_exec "$vm_num" "echo \"$major_minor $vm_throttle\" > /sys/fs/cgroup/blkio/blkio.throttle.write_iops_device"
		elif [[ $c_gr_ver == 2 ]]; then
			vm_exec "$vm_num" "echo '+io' > /sys/fs/cgroup/cgroup.subtree_control"
			vm_exec "$vm_num" "echo \"$major_minor riops=$vm_throttle wiops=$vm_throttle\" > /sys/fs/cgroup/user.slice/io.max"
		fi
	fi

	fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
	((enable_irq == 1)) && lookup_dev_irqs "$vm_num"
done

# Gather perf stats only from the vhost cpus
perf_cpus=${vhost_reactor_mask//[\[\]]/}

# Run FIO traffic
for fio_job in ${fio_jobs//,/ }; do
	((enable_irq == 1)) && irqs $used_vms
	runtime=$(get_from_fio "runtime" "$fio_job")
	ramptime=$(get_from_fio "ramp_time" "$fio_job")
	fio_job_fname=$(basename $fio_job)
	fio_log_fname="${fio_job_fname%%.*}.log"
	for i in $(seq 1 $fio_iterations); do
		echo "Running FIO iteration $i for $fio_job_fname"
		run_fio $fio_bin --hide-results --job-file="$fio_job" --out="$VHOST_DIR/fio_results" --json $fio_disks $fio_gtod &
		fio_pid=$!
		if ((enable_perf == 1)); then
			collect_perf "$perf_cpus" "$VHOST_DIR/perf/report.perf" "$runtime" "$ramptime" &
			perf_pid=$!
		fi

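		# Optionally sample CPU utilization with sar on the host and/or inside
		# each VM while FIO is running.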
		if $host_sar_enable || $vm_sar_enable; then
			pids=""
			mkdir -p $VHOST_DIR/fio_results/sar_stats
			sleep $sar_delay
		fi

		if $host_sar_enable; then
			sar -P ALL $sar_interval $sar_count > "$VHOST_DIR/fio_results/sar_stats/sar_stats_host.txt" &
			pids+=" $!"
		fi

		if $vm_sar_enable; then
			for vm_num in $used_vms; do
				vm_exec "$vm_num" "mkdir -p /root/sar; sar -P ALL $sar_interval $sar_count >> /root/sar/sar_stats_VM${vm_num}_run${i}.txt" &
				pids+=" $!"
			done
		fi

		for j in $pids; do
			wait $j
		done

		if $vm_sar_enable; then
			for vm_num in $used_vms; do
				vm_scp "$vm_num" "root@127.0.0.1:/root/sar/sar_stats_VM${vm_num}_run${i}.txt" "$VHOST_DIR/fio_results/sar_stats"
			done
		fi

		wait $fio_pid $perf_pid
		mv $VHOST_DIR/fio_results/$fio_log_fname $VHOST_DIR/fio_results/$fio_log_fname.$i
		sleep 1
	done

	((enable_irq == 1)) && kill "${irqs_pids[@]}"

	parse_fio_results "$VHOST_DIR/fio_results" "$fio_log_fname"
	((enable_irq == 1)) && parse_irqs $((++iter))
	((enable_perf == 1)) && parse_perf $iter
done

notice "Shutting down virtual machines..."
vm_shutdown_all

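# Tear down: remove created bdevs/controllers and stop the target application.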
if [[ "$ctrl_type" == "kernel_vhost" ]]; then
	cleanup_kernel_vhost || true
else
	notice "Shutting down SPDK vhost app..."
	if [[ $use_split == true ]]; then
		cleanup_split_cfg
	else
		cleanup_lvol_cfg
	fi
	vhost_kill "${vhost_num}"

	if ((bpf_script_pid)); then
		wait $bpf_script_pid
	fi
fi
