xref: /spdk/test/vhost/common.sh (revision ee32a82bfd3ff5b1a10ed775ee06f0eaffce60eb)
1#  SPDX-License-Identifier: BSD-3-Clause
2#  Copyright (C) 2017 Intel Corporation
3#  All rights reserved.
4#
5
# Defaults; each can be pre-set in the caller's environment.
: ${SPDK_VHOST_VERBOSE=false}
: ${VHOST_DIR="$HOME/vhost_test"}
: ${QEMU_BIN:="qemu-system-x86_64"}
: ${QEMU_IMG_BIN="qemu-img"}

# Layout: $VHOST_DIR/vms/<num> for VMs, $VHOST_DIR/vhost/<name> for targets.
TEST_DIR=$(readlink -f $rootdir/..)
VM_DIR=$VHOST_DIR/vms
TARGET_DIR=$VHOST_DIR/vhost
VM_PASSWORD="root"

VM_IMAGE=${VM_IMAGE:-"$DEPENDENCY_DIR/vhost/spdk_test_image.qcow2"}
FIO_BIN=${FIO_BIN:-}

WORKDIR=$(readlink -f "$(dirname "$0")")

# This file is sourced, hence `return` (not `exit`) when QEMU is missing.
if ! hash $QEMU_IMG_BIN $QEMU_BIN; then
	echo 'ERROR: QEMU is not installed on this system. Unable to run vhost tests.' >&2
	return 1
fi

mkdir -p $VHOST_DIR
mkdir -p $VM_DIR
mkdir -p $TARGET_DIR

#
# Source config describing QEMU and VHOST cores and NUMA
#
source $rootdir/test/vhost/common/autotest.config
source "$rootdir/test/scheduler/common.sh"
35
function vhosttestinit() {
	# In "iso" mode, bind devices to userspace drivers first.
	if [[ $TEST_MODE == "iso" ]]; then
		$rootdir/scripts/setup.sh
	fi

	# Inflate the compressed VM image if only the .gz variant is present.
	if [[ -e $VM_IMAGE.gz && ! -e $VM_IMAGE ]]; then
		gzip -dc "$VM_IMAGE.gz" > "$VM_IMAGE"
	fi

	# Fail unless the image exists - or the caller opted out via --no_vm.
	if [[ ! -f $VM_IMAGE ]]; then
		[[ $1 != "--no_vm" ]] || return 0
		echo "$VM_IMAGE is missing" >&2
		return 1
	fi
}
52
function vhosttestfini() {
	# Restore kernel drivers if the tests ran in "iso" mode.
	if [[ $TEST_MODE == "iso" ]]; then
		$rootdir/scripts/setup.sh reset
	fi
}
58
function message() {
	# Print a message prefixed with its type (INFO/WARN/ERROR/FAIL).
	# With $SPDK_VHOST_VERBOSE, append the original call site: file:line
	# when invoked from a sourced script, function:line otherwise.
	local verbose_out
	if ! $SPDK_VHOST_VERBOSE; then
		verbose_out=""
	elif [[ ${FUNCNAME[2]} == "source" ]]; then
		# FUNCNAME[2]/BASH_LINENO[1] skip our wrapper (notice/warning/...)
		# and point at the wrapper's caller.
		verbose_out=" (file $(basename ${BASH_SOURCE[1]}):${BASH_LINENO[1]})"
	else
		verbose_out=" (function ${FUNCNAME[2]}:${BASH_LINENO[1]})"
	fi

	local msg_type="$1"
	shift
	echo -e "${msg_type}${verbose_out}: $*"
}
73
function fail() {
	# Print a framed FAIL message to stderr and abort the whole test.
	{
		echo "==========="
		message "FAIL" "$@"
		echo "==========="
	} >&2
	exit 1
}
80
function error() {
	# Print a framed ERROR message to stderr and return failure.
	{
		echo "==========="
		message "ERROR" "$@"
		echo "==========="
	} >&2
	# Don't 'return 1' since the stack trace will be incomplete (why?) missing upper command.
	false
}
88
function warning() {
	# Print a WARN-prefixed message to stderr.
	message "WARN" "$@" >&2
}
92
function notice() {
	# Print an INFO-prefixed message to stdout.
	message "INFO" "$@"
}
96
function check_qemu_packedring_support() {
	# Packed virtqueue (packed ring) support landed in QEMU 4.2.0; error
	# out when the configured binary is older.
	qemu_version=$($QEMU_BIN -version | grep -Po "(?<=version )\d+\.\d+\.\d+")
	# Compare versions numerically via sort -V; the previous lexicographic
	# test ([[ "$qemu_version" < "4.2.0" ]]) misorders e.g. 10.0.0 vs 4.2.0.
	if [[ "$(printf '%s\n' "$qemu_version" "4.2.0" | sort -V | head -n1)" != "4.2.0" ]]; then
		error "This qemu binary does not support packed ring"
	fi
}
103
function get_vhost_dir() {
	# Print the working directory for the named vhost instance.
	local name=$1

	if [[ -z $name ]]; then
		error "vhost name must be provided to get_vhost_dir"
		return 1
	fi

	echo "$TARGET_DIR/$name"
}
114
function vhost_run() {
	# Start a vhost app instance in the background and wait for its RPC
	# socket to come up.
	#   -n <name> : instance name (required); working dir is $TARGET_DIR/<name>
	#   -b <bin>  : app binary under $SPDK_BIN_DIR (default: "vhost")
	#   -g        : skip gen_nvme.sh NVMe bdev auto-configuration
	# Remaining arguments are forwarded to the app. Sets global $vhost_pid.
	local OPTIND
	local vhost_name
	local run_gen_nvme=true
	local vhost_bin="vhost"
	local vhost_args=()
	local cmd=()

	while getopts "n:b:g" optchar; do
		case "$optchar" in
			n) vhost_name="$OPTARG" ;;
			b) vhost_bin="$OPTARG" ;;
			g)
				run_gen_nvme=false
				notice "Skipping gen_nvme.sh NVMe bdev configuration"
				;;
			*)
				error "Unknown param $optchar"
				return 1
				;;
		esac
	done
	shift $((OPTIND - 1))

	vhost_args=("$@")

	if [[ -z "$vhost_name" ]]; then
		error "vhost name must be provided to vhost_run"
		return 1
	fi

	local vhost_dir
	vhost_dir="$(get_vhost_dir $vhost_name)"
	local vhost_app="$SPDK_BIN_DIR/$vhost_bin"
	local vhost_log_file="$vhost_dir/vhost.log"
	local vhost_pid_file="$vhost_dir/vhost.pid"
	local vhost_socket="$vhost_dir/usvhost"
	notice "starting vhost app in background"
	# Kill any leftover instance and start from a clean working directory.
	[[ -r "$vhost_pid_file" ]] && vhost_kill $vhost_name
	[[ -d $vhost_dir ]] && rm -f $vhost_dir/*
	mkdir -p $vhost_dir

	if [[ ! -x $vhost_app ]]; then
		error "application not found: $vhost_app"
		return 1
	fi

	cmd=("$vhost_app" "-r" "$vhost_dir/rpc.sock" "${vhost_args[@]}")
	# Only vhost-flavored binaries take -S (vhost socket directory).
	if [[ "$vhost_bin" =~ vhost ]]; then
		cmd+=(-S "$vhost_dir")
	fi

	notice "Logging to:   $vhost_log_file"
	notice "Socket:      $vhost_socket"
	notice "Command:     ${cmd[*]}"

	timing_enter vhost_start

	# iobuf pool sizes must be set before framework init; overridable by caller.
	iobuf_small_count=${iobuf_small_count:-16383}
	iobuf_large_count=${iobuf_large_count:-2047}

	"${cmd[@]}" --wait-for-rpc &
	vhost_pid=$!
	echo $vhost_pid > $vhost_pid_file

	notice "waiting for app to run..."
	waitforlisten "$vhost_pid" "$vhost_dir/rpc.sock"

	"$rootdir/scripts/rpc.py" -s "$vhost_dir/rpc.sock" \
		iobuf_set_options \
		--small-pool-count="$iobuf_small_count" \
		--large-pool-count="$iobuf_large_count"

	"$rootdir/scripts/rpc.py" -s "$vhost_dir/rpc.sock" \
		framework_start_init

	# Do not generate NVMe bdevs if PCI access is disabled (--no-pci / -u).
	if [[ "${cmd[*]}" != *"--no-pci"* ]] && [[ "${cmd[*]}" != *"-u"* ]] && $run_gen_nvme; then
		$rootdir/scripts/gen_nvme.sh | $rootdir/scripts/rpc.py -s $vhost_dir/rpc.sock load_subsystem_config
	fi

	notice "vhost started - pid=$vhost_pid"

	timing_exit vhost_start
}
200
function vhost_kill() {
	# Gracefully stop a vhost instance: SIGINT, wait up to 60s, SIGABRT as
	# a last resort. Always removes the instance's working directory.
	# Returns non-zero only when the process refused to die.
	local rc=0
	local vhost_name="$1"

	if [[ -z "$vhost_name" ]]; then
		error "Must provide vhost name to vhost_kill"
		return 0
	fi

	local vhost_dir
	vhost_dir="$(get_vhost_dir $vhost_name)"
	local vhost_pid_file="$vhost_dir/vhost.pid"

	# No pid file -> nothing to kill.
	if [[ ! -r $vhost_pid_file ]]; then
		warning "no vhost pid file found"
		return 0
	fi

	timing_enter vhost_kill
	local vhost_pid
	vhost_pid="$(cat $vhost_pid_file)"
	notice "killing vhost (PID $vhost_pid) app"

	if kill -INT $vhost_pid > /dev/null; then
		notice "sent SIGINT to vhost app - waiting 60 seconds to exit"
		for ((i = 0; i < 60; i++)); do
			if kill -0 $vhost_pid; then
				echo "."
				sleep 1
			else
				break
			fi
		done
		if kill -0 $vhost_pid; then
			error "ERROR: vhost was NOT killed - sending SIGABRT"
			kill -ABRT $vhost_pid
			rc=1
		else
			# NOTE(review): busy-wait with no sleep - spins until the PID
			# disappears; presumably the process exits almost immediately
			# here since the loop above already saw it gone.
			while kill -0 $vhost_pid; do
				echo "."
			done
		fi
	elif kill -0 $vhost_pid; then
		# Signal could not be delivered but the process exists (e.g. EPERM).
		error "vhost NOT killed - you need to kill it manually"
		rc=1
	else
		notice "vhost was not running"
	fi

	timing_exit vhost_kill

	rm -rf "$vhost_dir"

	return $rc
}
256
function vhost_rpc() {
	# Forward an RPC call to the rpc.sock of the named vhost instance.
	local vhost_name=$1

	if [[ -z $vhost_name ]]; then
		error "vhost name must be provided to vhost_rpc"
		return 1
	fi
	shift

	$rootdir/scripts/rpc.py -s "$(get_vhost_dir "$vhost_name")/rpc.sock" "$@"
}
268
269###
270# Mgmt functions
271###
272
function assert_number() {
	# Succeed only when $1 is entirely a non-negative integer. The regex is
	# anchored (^...$) to match whole strings, consistent with
	# vm_num_is_valid(); the previous unanchored form accepted e.g. "12a".
	[[ "$1" =~ ^[0-9]+$ ]] && return 0

	error "Invalid or missing parameter: need number but got '$1'"
	return 1
}
279
280# Run command on vm with given password
281# First argument - vm number
282# Second argument - ssh password for vm
283#
function vm_sshpass() {
	# Run a command on VM $1 using ssh password $2 (instead of $VM_PASSWORD).
	vm_num_is_valid $1 || return 1

	local ssh_cmd
	# NOTE: kept as a string and expanded unquoted below so that
	# $VM_SSH_OPTIONS word-splits into individual ssh options.
	ssh_cmd="sshpass -p $2 ssh \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-p $(vm_ssh_socket $1) $VM_SSH_OPTIONS 127.0.0.1"

	shift 2
	$ssh_cmd "$@"
}
297
298# Helper to validate VM number
299# param $1 VM number
300#
function vm_num_is_valid() {
	# A valid VM number is a string consisting of digits only.
	if [[ "$1" =~ ^[0-9]+$ ]]; then
		return 0
	fi

	error "Invalid or missing parameter: vm number '$1'"
	return 1
}
307
308# Print network socket for given VM number
309# param $1 virtual machine number
310#
function vm_ssh_socket() {
	# Print the host TCP port forwarded to the VM's ssh port.
	vm_num_is_valid $1 || return 1

	cat "$VM_DIR/$1/ssh_socket"
}
317
function vm_fio_socket() {
	# Print the host TCP port forwarded to the VM's fio server port.
	vm_num_is_valid $1 || return 1

	cat "$VM_DIR/$1/fio_socket"
}
324
325# Execute command on given VM
326# param $1 virtual machine number
327#
function vm_exec() {
	# Execute "$@" on VM $1 over ssh (via the host port forwarded to guest
	# port 22), authenticating with $VM_PASSWORD.
	vm_num_is_valid $1 || return 1

	local vm_num="$1"
	shift

	# $VM_SSH_OPTIONS is intentionally unquoted so extra options word-split.
	sshpass -p "$VM_PASSWORD" ssh \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-p $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS 127.0.0.1 \
		"$@"
}
341
342# Execute scp command on given VM
343# param $1 virtual machine number
344#
function vm_scp() {
	# Copy files to/from VM $1 with scp; remaining args are passed to scp
	# verbatim (use 127.0.0.1:<path> for the guest side).
	vm_num_is_valid $1 || return 1

	local vm_num="$1"
	shift

	# scp takes the port as -P (capital), unlike ssh's -p.
	sshpass -p "$VM_PASSWORD" scp \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-P $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS \
		"$@"
}
358
359# check if specified VM is running
360# param $1 VM num
function vm_is_running() {
	# Return 0 when the qemu process of VM $1 is alive. Cleans up a stale
	# pid file when the process is confirmed gone.
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	# No pid file -> never started (or already cleaned up).
	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 1
	fi

	local vm_pid
	vm_pid="$(cat $vm_dir/qemu.pid)"

	if /bin/kill -0 $vm_pid; then
		return 0
	else
		# kill -0 can fail with EPERM for a live process when not root;
		# err on the side of "running" in that case.
		if [[ $EUID -ne 0 ]]; then
			warning "not root - assuming VM running since can't be checked"
			return 0
		fi

		# not running - remove pid file
		rm -f $vm_dir/qemu.pid
		return 1
	fi
}
385
# Check if the given VM's OS has finished booting (i.e. it answers over ssh)
# param $1 VM num
function vm_os_booted() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		error "VM $1 is not running"
		return 1
	fi

	# Probe the guest with a one-shot ssh "true" (no connection multiplexing).
	if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_exec $1 "true" 2> /dev/null; then
		# Shutdown existing master. Ignore errors as it might not exist.
		VM_SSH_OPTIONS="-O exit" vm_exec $1 "true" 2> /dev/null
		return 1
	fi

	return 0
}
405
406# Shutdown given VM
407# param $1 virtual machine number
408# return non-zero in case of error.
function vm_shutdown() {
	# Ask VM $1 to power off via `shutdown -h -P now` inside the guest.
	# Returns immediately; use vm_shutdown_all()/vm_wait_for_boot-style
	# polling to wait for completion.
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"
	if [[ ! -d "$vm_dir" ]]; then
		error "VM$1 ($vm_dir) not exist - setup it first"
		return 1
	fi

	if ! vm_is_running $1; then
		notice "VM$1 ($vm_dir) is not running"
		return 0
	fi

	# Temporarily disabling exit flag for next ssh command, since it will
	# "fail" due to shutdown
	notice "Shutting down virtual machine $vm_dir"
	set +e
	vm_exec $1 "nohup sh -c 'shutdown -h -P now'" || true
	notice "VM$1 is shutting down - wait a while to complete"
	set -e
}
430
431# Kill given VM
432# param $1 virtual machine number
433#
function vm_kill() {
	# Forcefully terminate the qemu process of VM $1 (SIGTERM) and remove
	# its directory. No-op when the VM has no pid file.
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 0
	fi

	local vm_pid
	vm_pid="$(cat $vm_dir/qemu.pid)"

	notice "Killing virtual machine $vm_dir (pid=$vm_pid)"
	# If delivering the signal fails but the VM still looks alive
	# (vm_is_running), report an error; otherwise treat it as already gone.
	if /bin/kill $vm_pid; then
		notice "process $vm_pid killed"
		rm -rf $vm_dir
	elif vm_is_running $1; then
		error "Process $vm_pid NOT killed"
		return 1
	fi
}
455
456# List all VM numbers in VM_DIR
457#
function vm_list_all() {
	# Print the numbers of all VMs that have a directory under $VM_DIR.
	# Relies on extglob (+([0-9])) and nullglob being enabled by the caller.
	local vms=()
	vms=("$VM_DIR"/+([0-9]))
	if ((${#vms[@]} > 0)); then
		basename --multiple "${vms[@]}"
	fi
}
465
466# Kills all VM in $VM_DIR
467#
function vm_kill_all() {
	# Terminate every VM defined under $VM_DIR, then drop the whole tree.
	local vm
	local all_vms
	all_vms=$(vm_list_all)
	for vm in $all_vms; do
		vm_kill "$vm"
	done

	rm -rf $VM_DIR
}
476
477# Shutdown all VM in $VM_DIR
478#
function vm_shutdown_all() {
	# Power off all VMs and wait up to $1 seconds (default 90) for them to
	# stop. On timeout, prints the logs of every leftover VM and returns 1.
	local timeo=${1:-90} vms vm

	vms=($(vm_list_all))

	for vm in "${vms[@]}"; do
		vm_shutdown "$vm"
	done

	notice "Waiting for VMs to shutdown..."
	while ((timeo-- > 0 && ${#vms[@]} > 0)); do
		for vm in "${!vms[@]}"; do
			# Drop a VM from the list as soon as it stops running.
			vm_is_running "${vms[vm]}" || unset -v "vms[vm]"
		done
		sleep 1
	done

	if ((${#vms[@]} == 0)); then
		notice "All VMs successfully shut down"
		return 0
	fi

	warning "Not all VMs were shut down. Leftovers: ${vms[*]}"

	for vm in "${vms[@]}"; do
		vm_print_logs "$vm"
	done

	return 1
}
509
function vm_setup() {
	# Define a new VM under $VM_DIR/<num>: build the full qemu command line
	# and write a run.sh script, later executed by vm_run().
	# Long options:
	#   --os=<image> --os-mode=<snapshot|backing|original> --qemu-args=<str>
	#   --disk-type=<type> --read-only=<bool> --disks=<d1[:d2...]>
	#   --raw-cache=<mode> --force=<vm_num> --memory=<MB> --incoming=<vm_num>
	#   --migrate-to=<vm_num> --vhost-name=<name> --spdk-boot=<disk> --packed
	xtrace_disable
	local OPTIND optchar vm_num

	local os=""
	local os_mode=""
	local qemu_args=()
	local disk_type_g=NOT_DEFINED
	local read_only="false"
	# Disk list, parsed from a ":"-separated string
	local disks=()
	local raw_cache=""
	local vm_incoming=""
	local vm_migrate_to=""
	local force_vm=""
	local guest_memory=1024
	local vhost_dir
	local packed=false
	vhost_dir="$(get_vhost_dir 0)"
	while getopts ':-:' optchar; do
		case "$optchar" in
			-)
				case "$OPTARG" in
					os=*) os="${OPTARG#*=}" ;;
					os-mode=*) os_mode="${OPTARG#*=}" ;;
					qemu-args=*) qemu_args+=("${OPTARG#*=}") ;;
					disk-type=*) disk_type_g="${OPTARG#*=}" ;;
					read-only=*) read_only="${OPTARG#*=}" ;;
					disks=*) IFS=":" read -ra disks <<< "${OPTARG#*=}" ;;
					raw-cache=*) raw_cache=",cache${OPTARG#*=}" ;;
					force=*) force_vm=${OPTARG#*=} ;;
					memory=*) guest_memory=${OPTARG#*=} ;;
					incoming=*) vm_incoming="${OPTARG#*=}" ;;
					migrate-to=*) vm_migrate_to="${OPTARG#*=}" ;;
					vhost-name=*) vhost_dir="$(get_vhost_dir ${OPTARG#*=})" ;;
					spdk-boot=*) local boot_from="${OPTARG#*=}" ;;
					packed) packed=true ;;
					*)
						error "unknown argument $OPTARG"
						return 1
						;;
				esac
				;;
			*)
				error "vm_create Unknown param $OPTARG"
				return 1
				;;
		esac
	done

	# Find next directory we can use
	if [[ -n $force_vm ]]; then
		vm_num=$force_vm

		vm_num_is_valid $vm_num || return 1
		local vm_dir="$VM_DIR/$vm_num"
		[[ -d $vm_dir ]] && warning "removing existing VM in '$vm_dir'"
	else
		local vm_dir=""

		set +x
		for ((i = 0; i <= 256; i++)); do
			local vm_dir="$VM_DIR/$i"
			[[ ! -d $vm_dir ]] && break
		done
		xtrace_restore

		vm_num=$i
	fi

	if [[ $vm_num -eq 256 ]]; then
		error "no free VM found. do some cleanup (256 VMs created, are you insane?)"
		return 1
	fi

	# Resolve OS image and mode: '--incoming' reuses the source VM's image,
	# '--migrate-to' forces qcow2 'backing' mode so both VMs share a base.
	if [[ -n "$vm_migrate_to" && -n "$vm_incoming" ]]; then
		error "'--incoming' and '--migrate-to' cannot be used together"
		return 1
	elif [[ -n "$vm_incoming" ]]; then
		if [[ -n "$os_mode" || -n "$os" ]]; then
			error "'--incoming' can't be used together with '--os' nor '--os-mode'"
			return 1
		fi

		os_mode="original"
		os="$VM_DIR/$vm_incoming/os.qcow2"
	elif [[ -n "$vm_migrate_to" ]]; then
		[[ "$os_mode" != "backing" ]] && warning "Using 'backing' mode for OS since '--migrate-to' is used"
		os_mode=backing
	fi

	notice "Creating new VM in $vm_dir"
	mkdir -p $vm_dir

	if [[ "$os_mode" == "backing" ]]; then
		notice "Creating backing file for OS image file: $os"
		if ! $QEMU_IMG_BIN create -f qcow2 -b $os $vm_dir/os.qcow2 -F qcow2; then
			error "Failed to create OS backing file in '$vm_dir/os.qcow2' using '$os'"
			return 1
		fi

		local os=$vm_dir/os.qcow2
	elif [[ "$os_mode" == "original" ]]; then
		warning "Using original OS image file: $os"
	elif [[ "$os_mode" != "snapshot" ]]; then
		if [[ -z "$os_mode" ]]; then
			notice "No '--os-mode' parameter provided - using 'snapshot'"
			os_mode="snapshot"
		else
			error "Invalid '--os-mode=$os_mode'"
			return 1
		fi
	fi

	# CPU mask and NUMA node for this VM come from autotest.config.
	local qemu_mask_param="VM_${vm_num}_qemu_mask"
	local qemu_numa_node_param="VM_${vm_num}_qemu_numa_node"

	if [[ -z "${!qemu_mask_param}" ]] || [[ -z "${!qemu_numa_node_param}" ]]; then
		error "Parameters ${qemu_mask_param} or ${qemu_numa_node_param} not found in autotest.config file"
		return 1
	fi

	local task_mask=${!qemu_mask_param}

	notice "TASK MASK: $task_mask"
	local cmd=(taskset -a -c "$task_mask" "$QEMU_BIN")
	# Each VM gets a dedicated 100-port window for its sockets.
	local vm_socket_offset=$((10000 + 100 * vm_num))

	local ssh_socket=$((vm_socket_offset + 0))
	local fio_socket=$((vm_socket_offset + 1))
	local monitor_port=$((vm_socket_offset + 2))
	local migration_port=$((vm_socket_offset + 3))
	local gdbserver_socket=$((vm_socket_offset + 4))
	local vnc_socket=$((100 + vm_num))
	local qemu_pid_file="$vm_dir/qemu.pid"
	local cpu_list
	local cpu_num=0 queue_number=0

	# One queue per CPU assigned to the VM.
	cpu_list=($(parse_cpu_list <(echo "$task_mask")))
	cpu_num=${#cpu_list[@]} queue_number=$cpu_num

	# Let's be paranoid about it
	((cpu_num > 0 && queue_number > 0)) || return 1

	# Normalize tcp ports to make sure they are available
	ssh_socket=$(get_free_tcp_port "$ssh_socket")
	fio_socket=$(get_free_tcp_port "$fio_socket")
	monitor_port=$(get_free_tcp_port "$monitor_port")
	migration_port=$(get_free_tcp_port "$migration_port")
	gdbserver_socket=$(get_free_tcp_port "$gdbserver_socket")
	vnc_socket=$(get_free_tcp_port "$vnc_socket")

	xtrace_restore

	local node_num=${!qemu_numa_node_param}
	local boot_disk_present=false
	notice "NUMA NODE: $node_num"
	# Base qemu invocation: KVM, hugepage-backed memory bound to the NUMA
	# node, telnet monitor, serial/qemu/seabios logs under $vm_dir.
	cmd+=(-m "$guest_memory" --enable-kvm -cpu host -smp "$cpu_num" -vga std -vnc ":$vnc_socket" -daemonize)
	cmd+=(-object "memory-backend-file,id=mem,size=${guest_memory}M,mem-path=/dev/hugepages,share=on,prealloc=yes,host-nodes=$node_num,policy=bind")
	[[ $os_mode == snapshot ]] && cmd+=(-snapshot)
	[[ -n "$vm_incoming" ]] && cmd+=(-incoming "tcp:0:$migration_port")
	cmd+=(-monitor "telnet:127.0.0.1:$monitor_port,server,nowait")
	cmd+=(-numa "node,memdev=mem")
	cmd+=(-pidfile "$qemu_pid_file")
	cmd+=(-serial "file:$vm_dir/serial.log")
	cmd+=(-D "$vm_dir/qemu.log")
	cmd+=(-chardev "file,path=$vm_dir/seabios.log,id=seabios" -device "isa-debugcon,iobase=0x402,chardev=seabios")
	cmd+=(-net "user,hostfwd=tcp::$ssh_socket-:22,hostfwd=tcp::$fio_socket-:8765")
	cmd+=(-net nic)
	if [[ -z "$boot_from" ]]; then
		cmd+=(-drive "file=$os,if=none,id=os_disk")
		cmd+=(-device "ide-hd,drive=os_disk,bootindex=0")
	fi

	if ((${#disks[@]} == 0)) && [[ $disk_type_g == virtio* ]]; then
		disks=("default_virtio.img")
	elif ((${#disks[@]} == 0)); then
		error "No disks defined, aborting"
		return 1
	fi

	for disk in "${disks[@]}"; do
		# Each disk can define its type in a form of a disk_name,type. The remaining parts
		# of the string are dropped.
		IFS="," read -r disk disk_type _ <<< "$disk"
		[[ -z $disk_type ]] && disk_type=$disk_type_g

		case $disk_type in
			virtio)
				local raw_name="RAWSCSI"
				local raw_disk=$vm_dir/test.img

				# Create disk file if it not exist or it is smaller than 1G
				if [[ -f $disk && $(stat --printf="%s" $disk) -ge $((1024 * 1024 * 1024)) ]]; then
					raw_disk=$disk
					notice "Using existing image $raw_disk"
				else
					notice "Creating Virtio disc $raw_disk"
					dd if=/dev/zero of=$raw_disk bs=1024k count=1024
				fi

				cmd+=(-device "virtio-scsi-pci,num_queues=$queue_number")
				cmd+=(-device "scsi-hd,drive=hd$i,vendor=$raw_name")
				cmd+=(-drive "if=none,id=hd$i,file=$raw_disk,format=raw$raw_cache")
				;;
			spdk_vhost_scsi)
				notice "using socket $vhost_dir/naa.$disk.$vm_num"
				cmd+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
				cmd+=(-device "vhost-user-scsi-pci,id=scsi_$disk,num_queues=$queue_number,chardev=char_$disk")
				if [[ "$disk" == "$boot_from" ]]; then
					cmd[-1]+=,bootindex=0
					boot_disk_present=true
				fi
				;;
			spdk_vhost_blk)
				notice "using socket $vhost_dir/naa.$disk.$vm_num"
				cmd+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
				cmd+=(-device "vhost-user-blk-pci,num-queues=$queue_number,chardev=char_$disk")
				if [[ "$disk" == "$boot_from" ]]; then
					cmd[-1]+=,bootindex=0
					boot_disk_present=true
				fi

				if $packed; then
					check_qemu_packedring_support
					notice "Enabling packed ring support for VM $vm_num, controller $vhost_dir/naa.$disk.$vm_num"
					cmd[-1]+=,packed=on
				fi
				;;
			kernel_vhost)
				if [[ -z $disk ]]; then
					error "need WWN for $disk_type"
					return 1
				elif [[ ! $disk =~ ^[[:alpha:]]{3}[.][[:xdigit:]]+$ ]]; then
					error "$disk_type - disk(wnn)=$disk does not look like WNN number"
					return 1
				fi
				notice "Using kernel vhost disk wwn=$disk"
				cmd+=(-device "vhost-scsi-pci,wwpn=$disk,num_queues=$queue_number")
				;;
			vfio_user)
				notice "using socket $VM_DIR/$vm_num/domain/muser$disk/$disk/cntrl"
				cmd+=(-device "vfio-user-pci,x-msg-timeout=5000,socket=$VM_DIR/$vm_num/muser/domain/muser$disk/$disk/cntrl")
				if [[ "$disk" == "$boot_from" ]]; then
					cmd[-1]+=",bootindex=0"
					boot_disk_present=true
				fi
				;;
			vfio_user_virtio)
				notice "using socket $VM_DIR/vfu_tgt/virtio.$disk"
				cmd+=(-device "vfio-user-pci,x-msg-timeout=5000,socket=$VM_DIR/vfu_tgt/virtio.$disk")
				if [[ "$disk" == "$boot_from" ]]; then
					cmd[-1]+=",bootindex=0"
					boot_disk_present=true
				fi
				;;
			*)
				error "unknown mode '$disk_type', use: virtio, spdk_vhost_scsi, spdk_vhost_blk, kernel_vhost, vfio_user or vfio_user_virtio"
				return 1
				;;
		esac
	done

	if [[ -n $boot_from ]] && [[ $boot_disk_present == false ]]; then
		error "Boot from $boot_from is selected but device is not present"
		return 1
	fi

	((${#qemu_args[@]})) && cmd+=("${qemu_args[@]}")
	notice "Saving to $vm_dir/run.sh"
	# run.sh pins qemu into its own cpuset cgroup, then launches it.
	# Unescaped $vars below expand now; \$-escaped ones expand at run time.
	cat <<- RUN > "$vm_dir/run.sh"
		#!/bin/bash
		shopt -s nullglob extglob
		rootdir=$rootdir
		source "\$rootdir/test/scheduler/common.sh"
		qemu_log () {
			echo "=== qemu.log ==="
			[[ -s $vm_dir/qemu.log ]] && cat $vm_dir/qemu.log
			echo "=== qemu.log ==="
		}

		if [[ \$EUID -ne 0 ]]; then
			echo "Go away user come back as root"
			exit 1
		fi

		trap "qemu_log" EXIT

		qemu_cmd=($(printf '%s\n' "${cmd[@]}"))
		chmod +r $vm_dir/*
		echo "Running VM in $vm_dir"
		rm -f $qemu_pid_file
		cgroup=\$(get_cgroup \$$)
		set_cgroup_attr_top_bottom \$$ cgroup.subtree_control "+cpuset"
		create_cgroup \$cgroup/qemu.$vm_num
		set_cgroup_attr "\$cgroup/qemu.$vm_num" cpuset.mems "$node_num"
		set_cgroup_attr "\$cgroup/qemu.$vm_num" cpuset.cpus "$task_mask"
		"\${qemu_cmd[@]}"

		echo "Waiting for QEMU pid file"
		sleep 1
		[[ ! -f $qemu_pid_file ]] && sleep 1
		[[ ! -f $qemu_pid_file ]] && echo "ERROR: no qemu pid file found" && exit 1
		set_cgroup_attr "\$cgroup/qemu.$vm_num" cgroup.threads \$(< "$qemu_pid_file")
		exit 0
		# EOF
	RUN
	chmod +x $vm_dir/run.sh

	# Save generated sockets redirection
	echo $ssh_socket > $vm_dir/ssh_socket
	echo $fio_socket > $vm_dir/fio_socket
	echo $monitor_port > $vm_dir/monitor_port

	rm -f $vm_dir/migration_port
	[[ -z $vm_incoming ]] || echo $migration_port > $vm_dir/migration_port

	echo $gdbserver_socket > $vm_dir/gdbserver_socket
	echo $vnc_socket >> $vm_dir/vnc_socket

	# Cross-link migration source/target VM directories.
	[[ -z $vm_incoming ]] || ln -fs $VM_DIR/$vm_incoming $vm_dir/vm_incoming
	[[ -z $vm_migrate_to ]] || ln -fs $VM_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
}
833
function vm_run() {
	# Run VMs by executing their generated $VM_DIR/<num>/run.sh scripts.
	#   -a : run every VM found under $VM_DIR
	#   $@ : list of VM numbers to run (when -a is not used)
	# Returns non-zero when a VM is not defined or its run.sh fails.
	local OPTIND optchar vm
	local run_all=false
	local vms_to_run=""

	while getopts 'a-:' optchar; do
		case "$optchar" in
			a) run_all=true ;;
			*)
				error "Unknown param $OPTARG"
				return 1
				;;
		esac
	done

	if $run_all; then
		vms_to_run="$(vm_list_all)"
	else
		shift $((OPTIND - 1))
		for vm in "$@"; do
			# Validate every requested VM number (the previous code
			# validated $1 on each iteration instead of $vm).
			vm_num_is_valid "$vm" || return 1
			if [[ ! -x $VM_DIR/$vm/run.sh ]]; then
				error "VM$vm not defined - setup it first"
				return 1
			fi
			vms_to_run+=" $vm"
		done
	fi

	for vm in $vms_to_run; do
		# Skip VMs that are already up.
		if vm_is_running $vm; then
			warning "VM$vm ($VM_DIR/$vm) already running"
			continue
		fi

		notice "running $VM_DIR/$vm/run.sh"
		if ! $VM_DIR/$vm/run.sh; then
			error "FAILED to run vm $vm"
			return 1
		fi
	done
}
876
function vm_print_logs() {
	# Dump the qemu, guest-serial and SeaBIOS logs of the given VM, if present.
	vm_num=$1
	local label file

	warning "================"
	for label in "QEMU:qemu.log" "VM:serial.log" "SEABIOS:seabios.log"; do
		file="$VM_DIR/$vm_num/${label#*:}"
		warning "${label%%:*} LOG:"
		if [[ -r $file ]]; then
			cat "$file"
		else
			warning "LOG ${label#*:} not found"
		fi
	done
	warning "================"
}
902
903# Wait for all created VMs to boot.
904# param $1 max wait time
function vm_wait_for_boot() {
	# Wait (up to $1 seconds, minimum 10) for the listed VMs - or for every
	# VM under $VM_DIR when no list is given - to answer over ssh. Also
	# shortens the guests' systemd stop-timeout for faster powerdowns.
	# Returns 1 when a VM dies or the deadline passes.
	assert_number $1

	xtrace_disable

	local all_booted=false
	local timeout_time=$1
	[[ $timeout_time -lt 10 ]] && timeout_time=10
	# Re-purpose timeout_time as an absolute deadline (epoch seconds).
	local timeout_time
	timeout_time=$(date -d "+$timeout_time seconds" +%s)

	notice "Waiting for VMs to boot"
	shift
	if [[ "$*" == "" ]]; then
		local vms_to_check="$VM_DIR/[0-9]*"
	else
		local vms_to_check=""
		for vm in "$@"; do
			vms_to_check+=" $VM_DIR/$vm"
		done
	fi

	for vm in $vms_to_check; do
		local vm_num
		vm_num=$(basename $vm)
		local i=0
		notice "waiting for VM$vm_num ($vm)"
		while ! vm_os_booted $vm_num; do
			if ! vm_is_running $vm_num; then
				warning "VM $vm_num is not running"
				vm_print_logs $vm_num
				xtrace_restore
				return 1
			fi

			if [[ $(date +%s) -gt $timeout_time ]]; then
				warning "timeout waiting for machines to boot"
				vm_print_logs $vm_num
				xtrace_restore
				return 1
			fi
			# NOTE(review): i is never incremented, so this 30-dot line
			# wrap never triggers - confirm whether an ((i++)) is missing.
			if ((i > 30)); then
				local i=0
				echo
			fi
			echo -n "."
			sleep 1
		done
		echo ""
		notice "VM$vm_num ready"
		#Change Timeout for stopping services to prevent lengthy powerdowns
		#Check that remote system is not Cygwin in case of Windows VMs
		local vm_os
		vm_os=$(vm_exec $vm_num "uname -o")
		if [[ "$vm_os" != "Cygwin" ]]; then
			vm_exec $vm_num "echo 'DefaultTimeoutStopSec=10' >> /etc/systemd/system.conf; systemctl daemon-reexec"
		fi
	done

	notice "all VMs ready"
	xtrace_restore
	return 0
}
968
function vm_start_fio_server() {
	# Start fio in server/daemon mode inside each listed VM.
	#   --fio-bin=<path> : upload this fio binary into the VM and use it
	#   --readonly       : pass --readonly to fio
	# Remaining arguments are VM numbers.
	local OPTIND optchar
	local readonly=''
	local fio_bin=''
	while getopts ':-:' optchar; do
		case "$optchar" in
			-)
				case "$OPTARG" in
					fio-bin=*) local fio_bin="${OPTARG#*=}" ;;
					readonly) local readonly="--readonly" ;;
					*) error "Invalid argument '$OPTARG'" && return 1 ;;
				esac
				;;
			*) error "Invalid argument '$OPTARG'" && return 1 ;;
		esac
	done

	shift $((OPTIND - 1))
	for vm_num in "$@"; do
		notice "Starting fio server on VM$vm_num"
		if [[ $fio_bin != "" ]]; then
			# Copy the host-side fio binary into the guest first.
			vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
			vm_exec $vm_num /root/fio $readonly --eta=never --server --daemonize=/root/fio.pid
		else
			vm_exec $vm_num fio $readonly --eta=never --server --daemonize=/root/fio.pid
		fi
	done
}
997
function vm_check_scsi_location() {
	# Find test SCSI disks (vendor INTEL/RAWSCSI/LIO-ORG) inside VM $1 and
	# store their sd* names, space-separated, in global $SCSI_DISK.
	# Script to find wanted disc
	local script='shopt -s nullglob;
	for entry in /sys/block/sd*; do
		disk_type="$(cat $entry/device/vendor)";
		if [[ $disk_type == INTEL* ]] || [[ $disk_type == RAWSCSI* ]] || [[ $disk_type == LIO-ORG* ]]; then
			fname=$(basename $entry);
			echo -n " $fname";
		fi;
	done'

	SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no test disk found!"
		return 1
	fi
}
1016
1017# Script to perform scsi device reset on all disks in VM
1018# param $1 VM num
1019# param $2..$n Disks to perform reset on
function vm_reset_scsi_devices() {
	# Issue a SCSI device reset (sg_reset -vNd) on each disk in $2..$n
	# inside VM $1.
	local vm=$1
	shift
	for disk in "$@"; do
		notice "VM$vm Performing device reset on disk $disk"
		vm_exec $vm sg_reset /dev/$disk -vNd
	done
}
1026
function vm_check_blk_location() {
	# Find virtio-blk devices (vd*) inside VM $1 and store their names in
	# global $SCSI_DISK.
	local script='shopt -s nullglob; cd /sys/block; echo vd*'
	SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no blk test disk found!"
		return 1
	fi
}
1036
function vm_check_nvme_location() {
	# Find SPDK-backed NVMe controllers inside VM $1 (model contains
	# "SPDK") and store their "<ctrl>n1" namespace names in global $SCSI_DISK.
	SCSI_DISK="$(vm_exec $1 "grep -l SPDK /sys/class/nvme/*/model" | awk -F/ '{print $5"n1"}')"
	if [[ -z "$SCSI_DISK" ]]; then
		error "no vfio-user nvme test disk found!"
		return 1
	fi
}
1044
function run_fio() {
	# Run an fio job against a set of VMs.
	#   --job-file=<path>  : fio job file to upload & run (required)
	#   --fio-bin=<path>   : fio binary to use (host side; uploaded in local mode)
	#   --vm=<num>:<disks> : target VM and its disk list (repeatable)
	#   --out=<dir>        : directory for fio logs
	#   --local            : run fio inside each VM instead of client/server
	#   --plugin           : plugin mode (implies local mode, no binary upload)
	#   --json             : JSON output format
	#   --hide-results     : do not cat the results at the end
	#   --no-wait-for-fio  : with --local, don't wait for fio to finish
	#   --gtod-reduce      : append gtod_reduce=1 to the job file
	local arg
	local job_file=""
	local fio_bin=""
	local vms=()
	local out=""
	local vm
	local run_server_mode=true
	local run_plugin_mode=false
	local fio_start_cmd
	local fio_output_format="normal"
	local fio_gtod_reduce=false
	local wait_for_fio=true

	for arg in "$@"; do
		case "$arg" in
			--job-file=*) local job_file="${arg#*=}" ;;
			--fio-bin=*) local fio_bin="${arg#*=}" ;;
			--vm=*) vms+=("${arg#*=}") ;;
			--out=*)
				local out="${arg#*=}"
				mkdir -p $out
				;;
			--local) run_server_mode=false ;;
			--plugin)
				notice "Using plugin mode. Disabling server mode."
				run_plugin_mode=true
				run_server_mode=false
				;;
			--json) fio_output_format="json" ;;
			--hide-results) hide_results=true ;;
			--no-wait-for-fio) wait_for_fio=false ;;
			--gtod-reduce) fio_gtod_reduce=true ;;
			*)
				error "Invalid argument '$arg'"
				return 1
				;;
		esac
	done

	if [[ -n "$fio_bin" && ! -r "$fio_bin" ]]; then
		error "FIO binary '$fio_bin' does not exist"
		return 1
	fi

	if [[ -z "$fio_bin" ]]; then
		fio_bin="fio"
	fi

	if [[ ! -r "$job_file" ]]; then
		error "Fio job '$job_file' does not exist"
		return 1
	fi

	fio_start_cmd="$fio_bin --eta=never "

	local job_fname
	job_fname=$(basename "$job_file")
	log_fname="${job_fname%%.*}.log"
	fio_start_cmd+=" --output=$out/$log_fname --output-format=$fio_output_format "

	# prepare job file for each VM
	for vm in "${vms[@]}"; do
		# --vm argument format is "<vm_num>:<disk>[,<disk>...]".
		local vm_num=${vm%%:*}
		local vmdisks=${vm#*:}

		# Patch the job file with this VM's disks and upload it to the guest.
		sed "s@filename=@filename=$vmdisks@;s@description=\(.*\)@description=\1 (VM=$vm_num)@" "$job_file" \
			| vm_exec $vm_num "cat > /root/$job_fname"

		if $fio_gtod_reduce; then
			vm_exec $vm_num "echo 'gtod_reduce=1' >> /root/$job_fname"
		fi

		vm_exec $vm_num cat /root/$job_fname

		if $run_server_mode; then
			fio_start_cmd+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$job_fname "
		fi

		if ! $run_server_mode; then
			if [[ -n "$fio_bin" ]]; then
				# Upload the binary unless in plugin mode / not a local file.
				if ! $run_plugin_mode && [[ -e $fio_bin ]]; then
					vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
					vm_fio_bin="/root/fio"
				else
					vm_fio_bin=$fio_bin
				fi
			fi

			notice "Running local fio on VM $vm_num"
			vm_exec $vm_num "$vm_fio_bin --output=/root/$log_fname --output-format=$fio_output_format /root/$job_fname & echo \$! > /root/fio.pid" &
			vm_exec_pids+=("$!")
		fi
	done

	if ! $run_server_mode; then
		if ! $wait_for_fio; then
			return 0
		fi
		echo "Waiting for guest fio instances to finish.."
		wait "${vm_exec_pids[@]}"

		# Collect the per-VM logs back onto the host.
		for vm in "${vms[@]}"; do
			local vm_num=${vm%%:*}
			vm_exec $vm_num cat /root/$log_fname > "$out/vm${vm_num}_${log_fname}"
		done
		return 0
	fi

	$fio_start_cmd
	sleep 1

	if [[ "$fio_output_format" == "json" ]]; then
		# Fio in client-server mode produces a lot of "trash" output
		# preceding JSON structure, making it not possible to parse.
		# Remove these lines from file.
		# shellcheck disable=SC2005
		echo "$(grep -vP '^[<\w]' "$out/$log_fname")" > "$out/$log_fname"
	fi

	if [[ ! $hide_results ]]; then
		cat $out/$log_fname
	fi
}
1169
1170# Parsing fio results for json output and client-server mode only!
1171function parse_fio_results() {
1172	local fio_log_dir=$1
1173	local fio_log_filename=$2
1174	local fio_csv_filename
1175
1176	# Variables used in parsing loop
1177	local log_file
1178	local rwmode mixread mixwrite
1179	local lat_key lat_divisor
1180	local client_stats iops bw
1181	local read_avg_lat read_min_lat read_max_lat
1182	local write_avg_lat write_min_lat write_min_lat
1183	local clients
1184
1185	declare -A results
1186	results["iops"]=0
1187	results["bw"]=0
1188	results["avg_lat"]=0
1189	results["min_lat"]=0
1190	results["max_lat"]=0
1191
1192	# Loop using the log filename to see if there are any other
1193	# matching files. This is in case we ran fio test multiple times.
1194	log_files=("$fio_log_dir/$fio_log_filename"*)
1195	for log_file in "${log_files[@]}"; do
1196		# Save entire array to avoid opening $log_file multiple times
1197		clients=$(jq -r '.client_stats' "$log_file")
1198		[[ -n $clients ]]
1199		rwmode=$(jq -r '.[0]["job options"]["rw"]' <<< "$clients")
1200		mixread=1
1201		mixwrite=1
1202		if [[ $rwmode = *"rw"* ]]; then
1203			mixread=$(jq -r '.[0]["job options"]["rwmixread"]' <<< "$clients")
1204			mixread=$(bc -l <<< "scale=3; $mixread/100")
1205			mixwrite=$(bc -l <<< "scale=3; 1-$mixread")
1206		fi
1207
1208		client_stats=$(jq -r '.[] | select(.jobname == "All clients")' <<< "$clients")
1209		if [[ -z $client_stats ]]; then
1210			# Potentially single client (single VM)
1211			client_stats=$(jq -r '.[]' <<< "$clients")
1212		fi
1213
1214		# Check latency unit and later normalize to microseconds
1215		lat_key="lat_us"
1216		lat_divisor=1
1217		if jq -er '.read["lat_ns"]' &> /dev/null <<< $client_stats; then
1218			lat_key="lat_ns"
1219			lat_divisor=1000
1220		fi
1221
1222		# Horrific bash float point arithmetic operations below.
1223		# Viewer discretion is advised.
1224		iops=$(jq -r '[.read["iops"],.write["iops"]] | add' <<< $client_stats)
1225		bw=$(jq -r '[.read["bw"],.write["bw"]] | add' <<< $client_stats)
1226		read_avg_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["mean"]' <<< $client_stats)
1227		read_min_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["min"]' <<< $client_stats)
1228		read_max_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["max"]' <<< $client_stats)
1229		write_avg_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["mean"]' <<< $client_stats)
1230		write_min_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["min"]' <<< $client_stats)
1231		write_max_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["max"]' <<< $client_stats)
1232
1233		results["iops"]=$(bc -l <<< "${results[iops]} + $iops")
1234		results["bw"]=$(bc -l <<< "${results[bw]} + $bw")
1235		results["avg_lat"]=$(bc -l <<< "${results[avg_lat]} + ($mixread*$read_avg_lat + $mixwrite*$write_avg_lat)/$lat_divisor")
1236		results["min_lat"]=$(bc -l <<< "${results[min_lat]} + ($mixread*$read_min_lat + $mixwrite*$write_min_lat)/$lat_divisor")
1237		results["max_lat"]=$(bc -l <<< "${results[max_lat]} + ($mixread*$read_max_lat + $mixwrite*$write_max_lat)/$lat_divisor")
1238	done
1239
1240	results["iops"]=$(bc -l <<< "scale=3; ${results[iops]} / ${#log_files[@]}")
1241	results["bw"]=$(bc -l <<< "scale=3; ${results[bw]} / ${#log_files[@]}")
1242	results["avg_lat"]=$(bc -l <<< "scale=3; ${results[avg_lat]} / ${#log_files[@]}")
1243	results["min_lat"]=$(bc -l <<< "scale=3; ${results[min_lat]} / ${#log_files[@]}")
1244	results["max_lat"]=$(bc -l <<< "scale=3; ${results[max_lat]} / ${#log_files[@]}")
1245
1246	fio_csv_filename="${fio_log_filename%%.*}.csv"
1247	cat <<- EOF > "$fio_log_dir/$fio_csv_filename"
1248		iops,bw,avg_lat,min_lat,max_lat
1249		${results["iops"]},${results["bw"]},${results["avg_lat"]},${results["min_lat"]},${results["max_lat"]}
1250	EOF
1251}
1252
1253# Shutdown or kill any running VM and SPDK APP.
1254#
# Final teardown: stop every running VM, then kill each vhost target
# instance that registered a directory under $TARGET_DIR.
function at_app_exit() {
	local vhost_name

	notice "APP EXITING"
	notice "killing all VMs"
	vm_kill_all

	# Kill vhost application
	notice "killing vhost app"
	for vhost_name in "$TARGET_DIR"/*; do
		vhost_kill "${vhost_name##*/}"
	done

	notice "EXIT DONE"
}
1270
# ERR-trap handler: report where the failure occurred, tear everything down
# and exit with failure. Statement order matters throughout.
# $1 - name of the failing source/function, $2 - line number (as passed by
#      the trap, e.g. trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR)
function error_exit() {
	# Drop the ERR trap first so cleanup below can't re-trigger this handler
	trap - ERR
	print_backtrace
	# Disable errexit - cleanup should proceed even if individual steps fail
	set +e
	error "Error on $1 $2"

	at_app_exit
	exit 1
}
1280
# Query VM $1 for the MSI IRQ vectors of its storage devices and for its
# online CPUs; save both lists as $VHOST_DIR/irqs/$vm.{irqs,cpus}.
# Fails (non-zero) if either list comes back empty.
function lookup_dev_irqs() {
	local vm=$1 found_irqs=() found_cpus=()
	local irq_script cpu_script

	mkdir -p "$VHOST_DIR/irqs"

	# All vhost tests depend either on virtio_blk or virtio_scsi drivers on the VM side.
	# Considering that, simply iterate over virtio bus and pick pci device corresponding
	# to each virtio device.
	# For vfio-user setup, look for bare nvme devices.

	irq_script=$(
		cat <<- 'SCRIPT'
			shopt -s nullglob
			for virtio in /sys/bus/virtio/devices/virtio*; do
			  irqs+=("$(readlink -f "$virtio")/../msi_irqs/"*)
			done
			irqs+=(/sys/class/nvme/nvme*/device/msi_irqs/*)
			printf '%u\n' "${irqs[@]##*/}"
		SCRIPT
	)

	cpu_script=$(
		cat <<- 'SCRIPT'
			cpus=(/sys/devices/system/cpu/cpu[0-9]*)
			printf '%u\n' "${cpus[@]##*cpu}"
		SCRIPT
	)

	found_irqs=($(vm_exec "$vm" "$irq_script"))
	found_cpus=($(vm_exec "$vm" "$cpu_script"))
	# Assert both lookups returned something (aborts under errexit)
	((${#found_irqs[@]} > 0 && ${#found_cpus[@]} > 0))

	printf '%u\n' "${found_irqs[@]}" > "$VHOST_DIR/irqs/$vm.irqs"
	printf '%u\n' "${found_cpus[@]}" > "$VHOST_DIR/irqs/$vm.cpus"
}
1317
# Start a background /proc/interrupts sampler (1s period) inside each given
# VM, dumping into $VHOST_DIR/irqs/$vm.interrupts. The PIDs of the local
# vm_exec processes are appended to the caller-visible irqs_pids[] array.
function irqs() {
	local vm
	for vm in "$@"; do
		vm_exec "$vm" "while :; do cat /proc/interrupts; sleep 1s; done" > "$VHOST_DIR/irqs/$vm.interrupts" &
		irqs_pids+=($!)
	done
}
1325
# Post-process the raw /proc/interrupts dumps collected by irqs() and stash
# the parsed output under a per-iteration subdirectory of $VHOST_DIR/irqs.
# $1 - iteration number to file the results under (default: 1)
function parse_irqs() {
	local iter=${1:-1}
	# NOTE(review): the globs assume at least one *.interrupts (and later
	# *.parsed) file exists - rm/mv would fail otherwise. Presumably
	# guaranteed by a prior irqs() run; confirm against callers.
	"$rootdir/test/vhost/parse_irqs.sh" "$VHOST_DIR/irqs/"*.interrupts
	rm "$VHOST_DIR/irqs/"*.interrupts

	mkdir -p "$VHOST_DIR/irqs/$iter"
	mv "$VHOST_DIR/irqs/"*.parsed "$VHOST_DIR/irqs/$iter/"
}
1334
# Record a system profile with "perf record" into $VHOST_DIR/perf.
# All arguments are optional (pass "" to skip):
# $1 - CPU list for perf -C, $2 - output file for perf -o,
# $3 - runtime in seconds (profiling stops after it elapses),
# $4 - start delay in seconds (perf -D takes milliseconds).
function collect_perf() {
	local cpus=$1 outf=$2 runtime=$3 delay=$4
	local record_args=(-g)

	mkdir -p "$VHOST_DIR/perf"

	# Build the argument list only from the parameters that were provided
	[[ -n $cpus ]] && record_args+=(-C "$cpus")
	[[ -n $outf ]] && record_args+=(-o "$outf")
	[[ -n $delay ]] && record_args+=(-D $((delay * 1000)))
	record_args+=(-z)
	# Bound the recording time via a dummy workload
	[[ -n $runtime ]] && record_args+=(-- sleep $((runtime + delay)))

	perf record "${record_args[@]}"
}
1347
# Run "perf report" on collected perf data files and store the parsed
# output (plus a copy of the raw data) under "$VHOST_DIR/perf/$iter/".
# $1   - iteration number to file the results under (default: 1)
# $2.. - optional extra perf data files to parse in addition to the
#        *.perf files gathered under $VHOST_DIR/perf/
function parse_perf() {
	local iter=${1:-1}
	local report out

	mkdir -p "$VHOST_DIR/perf/$iter"
	# Guard the shift: with no positional parameters left a bare "shift"
	# returns non-zero and would abort the test under errexit even though
	# $iter already defaults to 1 for the zero-argument case.
	(($# > 0)) && shift

	for report in "$@" "$VHOST_DIR/perf/"*.perf; do
		[[ -f $report ]] || continue
		perf report \
			-n \
			-i "$report" \
			--header \
			--stdio > "$VHOST_DIR/perf/$iter/${report##*/}.parsed"
		cp "$report" "$VHOST_DIR/perf/$iter/"
	done
	rm "$VHOST_DIR/perf/"*.perf
}
1366
# Extract the value of option $1 from fio config file $2 ("opt=value"
# format). Prints the value of every exactly-matching line; returns 1 when
# the option name is empty or the file doesn't exist.
function get_from_fio() {
	local opt=$1 conf=$2

	[[ -n $opt && -f $conf ]] || return 1

	# Match the option name exactly: the previous /^$opt/ prefix match also
	# picked up e.g. "rwmixread" when asked for "rw". Passing the name via
	# -v also keeps it from being interpolated into the awk program text.
	awk -F= -v opt="$opt" '$1 == opt {print $2}' "$conf"
}
1374
# Print a free TCP port, starting the search at $1.
# $2 determines how long we keep looking:
#   0 - don't increment, just check if $1 itself is in use
#  >0 - increment up to $2 times
#  <0 - no increment limit (bounded only by port 65535)
# Returns 1 when no free port was found within the allowed range.
function get_free_tcp_port() {
	local port=$1 to=${2:-1} sockets=()
	local needle

	mapfile -t sockets < /proc/net/tcp

	# A listening socket shows up in /proc/net/tcp as
	# "<addr>:<port-hex> 00000000:0000 0A" (0A == TCP_LISTEN).
	needle=":$(printf '%04X' "$port") 00000000:0000 0A"
	while [[ ${sockets[*]} == *"$needle"* ]]; do
		# Stop when the attempt budget runs out...
		((to--)) || return 1
		# ...or when we'd step past the valid port range
		((++port <= 65535)) || return 1
		needle=":$(printf '%04X' "$port") 00000000:0000 0A"
	done

	echo "$port"
}
1392
# Generate a CPU placement config for a set of test VMs plus the SPDK app by
# driving scripts/perf/vhost/conf-generator. Runs in a subshell "( )" body so
# the exports below don't leak into the calling shell.
# $1 - number of VMs, $2 - CPUs per VM
# $3 - number of SPDK CPUs (default 1), $4 - explicit SPDK CPU list (optional,
#      takes precedence over $3)
# $5.. - NUMA node per VM; with a single node given, all VMs share it
function gen_cpu_vm_spdk_config() (
	local vm_count=$1 vm_cpu_num=$2 vm
	local spdk_cpu_num=${3:-1} spdk_cpu_list=${4:-} spdk_cpus
	local nodes=("${@:5}") node
	local env

	# Not a bug: spdk_cpus holds the *name* of the variable to forward,
	# resolved below via ${!spdk_cpus} indirection so that either
	# "spdk_cpu_num=N" or "spdk_cpu_list=..." ends up in the environment.
	spdk_cpus=spdk_cpu_num
	[[ -n $spdk_cpu_list ]] && spdk_cpus=spdk_cpu_list

	if ((${#nodes[@]} > 0)); then
		# Single node given -> use it as the fallback for every VM
		((${#nodes[@]} == 1)) && node=${nodes[0]}
		for ((vm = 0; vm < vm_count; vm++)); do
			env+=("VM${vm}_NODE=${nodes[vm]:-$node}")
		done
	fi

	env+=("$spdk_cpus=${!spdk_cpus}")
	env+=("vm_count=$vm_count")
	env+=("vm_cpu_num=$vm_cpu_num")

	# Exported only inside this subshell; consumed by conf-generator
	export "${env[@]}"

	"$rootdir/scripts/perf/vhost/conf-generator" -p cpu
)
1417