#  SPDX-License-Identifier: BSD-3-Clause
#  Copyright (C) 2017 Intel Corporation
#  All rights reserved.
#

: ${SPDK_VHOST_VERBOSE=false}
: ${VHOST_DIR="$HOME/vhost_test"}
: ${QEMU_BIN:="qemu-system-x86_64"}
: ${QEMU_IMG_BIN="qemu-img"}

TEST_DIR=$(readlink -f $rootdir/..)
VM_DIR=$VHOST_DIR/vms
TARGET_DIR=$VHOST_DIR/vhost
VM_PASSWORD="root"

VM_IMAGE=${VM_IMAGE:-"$DEPENDENCY_DIR/vhost/spdk_test_image.qcow2"}
FIO_BIN=${FIO_BIN:-}

WORKDIR=$(readlink -f "$(dirname "$0")")

if ! hash $QEMU_IMG_BIN $QEMU_BIN; then
	echo 'ERROR: QEMU is not installed on this system. Unable to run vhost tests.' >&2
	return 1
fi

mkdir -p $VHOST_DIR
mkdir -p $VM_DIR
mkdir -p $TARGET_DIR

#
# Source config describing QEMU and VHOST cores and NUMA
#
source $rootdir/test/vhost/common/autotest.config
source "$rootdir/test/scheduler/common.sh"

function vhosttestinit() {
	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh
	fi

	if [[ -e $VM_IMAGE.gz && ! -e $VM_IMAGE ]]; then
		gzip -dc "$VM_IMAGE.gz" > "$VM_IMAGE"
	fi

	# Look for the VM image
	if [[ ! -f $VM_IMAGE ]]; then
		[[ $1 != "--no_vm" ]] || return 0
		echo "$VM_IMAGE is missing" >&2
		return 1
	fi
}

function vhosttestfini() {
	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh reset
	fi
}

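# Example usage (an illustrative sketch, kept in comments since this file is
# meant to be sourced): a typical test script brackets its body with the
# init/fini pair:
#
#   source "$rootdir/test/vhost/common.sh"
#   vhosttestinit
#   # ... actual test steps ...
#   vhosttestfini
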
function message() {
	local verbose_out
	if ! $SPDK_VHOST_VERBOSE; then
		verbose_out=""
	elif [[ ${FUNCNAME[2]} == "source" ]]; then
		verbose_out=" (file $(basename ${BASH_SOURCE[1]}):${BASH_LINENO[1]})"
	else
		verbose_out=" (function ${FUNCNAME[2]}:${BASH_LINENO[1]})"
	fi

	local msg_type="$1"
	shift
	echo -e "${msg_type}${verbose_out}: $*"
}

function fail() {
	echo "===========" >&2
	message "FAIL" "$@" >&2
	echo "===========" >&2
	exit 1
}

function error() {
	echo "===========" >&2
	message "ERROR" "$@" >&2
	echo "===========" >&2
	# Don't 'return 1' since the stack trace would then be incomplete, missing the upper command.
	false
}

function warning() {
	message "WARN" "$@" >&2
}

function notice() {
	message "INFO" "$@"
}

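# Example (hypothetical output; the function name and line number depend on
# the call site, and the suffix only appears with SPDK_VHOST_VERBOSE=true):
#
#   notice "vhost started"
#   # -> INFO (function vhost_run:196): vhost started
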
function check_qemu_packedring_support() {
	qemu_version=$($QEMU_BIN -version | grep -Po "(?<=version )\d+\.\d+\.\d+")
	# Use a version-aware comparison; a plain lexicographic '<' would misorder
	# versions such as 10.0.0 vs 4.2.0.
	if [[ "$(printf '%s\n' "$qemu_version" "4.2.0" | sort -V | head -n1)" != "4.2.0" ]]; then
		error "This qemu binary does not support packed ring"
	fi
}

function get_vhost_dir() {
	local vhost_name="$1"

	if [[ -z "$vhost_name" ]]; then
		error "vhost name must be provided to get_vhost_dir"
		return 1
	fi

	echo "$TARGET_DIR/${vhost_name}"
}

function vhost_run() {
	local OPTIND
	local vhost_name
	local run_gen_nvme=true
	local vhost_bin="vhost"
	local vhost_args=()
	local cmd=()

	while getopts "n:b:g" optchar; do
		case "$optchar" in
			n) vhost_name="$OPTARG" ;;
			b) vhost_bin="$OPTARG" ;;
			g)
				run_gen_nvme=false
				notice "Skipping gen_nvme.sh NVMe bdev configuration"
				;;
			*)
				error "Unknown param $optchar"
				return 1
				;;
		esac
	done
	shift $((OPTIND - 1))

	vhost_args=("$@")

	if [[ -z "$vhost_name" ]]; then
		error "vhost name must be provided to vhost_run"
		return 1
	fi

	local vhost_dir
	vhost_dir="$(get_vhost_dir $vhost_name)"
	local vhost_app="$SPDK_BIN_DIR/$vhost_bin"
	local vhost_log_file="$vhost_dir/vhost.log"
	local vhost_pid_file="$vhost_dir/vhost.pid"
	local vhost_socket="$vhost_dir/usvhost"
	notice "starting vhost app in background"
	[[ -r "$vhost_pid_file" ]] && vhost_kill $vhost_name
	[[ -d $vhost_dir ]] && rm -f $vhost_dir/*
	mkdir -p $vhost_dir

	if [[ ! -x $vhost_app ]]; then
		error "application not found: $vhost_app"
		return 1
	fi

	cmd=("$vhost_app" "-r" "$vhost_dir/rpc.sock" "${vhost_args[@]}")
	if [[ "$vhost_bin" =~ vhost ]]; then
		cmd+=(-S "$vhost_dir")
	fi

	notice "Logging to:   $vhost_log_file"
	notice "Socket:      $vhost_socket"
	notice "Command:     ${cmd[*]}"

	timing_enter vhost_start

	iobuf_small_count=${iobuf_small_count:-16383}
	iobuf_large_count=${iobuf_large_count:-2047}

	"${cmd[@]}" --wait-for-rpc &
	vhost_pid=$!
	echo $vhost_pid > $vhost_pid_file

	notice "waiting for app to run..."
	waitforlisten "$vhost_pid" "$vhost_dir/rpc.sock"

	"$rootdir/scripts/rpc.py" -s "$vhost_dir/rpc.sock" \
		iobuf_set_options \
		--small-pool-count="$iobuf_small_count" \
		--large-pool-count="$iobuf_large_count"

	"$rootdir/scripts/rpc.py" -s "$vhost_dir/rpc.sock" \
		framework_start_init

	# Do not generate NVMe bdev configuration if PCI access is disabled
	if [[ "${cmd[*]}" != *"--no-pci"* ]] && [[ "${cmd[*]}" != *"-u"* ]] && $run_gen_nvme; then
		$rootdir/scripts/gen_nvme.sh | $rootdir/scripts/rpc.py -s $vhost_dir/rpc.sock load_subsystem_config
	fi

	notice "vhost started - pid=$vhost_pid"

	timing_exit vhost_start
}

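# Example usage (an illustrative sketch; '-m 0x3' is a hypothetical core mask
# forwarded verbatim to the vhost app after '--'):
#
#   vhost_run -n vhost0 -- -m 0x3
#   vhost_rpc vhost0 bdev_get_bdevs
#   vhost_kill vhost0
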
function vhost_kill() {
	local rc=0
	local vhost_name="$1"

	if [[ -z "$vhost_name" ]]; then
		error "Must provide vhost name to vhost_kill"
		return 0
	fi

	local vhost_dir
	vhost_dir="$(get_vhost_dir $vhost_name)"
	local vhost_pid_file="$vhost_dir/vhost.pid"

	if [[ ! -r $vhost_pid_file ]]; then
		warning "no vhost pid file found"
		return 0
	fi

	timing_enter vhost_kill
	local vhost_pid
	vhost_pid="$(cat $vhost_pid_file)"
	notice "killing vhost (PID $vhost_pid) app"

	if kill -INT $vhost_pid > /dev/null; then
		notice "sent SIGINT to vhost app - waiting 60 seconds to exit"
		for ((i = 0; i < 60; i++)); do
			if kill -0 $vhost_pid; then
				echo "."
				sleep 1
			else
				break
			fi
		done
		if kill -0 $vhost_pid; then
			error "vhost was NOT killed - sending SIGABRT"
			kill -ABRT $vhost_pid
			rc=1
		else
			while kill -0 $vhost_pid; do
				echo "."
			done
		fi
		# If this PID is our child, we should attempt to verify its status
		# to catch any "silent" crashes that may happen upon termination.
		if is_pid_child "$vhost_pid"; then
			notice "Checking status of $vhost_pid"
			wait "$vhost_pid" || rc=1
		fi

	elif kill -0 $vhost_pid; then
		error "vhost NOT killed - you need to kill it manually"
		rc=1
	else
		notice "vhost was not running"
	fi

	timing_exit vhost_kill

	rm -rf "$vhost_dir"

	return $rc
}

function vhost_rpc() {
	local vhost_name="$1"

	if [[ -z "$vhost_name" ]]; then
		error "vhost name must be provided to vhost_rpc"
		return 1
	fi
	shift

	$rootdir/scripts/rpc.py -s $(get_vhost_dir $vhost_name)/rpc.sock "$@"
}

###
# Mgmt functions
###

function assert_number() {
	[[ "$1" =~ ^[0-9]+$ ]] && return 0

	error "Invalid or missing parameter: need number but got '$1'"
	return 1
}

# Run command on vm with given password
# First argument - vm number
# Second argument - ssh password for vm
#
function vm_sshpass() {
	vm_num_is_valid $1 || return 1

	local ssh_cmd
	ssh_cmd="sshpass -p $2 ssh \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-p $(vm_ssh_socket $1) $VM_SSH_OPTIONS 127.0.0.1"

	shift 2
	$ssh_cmd "$@"
}

# Helper to validate VM number
# param $1 VM number
#
function vm_num_is_valid() {
	[[ "$1" =~ ^[0-9]+$ ]] && return 0

	error "Invalid or missing parameter: vm number '$1'"
	return 1
}

# Print network socket for given VM number
# param $1 virtual machine number
#
function vm_ssh_socket() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	cat $vm_dir/ssh_socket
}

function vm_fio_socket() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	cat $vm_dir/fio_socket
}

# Execute command on given VM
# param $1 virtual machine number
#
function vm_exec() {
	vm_num_is_valid $1 || return 1

	local vm_num="$1"
	shift

	sshpass -p "$VM_PASSWORD" ssh \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-p $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS 127.0.0.1 \
		"$@"
}

# Execute scp command on given VM
# param $1 virtual machine number
#
function vm_scp() {
	vm_num_is_valid $1 || return 1

	local vm_num="$1"
	shift

	sshpass -p "$VM_PASSWORD" scp \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-P $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS \
		"$@"
}

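# Example usage (an illustrative sketch; assumes VM 0 is already set up and
# booted, and that /tmp/results exists on the host):
#
#   vm_exec 0 "uname -r"
#   vm_scp 0 "127.0.0.1:/root/fio.log" /tmp/results/
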
# check if specified VM is running
# param $1 VM num
function vm_is_running() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 1
	fi

	local vm_pid
	vm_pid="$(cat $vm_dir/qemu.pid)"

	if /bin/kill -0 $vm_pid; then
		return 0
	else
		if [[ $EUID -ne 0 ]]; then
			warning "not root - assuming VM running since can't be checked"
			return 0
		fi

		# not running - remove pid file
		rm -f $vm_dir/qemu.pid
		return 1
	fi
}

# Check if the OS inside given VM has booted, i.e. is reachable over SSH
# param $1 VM num
function vm_os_booted() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		error "VM $1 is not running"
		return 1
	fi

	if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_exec $1 "true" 2> /dev/null; then
		# Shutdown existing master. Ignore errors as it might not exist.
		VM_SSH_OPTIONS="-O exit" vm_exec $1 "true" 2> /dev/null
		return 1
	fi

	return 0
}

# Shutdown given VM
# param $1 virtual machine number
# return non-zero in case of error.
function vm_shutdown() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"
	if [[ ! -d "$vm_dir" ]]; then
		error "VM$1 ($vm_dir) does not exist - set it up first"
		return 1
	fi

	if ! vm_is_running $1; then
		notice "VM$1 ($vm_dir) is not running"
		return 0
	fi

	# Temporarily disabling exit flag for next ssh command, since it will
	# "fail" due to shutdown
	notice "Shutting down virtual machine $vm_dir"
	set +e
	vm_exec $1 "nohup sh -c 'shutdown -h -P now'" || true
	notice "VM$1 is shutting down - wait a while to complete"
	set -e
}

# Kill given VM
# param $1 virtual machine number
#
function vm_kill() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 0
	fi

	local vm_pid
	vm_pid="$(cat $vm_dir/qemu.pid)"

	notice "Killing virtual machine $vm_dir (pid=$vm_pid)"
	# The kill should succeed; if the VM is still running afterwards, report an error.
	if /bin/kill $vm_pid; then
		notice "process $vm_pid killed"
		rm -rf $vm_dir
	elif vm_is_running $1; then
		error "Process $vm_pid NOT killed"
		return 1
	fi
}

# List all VM numbers in VM_DIR
#
function vm_list_all() {
	local vms=()
	vms=("$VM_DIR"/+([0-9]))
	if ((${#vms[@]} > 0)); then
		basename --multiple "${vms[@]}"
	fi
}

# Kill all VMs in $VM_DIR
#
function vm_kill_all() {
	local vm
	for vm in $(vm_list_all); do
		vm_kill $vm
	done

	rm -rf $VM_DIR
}

# Shutdown all VMs in $VM_DIR
#
function vm_shutdown_all() {
	local timeo=${1:-90} vms vm

	vms=($(vm_list_all))

	for vm in "${vms[@]}"; do
		vm_shutdown "$vm"
	done

	notice "Waiting for VMs to shutdown..."
	while ((timeo-- > 0 && ${#vms[@]} > 0)); do
		for vm in "${!vms[@]}"; do
			vm_is_running "${vms[vm]}" || unset -v "vms[vm]"
		done
		sleep 1
	done

	if ((${#vms[@]} == 0)); then
		notice "All VMs successfully shut down"
		return 0
	fi

	warning "Not all VMs were shut down. Leftovers: ${vms[*]}"

	for vm in "${vms[@]}"; do
		vm_print_logs "$vm"
	done

	return 1
}

function vm_setup() {
	xtrace_disable
	local OPTIND optchar vm_num

	local os=""
	local os_mode=""
	local qemu_args=()
	local disk_type_g=NOT_DEFINED
	local read_only="false"
	# List of disks, created from a string with entries separated by ":"
	local disks=()
	local raw_cache=""
	local vm_incoming=""
	local vm_migrate_to=""
	local force_vm=""
	local guest_memory=1024
	local vhost_dir
	local packed=false
	vhost_dir="$(get_vhost_dir 0)"
	while getopts ':-:' optchar; do
		case "$optchar" in
			-)
				case "$OPTARG" in
					os=*) os="${OPTARG#*=}" ;;
					os-mode=*) os_mode="${OPTARG#*=}" ;;
					qemu-args=*) qemu_args+=("${OPTARG#*=}") ;;
					disk-type=*) disk_type_g="${OPTARG#*=}" ;;
					read-only=*) read_only="${OPTARG#*=}" ;;
					disks=*) IFS=":" read -ra disks <<< "${OPTARG#*=}" ;;
					raw-cache=*) raw_cache=",cache${OPTARG#*=}" ;;
					force=*) force_vm=${OPTARG#*=} ;;
					memory=*) guest_memory=${OPTARG#*=} ;;
					incoming=*) vm_incoming="${OPTARG#*=}" ;;
					migrate-to=*) vm_migrate_to="${OPTARG#*=}" ;;
					vhost-name=*) vhost_dir="$(get_vhost_dir ${OPTARG#*=})" ;;
					spdk-boot=*) local boot_from="${OPTARG#*=}" ;;
					packed) packed=true ;;
					*)
						error "unknown argument $OPTARG"
						return 1
						;;
				esac
				;;
			*)
				error "vm_create Unknown param $OPTARG"
				return 1
				;;
		esac
	done

	# Find next directory we can use
	if [[ -n $force_vm ]]; then
		vm_num=$force_vm

		vm_num_is_valid $vm_num || return 1
		local vm_dir="$VM_DIR/$vm_num"
		[[ -d $vm_dir ]] && warning "removing existing VM in '$vm_dir'"
	else
		local vm_dir=""

		set +x
		for ((i = 0; i <= 256; i++)); do
			local vm_dir="$VM_DIR/$i"
			[[ ! -d $vm_dir ]] && break
		done
		xtrace_restore

		vm_num=$i
	fi

	if [[ $vm_num -eq 256 ]]; then
		error "no free VM found. do some cleanup (256 VMs created, are you insane?)"
		return 1
	fi

	if [[ -n "$vm_migrate_to" && -n "$vm_incoming" ]]; then
		error "'--incoming' and '--migrate-to' cannot be used together"
		return 1
	elif [[ -n "$vm_incoming" ]]; then
		if [[ -n "$os_mode" || -n "$os" ]]; then
			error "'--incoming' can't be used together with '--os' nor '--os-mode'"
			return 1
		fi

		os_mode="original"
		os="$VM_DIR/$vm_incoming/os.qcow2"
	elif [[ -n "$vm_migrate_to" ]]; then
		[[ "$os_mode" != "backing" ]] && warning "Using 'backing' mode for OS since '--migrate-to' is used"
		os_mode=backing
	fi

	notice "Creating new VM in $vm_dir"
	mkdir -p $vm_dir

	if [[ "$os_mode" == "backing" ]]; then
		notice "Creating backing file for OS image file: $os"
		if ! $QEMU_IMG_BIN create -f qcow2 -b $os $vm_dir/os.qcow2 -F qcow2; then
			error "Failed to create OS backing file in '$vm_dir/os.qcow2' using '$os'"
			return 1
		fi

		local os=$vm_dir/os.qcow2
	elif [[ "$os_mode" == "original" ]]; then
		warning "Using original OS image file: $os"
	elif [[ "$os_mode" != "snapshot" ]]; then
		if [[ -z "$os_mode" ]]; then
			notice "No '--os-mode' parameter provided - using 'snapshot'"
			os_mode="snapshot"
		else
			error "Invalid '--os-mode=$os_mode'"
			return 1
		fi
	fi

	local qemu_mask_param="VM_${vm_num}_qemu_mask"
	local qemu_numa_node_param="VM_${vm_num}_qemu_numa_node"

	if [[ -z "${!qemu_mask_param}" ]] || [[ -z "${!qemu_numa_node_param}" ]]; then
		error "Parameters ${qemu_mask_param} or ${qemu_numa_node_param} not found in autotest.config file"
		return 1
	fi

	local task_mask=${!qemu_mask_param}

	notice "TASK MASK: $task_mask"
	local cmd=(taskset -a -c "$task_mask" "$QEMU_BIN")
	local vm_socket_offset=$((10000 + 100 * vm_num))

	local ssh_socket=$((vm_socket_offset + 0))
	local fio_socket=$((vm_socket_offset + 1))
	local monitor_port=$((vm_socket_offset + 2))
	local migration_port=$((vm_socket_offset + 3))
	local gdbserver_socket=$((vm_socket_offset + 4))
	local vnc_socket=$((100 + vm_num))
	local qemu_pid_file="$vm_dir/qemu.pid"
	local cpu_list
	local cpu_num=0 queue_number=0

	cpu_list=($(parse_cpu_list <(echo "$task_mask")))
	cpu_num=${#cpu_list[@]} queue_number=$cpu_num

	# Let's be paranoid about it
	((cpu_num > 0 && queue_number > 0)) || return 1

	# Normalize tcp ports to make sure they are available
	ssh_socket=$(get_free_tcp_port "$ssh_socket")
	fio_socket=$(get_free_tcp_port "$fio_socket")
	monitor_port=$(get_free_tcp_port "$monitor_port")
	migration_port=$(get_free_tcp_port "$migration_port")
	gdbserver_socket=$(get_free_tcp_port "$gdbserver_socket")
	vnc_socket=$(get_free_tcp_port "$vnc_socket")

	xtrace_restore

	local node_num=${!qemu_numa_node_param}
	local boot_disk_present=false
	notice "NUMA NODE: $node_num"
	cmd+=(-m "$guest_memory" --enable-kvm -cpu host -smp "$cpu_num" -vga std -vnc ":$vnc_socket" -daemonize)
	cmd+=(-object "memory-backend-file,id=mem,size=${guest_memory}M,mem-path=/dev/hugepages,share=on,prealloc=yes,host-nodes=$node_num,policy=bind")
	[[ $os_mode == snapshot ]] && cmd+=(-snapshot)
	[[ -n "$vm_incoming" ]] && cmd+=(-incoming "tcp:0:$migration_port")
	cmd+=(-monitor "telnet:127.0.0.1:$monitor_port,server,nowait")
	cmd+=(-numa "node,memdev=mem")
	cmd+=(-pidfile "$qemu_pid_file")
	cmd+=(-serial "file:$vm_dir/serial.log")
	cmd+=(-D "$vm_dir/qemu.log")
	cmd+=(-chardev "file,path=$vm_dir/seabios.log,id=seabios" -device "isa-debugcon,iobase=0x402,chardev=seabios")
	cmd+=(-net "user,hostfwd=tcp::$ssh_socket-:22,hostfwd=tcp::$fio_socket-:8765")
	cmd+=(-net nic)
	if [[ -z "$boot_from" ]]; then
		cmd+=(-drive "file=$os,if=none,id=os_disk")
		cmd+=(-device "ide-hd,drive=os_disk,bootindex=0")
	fi

	if ((${#disks[@]} == 0)) && [[ $disk_type_g == virtio* ]]; then
		disks=("default_virtio.img")
	elif ((${#disks[@]} == 0)); then
		error "No disks defined, aborting"
		return 1
	fi

	for disk in "${disks[@]}"; do
		# Each disk can define its type in a form of a disk_name,type. The remaining parts
		# of the string are dropped.
		IFS="," read -r disk disk_type _ <<< "$disk"
		[[ -z $disk_type ]] && disk_type=$disk_type_g

		case $disk_type in
			virtio)
				local raw_name="RAWSCSI"
				local raw_disk=$vm_dir/test.img

				# Create a disk file if it does not exist or is smaller than 1G
				if [[ -f $disk && $(stat --printf="%s" $disk) -ge $((1024 * 1024 * 1024)) ]]; then
					raw_disk=$disk
					notice "Using existing image $raw_disk"
				else
					notice "Creating Virtio disk $raw_disk"
					dd if=/dev/zero of=$raw_disk bs=1024k count=1024
				fi

				cmd+=(-device "virtio-scsi-pci,num_queues=$queue_number")
				cmd+=(-device "scsi-hd,drive=hd$i,vendor=$raw_name")
				cmd+=(-drive "if=none,id=hd$i,file=$raw_disk,format=raw$raw_cache")
				;;
			spdk_vhost_scsi)
				notice "using socket $vhost_dir/naa.$disk.$vm_num"
				cmd+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
				cmd+=(-device "vhost-user-scsi-pci,id=scsi_$disk,num_queues=$queue_number,chardev=char_$disk")
				if [[ "$disk" == "$boot_from" ]]; then
					cmd[-1]+=,bootindex=0
					boot_disk_present=true
				fi
				;;
			spdk_vhost_blk)
				notice "using socket $vhost_dir/naa.$disk.$vm_num"
				cmd+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
				cmd+=(-device "vhost-user-blk-pci,num-queues=$queue_number,chardev=char_$disk")
				if [[ "$disk" == "$boot_from" ]]; then
					cmd[-1]+=,bootindex=0
					boot_disk_present=true
				fi

				if $packed; then
					check_qemu_packedring_support
					notice "Enabling packed ring support for VM $vm_num, controller $vhost_dir/naa.$disk.$vm_num"
					cmd[-1]+=,packed=on
				fi
				;;
			kernel_vhost)
				if [[ -z $disk ]]; then
					error "need WWN for $disk_type"
					return 1
				elif [[ ! $disk =~ ^[[:alpha:]]{3}[.][[:xdigit:]]+$ ]]; then
					error "$disk_type - disk(wwn)=$disk does not look like a WWN"
					return 1
				fi
				notice "Using kernel vhost disk wwn=$disk"
				cmd+=(-device "vhost-scsi-pci,wwpn=$disk,num_queues=$queue_number")
				;;
			vfio_user)
				notice "using socket $VM_DIR/$vm_num/muser/domain/muser$disk/$disk/cntrl"
				cmd+=(-device "vfio-user-pci,x-msg-timeout=5000,socket=$VM_DIR/$vm_num/muser/domain/muser$disk/$disk/cntrl")
				if [[ "$disk" == "$boot_from" ]]; then
					cmd[-1]+=",bootindex=0"
					boot_disk_present=true
				fi
				;;
			vfio_user_virtio)
				notice "using socket $VM_DIR/vfu_tgt/virtio.$disk"
				cmd+=(-device "vfio-user-pci,x-msg-timeout=5000,socket=$VM_DIR/vfu_tgt/virtio.$disk")
				if [[ "$disk" == "$boot_from" ]]; then
					cmd[-1]+=",bootindex=0"
					boot_disk_present=true
				fi
				;;
			*)
				error "unknown mode '$disk_type', use: virtio, spdk_vhost_scsi, spdk_vhost_blk, kernel_vhost, vfio_user or vfio_user_virtio"
				return 1
				;;
		esac
	done

	if [[ -n $boot_from ]] && [[ $boot_disk_present == false ]]; then
		error "Boot from $boot_from is selected but device is not present"
		return 1
	fi

	((${#qemu_args[@]})) && cmd+=("${qemu_args[@]}")
	notice "Saving to $vm_dir/run.sh"
	cat <<- RUN > "$vm_dir/run.sh"
		#!/bin/bash
		shopt -s nullglob extglob
		rootdir=$rootdir
		source "\$rootdir/test/scheduler/common.sh"
		qemu_log () {
			echo "=== qemu.log ==="
			[[ -s $vm_dir/qemu.log ]] && cat $vm_dir/qemu.log
			echo "=== qemu.log ==="
		}

		if [[ \$EUID -ne 0 ]]; then
			echo "Go away user come back as root"
			exit 1
		fi

		trap "qemu_log" EXIT

		qemu_cmd=($(printf '%s\n' "${cmd[@]}"))
		chmod +r $vm_dir/*
		echo "Running VM in $vm_dir"
		rm -f $qemu_pid_file
		cgroup=\$(get_cgroup \$$)
		set_cgroup_attr_top_bottom \$$ cgroup.subtree_control "+cpuset"
		create_cgroup \$cgroup/qemu.$vm_num
		set_cgroup_attr "\$cgroup/qemu.$vm_num" cpuset.mems "$node_num"
		set_cgroup_attr "\$cgroup/qemu.$vm_num" cpuset.cpus "$task_mask"
		"\${qemu_cmd[@]}"

		echo "Waiting for QEMU pid file"
		sleep 1
		[[ ! -f $qemu_pid_file ]] && sleep 1
		[[ ! -f $qemu_pid_file ]] && echo "ERROR: no qemu pid file found" && exit 1
		set_cgroup_attr "\$cgroup/qemu.$vm_num" cgroup.threads \$(< "$qemu_pid_file")
		exit 0
		# EOF
	RUN
	chmod +x $vm_dir/run.sh

	# Save generated sockets redirection
	echo $ssh_socket > $vm_dir/ssh_socket
	echo $fio_socket > $vm_dir/fio_socket
	echo $monitor_port > $vm_dir/monitor_port

	rm -f $vm_dir/migration_port
	[[ -z $vm_incoming ]] || echo $migration_port > $vm_dir/migration_port

	echo $gdbserver_socket > $vm_dir/gdbserver_socket
	echo $vnc_socket > $vm_dir/vnc_socket

	[[ -z $vm_incoming ]] || ln -fs $VM_DIR/$vm_incoming $vm_dir/vm_incoming
	[[ -z $vm_migrate_to ]] || ln -fs $VM_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
}

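# Example usage (an illustrative sketch; 'Nvme0n1p0' is a hypothetical
# controller name - it assumes a naa.Nvme0n1p0.0 vhost-scsi socket was already
# created through the target's RPC):
#
#   vm_setup --disk-type=spdk_vhost_scsi --force=0 --os="$VM_IMAGE" --disks=Nvme0n1p0
#   vm_run 0
#   vm_wait_for_boot 300 0
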
function vm_run() {
	local OPTIND optchar vm
	local run_all=false
	local vms_to_run=""

	while getopts 'a-:' optchar; do
		case "$optchar" in
			a) run_all=true ;;
			*)
				error "Unknown param $OPTARG"
				return 1
				;;
		esac
	done

	if $run_all; then
		vms_to_run="$(vm_list_all)"
	else
		shift $((OPTIND - 1))
		for vm in "$@"; do
			vm_num_is_valid $vm || return 1
			if [[ ! -x $VM_DIR/$vm/run.sh ]]; then
				error "VM$vm not defined - set it up first"
				return 1
			fi
			vms_to_run+=" $vm"
		done
	fi

	for vm in $vms_to_run; do
		if vm_is_running $vm; then
			warning "VM$vm ($VM_DIR/$vm) already running"
			continue
		fi

		notice "running $VM_DIR/$vm/run.sh"
		if ! $VM_DIR/$vm/run.sh; then
			error "FAILED to run vm $vm"
			return 1
		fi
	done
}

function vm_print_logs() {
	local vm_num=$1
	warning "================"
	warning "QEMU LOG:"
	if [[ -r $VM_DIR/$vm_num/qemu.log ]]; then
		cat $VM_DIR/$vm_num/qemu.log
	else
		warning "LOG qemu.log not found"
	fi

	warning "VM LOG:"
	if [[ -r $VM_DIR/$vm_num/serial.log ]]; then
		cat $VM_DIR/$vm_num/serial.log
	else
		warning "LOG serial.log not found"
	fi

	warning "SEABIOS LOG:"
	if [[ -r $VM_DIR/$vm_num/seabios.log ]]; then
		cat $VM_DIR/$vm_num/seabios.log
	else
		warning "LOG seabios.log not found"
	fi
	warning "================"
}

# Wait for all created VMs to boot.
# param $1 max wait time
function vm_wait_for_boot() {
	assert_number $1

	xtrace_disable

	local all_booted=false
	local timeout_time=$1
	[[ $timeout_time -lt 10 ]] && timeout_time=10
	timeout_time=$(date -d "+$timeout_time seconds" +%s)

	notice "Waiting for VMs to boot"
	shift
	if [[ "$*" == "" ]]; then
		local vms_to_check="$VM_DIR/[0-9]*"
	else
		local vms_to_check=""
		for vm in "$@"; do
			vms_to_check+=" $VM_DIR/$vm"
		done
	fi

	for vm in $vms_to_check; do
		local vm_num
		vm_num=$(basename $vm)
		local i=0
		notice "waiting for VM$vm_num ($vm)"
		while ! vm_os_booted $vm_num; do
			if ! vm_is_running $vm_num; then
				warning "VM $vm_num is not running"
				vm_print_logs $vm_num
				xtrace_restore
				return 1
			fi

			if [[ $(date +%s) -gt $timeout_time ]]; then
				warning "timeout waiting for machines to boot"
				vm_print_logs $vm_num
				xtrace_restore
				return 1
			fi
			# Print a newline after every 30 progress dots
			if ((++i > 30)); then
				i=0
				echo
			fi
			echo -n "."
			sleep 1
		done
		echo ""
		notice "VM$vm_num ready"
		# Change timeout for stopping services to prevent lengthy powerdowns.
		# Check that the remote system is not Cygwin in case of Windows VMs.
		local vm_os
		vm_os=$(vm_exec $vm_num "uname -o")
		if [[ "$vm_os" != "Cygwin" ]]; then
			vm_exec $vm_num "echo 'DefaultTimeoutStopSec=10' >> /etc/systemd/system.conf; systemctl daemon-reexec"
		fi
	done

	notice "all VMs ready"
	xtrace_restore
	return 0
}

function vm_start_fio_server() {
	local OPTIND optchar
	local readonly=''
	local fio_bin=''
	while getopts ':-:' optchar; do
		case "$optchar" in
			-)
				case "$OPTARG" in
					fio-bin=*) local fio_bin="${OPTARG#*=}" ;;
					readonly) local readonly="--readonly" ;;
					*) error "Invalid argument '$OPTARG'" && return 1 ;;
				esac
				;;
			*) error "Invalid argument '$OPTARG'" && return 1 ;;
		esac
	done

	shift $((OPTIND - 1))
	for vm_num in "$@"; do
		notice "Starting fio server on VM$vm_num"
		if [[ $fio_bin != "" ]]; then
			vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
			vm_exec $vm_num /root/fio $readonly --eta=never --server --daemonize=/root/fio.pid
		else
			vm_exec $vm_num fio $readonly --eta=never --server --daemonize=/root/fio.pid
		fi
	done
}

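# Example usage (an illustrative sketch; copies the host-built fio binary to
# VMs 0 and 1 and starts it there in server mode):
#
#   vm_start_fio_server --fio-bin="$FIO_BIN" 0 1
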
function vm_check_scsi_location() {
	# Script to find the wanted disk
	local script='shopt -s nullglob;
	for entry in /sys/block/sd*; do
		disk_type="$(cat $entry/device/vendor)";
		if [[ $disk_type == INTEL* ]] || [[ $disk_type == RAWSCSI* ]] || [[ $disk_type == LIO-ORG* ]]; then
			fname=$(basename $entry);
			echo -n " $fname";
		fi;
	done'

	SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no test disk found!"
		return 1
	fi
}

# Script to perform scsi device reset on all disks in VM
# param $1 VM num
# param $2..$n Disks to perform reset on
function vm_reset_scsi_devices() {
	for disk in "${@:2}"; do
		notice "VM$1 Performing device reset on disk $disk"
		vm_exec $1 sg_reset /dev/$disk -vNd
	done
}

function vm_check_blk_location() {
	local script='shopt -s nullglob; cd /sys/block; echo vd*'
	SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no blk test disk found!"
		return 1
	fi
}

function vm_check_nvme_location() {
	SCSI_DISK="$(vm_exec $1 "grep -l SPDK /sys/class/nvme/*/model" | awk -F/ '{print $5"n1"}')"
	if [[ -z "$SCSI_DISK" ]]; then
		error "no vfio-user nvme test disk found!"
		return 1
	fi
}

function run_fio() {
	local arg
	local job_file=""
	local fio_bin=""
	local vms=()
	local out=""
	local vm
	local run_server_mode=true
	local run_plugin_mode=false
	local fio_start_cmd
	local fio_output_format="normal"
	local fio_gtod_reduce=false
	local wait_for_fio=true

	for arg in "$@"; do
		case "$arg" in
			--job-file=*) local job_file="${arg#*=}" ;;
			--fio-bin=*) local fio_bin="${arg#*=}" ;;
			--vm=*) vms+=("${arg#*=}") ;;
			--out=*)
				local out="${arg#*=}"
				mkdir -p $out
				;;
			--local) run_server_mode=false ;;
			--plugin)
				notice "Using plugin mode. Disabling server mode."
				run_plugin_mode=true
				run_server_mode=false
				;;
			--json) fio_output_format="json" ;;
			--hide-results) hide_results=true ;;
			--no-wait-for-fio) wait_for_fio=false ;;
			--gtod-reduce) fio_gtod_reduce=true ;;
			*)
				error "Invalid argument '$arg'"
				return 1
				;;
		esac
	done

	if [[ -n "$fio_bin" && ! -r "$fio_bin" ]]; then
		error "FIO binary '$fio_bin' does not exist"
		return 1
	fi

	if [[ -z "$fio_bin" ]]; then
		fio_bin="fio"
	fi

	if [[ ! -r "$job_file" ]]; then
		error "Fio job '$job_file' does not exist"
		return 1
	fi

	fio_start_cmd="$fio_bin --eta=never "

	local job_fname
	job_fname=$(basename "$job_file")
	log_fname="${job_fname%%.*}.log"
	fio_start_cmd+=" --output=$out/$log_fname --output-format=$fio_output_format "

	# Prepare the job file for each VM
	for vm in "${vms[@]}"; do
		local vm_num=${vm%%:*}
		local vmdisks=${vm#*:}

		sed "s@filename=@filename=$vmdisks@;s@description=\(.*\)@description=\1 (VM=$vm_num)@" "$job_file" \
			| vm_exec $vm_num "cat > /root/$job_fname"

		if $fio_gtod_reduce; then
			vm_exec $vm_num "echo 'gtod_reduce=1' >> /root/$job_fname"
		fi

		vm_exec $vm_num cat /root/$job_fname

		if $run_server_mode; then
			fio_start_cmd+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$job_fname "
		fi

		if ! $run_server_mode; then
			if [[ -n "$fio_bin" ]]; then
				if ! $run_plugin_mode && [[ -e $fio_bin ]]; then
					vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
					vm_fio_bin="/root/fio"
				else
					vm_fio_bin=$fio_bin
				fi
			fi

			notice "Running local fio on VM $vm_num"
			vm_exec $vm_num "$vm_fio_bin --output=/root/$log_fname --output-format=$fio_output_format /root/$job_fname & echo \$! > /root/fio.pid" &
			vm_exec_pids+=("$!")
		fi
	done

	if ! $run_server_mode; then
		if ! $wait_for_fio; then
			return 0
		fi
		echo "Waiting for guest fio instances to finish..."
		wait "${vm_exec_pids[@]}"

		for vm in "${vms[@]}"; do
			local vm_num=${vm%%:*}
			vm_exec $vm_num cat /root/$log_fname > "$out/vm${vm_num}_${log_fname}"
		done
		return 0
	fi

	$fio_start_cmd
	sleep 1

	if [[ "$fio_output_format" == "json" ]]; then
		# Fio in client-server mode produces a lot of "trash" output
		# preceding the JSON structure, making it impossible to parse.
		# Remove these lines from the file.
		# shellcheck disable=SC2005
		echo "$(grep -vP '^[<\w]' "$out/$log_fname")" > "$out/$log_fname"
	fi

	if [[ ! $hide_results ]]; then
		cat $out/$log_fname
	fi
}

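# Example usage (an illustrative sketch; the job file path is hypothetical and
# each --vm argument is "<vm_num>:<disk list>" as consumed by the loop above):
#
#   run_fio --fio-bin="$FIO_BIN" --job-file="$testdir/fio_jobs/default_integrity.job" \
#       --out="$VHOST_DIR/fio_results" --json --vm="0:/dev/sdb" --vm="1:/dev/sdc"
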
# Parsing fio results for json output and client-server mode only!
function parse_fio_results() {
	local fio_log_dir=$1
	local fio_log_filename=$2
	local fio_csv_filename

	# Variables used in parsing loop
	local log_file
	local rwmode mixread mixwrite
	local lat_key lat_divisor
	local client_stats iops bw
	local read_avg_lat read_min_lat read_max_lat
	local write_avg_lat write_min_lat write_max_lat
	local clients

	declare -A results
	results["iops"]=0
	results["bw"]=0
	results["avg_lat"]=0
	results["min_lat"]=0
	results["max_lat"]=0

	# Loop using the log filename to see if there are any other
	# matching files. This is in case we ran the fio test multiple times.
	log_files=("$fio_log_dir/$fio_log_filename"*)
	for log_file in "${log_files[@]}"; do
		# Save the entire array to avoid opening $log_file multiple times
		clients=$(jq -r '.client_stats' "$log_file")
		[[ -n $clients ]]
		rwmode=$(jq -r '.[0]["job options"]["rw"]' <<< "$clients")
		mixread=1
		mixwrite=1
		if [[ $rwmode = *"rw"* ]]; then
			mixread=$(jq -r '.[0]["job options"]["rwmixread"]' <<< "$clients")
			mixread=$(bc -l <<< "scale=3; $mixread/100")
			mixwrite=$(bc -l <<< "scale=3; 1-$mixread")
		fi

		client_stats=$(jq -r '.[] | select(.jobname == "All clients")' <<< "$clients")
		if [[ -z $client_stats ]]; then
			# Potentially single client (single VM)
			client_stats=$(jq -r '.[]' <<< "$clients")
		fi

		# Check latency unit and later normalize to microseconds
		lat_key="lat_us"
		lat_divisor=1
		if jq -er '.read["lat_ns"]' &> /dev/null <<< $client_stats; then
			lat_key="lat_ns"
			lat_divisor=1000
		fi

		# Horrific bash floating point arithmetic operations below.
		# Viewer discretion is advised.
		iops=$(jq -r '[.read["iops"],.write["iops"]] | add' <<< $client_stats)
		bw=$(jq -r '[.read["bw"],.write["bw"]] | add' <<< $client_stats)
		read_avg_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["mean"]' <<< $client_stats)
		read_min_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["min"]' <<< $client_stats)
		read_max_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["max"]' <<< $client_stats)
		write_avg_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["mean"]' <<< $client_stats)
		write_min_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["min"]' <<< $client_stats)
		write_max_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["max"]' <<< $client_stats)

		results["iops"]=$(bc -l <<< "${results[iops]} + $iops")
		results["bw"]=$(bc -l <<< "${results[bw]} + $bw")
		results["avg_lat"]=$(bc -l <<< "${results[avg_lat]} + ($mixread*$read_avg_lat + $mixwrite*$write_avg_lat)/$lat_divisor")
		results["min_lat"]=$(bc -l <<< "${results[min_lat]} + ($mixread*$read_min_lat + $mixwrite*$write_min_lat)/$lat_divisor")
		results["max_lat"]=$(bc -l <<< "${results[max_lat]} + ($mixread*$read_max_lat + $mixwrite*$write_max_lat)/$lat_divisor")
	done

	results["iops"]=$(bc -l <<< "scale=3; ${results[iops]} / ${#log_files[@]}")
	results["bw"]=$(bc -l <<< "scale=3; ${results[bw]} / ${#log_files[@]}")
	results["avg_lat"]=$(bc -l <<< "scale=3; ${results[avg_lat]} / ${#log_files[@]}")
	results["min_lat"]=$(bc -l <<< "scale=3; ${results[min_lat]} / ${#log_files[@]}")
	results["max_lat"]=$(bc -l <<< "scale=3; ${results[max_lat]} / ${#log_files[@]}")

	fio_csv_filename="${fio_log_filename%%.*}.csv"
	cat <<- EOF > "$fio_log_dir/$fio_csv_filename"
		iops,bw,avg_lat,min_lat,max_lat
		${results["iops"]},${results["bw"]},${results["avg_lat"]},${results["min_lat"]},${results["max_lat"]}
	EOF
}

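# Example usage (an illustrative sketch; averages every matching
# default_integrity.log* JSON log in the directory into default_integrity.csv):
#
#   parse_fio_results "$VHOST_DIR/fio_results" "default_integrity.log"
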
# Shutdown or kill any running VM and SPDK APP.
#
function at_app_exit() {
	local vhost_name

	notice "APP EXITING"
	notice "killing all VMs"
	vm_kill_all
	# Kill vhost application
	notice "killing vhost app"

	for vhost_name in "$TARGET_DIR"/*; do
		vhost_kill "$(basename "$vhost_name")"
	done

	notice "EXIT DONE"
}

function error_exit() {
	trap - ERR
	print_backtrace
	set +e
	error "Error on $1 $2"

	at_app_exit
	exit 1
}

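# Example usage (an illustrative sketch; test scripts typically install
# error_exit as an ERR trap so any failure tears the whole setup down):
#
#   trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
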
function lookup_dev_irqs() {
	local vm=$1 irqs=() cpus=()
	local script_get_irqs script_get_cpus

	mkdir -p "$VHOST_DIR/irqs"

	# All vhost tests depend either on virtio_blk or virtio_scsi drivers on the VM side.
	# Considering that, simply iterate over the virtio bus and pick the pci device corresponding
	# to each virtio device.
	# For vfio-user setup, look for bare nvme devices.

	script_get_irqs=$(
		cat <<- 'SCRIPT'
			shopt -s nullglob
			for virtio in /sys/bus/virtio/devices/virtio*; do
			  irqs+=("$(readlink -f "$virtio")/../msi_irqs/"*)
			done
			irqs+=(/sys/class/nvme/nvme*/device/msi_irqs/*)
			printf '%u\n' "${irqs[@]##*/}"
		SCRIPT
	)

	script_get_cpus=$(
		cat <<- 'SCRIPT'
			cpus=(/sys/devices/system/cpu/cpu[0-9]*)
			printf '%u\n' "${cpus[@]##*cpu}"
		SCRIPT
	)

	irqs=($(vm_exec "$vm" "$script_get_irqs"))
	cpus=($(vm_exec "$vm" "$script_get_cpus"))
	((${#irqs[@]} > 0 && ${#cpus[@]} > 0))

	printf '%u\n' "${irqs[@]}" > "$VHOST_DIR/irqs/$vm.irqs"
	printf '%u\n' "${cpus[@]}" > "$VHOST_DIR/irqs/$vm.cpus"
}

function irqs() {
	local vm
	for vm; do
		vm_exec "$vm" "while :; do cat /proc/interrupts; sleep 1s; done" > "$VHOST_DIR/irqs/$vm.interrupts" &
		irqs_pids+=($!)
	done
}

function parse_irqs() {
	local iter=${1:-1}
	"$rootdir/test/vhost/parse_irqs.sh" "$VHOST_DIR/irqs/"*.interrupts
	rm "$VHOST_DIR/irqs/"*.interrupts

	mkdir -p "$VHOST_DIR/irqs/$iter"
	mv "$VHOST_DIR/irqs/"*.parsed "$VHOST_DIR/irqs/$iter/"
}

function collect_perf() {
	local cpus=$1 outf=$2 runtime=$3 delay=$4

	mkdir -p "$VHOST_DIR/perf"

	perf record -g \
		${cpus:+-C "$cpus"} \
		${outf:+-o "$outf"} \
		${delay:+-D $((delay * 1000))} \
		-z \
		${runtime:+ -- sleep $((runtime + delay))}
}

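# Example usage (an illustrative sketch; samples CPUs 0-3 for 60 seconds after
# a 5-second delay, writing compressed samples to a .perf file that parse_perf
# can pick up later):
#
#   collect_perf "0-3" "$VHOST_DIR/perf/run1.perf" 60 5
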
function parse_perf() {
	local iter=${1:-1}
	local report out

	mkdir -p "$VHOST_DIR/perf/$iter"
	shift

	for report in "$@" "$VHOST_DIR/perf/"*.perf; do
		[[ -f $report ]] || continue
		perf report \
			-n \
			-i "$report" \
			--header \
			--stdio > "$VHOST_DIR/perf/$iter/${report##*/}.parsed"
		cp "$report" "$VHOST_DIR/perf/$iter/"
	done
	rm "$VHOST_DIR/perf/"*.perf
}

function get_from_fio() {
	local opt=$1 conf=$2

	[[ -n $opt && -f $conf ]] || return 1

	awk -F= "/^$opt/{print \$2}" "$conf"
}

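# Example usage (an illustrative sketch; extracts the runtime= value from a
# fio job file, assuming the option is present there):
#
#   runtime=$(get_from_fio runtime "$job_file")
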
function get_free_tcp_port() {
	local port=$1 to=${2:-1} sockets=()

	mapfile -t sockets < /proc/net/tcp

	# If there's a TCP socket in a listening state, keep incrementing $port until
	# we find one that's not used. $to determines how long we should look for:
	#  0: don't increment, just check if the given $port is in use
	# >0: increment $to times
	# <0: no increment limit

	while [[ ${sockets[*]} == *":$(printf '%04X' "$port") 00000000:0000 0A"* ]]; do
		((to-- && ++port <= 65535)) || return 1
	done

	echo "$port"
}

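# Example usage (an illustrative sketch; returns the first unused port at or
# above 10000, probing up to 100 candidates):
#
#   port=$(get_free_tcp_port 10000 100)
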
function gen_cpu_vm_spdk_config() (
	local vm_count=$1 vm_cpu_num=$2 vm
	local spdk_cpu_num=${3:-1} spdk_cpu_list=${4:-} spdk_cpus
	local nodes=("${@:5}") node
	local env

	# Pick which SPDK cpu setting to export by variable name; it's expanded
	# indirectly below via ${!spdk_cpus}.
	spdk_cpus=spdk_cpu_num
	[[ -n $spdk_cpu_list ]] && spdk_cpus=spdk_cpu_list

	if ((${#nodes[@]} > 0)); then
		((${#nodes[@]} == 1)) && node=${nodes[0]}
		for ((vm = 0; vm < vm_count; vm++)); do
			env+=("VM${vm}_NODE=${nodes[vm]:-$node}")
		done
	fi

	env+=("$spdk_cpus=${!spdk_cpus}")
	env+=("vm_count=$vm_count")
	env+=("vm_cpu_num=$vm_cpu_num")

	export "${env[@]}"

	"$rootdir/scripts/perf/vhost/conf-generator" -p cpu
)
1424