xref: /spdk/test/vhost/common.sh (revision cc6920a4763d4b9a43aa40583c8397d8f14fa100)
# Default knobs; ${VAR=default} assigns only when VAR is unset.
# NOTE(review): a bare ${VAR=default} expansion also *executes* the resulting
# value as a command (e.g. `false` below); presumably a leading `: ` was
# intended (`: ${VAR=default}`) — confirm against upstream.
${SPDK_VHOST_VERBOSE=false}
${VHOST_DIR="$HOME/vhost_test"}
${QEMU_BIN="qemu-system-x86_64"}
${QEMU_IMG_BIN="qemu-img"}

# Workspace layout; requires $rootdir to be set by the sourcing script.
TEST_DIR=$(readlink -f $rootdir/..)
VM_DIR=$VHOST_DIR/vms
TARGET_DIR=$VHOST_DIR/vhost
VM_PASSWORD="root"

# VM image and fio binary, overridable via environment.
VM_IMAGE=${VM_IMAGE:-"$DEPENDENCY_DIR/spdk_test_image.qcow2"}
DEFAULT_FIO_BIN=${DEFAULT_FIO_BIN:-"$DEPENDENCY_DIR/fio"}
FIO_BIN=${FIO_BIN:-"$DEFAULT_FIO_BIN"}

# Directory of the top-level script that sourced this file ($0, not this file).
WORKDIR=$(readlink -f "$(dirname "$0")")

# Bail out early when the QEMU tooling is unavailable.
# NOTE(review): error() is defined further down this file; at source time this
# call only resolves because the check normally passes — confirm.
if ! hash $QEMU_IMG_BIN $QEMU_BIN; then
	error 'QEMU is not installed on this system. Unable to run vhost tests.'
	exit 1
fi

mkdir -p $VHOST_DIR
mkdir -p $VM_DIR
mkdir -p $TARGET_DIR

#
# Source config describing QEMU and VHOST cores and NUMA
#
source $rootdir/test/vhost/common/autotest.config
30
function vhosttestinit() {
	# In "iso" mode, bind devices to userspace drivers before testing.
	if [[ "$TEST_MODE" == "iso" ]]; then
		$rootdir/scripts/setup.sh
	fi

	# Unpack the compressed VM image when only the .gz variant exists.
	if [[ -e $VM_IMAGE.gz ]] && [[ ! -e $VM_IMAGE ]]; then
		gzip -dc "$VM_IMAGE.gz" > "$VM_IMAGE"
	fi

	# The image must exist unless the caller opted out with --no_vm.
	if [[ -f $VM_IMAGE ]]; then
		return 0
	fi
	if [[ $1 == "--no_vm" ]]; then
		return 0
	fi
	echo "$VM_IMAGE is missing" >&2
	return 1
}
47
function vhosttestfini() {
	# Undo iso-mode device binding performed by vhosttestinit().
	[[ "$TEST_MODE" != "iso" ]] || $rootdir/scripts/setup.sh reset
}
53
function message() {
	# Print "<TYPE><ctx>: <text>". When $SPDK_VHOST_VERBOSE is true, <ctx>
	# names the calling function (or file, when called from sourced top level).
	local ctx=""
	if $SPDK_VHOST_VERBOSE; then
		if [[ ${FUNCNAME[2]} == "source" ]]; then
			ctx=" (file $(basename ${BASH_SOURCE[1]}):${BASH_LINENO[1]})"
		else
			ctx=" (function ${FUNCNAME[2]}:${BASH_LINENO[1]})"
		fi
	fi

	local msg_type="$1"
	shift
	echo -e "${msg_type}${ctx}: $*"
}
68
function fail() {
	# Print a framed FAIL message on stderr and abort the whole script.
	{
		echo "==========="
		message "FAIL" "$@"
		echo "==========="
	} >&2
	exit 1
}
75
function error() {
	# Print a framed ERROR message on stderr; the function itself returns
	# non-zero (via `false`) so it can trip an ERR trap.
	{
		echo "==========="
		message "ERROR" "$@"
		echo "==========="
	} >&2
	# Don't 'return 1' since the stack trace will be incomplete (why?) missing upper command.
	false
}
83
# Emit a WARN-level message on stderr.
function warning() {
	message "WARN" "$@" >&2
}
87
# Emit an INFO-level message on stdout.
function notice() {
	message "INFO" "$@"
}
91
function check_qemu_packedring_support() {
	# Packed virtqueue support requires QEMU >= 4.2.0; report an error on
	# anything older. Note: the comparison must be a numeric version sort
	# (sort -V) — the previous lexicographic `<` wrongly classified e.g.
	# QEMU 10.0.0 as older than 4.2.0.
	qemu_version=$($QEMU_BIN -version | grep -Po "(?<=version )\d+.\d+.\d+")
	if [[ "$(printf '%s\n' "$qemu_version" "4.2.0" | sort -V | head -n1)" != "4.2.0" ]]; then
		error "This qemu binary does not support packed ring"
	fi
}
98
function get_vhost_dir() {
	# Print the runtime state directory for the named vhost instance.
	# Fails when no name is given.
	[[ -n "$1" ]] || {
		error "vhost name must be provided to get_vhost_dir"
		return 1
	}

	echo "$TARGET_DIR/$1"
}
109
# Start an SPDK vhost application instance in the background.
#   -n name - instance name (required); state lives in $TARGET_DIR/<name>
#   -a args - extra command-line arguments for the vhost binary
#   -b bin  - binary name under $SPDK_BIN_DIR (default "vhost")
#   -g      - skip automatic NVMe bdev configuration via gen_nvme.sh
# Returns non-zero on bad arguments or a missing binary.
function vhost_run() {
	local OPTIND
	local vhost_name
	local run_gen_nvme=true
	local vhost_bin="vhost"

	while getopts "n:a:b:g" optchar; do
		case "$optchar" in
			n) vhost_name="$OPTARG" ;;
			# NOTE(review): vhost_args is not declared local — it leaks into
			# (and may be inherited from) the caller's scope.
			a) vhost_args="$OPTARG" ;;
			b) vhost_bin="$OPTARG" ;;
			g)
				run_gen_nvme=false
				notice "Skipping gen_nvme.sh NVMe bdev configuration"
				;;
			*)
				error "Unknown param $optchar"
				return 1
				;;
		esac
	done

	if [[ -z "$vhost_name" ]]; then
		error "vhost name must be provided to vhost_run"
		return 1
	fi

	local vhost_dir
	vhost_dir="$(get_vhost_dir $vhost_name)"
	local vhost_app="$SPDK_BIN_DIR/$vhost_bin"
	local vhost_log_file="$vhost_dir/vhost.log"
	local vhost_pid_file="$vhost_dir/vhost.pid"
	local vhost_socket="$vhost_dir/usvhost"
	notice "starting vhost app in background"
	# Kill any previous instance with the same name and start from a clean dir.
	[[ -r "$vhost_pid_file" ]] && vhost_kill $vhost_name
	[[ -d $vhost_dir ]] && rm -f $vhost_dir/*
	mkdir -p $vhost_dir

	if [[ ! -x $vhost_app ]]; then
		error "application not found: $vhost_app"
		return 1
	fi

	# -r: RPC socket path, -S: directory where vhost-user sockets are created.
	local cmd="$vhost_app -r $vhost_dir/rpc.sock -S $vhost_dir $vhost_args"

	# NOTE(review): nothing here redirects output to $vhost_log_file —
	# confirm whether the app is expected to log there itself.
	notice "Loging to:   $vhost_log_file"
	notice "Socket:      $vhost_socket"
	notice "Command:     $cmd"

	timing_enter vhost_start

	# $cmd is intentionally unquoted so it word-splits into its options.
	$cmd &
	vhost_pid=$!
	echo $vhost_pid > $vhost_pid_file

	notice "waiting for app to run..."
	waitforlisten "$vhost_pid" "$vhost_dir/rpc.sock"
	#do not generate nvmes if pci access is disabled
	if [[ "$cmd" != *"--no-pci"* ]] && [[ "$cmd" != *"-u"* ]] && $run_gen_nvme; then
		$rootdir/scripts/gen_nvme.sh | $rootdir/scripts/rpc.py -s $vhost_dir/rpc.sock load_subsystem_config
	fi

	notice "vhost started - pid=$vhost_pid"

	timing_exit vhost_start
}
176
# Stop the named vhost instance: SIGINT, wait up to 60 s, escalate to SIGABRT,
# then remove the instance directory.
# Returns 0 when nothing was running; 1 when the app refused to die.
function vhost_kill() {
	local rc=0
	local vhost_name="$1"

	if [[ -z "$vhost_name" ]]; then
		error "Must provide vhost name to vhost_kill"
		return 0
	fi

	local vhost_dir
	vhost_dir="$(get_vhost_dir $vhost_name)"
	local vhost_pid_file="$vhost_dir/vhost.pid"

	if [[ ! -r $vhost_pid_file ]]; then
		warning "no vhost pid file found"
		return 0
	fi

	timing_enter vhost_kill
	local vhost_pid
	vhost_pid="$(cat $vhost_pid_file)"
	notice "killing vhost (PID $vhost_pid) app"

	# kill -INT succeeds if the signal could be delivered (process exists).
	if kill -INT $vhost_pid > /dev/null; then
		notice "sent SIGINT to vhost app - waiting 60 seconds to exit"
		for ((i = 0; i < 60; i++)); do
			if kill -0 $vhost_pid; then
				echo "."
				sleep 1
			else
				break
			fi
		done
		if kill -0 $vhost_pid; then
			error "ERROR: vhost was NOT killed - sending SIGABRT"
			kill -ABRT $vhost_pid
			rm $vhost_pid_file
			rc=1
		else
			# NOTE(review): this loop has no sleep — it busy-spins if the
			# process somehow lingers; in practice kill -0 already failed
			# above, so the loop body is effectively unreachable.
			while kill -0 $vhost_pid; do
				echo "."
			done
		fi
	elif kill -0 $vhost_pid; then
		error "vhost NOT killed - you need to kill it manually"
		rc=1
	else
		notice "vhost was not running"
	fi

	timing_exit vhost_kill
	# Keep the pid file around on failure so a later retry can find it.
	if [[ $rc == 0 ]]; then
		rm $vhost_pid_file
	fi

	rm -rf "$vhost_dir"

	return $rc
}
236
function vhost_rpc() {
	# Forward an RPC to the named vhost instance.
	# $1 - vhost name; remaining args go straight to rpc.py.
	local vhost_name="$1"

	[[ -n "$vhost_name" ]] || {
		error "vhost name must be provided to vhost_rpc"
		return 1
	}
	shift

	$rootdir/scripts/rpc.py -s $(get_vhost_dir $vhost_name)/rpc.sock "$@"
}
248
249###
250# Mgmt functions
251###
252
function assert_number() {
	# Succeed only when $1 is a non-negative integer.
	# The regex is anchored (matching vm_num_is_valid below) — the previous
	# unanchored pattern accepted strings like "12abc".
	[[ "$1" =~ ^[0-9]+$ ]] && return 0

	error "Invalid or missing parameter: need number but got '$1'"
	return 1
}
259
260# Run command on vm with given password
261# First argument - vm number
262# Second argument - ssh password for vm
263#
# Run a command in VM $1 over ssh using a caller-supplied password.
# $1 - VM number, $2 - ssh password, remaining args - remote command.
function vm_sshpass() {
	vm_num_is_valid $1 || return 1

	local ssh_cmd
	# NOTE(review): the password is visible in the process list (sshpass -p).
	ssh_cmd="sshpass -p $2 ssh \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-p $(vm_ssh_socket $1) $VM_SSH_OPTIONS 127.0.0.1"

	shift 2
	# Intentionally unquoted so ssh_cmd word-splits into its options.
	$ssh_cmd "$@"
}
277
278# Helper to validate VM number
279# param $1 VM number
280#
function vm_num_is_valid() {
	# A valid VM number is a string of digits only.
	if [[ "$1" =~ ^[0-9]+$ ]]; then
		return 0
	fi

	error "Invalid or missing paramter: vm number '$1'"
	return 1
}
287
288# Print network socket for given VM number
289# param $1 virtual machine number
290#
function vm_ssh_socket() {
	# Print the host TCP port forwarded to VM $1's ssh port.
	vm_num_is_valid $1 || return 1

	cat "$VM_DIR/$1/ssh_socket"
}
297
function vm_fio_socket() {
	# Print the host TCP port forwarded to VM $1's fio server port.
	vm_num_is_valid $1 || return 1

	cat "$VM_DIR/$1/fio_socket"
}
304
305# Execute command on given VM
306# param $1 virtual machine number
307#
# Run a command inside VM $1 over ssh (forwarded port on 127.0.0.1),
# authenticating as root with $VM_PASSWORD. Remaining args form the remote
# command; the remote exit status is propagated.
function vm_exec() {
	vm_num_is_valid $1 || return 1

	local vm_num="$1"
	shift

	sshpass -p "$VM_PASSWORD" ssh \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-p $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS 127.0.0.1 \
		"$@"
}
321
322# Execute scp command on given VM
323# param $1 virtual machine number
324#
# Copy files to/from VM $1 via scp over the forwarded ssh port.
# Remaining args are passed straight to scp (paths and options).
function vm_scp() {
	vm_num_is_valid $1 || return 1

	local vm_num="$1"
	shift

	sshpass -p "$VM_PASSWORD" scp \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-P $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS \
		"$@"
}
338
339# check if specified VM is running
340# param $1 VM num
# Check whether VM $1 has a live QEMU process (per its qemu.pid file).
# Side effect: removes a stale qemu.pid when running as root.
function vm_is_running() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 1
	fi

	local vm_pid
	vm_pid="$(cat $vm_dir/qemu.pid)"

	# kill -0 only probes for existence; /bin/kill bypasses the shell builtin.
	if /bin/kill -0 $vm_pid; then
		return 0
	else
		# A non-root user cannot signal a root-owned QEMU, so kill -0 fails
		# even for live processes — err on the side of "running".
		if [[ $EUID -ne 0 ]]; then
			warning "not root - assuming VM running since can't be checked"
			return 0
		fi

		# not running - remove pid file
		rm $vm_dir/qemu.pid
		return 1
	fi
}
365
366# check if specified VM is running
367# param $1 VM num
# Check whether the OS inside VM $1 answers over ssh.
function vm_os_booted() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		error "VM $1 is not running"
		return 1
	fi

	# Probe with a fresh connection (no ControlMaster reuse) so a stale
	# multiplexed session cannot fake a "booted" answer.
	if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_exec $1 "true" 2> /dev/null; then
		# Shutdown existing master. Ignore errors as it might not exist.
		VM_SSH_OPTIONS="-O exit" vm_exec $1 "true" 2> /dev/null
		return 1
	fi

	return 0
}
385
386# Shutdown given VM
387# param $1 virtual machine number
388# return non-zero in case of error.
# Ask the guest OS of VM $1 to power off gracefully. Returns 0 when the VM
# is not running; does not wait for the shutdown to complete.
function vm_shutdown() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"
	if [[ ! -d "$vm_dir" ]]; then
		error "VM$1 ($vm_dir) not exist - setup it first"
		return 1
	fi

	if ! vm_is_running $1; then
		notice "VM$1 ($vm_dir) is not running"
		return 0
	fi

	# Temporarily disabling exit flag for next ssh command, since it will
	# "fail" due to shutdown
	notice "Shutting down virtual machine $vm_dir"
	set +e
	vm_exec $1 "nohup sh -c 'shutdown -h -P now'" || true
	notice "VM$1 is shutting down - wait a while to complete"
	set -e
}
410
411# Kill given VM
412# param $1 virtual machine number
413#
# Forcefully terminate VM $1's QEMU process (SIGTERM) and delete its
# directory. Returns 0 when no pid file exists.
function vm_kill() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 0
	fi

	local vm_pid
	vm_pid="$(cat $vm_dir/qemu.pid)"

	notice "Killing virtual machine $vm_dir (pid=$vm_pid)"
	# The TERM kill is expected to succeed; only when it fails AND the VM
	# still looks alive do we report failure. (The original comment "First
	# kill should fail, second one must fail" appears inverted.)
	if /bin/kill $vm_pid; then
		notice "process $vm_pid killed"
		rm $vm_dir/qemu.pid
		rm -rf $vm_dir
	elif vm_is_running $1; then
		error "Process $vm_pid NOT killed"
		return 1
	fi
}
436
437# List all VM numbers in VM_DIR
438#
function vm_list_all() {
	# Print the number of every defined VM (directories [0-9]* under
	# $VM_DIR), one per line; prints nothing when none exist.
	local entries=()
	entries=($(
		shopt -s nullglob
		echo $VM_DIR/[0-9]*
	))
	local entry
	for entry in "${entries[@]}"; do
		basename "$entry"
	done
}
449
450# Kills all VM in $VM_DIR
451#
function vm_kill_all() {
	# Kill every defined VM, then wipe the whole VM directory tree.
	local vm
	for vm in $(vm_list_all); do
		vm_kill "$vm"
	done

	rm -rf $VM_DIR
}
460
461# Shutdown all VM in $VM_DIR
462#
# Gracefully shut down every defined VM and wait up to 30 s for all of them
# to stop.
function vm_shutdown_all() {
	# XXX: temporarily disable to debug shutdown issue
	# xtrace_disable

	local vms
	vms=$(vm_list_all)
	local vm

	for vm in $vms; do
		vm_shutdown $vm
	done

	notice "Waiting for VMs to shutdown..."
	local timeo=30
	while [[ $timeo -gt 0 ]]; do
		local all_vms_down=1
		for vm in $vms; do
			if vm_is_running $vm; then
				all_vms_down=0
				break
			fi
		done

		if [[ $all_vms_down == 1 ]]; then
			notice "All VMs successfully shut down"
			xtrace_restore
			return 0
		fi

		((timeo -= 1))
		sleep 1
	done

	# NOTE(review): on timeout the VM directory is removed and the function
	# returns success even though some VMs may still be running — confirm
	# whether a non-zero return is wanted here.
	rm -rf $VM_DIR

	xtrace_restore
}
500
# Create a new VM definition directory ($VM_DIR/<num>) containing a run.sh
# QEMU launcher plus the socket/port bookkeeping files.
# Long options:
#   --os=path, --os-mode=original|backing|snapshot, --qemu-args=...,
#   --disk-type=..., --read-only=..., --disks=a[,type]:b[,type]:...,
#   --raw-cache=..., --force=num, --memory=MB, --queue_num=N,
#   --incoming=vm_num, --migrate-to=vm_num, --vhost-name=name,
#   --spdk-boot=disk, --packed
function vm_setup() {
	xtrace_disable
	local OPTIND optchar vm_num

	local os=""
	local os_mode=""
	local qemu_args=()
	local disk_type_g=NOT_DEFINED
	# NOTE(review): read_only is parsed but never referenced below — confirm.
	local read_only="false"
	# List created of a strings separated with a ":"
	local disks=()
	local raw_cache=""
	local vm_incoming=""
	local vm_migrate_to=""
	local force_vm=""
	local guest_memory=1024
	local queue_number=""
	local vhost_dir
	local packed=false
	vhost_dir="$(get_vhost_dir 0)"
	while getopts ':-:' optchar; do
		case "$optchar" in
			-)
				case "$OPTARG" in
					os=*) os="${OPTARG#*=}" ;;
					os-mode=*) os_mode="${OPTARG#*=}" ;;
					qemu-args=*) qemu_args+=("${OPTARG#*=}") ;;
					disk-type=*) disk_type_g="${OPTARG#*=}" ;;
					read-only=*) read_only="${OPTARG#*=}" ;;
					disks=*) IFS=":" read -ra disks <<< "${OPTARG#*=}" ;;
					raw-cache=*) raw_cache=",cache${OPTARG#*=}" ;;
					force=*) force_vm=${OPTARG#*=} ;;
					memory=*) guest_memory=${OPTARG#*=} ;;
					queue_num=*) queue_number=${OPTARG#*=} ;;
					incoming=*) vm_incoming="${OPTARG#*=}" ;;
					migrate-to=*) vm_migrate_to="${OPTARG#*=}" ;;
					vhost-name=*) vhost_dir="$(get_vhost_dir ${OPTARG#*=})" ;;
					spdk-boot=*) local boot_from="${OPTARG#*=}" ;;
					packed) packed=true ;;
					*)
						error "unknown argument $OPTARG"
						return 1
						;;
				esac
				;;
			*)
				error "vm_create Unknown param $OPTARG"
				return 1
				;;
		esac
	done

	# Find next directory we can use
	if [[ -n $force_vm ]]; then
		vm_num=$force_vm

		vm_num_is_valid $vm_num || return 1
		local vm_dir="$VM_DIR/$vm_num"
		[[ -d $vm_dir ]] && warning "removing existing VM in '$vm_dir'"
	else
		local vm_dir=""

		set +x
		for ((i = 0; i <= 256; i++)); do
			local vm_dir="$VM_DIR/$i"
			[[ ! -d $vm_dir ]] && break
		done
		xtrace_restore

		vm_num=$i
	fi

	if [[ $vm_num -eq 256 ]]; then
		error "no free VM found. do some cleanup (256 VMs created, are you insane?)"
		return 1
	fi

	# --incoming/--migrate-to are mutually exclusive and constrain os/os_mode.
	if [[ -n "$vm_migrate_to" && -n "$vm_incoming" ]]; then
		error "'--incoming' and '--migrate-to' cannot be used together"
		return 1
	elif [[ -n "$vm_incoming" ]]; then
		if [[ -n "$os_mode" || -n "$os" ]]; then
			error "'--incoming' can't be used together with '--os' nor '--os-mode'"
			return 1
		fi

		os_mode="original"
		os="$VM_DIR/$vm_incoming/os.qcow2"
	elif [[ -n "$vm_migrate_to" ]]; then
		[[ "$os_mode" != "backing" ]] && warning "Using 'backing' mode for OS since '--migrate-to' is used"
		os_mode=backing
	fi

	notice "Creating new VM in $vm_dir"
	mkdir -p $vm_dir

	# OS image handling: backing = qcow2 backed by $os, original = use $os
	# directly, snapshot (default) = run QEMU with -snapshot.
	if [[ "$os_mode" == "backing" ]]; then
		notice "Creating backing file for OS image file: $os"
		if ! $QEMU_IMG_BIN create -f qcow2 -b $os $vm_dir/os.qcow2; then
			error "Failed to create OS backing file in '$vm_dir/os.qcow2' using '$os'"
			return 1
		fi

		local os=$vm_dir/os.qcow2
	elif [[ "$os_mode" == "original" ]]; then
		warning "Using original OS image file: $os"
	elif [[ "$os_mode" != "snapshot" ]]; then
		if [[ -z "$os_mode" ]]; then
			notice "No '--os-mode' parameter provided - using 'snapshot'"
			os_mode="snapshot"
		else
			error "Invalid '--os-mode=$os_mode'"
			return 1
		fi
	fi

	# Per-VM CPU mask and NUMA node come from autotest.config (VM_<n>_...).
	local qemu_mask_param="VM_${vm_num}_qemu_mask"
	local qemu_numa_node_param="VM_${vm_num}_qemu_numa_node"

	if [[ -z "${!qemu_mask_param}" ]] || [[ -z "${!qemu_numa_node_param}" ]]; then
		error "Parameters ${qemu_mask_param} or ${qemu_numa_node_param} not found in autotest.config file"
		return 1
	fi

	local task_mask=${!qemu_mask_param}

	notice "TASK MASK: $task_mask"
	local cmd=(taskset -a -c "$task_mask" "$QEMU_BIN")
	# Deterministic per-VM port block: 10000 + 100 * vm_num.
	local vm_socket_offset=$((10000 + 100 * vm_num))

	local ssh_socket=$((vm_socket_offset + 0))
	local fio_socket=$((vm_socket_offset + 1))
	local monitor_port=$((vm_socket_offset + 2))
	local migration_port=$((vm_socket_offset + 3))
	local gdbserver_socket=$((vm_socket_offset + 4))
	local vnc_socket=$((100 + vm_num))
	local qemu_pid_file="$vm_dir/qemu.pid"
	local cpu_num=0

	set +x
	# cpu list for taskset can be comma separated or range
	# or both at the same time, so first split on commas
	cpu_list=$(echo $task_mask | tr "," "\n")
	# NOTE(review): queue_number is reset here, so a user-provided
	# --queue_num value is always overwritten — confirm intent.
	queue_number=0
	for c in $cpu_list; do
		# if range is detected - count how many cpus
		if [[ $c =~ [0-9]+-[0-9]+ ]]; then
			# "a-b" evaluates arithmetically to a-b; strip the sign to get
			# the span, e.g. "2-5" -> 4 cpus (2,3,4,5).
			val=$((c - 1))
			val=${val#-}
		else
			val=1
		fi
		cpu_num=$((cpu_num + val))
		queue_number=$((queue_number + val))
	done

	if [ -z $queue_number ]; then
		queue_number=$cpu_num
	fi

	xtrace_restore

	local node_num=${!qemu_numa_node_param}
	local boot_disk_present=false
	notice "NUMA NODE: $node_num"
	# Base QEMU command line: memory, KVM, vga/vnc, hugepage-backed RAM
	# bound to the configured NUMA node, monitor, logging and user-mode
	# networking with ssh/fio port forwards.
	cmd+=(-m "$guest_memory" --enable-kvm -cpu host -smp "$cpu_num" -vga std -vnc ":$vnc_socket" -daemonize)
	cmd+=(-object "memory-backend-file,id=mem,size=${guest_memory}M,mem-path=/dev/hugepages,share=on,prealloc=yes,host-nodes=$node_num,policy=bind")
	[[ $os_mode == snapshot ]] && cmd+=(-snapshot)
	[[ -n "$vm_incoming" ]] && cmd+=(-incoming "tcp:0:$migration_port")
	cmd+=(-monitor "telnet:127.0.0.1:$monitor_port,server,nowait")
	cmd+=(-numa "node,memdev=mem")
	cmd+=(-pidfile "$qemu_pid_file")
	cmd+=(-serial "file:$vm_dir/serial.log")
	cmd+=(-D "$vm_dir/qemu.log")
	cmd+=(-chardev "file,path=$vm_dir/seabios.log,id=seabios" -device "isa-debugcon,iobase=0x402,chardev=seabios")
	cmd+=(-net "user,hostfwd=tcp::$ssh_socket-:22,hostfwd=tcp::$fio_socket-:8765")
	cmd+=(-net nic)
	if [[ -z "$boot_from" ]]; then
		cmd+=(-drive "file=$os,if=none,id=os_disk")
		cmd+=(-device "ide-hd,drive=os_disk,bootindex=0")
	fi

	if ((${#disks[@]} == 0)) && [[ $disk_type_g == virtio* ]]; then
		disks=("default_virtio.img")
	elif ((${#disks[@]} == 0)); then
		error "No disks defined, aborting"
		return 1
	fi

	for disk in "${disks[@]}"; do
		# Each disk can define its type in a form of a disk_name,type. The remaining parts
		# of the string are dropped.
		IFS="," read -r disk disk_type _ <<< "$disk"
		[[ -z $disk_type ]] && disk_type=$disk_type_g

		case $disk_type in
			virtio)
				local raw_name="RAWSCSI"
				local raw_disk=$vm_dir/test.img

				if [[ -n $disk ]]; then
					[[ ! -b $disk ]] && touch $disk
					local raw_disk
					raw_disk=$(readlink -f $disk)
				fi

				# Create disk file if it not exist or it is smaller than 1G
				if { [[ -f $raw_disk ]] && [[ $(stat --printf="%s" $raw_disk) -lt $((1024 * 1024 * 1024)) ]]; } \
					|| [[ ! -e $raw_disk ]]; then
					if [[ $raw_disk =~ /dev/.* ]]; then
						error \
							"ERROR: Virtio disk point to missing device ($raw_disk) -\n" \
							"       this is probably not what you want."
						return 1
					fi

					notice "Creating Virtio disc $raw_disk"
					dd if=/dev/zero of=$raw_disk bs=1024k count=1024
				else
					notice "Using existing image $raw_disk"
				fi

				# NOTE(review): "hd$i" reuses $i left over from the free-VM
				# search loop above — confirm it is meant as a unique id.
				cmd+=(-device "virtio-scsi-pci,num_queues=$queue_number")
				cmd+=(-device "scsi-hd,drive=hd$i,vendor=$raw_name")
				cmd+=(-drive "if=none,id=hd$i,file=$raw_disk,format=raw$raw_cache")
				;;
			spdk_vhost_scsi)
				notice "using socket $vhost_dir/naa.$disk.$vm_num"
				cmd+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
				cmd+=(-device "vhost-user-scsi-pci,id=scsi_$disk,num_queues=$queue_number,chardev=char_$disk")
				if [[ "$disk" == "$boot_from" ]]; then
					cmd[-1]+=,bootindex=0
					boot_disk_present=true
				fi
				;;
			spdk_vhost_blk)
				notice "using socket $vhost_dir/naa.$disk.$vm_num"
				cmd+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
				cmd+=(-device "vhost-user-blk-pci,num-queues=$queue_number,chardev=char_$disk")
				if [[ "$disk" == "$boot_from" ]]; then
					cmd[-1]+=,bootindex=0
					boot_disk_present=true
				fi

				if $packed; then
					check_qemu_packedring_support
					notice "Enabling packed ring support for VM $vm_num, controller $vhost_dir/naa.$disk.$vm_num"
					cmd[-1]+=,packed=on
				fi
				;;
			kernel_vhost)
				if [[ -z $disk ]]; then
					error "need WWN for $disk_type"
					return 1
				elif [[ ! $disk =~ ^[[:alpha:]]{3}[.][[:xdigit:]]+$ ]]; then
					error "$disk_type - disk(wnn)=$disk does not look like WNN number"
					return 1
				fi
				notice "Using kernel vhost disk wwn=$disk"
				cmd+=(-device "vhost-scsi-pci,wwpn=$disk,num_queues=$queue_number")
				;;
			vfio_user)
				# NOTE(review): this notice omits the "muser/" path component
				# that the actual -device argument below uses — confirm.
				notice "using socket $VM_DIR/$vm_num/domain/muser$disk/$disk/cntrl"
				cmd+=(-device "vfio-user-pci,socket=$VM_DIR/$vm_num/muser/domain/muser$disk/$disk/cntrl")
				if [[ "$disk" == "$boot_from" ]]; then
					cmd[-1]+=",bootindex=0"
					boot_disk_present=true
				fi
				;;
			*)
				error "unknown mode '$disk_type', use: virtio, spdk_vhost_scsi, spdk_vhost_blk, kernel_vhost or vfio_user"
				return 1
				;;
		esac
	done

	if [[ -n $boot_from ]] && [[ $boot_disk_present == false ]]; then
		error "Boot from $boot_from is selected but device is not present"
		return 1
	fi

	((${#qemu_args[@]})) && cmd+=("${qemu_args[@]}")
	# Generate the standalone launcher script for this VM.
	notice "Saving to $vm_dir/run.sh"
	cat <<- RUN > "$vm_dir/run.sh"
		#!/bin/bash
		qemu_log () {
			echo "=== qemu.log ==="
			[[ -s $vm_dir/qemu.log ]] && cat $vm_dir/qemu.log
			echo "=== qemu.log ==="
		}

		if [[ \$EUID -ne 0 ]]; then
			echo "Go away user come back as root"
			exit 1
		fi

		trap "qemu_log" EXIT

		qemu_cmd=($(printf '%s\n' "${cmd[@]}"))
		chmod +r $vm_dir/*
		echo "Running VM in $vm_dir"
		rm -f $qemu_pid_file
		"\${qemu_cmd[@]}"

		echo "Waiting for QEMU pid file"
		sleep 1
		[[ ! -f $qemu_pid_file ]] && sleep 1
		[[ ! -f $qemu_pid_file ]] && echo "ERROR: no qemu pid file found" && exit 1
		exit 0
		# EOF
	RUN
	chmod +x $vm_dir/run.sh

	# Save generated sockets redirection
	echo $ssh_socket > $vm_dir/ssh_socket
	echo $fio_socket > $vm_dir/fio_socket
	echo $monitor_port > $vm_dir/monitor_port

	rm -f $vm_dir/migration_port
	[[ -z $vm_incoming ]] || echo $migration_port > $vm_dir/migration_port

	echo $gdbserver_socket > $vm_dir/gdbserver_socket
	# NOTE(review): ">>" appends while every other file above is overwritten
	# with ">" — confirm whether vnc_socket is meant to accumulate entries.
	echo $vnc_socket >> $vm_dir/vnc_socket

	[[ -z $vm_incoming ]] || ln -fs $VM_DIR/$vm_incoming $vm_dir/vm_incoming
	[[ -z $vm_migrate_to ]] || ln -fs $VM_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
}
828
function vm_run() {
	# Launch defined VMs via their generated run.sh scripts.
	#   -a  - run every VM found under $VM_DIR
	#   $@  - otherwise, the list of VM numbers to run
	# Returns non-zero on invalid/undefined VMs or a failed launch.
	local OPTIND optchar vm
	local run_all=false
	local vms_to_run=""

	while getopts 'a-:' optchar; do
		case "$optchar" in
			a) run_all=true ;;
			*)
				error "Unknown param $OPTARG"
				return 1
				;;
		esac
	done

	if $run_all; then
		vms_to_run="$(vm_list_all)"
	else
		shift $((OPTIND - 1))
		for vm in "$@"; do
			# Validate each requested VM number. (Bug fix: this previously
			# validated "$1" on every iteration, so invalid numbers after
			# the first argument slipped through.)
			vm_num_is_valid $vm || return 1
			if [[ ! -x $VM_DIR/$vm/run.sh ]]; then
				error "VM$vm not defined - setup it first"
				return 1
			fi
			vms_to_run+=" $vm"
		done
	fi

	for vm in $vms_to_run; do
		if vm_is_running $vm; then
			warning "VM$vm ($VM_DIR/$vm) already running"
			continue
		fi

		notice "running $VM_DIR/$vm/run.sh"
		if ! $VM_DIR/$vm/run.sh; then
			error "FAILED to run vm $vm"
			return 1
		fi
	done
}
871
function vm_print_logs() {
	# Dump the QEMU, guest-serial and SeaBIOS logs of VM $1. Headers go
	# through warning() (stderr); log contents go to stdout.
	vm_num=$1
	local spec title file
	warning "================"
	for spec in "QEMU LOG:=qemu.log" "VM LOG:=serial.log" "SEABIOS LOG:=seabios.log"; do
		title=${spec%%=*}
		file=${spec#*=}
		warning "$title"
		if [[ -r $VM_DIR/$vm_num/$file ]]; then
			cat $VM_DIR/$vm_num/$file
		else
			warning "LOG $file not found"
		fi
	done
	warning "================"
}
897
898# Wait for all created VMs to boot.
899# param $1 max wait time
# Wait until every listed VM (or all VMs when none are listed) answers over
# ssh. $1 - max wait in seconds (clamped to >= 10); remaining args - VM nums.
function vm_wait_for_boot() {
	assert_number $1

	xtrace_disable

	local all_booted=false
	local timeout_time=$1
	[[ $timeout_time -lt 10 ]] && timeout_time=10
	# NOTE(review): redundant redeclaration of timeout_time — harmless.
	local timeout_time
	# Convert the relative timeout into an absolute epoch deadline.
	timeout_time=$(date -d "+$timeout_time seconds" +%s)

	notice "Waiting for VMs to boot"
	shift
	if [[ "$*" == "" ]]; then
		local vms_to_check="$VM_DIR/[0-9]*"
	else
		local vms_to_check=""
		for vm in "$@"; do
			vms_to_check+=" $VM_DIR/$vm"
		done
	fi

	for vm in $vms_to_check; do
		local vm_num
		vm_num=$(basename $vm)
		local i=0
		notice "waiting for VM$vm_num ($vm)"
		while ! vm_os_booted $vm_num; do
			if ! vm_is_running $vm_num; then
				warning "VM $vm_num is not running"
				vm_print_logs $vm_num
				xtrace_restore
				return 1
			fi

			if [[ $(date +%s) -gt $timeout_time ]]; then
				warning "timeout waiting for machines to boot"
				vm_print_logs $vm_num
				xtrace_restore
				return 1
			fi
			# Intended to break the dot line every ~30 dots.
			# NOTE(review): i is never incremented, so this never fires.
			if ((i > 30)); then
				local i=0
				echo
			fi
			echo -n "."
			sleep 1
		done
		echo ""
		notice "VM$vm_num ready"
		#Change Timeout for stopping services to prevent lengthy powerdowns
		#Check that remote system is not Cygwin in case of Windows VMs
		local vm_os
		vm_os=$(vm_exec $vm_num "uname -o")
		if [[ "$vm_os" != "Cygwin" ]]; then
			vm_exec $vm_num "echo 'DefaultTimeoutStopSec=10' >> /etc/systemd/system.conf; systemctl daemon-reexec"
		fi
	done

	notice "all VMs ready"
	xtrace_restore
	return 0
}
963
# Start fio in --server mode inside each listed VM.
#   --fio-bin=path - upload this host-side fio binary to /root/fio first
#   --readonly     - pass --readonly to fio
# Remaining args - VM numbers.
function vm_start_fio_server() {
	local OPTIND optchar
	local readonly=''
	local fio_bin=''
	while getopts ':-:' optchar; do
		case "$optchar" in
			-)
				case "$OPTARG" in
					fio-bin=*) local fio_bin="${OPTARG#*=}" ;;
					readonly) local readonly="--readonly" ;;
					# NOTE(review): error() ends in `false`, so
					# "error ... && return 1" never actually returns 1 here
					# (unless an ERR trap aborts) — confirm intent.
					*) error "Invalid argument '$OPTARG'" && return 1 ;;
				esac
				;;
			*) error "Invalid argument '$OPTARG'" && return 1 ;;
		esac
	done

	shift $((OPTIND - 1))
	for vm_num in "$@"; do
		notice "Starting fio server on VM$vm_num"
		if [[ $fio_bin != "" ]]; then
			# Upload the host-side fio binary into the guest, then run it.
			vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
			vm_exec $vm_num /root/fio $readonly --eta=never --server --daemonize=/root/fio.pid
		else
			vm_exec $vm_num fio $readonly --eta=never --server --daemonize=/root/fio.pid
		fi
	done
}
992
# Discover test SCSI disks (vendor INTEL/RAWSCSI/LIO-ORG) inside VM $1 and
# store their space-separated names (e.g. " sdb sdc") in global SCSI_DISK.
# Fails when none are found.
function vm_check_scsi_location() {
	# Script to find wanted disc (runs inside the guest via `bash -s`)
	local script='shopt -s nullglob;
	for entry in /sys/block/sd*; do
		disk_type="$(cat $entry/device/vendor)";
		if [[ $disk_type == INTEL* ]] || [[ $disk_type == RAWSCSI* ]] || [[ $disk_type == LIO-ORG* ]]; then
			fname=$(basename $entry);
			echo -n " $fname";
		fi;
	done'

	SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no test disk found!"
		return 1
	fi
}
1011
1012# Script to perform scsi device reset on all disks in VM
1013# param $1 VM num
1014# param $2..$n Disks to perform reset on
function vm_reset_scsi_devices() {
	# Issue a SCSI device reset (sg_reset -vNd) on every disk name given
	# after the VM number.
	local vm_num=$1
	shift
	for disk in "$@"; do
		notice "VM$vm_num Performing device reset on disk $disk"
		vm_exec $vm_num sg_reset /dev/$disk -vNd
	done
}
1021
function vm_check_blk_location() {
	# Detect virtio-blk devices (vd*) inside VM $1; result lands in the
	# global SCSI_DISK. Fails when none are found.
	local script='shopt -s nullglob; cd /sys/block; echo vd*'
	SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"

	[[ -n "$SCSI_DISK" ]] && return 0

	error "no blk test disk found!"
	return 1
}
1031
function vm_check_nvme_location() {
	# Find the SPDK-backed NVMe namespace inside VM $1; sets the global
	# SCSI_DISK (e.g. "nvme0n1").
	# NOTE(review): the unquoted glob may expand on the host when
	# /sys/class/nvme entries exist locally — presumably intended to expand
	# inside the guest; confirm.
	SCSI_DISK="$(vm_exec $1 grep -l SPDK /sys/class/nvme/*/model | awk -F/ '{print $5"n1"}')"
	[[ -n "$SCSI_DISK" ]] && return 0

	error "no vfio-user nvme test disk found!"
	return 1
}
1039
# Run a fio job across one or more VMs.
#   --job-file=path            - local fio job file (required, readable)
#   --fio-bin=path             - local fio binary to use / upload
#   --vm="num:disk[,disk...]"  - target VM and its disks (repeatable)
#   --out=dir                  - output directory for result logs
#   --local                    - run fio inside each VM instead of
#                                client/server mode
#   --plugin                   - fio plugin mode (implies --local)
#   --json                     - JSON output format
#   --hide-results             - don't cat results at the end
#   --no-wait-for-fio          - with --local, don't wait for completion
#   --gtod-reduce              - append gtod_reduce=1 to the job file
function run_fio() {
	local arg
	local job_file=""
	local fio_bin=""
	local vms=()
	local out=""
	local vm
	local run_server_mode=true
	local run_plugin_mode=false
	local fio_start_cmd
	local fio_output_format="normal"
	local fio_gtod_reduce=false
	local wait_for_fio=true

	for arg in "$@"; do
		case "$arg" in
			--job-file=*) local job_file="${arg#*=}" ;;
			--fio-bin=*) local fio_bin="${arg#*=}" ;;
			--vm=*) vms+=("${arg#*=}") ;;
			--out=*)
				local out="${arg#*=}"
				mkdir -p $out
				;;
			--local) run_server_mode=false ;;
			--plugin)
				notice "Using plugin mode. Disabling server mode."
				run_plugin_mode=true
				run_server_mode=false
				;;
			--json) fio_output_format="json" ;;
			# NOTE(review): hide_results (and log_fname, vm_fio_bin,
			# vm_exec_pids below) are not declared local and leak globally.
			--hide-results) hide_results=true ;;
			--no-wait-for-fio) wait_for_fio=false ;;
			--gtod-reduce) fio_gtod_reduce=true ;;
			*)
				error "Invalid argument '$arg'"
				return 1
				;;
		esac
	done

	if [[ -n "$fio_bin" && ! -r "$fio_bin" ]]; then
		error "FIO binary '$fio_bin' does not exist"
		return 1
	fi

	if [[ -z "$fio_bin" ]]; then
		fio_bin="fio"
	fi

	if [[ ! -r "$job_file" ]]; then
		error "Fio job '$job_file' does not exist"
		return 1
	fi

	fio_start_cmd="$fio_bin --eta=never "

	local job_fname
	job_fname=$(basename "$job_file")
	log_fname="${job_fname%%.*}.log"
	fio_start_cmd+=" --output=$out/$log_fname --output-format=$fio_output_format "

	# prepare job file for each VM
	for vm in "${vms[@]}"; do
		# "$vm" is "num:disk[,disk...]" — split off number and disk list.
		local vm_num=${vm%%:*}
		local vmdisks=${vm#*:}

		# Fill in the per-VM filename= line and upload the job to the guest.
		sed "s@filename=@filename=$vmdisks@" $job_file | vm_exec $vm_num "cat > /root/$job_fname"

		if $fio_gtod_reduce; then
			vm_exec $vm_num "echo 'gtod_reduce=1' >> /root/$job_fname"
		fi

		vm_exec $vm_num cat /root/$job_fname

		if $run_server_mode; then
			fio_start_cmd+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$job_fname "
		fi

		if ! $run_server_mode; then
			if [[ -n "$fio_bin" ]]; then
				if ! $run_plugin_mode; then
					vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
					vm_fio_bin="/root/fio"
				else
					vm_fio_bin="/usr/src/fio/fio"
				fi
			fi

			notice "Running local fio on VM $vm_num"
			# Run fio inside the guest in the background; collect the ssh
			# pids so we can wait for all guests below.
			vm_exec $vm_num "$vm_fio_bin --output=/root/$log_fname --output-format=$fio_output_format /root/$job_fname & echo \$! > /root/fio.pid" &
			vm_exec_pids+=("$!")
		fi
	done

	if ! $run_server_mode; then
		if ! $wait_for_fio; then
			return 0
		fi
		echo "Waiting for guest fio instances to finish.."
		wait "${vm_exec_pids[@]}"

		for vm in "${vms[@]}"; do
			local vm_num=${vm%%:*}
			vm_exec $vm_num cat /root/$log_fname > "$out/vm${vm_num}_${log_fname}"
		done
		return 0
	fi

	# Client/server mode: run the accumulated command on the host.
	# $fio_start_cmd is intentionally unquoted so it word-splits.
	$fio_start_cmd
	sleep 1

	if [[ "$fio_output_format" == "json" ]]; then
		# Fio in client-server mode produces a lot of "trash" output
		# preceding JSON structure, making it not possible to parse.
		# Remove these lines from file.
		# shellcheck disable=SC2005
		echo "$(grep -vP '^[<\w]' "$out/$log_fname")" > "$out/$log_fname"
	fi

	if [[ ! $hide_results ]]; then
		cat $out/$log_fname
	fi
}
1163
1164# Parsing fio results for json output and client-server mode only!
# Parsing fio results for json output and client-server mode only!
# $1 - directory containing fio logs, $2 - base log filename. Aggregates all
# matching "<name>*" logs into "<name>.csv" with averaged iops, bw and
# latencies (normalized to microseconds).
function parse_fio_results() {
	local fio_log_dir=$1
	local fio_log_filename=$2
	local fio_csv_filename

	# Variables used in parsing loop
	local log_file log_files
	local rwmode mixread mixwrite
	local lat_key lat_divisor
	local client_stats iops bw
	local read_avg_lat read_min_lat read_max_lat
	# Bug fix: this line previously declared write_min_lat twice and left
	# write_max_lat un-declared (global).
	local write_avg_lat write_min_lat write_max_lat

	declare -A results
	results["iops"]=0
	results["bw"]=0
	results["avg_lat"]=0
	results["min_lat"]=0
	results["max_lat"]=0

	# Loop using the log filename to see if there are any other
	# matching files. This is in case we ran fio test multiple times.
	log_files=("$fio_log_dir/$fio_log_filename"*)
	for log_file in "${log_files[@]}"; do
		# For mixed workloads, weight read/write latencies by the rwmixread
		# ratio; otherwise weight both at 1 (the unused side is all zeros).
		rwmode=$(jq -r '.["client_stats"][0]["job options"]["rw"]' "$log_file")
		mixread=1
		mixwrite=1
		if [[ $rwmode = *"rw"* ]]; then
			mixread=$(jq -r '.["client_stats"][0]["job options"]["rwmixread"]' "$log_file")
			mixread=$(bc -l <<< "scale=3; $mixread/100")
			mixwrite=$(bc -l <<< "scale=3; 1-$mixread")
		fi

		client_stats=$(jq -r '.["client_stats"][] | select(.jobname == "All clients")' "$log_file")

		# Check latency unit and later normalize to microseconds
		lat_key="lat_us"
		lat_divisor=1
		if jq -er '.read["lat_ns"]' &> /dev/null <<< $client_stats; then
			lat_key="lat_ns"
			lat_divisor=1000
		fi

		# Horrific bash float point arithmetic oprations below.
		# Viewer discretion is advised.
		iops=$(jq -r '[.read["iops"],.write["iops"]] | add' <<< $client_stats)
		bw=$(jq -r '[.read["bw"],.write["bw"]] | add' <<< $client_stats)
		read_avg_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["mean"]' <<< $client_stats)
		read_min_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["min"]' <<< $client_stats)
		read_max_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["max"]' <<< $client_stats)
		write_avg_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["mean"]' <<< $client_stats)
		write_min_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["min"]' <<< $client_stats)
		write_max_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["max"]' <<< $client_stats)

		results["iops"]=$(bc -l <<< "${results[iops]} + $iops")
		results["bw"]=$(bc -l <<< "${results[bw]} + $bw")
		results["avg_lat"]=$(bc -l <<< "${results[avg_lat]} + ($mixread*$read_avg_lat + $mixwrite*$write_avg_lat)/$lat_divisor")
		results["min_lat"]=$(bc -l <<< "${results[min_lat]} + ($mixread*$read_min_lat + $mixwrite*$write_min_lat)/$lat_divisor")
		results["max_lat"]=$(bc -l <<< "${results[max_lat]} + ($mixread*$read_max_lat + $mixwrite*$write_max_lat)/$lat_divisor")
	done

	# Average the accumulated sums over the number of log files.
	results["iops"]=$(bc -l <<< "scale=3; ${results[iops]} / ${#log_files[@]}")
	results["bw"]=$(bc -l <<< "scale=3; ${results[bw]} / ${#log_files[@]}")
	results["avg_lat"]=$(bc -l <<< "scale=3; ${results[avg_lat]} / ${#log_files[@]}")
	results["min_lat"]=$(bc -l <<< "scale=3; ${results[min_lat]} / ${#log_files[@]}")
	results["max_lat"]=$(bc -l <<< "scale=3; ${results[max_lat]} / ${#log_files[@]}")

	fio_csv_filename="${fio_log_filename%%.*}.csv"
	cat <<- EOF > "$fio_log_dir/$fio_csv_filename"
		iops,bw,avg_lat,min_lat,max_lat
		${results["iops"]},${results["bw"]},${results["avg_lat"]},${results["min_lat"]},${results["max_lat"]}
	EOF
}
1238
1239# Shutdown or kill any running VM and SPDK APP.
1240#
function at_app_exit() {
	# Final cleanup: stop every VM, then every vhost target instance.
	local vhost_name

	notice "APP EXITING"
	notice "killing all VMs"
	vm_kill_all
	notice "killing vhost app"

	# Each subdirectory of $TARGET_DIR is one named vhost instance.
	for vhost_name in "$TARGET_DIR"/*; do
		vhost_kill "$(basename "$vhost_name")"
	done

	notice "EXIT DONE"
}
1256
# ERR-trap handler: report the failing location ($1 - source, $2 - line),
# tear everything down via at_app_exit() and terminate the script.
function error_exit() {
	trap - ERR
	print_backtrace
	set +e
	error "Error on $1 $2"

	at_app_exit
	exit 1
}
1266