xref: /spdk/test/vhost/common.sh (revision 407e88fd2ab020d753e33014cf759353a9901b51)
: ${SPDK_VHOST_VERBOSE=false}
: ${VHOST_DIR="$HOME/vhost_test"}

TEST_DIR=$(readlink -f $rootdir/..)
VM_DIR=$VHOST_DIR/vms
TARGET_DIR=$VHOST_DIR/vhost
VM_PASSWORD="root"

# TODO: Move vhost_vm_image.qcow2 into VHOST_DIR on test systems.
VM_IMAGE=$HOME/vhost_vm_image.qcow2

if ! hash qemu-img qemu-system-x86_64; then
	# 'error' is not defined yet at this point of sourcing, so report directly.
	echo 'QEMU is not installed on this system. Unable to run vhost tests.' >&2
	exit 1
fi

mkdir -p $VHOST_DIR
mkdir -p $VM_DIR
mkdir -p $TARGET_DIR

#
# Source config describing QEMU and VHOST cores and NUMA
#
source $rootdir/test/vhost/common/autotest.config

function vhosttestinit()
{
	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh

		# Look for the VM image
		if [[ ! -f $VM_IMAGE ]]; then
			echo "VM image not found at $VM_IMAGE"
			echo "Download to $HOME? [yn]"
			read download
			if [ "$download" = "y" ]; then
				curl https://dqtibwqq6s6ux.cloudfront.net/download/test_resources/vhost_vm_image.tar.gz | tar xz -C $HOME
			fi
		fi
	fi

	# Fail if the VM image is still missing
	if [[ ! -f $VM_IMAGE ]]; then
		error "VM image not found at $VM_IMAGE"
		exit 1
	fi
}

function vhosttestfini()
{
	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh reset
	fi
}
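
# Typical usage (illustrative): test scripts bracket their run with these:
#   vhosttestinit
#   ... test body ...
#   vhosttestfini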

function message()
{
	if ! $SPDK_VHOST_VERBOSE; then
		local verbose_out=""
	elif [[ ${FUNCNAME[2]} == "source" ]]; then
		local verbose_out=" (file $(basename ${BASH_SOURCE[1]}):${BASH_LINENO[1]})"
	else
		local verbose_out=" (function ${FUNCNAME[2]}:${BASH_LINENO[1]})"
	fi

	local msg_type="$1"
	shift
	echo -e "${msg_type}${verbose_out}: $@"
}

function fail()
{
	echo "===========" >&2
	message "FAIL" "$@" >&2
	echo "===========" >&2
	exit 1
}

function error()
{
	echo "===========" >&2
	message "ERROR" "$@" >&2
	echo "===========" >&2
	# Don't 'return 1' here - the stack trace would be incomplete (reason
	# unknown), missing the upper command.
	false
}

function warning()
{
	message "WARN" "$@" >&2
}

function notice()
{
	message "INFO" "$@"
}
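
# Example (illustrative): the helpers above tag and route output, e.g.:
#   notice "starting test"     # "INFO: starting test" on stdout
#   warning "disk is small"    # "WARN: disk is small" on stderr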

function get_vhost_dir()
{
	local vhost_name="$1"

	if [[ -z "$vhost_name" ]]; then
		error "vhost name must be provided to get_vhost_dir"
		return 1
	fi

	echo "$TARGET_DIR/${vhost_name}"
}
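
# Example (illustrative): with the defaults above, a target named "vhost0"
# keeps its runtime files under $VHOST_DIR/vhost/vhost0:
#   vhost0_dir=$(get_vhost_dir vhost0)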

function vhost_run()
{
	local vhost_name="$1"

	shift

	if [[ -z "$vhost_name" ]]; then
		error "vhost name must be provided to vhost_run"
		return 1
	fi

	local vhost_dir="$(get_vhost_dir $vhost_name)"
	local vhost_app="$rootdir/app/vhost/vhost"
	local vhost_log_file="$vhost_dir/vhost.log"
	local vhost_pid_file="$vhost_dir/vhost.pid"
	local vhost_socket="$vhost_dir/usvhost"
	notice "starting vhost app in background"
	[[ -r "$vhost_pid_file" ]] && vhost_kill $vhost_name
	[[ -d $vhost_dir ]] && rm -f $vhost_dir/*
	mkdir -p $vhost_dir

	if [[ ! -x $vhost_app ]]; then
		error "application not found: $vhost_app"
		return 1
	fi

	# Pass all remaining arguments straight to the vhost app.
	local cmd="$vhost_app -r $vhost_dir/rpc.sock $*"

	notice "Logging to:  $vhost_log_file"
	notice "Socket:      $vhost_socket"
	notice "Command:     $cmd"

	timing_enter vhost_start
	cd $vhost_dir; $cmd &
	vhost_pid=$!
	echo $vhost_pid > $vhost_pid_file

	notice "waiting for app to run..."
	waitforlisten "$vhost_pid" "$vhost_dir/rpc.sock"
	# do not generate nvmes if pci access is disabled
	if [[ -z "$no_pci" ]]; then
		$rootdir/scripts/gen_nvme.sh "--json" | $rootdir/scripts/rpc.py \
			-s $vhost_dir/rpc.sock load_subsystem_config
	fi

	notice "vhost started - pid=$vhost_pid"
	timing_exit vhost_start
}
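
# Example (illustrative; the core mask is an assumption): start a vhost target
# named "vhost0", passing any extra arguments through to the vhost binary:
#   vhost_run vhost0 -m 0x3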

function vhost_load_config()
{
	local vhost_name="$1"
	local vhost_json_conf="$2"
	local vhost_dir="$(get_vhost_dir $vhost_name)"

	$rootdir/scripts/rpc.py -s $vhost_dir/rpc.sock load_config < "$vhost_json_conf"
}

function vhost_kill()
{
	local rc=0
	local vhost_name="$1"

	if [[ -z "$vhost_name" ]]; then
		error "Must provide vhost name to vhost_kill"
		return 0
	fi

	local vhost_dir="$(get_vhost_dir $vhost_name)"
	local vhost_pid_file="$vhost_dir/vhost.pid"

	if [[ ! -r $vhost_pid_file ]]; then
		warning "no vhost pid file found"
		return 0
	fi

	timing_enter vhost_kill
	local vhost_pid="$(cat $vhost_pid_file)"
	notice "killing vhost (PID $vhost_pid) app"

	if kill -INT $vhost_pid > /dev/null; then
		notice "sent SIGINT to vhost app - waiting 60 seconds to exit"
		for ((i=0; i<60; i++)); do
			if kill -0 $vhost_pid; then
				echo "."
				sleep 1
			else
				break
			fi
		done
		if kill -0 $vhost_pid; then
			error "vhost was NOT killed - sending SIGABRT"
			kill -ABRT $vhost_pid
			rm $vhost_pid_file
			rc=1
		else
			while kill -0 $vhost_pid; do
				echo "."
			done
		fi
	elif kill -0 $vhost_pid; then
		error "vhost NOT killed - you need to kill it manually"
		rc=1
	else
		notice "vhost was not running"
	fi

	timing_exit vhost_kill
	if [[ $rc == 0 ]]; then
		rm $vhost_pid_file
	fi

	rm -rf "$vhost_dir"

	return $rc
}
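
# Example (illustrative): stop the "vhost0" target started above and remove
# its runtime directory:
#   vhost_kill vhost0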

function vhost_rpc()
{
	local vhost_name="$1"

	if [[ -z "$vhost_name" ]]; then
		error "vhost name must be provided to vhost_rpc"
		return 1
	fi
	shift

	$rootdir/scripts/rpc.py -s $(get_vhost_dir $vhost_name)/rpc.sock "$@"
}
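
# Example (illustrative; the RPC method name is an assumption about the
# rpc.py in use): list bdevs on the "vhost0" target:
#   vhost_rpc vhost0 get_bdevs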

###
# Mgmt functions
###

function assert_number()
{
	[[ "$1" =~ [0-9]+ ]] && return 0

	error "Invalid or missing parameter: need number but got '$1'"
	return 1
}

# Run command on vm with given password
# First argument - vm number
# Second argument - ssh password for vm
#
function vm_sshpass()
{
	vm_num_is_valid $1 || return 1

	local ssh_cmd="sshpass -p $2 ssh \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-p $(vm_ssh_socket $1) $VM_SSH_OPTIONS 127.0.0.1"

	shift 2
	$ssh_cmd "$@"
}
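
# Example (illustrative): run 'uname -r' on VM 0 with a non-default password:
#   vm_sshpass 0 secret uname -r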


# Helper to validate VM number
# param $1 VM number
#
function vm_num_is_valid()
{
	[[ "$1" =~ ^[0-9]+$ ]] && return 0

	error "Invalid or missing parameter: vm number '$1'"
	return 1
}


# Print network socket for given VM number
# param $1 virtual machine number
#
function vm_ssh_socket()
{
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	cat $vm_dir/ssh_socket
}

function vm_fio_socket()
{
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	cat $vm_dir/fio_socket
}

# Execute command on given VM
# param $1 virtual machine number
#
function vm_exec()
{
	vm_num_is_valid $1 || return 1

	local vm_num="$1"
	shift

	sshpass -p "$VM_PASSWORD" ssh \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-p $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS 127.0.0.1 \
		"$@"
}
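
# Example (illustrative): list block devices visible inside VM 0:
#   vm_exec 0 'lsblk --output NAME,SIZE'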

# Execute scp command on given VM
# param $1 virtual machine number
#
function vm_scp()
{
	vm_num_is_valid $1 || return 1

	local vm_num="$1"
	shift

	sshpass -p "$VM_PASSWORD" scp \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-P $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS \
		"$@"
}
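
# Example (illustrative paths): copy a fio job file into VM 0; the remote side
# is always addressed as 127.0.0.1 via the forwarded ssh port:
#   vm_scp 0 /tmp/job.fio 127.0.0.1:/root/job.fio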


# check if specified VM is running
# param $1 VM num
function vm_is_running()
{
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 1
	fi

	local vm_pid="$(cat $vm_dir/qemu.pid)"

	if /bin/kill -0 $vm_pid; then
		return 0
	else
		if [[ $EUID -ne 0 ]]; then
			warning "not root - assuming the VM is running since it can't be checked"
			return 0
		fi

		# not running - remove pid file
		rm $vm_dir/qemu.pid
		return 1
	fi
}

# check if OS in specified VM is booted (reachable over ssh)
# param $1 VM num
function vm_os_booted()
{
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		error "VM $1 is not running"
		return 1
	fi

	if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_exec $1 "true" 2>/dev/null; then
		# Shutdown existing master. Ignore errors as it might not exist.
		VM_SSH_OPTIONS="-O exit" vm_exec $1 "true" 2>/dev/null
		return 1
	fi

	return 0
}

# Shutdown given VM
# param $1 virtual machine number
# return non-zero in case of error.
function vm_shutdown()
{
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"
	if [[ ! -d "$vm_dir" ]]; then
		error "VM$1 ($vm_dir) does not exist - set it up first"
		return 1
	fi

	if ! vm_is_running $1; then
		notice "VM$1 ($vm_dir) is not running"
		return 0
	fi

	# Temporarily disabling exit flag for next ssh command, since it will
	# "fail" due to shutdown
	notice "Shutting down virtual machine $vm_dir"
	set +e
	vm_exec $1 "nohup sh -c 'shutdown -h -P now'" || true
	notice "VM$1 is shutting down - waiting for it to complete"
	set -e
}

# Kill given VM
# param $1 virtual machine number
#
function vm_kill()
{
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 0
	fi

	local vm_pid="$(cat $vm_dir/qemu.pid)"

	notice "Killing virtual machine $vm_dir (pid=$vm_pid)"
	# The kill is expected to succeed; if the VM is somehow still running
	# afterwards, report an error.
	if /bin/kill $vm_pid; then
		notice "process $vm_pid killed"
		rm $vm_dir/qemu.pid
		rm -rf $vm_dir
	elif vm_is_running $1; then
		error "Process $vm_pid NOT killed"
		return 1
	fi
}

# List all VM numbers in VM_DIR
#
function vm_list_all()
{
	local vms="$(shopt -s nullglob; echo $VM_DIR/[0-9]*)"
	if [[ -n "$vms" ]]; then
		basename --multiple $vms
	fi
}

# Kill all VMs in $VM_DIR
#
function vm_kill_all()
{
	local vm
	for vm in $(vm_list_all); do
		vm_kill $vm
	done

	rm -rf $VM_DIR
}

# Shut down all VMs in $VM_DIR
#
function vm_shutdown_all()
{
	local shell_restore_x="$( [[ "$-" =~ x ]] && echo 'set -x' )"
	# XXX: temporarily disabled to debug a shutdown issue
	# set +x

	local vms=$(vm_list_all)
	local vm

	for vm in $vms; do
		vm_shutdown $vm
	done

	notice "Waiting for VMs to shutdown..."
	local timeo=30
	while [[ $timeo -gt 0 ]]; do
		local all_vms_down=1
		for vm in $vms; do
			if vm_is_running $vm; then
				all_vms_down=0
				break
			fi
		done

		if [[ $all_vms_down == 1 ]]; then
			notice "All VMs successfully shut down"
			$shell_restore_x
			return 0
		fi

		((timeo-=1))
		sleep 1
	done

	rm -rf $VM_DIR

	$shell_restore_x
	error "Timeout waiting for some VMs to shutdown"
	return 1
}

function vm_setup()
{
	local shell_restore_x="$( [[ "$-" =~ x ]] && echo 'set -x' )"
	local OPTIND optchar vm_num

	local os=""
	local os_mode=""
	local qemu_args=""
	local disk_type_g=NOT_DEFINED
	local read_only="false"
	local disks=""
	local raw_cache=""
	local vm_incoming=""
	local vm_migrate_to=""
	local force_vm=""
	local guest_memory=1024
	local queue_number=""
	local boot_from=""
	local vhost_dir="$(get_vhost_dir 0)"
	while getopts ':-:' optchar; do
		case "$optchar" in
			-)
			case "$OPTARG" in
				os=*) local os="${OPTARG#*=}" ;;
				os-mode=*) local os_mode="${OPTARG#*=}" ;;
				qemu-args=*) local qemu_args="${qemu_args} ${OPTARG#*=}" ;;
				disk-type=*) local disk_type_g="${OPTARG#*=}" ;;
				read-only=*) local read_only="${OPTARG#*=}" ;;
				disks=*) local disks="${OPTARG#*=}" ;;
				raw-cache=*) local raw_cache=",cache${OPTARG#*=}" ;;
				force=*) local force_vm=${OPTARG#*=} ;;
				memory=*) local guest_memory=${OPTARG#*=} ;;
				queue_num=*) local queue_number=${OPTARG#*=} ;;
				incoming=*) local vm_incoming="${OPTARG#*=}" ;;
				migrate-to=*) local vm_migrate_to="${OPTARG#*=}" ;;
				vhost-name=*) local vhost_dir="$(get_vhost_dir ${OPTARG#*=})" ;;
				spdk-boot=*) local boot_from="${OPTARG#*=}" ;;
				*)
					error "unknown argument $OPTARG"
					return 1
			esac
			;;
			*)
				error "vm_setup: Unknown param $OPTARG"
				return 1
			;;
		esac
	done

	# Find next directory we can use
	if [[ -n $force_vm ]]; then
		vm_num=$force_vm

		vm_num_is_valid $vm_num || return 1
		local vm_dir="$VM_DIR/$vm_num"
		[[ -d $vm_dir ]] && warning "removing existing VM in '$vm_dir'"
	else
		local vm_dir=""

		set +x
		for (( i=0; i<=256; i++)); do
			local vm_dir="$VM_DIR/$i"
			[[ ! -d $vm_dir ]] && break
		done
		$shell_restore_x

		vm_num=$i
	fi

	if [[ $i -eq 256 ]]; then
		error "no free VM found. do some cleanup (256 VMs created, are you insane?)"
		return 1
	fi

	if [[ -n "$vm_migrate_to" && -n "$vm_incoming" ]]; then
		error "'--incoming' and '--migrate-to' cannot be used together"
		return 1
	elif [[ -n "$vm_incoming" ]]; then
		if [[ -n "$os_mode" || -n "$os" ]]; then
			error "'--incoming' can't be used together with '--os' or '--os-mode'"
			return 1
		fi

		os_mode="original"
		os="$VM_DIR/$vm_incoming/os.qcow2"
	elif [[ -n "$vm_migrate_to" ]]; then
		[[ "$os_mode" != "backing" ]] && warning "Using 'backing' mode for OS since '--migrate-to' is used"
		os_mode=backing
	fi

	notice "Creating new VM in $vm_dir"
	mkdir -p $vm_dir

	if [[ "$os_mode" == "backing" ]]; then
		notice "Creating backing file for OS image file: $os"
		if ! qemu-img create -f qcow2 -b $os $vm_dir/os.qcow2; then
			error "Failed to create OS backing file in '$vm_dir/os.qcow2' using '$os'"
			return 1
		fi

		local os=$vm_dir/os.qcow2
	elif [[ "$os_mode" == "original" ]]; then
		warning "Using original OS image file: $os"
	elif [[ "$os_mode" != "snapshot" ]]; then
		if [[ -z "$os_mode" ]]; then
			notice "No '--os-mode' parameter provided - using 'snapshot'"
			os_mode="snapshot"
		else
			error "Invalid '--os-mode=$os_mode'"
			return 1
		fi
	fi

	# WARNING:
	# each cmd+= must contain ' ${eol}' at the end
	#
	local eol="\\\\\n  "
	local qemu_mask_param="VM_${vm_num}_qemu_mask"
	local qemu_numa_node_param="VM_${vm_num}_qemu_numa_node"

	if [[ -z "${!qemu_mask_param}" ]] || [[ -z "${!qemu_numa_node_param}" ]]; then
		error "Parameters ${qemu_mask_param} or ${qemu_numa_node_param} not found in autotest.config file"
		return 1
	fi

	local task_mask=${!qemu_mask_param}

	notice "TASK MASK: $task_mask"
	local cmd="taskset -a -c $task_mask qemu-system-x86_64 ${eol}"
	local vm_socket_offset=$(( 10000 + 100 * vm_num ))

	local ssh_socket=$(( vm_socket_offset + 0 ))
	local fio_socket=$(( vm_socket_offset + 1 ))
	local monitor_port=$(( vm_socket_offset + 2 ))
	local migration_port=$(( vm_socket_offset + 3 ))
	local gdbserver_socket=$(( vm_socket_offset + 4 ))
	local vnc_socket=$(( 100 + vm_num ))
	local qemu_pid_file="$vm_dir/qemu.pid"
	local cpu_num=0

	set +x
	# cpu list for taskset can be comma separated or a range,
	# or both at the same time, so first split on commas
	cpu_list=$(echo $task_mask | tr "," "\n")
	for c in $cpu_list; do
		# if a range is detected - count how many cpus it covers
		if [[ $c =~ [0-9]+-[0-9]+ ]]; then
			# for "a-b" this evaluates $((a-b-1)); stripping the sign
			# yields the cpu count of the range
			val=$(($c-1))
			val=${val#-}
		else
			val=1
		fi
		cpu_num=$((cpu_num+val))
	done

	# default the queue count to the cpu count unless --queue_num was given
	if [[ -z $queue_number ]]; then
		queue_number=$cpu_num
	fi

	$shell_restore_x

	local node_num=${!qemu_numa_node_param}
	local boot_disk_present=false
	notice "NUMA NODE: $node_num"
	cmd+="-m $guest_memory --enable-kvm -cpu host -smp $cpu_num -vga std -vnc :$vnc_socket -daemonize ${eol}"
	cmd+="-object memory-backend-file,id=mem,size=${guest_memory}M,mem-path=/dev/hugepages,share=on,prealloc=yes,host-nodes=$node_num,policy=bind ${eol}"
	[[ $os_mode == snapshot ]] && cmd+="-snapshot ${eol}"
	[[ -n "$vm_incoming" ]] && cmd+=" -incoming tcp:0:$migration_port ${eol}"
	cmd+="-monitor telnet:127.0.0.1:$monitor_port,server,nowait ${eol}"
	cmd+="-numa node,memdev=mem ${eol}"
	cmd+="-pidfile $qemu_pid_file ${eol}"
	cmd+="-serial file:$vm_dir/serial.log ${eol}"
	cmd+="-D $vm_dir/qemu.log ${eol}"
	cmd+="-chardev file,path=$vm_dir/seabios.log,id=seabios -device isa-debugcon,iobase=0x402,chardev=seabios ${eol}"
	cmd+="-net user,hostfwd=tcp::$ssh_socket-:22,hostfwd=tcp::$fio_socket-:8765 ${eol}"
	cmd+="-net nic ${eol}"
	if [[ -z "$boot_from" ]]; then
		cmd+="-drive file=$os,if=none,id=os_disk ${eol}"
		cmd+="-device ide-hd,drive=os_disk,bootindex=0 ${eol}"
	fi

	if [[ $disks == '' ]] && [[ $disk_type_g == virtio* ]]; then
		disks=1
	fi

	for disk in ${disks//:/ }; do
		if [[ $disk = *","* ]]; then
			disk_type=${disk#*,}
			disk=${disk%,*}
		else
			disk_type=$disk_type_g
		fi

		case $disk_type in
			virtio)
				local raw_name="RAWSCSI"
				local raw_disk=$vm_dir/test.img

				if [[ -n $disk ]]; then
					[[ ! -b $disk ]] && touch $disk
					local raw_disk=$(readlink -f $disk)
				fi

				# Create disk file if it does not exist or is smaller than 1G
				if ( [[ -f $raw_disk ]] && [[ $(stat --printf="%s" $raw_disk) -lt $((1024 * 1024 * 1024)) ]] ) || \
					[[ ! -e $raw_disk ]]; then
					if [[ $raw_disk =~ /dev/.* ]]; then
						error \
							"Virtio disk points to missing device ($raw_disk) -\n" \
							"       this is probably not what you want."
						return 1
					fi

					notice "Creating Virtio disk $raw_disk"
					dd if=/dev/zero of=$raw_disk bs=1024k count=1024
				else
					notice "Using existing image $raw_disk"
				fi

				cmd+="-device virtio-scsi-pci,num_queues=$queue_number ${eol}"
				cmd+="-device scsi-hd,drive=hd$i,vendor=$raw_name ${eol}"
				cmd+="-drive if=none,id=hd$i,file=$raw_disk,format=raw$raw_cache ${eol}"
				;;
			spdk_vhost_scsi)
				notice "using socket $vhost_dir/naa.$disk.$vm_num"
				cmd+="-chardev socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num ${eol}"
				cmd+="-device vhost-user-scsi-pci,id=scsi_$disk,num_queues=$queue_number,chardev=char_$disk"
				if [[ "$disk" == "$boot_from" ]]; then
					cmd+=",bootindex=0"
					boot_disk_present=true
				fi
				cmd+=" ${eol}"
				;;
			spdk_vhost_blk)
				notice "using socket $vhost_dir/naa.$disk.$vm_num"
				cmd+="-chardev socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num ${eol}"
				cmd+="-device vhost-user-blk-pci,num-queues=$queue_number,chardev=char_$disk"
				if [[ "$disk" == "$boot_from" ]]; then
					cmd+=",bootindex=0"
					boot_disk_present=true
				fi
				cmd+=" ${eol}"
				;;
			kernel_vhost)
				if [[ -z $disk ]]; then
					error "need WWN for $disk_type"
					return 1
				elif [[ ! $disk =~ ^[[:alpha:]]{3}[.][[:xdigit:]]+$ ]]; then
					error "$disk_type - disk(wwn)=$disk does not look like a WWN number"
					return 1
				fi
				notice "Using kernel vhost disk wwn=$disk"
				cmd+=" -device vhost-scsi-pci,wwpn=$disk,num_queues=$queue_number ${eol}"
				;;
			*)
				error "unknown mode '$disk_type', use: virtio, spdk_vhost_scsi, spdk_vhost_blk or kernel_vhost"
				return 1
		esac
	done

	if [[ -n $boot_from ]] && [[ $boot_disk_present == false ]]; then
		error "Boot from $boot_from is selected but device is not present"
		return 1
	fi

	[[ -n $qemu_args ]] && cmd+=" $qemu_args ${eol}"
	# remove last $eol
	cmd="${cmd%\\\\\\n  }"

	notice "Saving to $vm_dir/run.sh"
	(
	echo '#!/bin/bash'
	echo 'if [[ $EUID -ne 0 ]]; then '
	echo '	echo "Go away user come back as root"'
	echo '	exit 1'
	echo 'fi';
	echo
	echo -e "qemu_cmd=\"$cmd\"";
	echo
	echo "echo 'Running VM in $vm_dir'"
	echo "rm -f $qemu_pid_file"
	echo '$qemu_cmd'
	echo "echo 'Waiting for QEMU pid file'"
	echo "sleep 1"
	echo "[[ ! -f $qemu_pid_file ]] && sleep 1"
	echo "[[ ! -f $qemu_pid_file ]] && echo 'ERROR: no qemu pid file found' && exit 1"
	echo
	echo "chmod +r $vm_dir/*"
	echo
	echo "echo '=== qemu.log ==='"
	echo "cat $vm_dir/qemu.log"
	echo "echo '=== qemu.log ==='"
	echo '# EOF'
	) > $vm_dir/run.sh
	chmod +x $vm_dir/run.sh

	# Save generated sockets redirection
	echo $ssh_socket > $vm_dir/ssh_socket
	echo $fio_socket > $vm_dir/fio_socket
	echo $monitor_port > $vm_dir/monitor_port

	rm -f $vm_dir/migration_port
	[[ -z $vm_incoming ]] || echo $migration_port > $vm_dir/migration_port

	echo $gdbserver_socket > $vm_dir/gdbserver_socket
	echo $vnc_socket > $vm_dir/vnc_socket

	[[ -z $vm_incoming ]] || ln -fs $VM_DIR/$vm_incoming $vm_dir/vm_incoming
	[[ -z $vm_migrate_to ]] || ln -fs $VM_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
}
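
# Example (illustrative; the disk name Nvme0n1p0 is an assumption about the
# bdev/controller naming used by the test): define VM 0 from the shared image
# with one vhost-scsi disk:
#   vm_setup --force=0 --os=$VM_IMAGE --disk-type=spdk_vhost_scsi --disks=Nvme0n1p0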

function vm_run()
{
	local OPTIND optchar vm
	local run_all=false
	local vms_to_run=""

	while getopts 'a-:' optchar; do
		case "$optchar" in
			a) run_all=true ;;
			*)
				error "Unknown param $OPTARG"
				return 1
			;;
		esac
	done

	if $run_all; then
		vms_to_run="$(vm_list_all)"
	else
		shift $((OPTIND-1))
		for vm in $@; do
			vm_num_is_valid $vm || return 1
			if [[ ! -x $VM_DIR/$vm/run.sh ]]; then
				error "VM$vm not defined - set it up first"
				return 1
			fi
			vms_to_run+=" $vm"
		done
	fi

	for vm in $vms_to_run; do
		if vm_is_running $vm; then
			warning "VM$vm ($VM_DIR/$vm) already running"
			continue
		fi

		notice "running $VM_DIR/$vm/run.sh"
		if ! $VM_DIR/$vm/run.sh; then
			error "FAILED to run vm $vm"
			return 1
		fi
	done
}
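
# Example (illustrative): boot just VM 0, or every VM defined under $VM_DIR:
#   vm_run 0
#   vm_run -a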

function vm_print_logs()
{
	vm_num=$1
	warning "================"
	warning "QEMU LOG:"
	if [[ -r $VM_DIR/$vm_num/qemu.log ]]; then
		cat $VM_DIR/$vm_num/qemu.log
	else
		warning "LOG qemu.log not found"
	fi

	warning "VM LOG:"
	if [[ -r $VM_DIR/$vm_num/serial.log ]]; then
		cat $VM_DIR/$vm_num/serial.log
	else
		warning "LOG serial.log not found"
	fi

	warning "SEABIOS LOG:"
	if [[ -r $VM_DIR/$vm_num/seabios.log ]]; then
		cat $VM_DIR/$vm_num/seabios.log
	else
		warning "LOG seabios.log not found"
	fi
	warning "================"
}

# Wait for all created VMs to boot.
# param $1 max wait time
function vm_wait_for_boot()
{
	assert_number $1

	local shell_restore_x="$( [[ "$-" =~ x ]] && echo 'set -x' )"
	set +x

	local all_booted=false
	local timeout_time=$1
	[[ $timeout_time -lt 10 ]] && timeout_time=10
	timeout_time=$(date -d "+$timeout_time seconds" +%s)

	notice "Waiting for VMs to boot"
	shift
	if [[ "$@" == "" ]]; then
		local vms_to_check="$VM_DIR/[0-9]*"
	else
		local vms_to_check=""
		for vm in $@; do
			vms_to_check+=" $VM_DIR/$vm"
		done
	fi

	for vm in $vms_to_check; do
		local vm_num=$(basename $vm)
		local i=0
		notice "waiting for VM$vm_num ($vm)"
		while ! vm_os_booted $vm_num; do
			if ! vm_is_running $vm_num; then
				warning "VM $vm_num is not running"
				vm_print_logs $vm_num
				$shell_restore_x
				return 1
			fi

			if [[ $(date +%s) -gt $timeout_time ]]; then
				warning "timeout waiting for machines to boot"
				vm_print_logs $vm_num
				$shell_restore_x
				return 1
			fi
			# print a newline after every 30 dots
			if (( i > 30 )); then
				i=0
				echo
			fi
			echo -n "."
			i=$((i+1))
			sleep 1
		done
		echo ""
		notice "VM$vm_num ready"
		# Change Timeout for stopping services to prevent lengthy powerdowns
		# Check that remote system is not Cygwin in case of Windows VMs
		local vm_os=$(vm_exec $vm_num "uname -o")
		if [[ "$vm_os" != "Cygwin" ]]; then
			vm_exec $vm_num "echo 'DefaultTimeoutStopSec=10' >> /etc/systemd/system.conf; systemctl daemon-reexec"
		fi
	done

	notice "all VMs ready"
	$shell_restore_x
	return 0
}
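
# Example (illustrative): give VMs 0 and 1 up to 300 seconds to become
# reachable over ssh:
#   vm_wait_for_boot 300 0 1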

function vm_start_fio_server()
{
	local OPTIND optchar
	local fio_bin=""
	local readonly=''
	while getopts ':-:' optchar; do
		case "$optchar" in
			-)
			case "$OPTARG" in
				fio-bin=*) local fio_bin="${OPTARG#*=}" ;;
				readonly) local readonly="--readonly" ;;
				*) error "Invalid argument '$OPTARG'" && return 1 ;;
			esac
			;;
			*) error "Invalid argument '$OPTARG'" && return 1 ;;
		esac
	done

	shift $(( OPTIND - 1 ))
	for vm_num in $@; do
		notice "Starting fio server on VM$vm_num"
		if [[ $fio_bin != "" ]]; then
			cat $fio_bin | vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio'
			vm_exec $vm_num /root/fio $readonly --eta=never --server --daemonize=/root/fio.pid
		else
			vm_exec $vm_num fio $readonly --eta=never --server --daemonize=/root/fio.pid
		fi
	done
}

function vm_check_scsi_location()
{
	# Script to find the wanted disk
	local script='shopt -s nullglob; \
	for entry in /sys/block/sd*; do \
		disk_type="$(cat $entry/device/vendor)"; \
		if [[ $disk_type == INTEL* ]] || [[ $disk_type == RAWSCSI* ]] || [[ $disk_type == LIO-ORG* ]]; then \
			fname=$(basename $entry); \
			echo -n " $fname"; \
		fi; \
	done'

	SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no test disk found!"
		return 1
	fi
}
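
# Example (illustrative): detect the test disks on VM 0 and report them:
#   vm_check_scsi_location 0
#   notice "found disks: $SCSI_DISK"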

# Script to perform scsi device reset on all disks in VM
# param $1 VM num
# param $2..$n Disks to perform reset on
function vm_reset_scsi_devices()
{
	for disk in "${@:2}"; do
		notice "VM$1 Performing device reset on disk $disk"
		vm_exec $1 sg_reset /dev/$disk -vNd
	done
}
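
# Example (illustrative disk names): reset sdb and sdc inside VM 0:
#   vm_reset_scsi_devices 0 sdb sdc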

function vm_check_blk_location()
{
	local script='shopt -s nullglob; cd /sys/block; echo vd*'
	SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no blk test disk found!"
		return 1
	fi
}

function run_fio()
{
	local arg
	local job_file=""
	local fio_bin=""
	local vms=()
	local out=""
	local fio_disks=""
	local vm
	local run_server_mode=true
	local json=""

	for arg in $@; do
		case "$arg" in
			--job-file=*) local job_file="${arg#*=}" ;;
			--fio-bin=*) local fio_bin="${arg#*=}" ;;
			--vm=*) vms+=( "${arg#*=}" ) ;;
			--out=*)
				local out="${arg#*=}"
				mkdir -p $out
				;;
			--local) run_server_mode=false ;;
			--json) json="--json" ;;
			*)
				error "Invalid argument '$arg'"
				return 1
				;;
		esac
	done

	if [[ -n "$fio_bin" && ! -r "$fio_bin" ]]; then
		error "FIO binary '$fio_bin' does not exist"
		return 1
	fi

	if [[ ! -r "$job_file" ]]; then
		error "Fio job '$job_file' does not exist"
		return 1
	fi

	local job_fname=$(basename "$job_file")
	# prepare job file for each VM
	for vm in ${vms[@]}; do
		local vm_num=${vm%%:*}
		local vmdisks=${vm#*:}

		sed "s@filename=@filename=$vmdisks@" $job_file | vm_exec $vm_num "cat > /root/$job_fname"
		fio_disks+="127.0.0.1:$(vm_fio_socket $vm_num):$vmdisks,"

		vm_exec $vm_num cat /root/$job_fname
		if ! $run_server_mode; then
			if [[ -n "$fio_bin" ]]; then
				cat $fio_bin | vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio'
			fi

			notice "Running local fio on VM $vm_num"
			vm_exec $vm_num "nohup /root/fio /root/$job_fname 1>/root/$job_fname.out 2>/root/$job_fname.out </dev/null & echo \$! > /root/fio.pid"
		fi
	done

	if ! $run_server_mode; then
		# Give FIO time to run
		sleep 0.5
		return 0
	fi

	$rootdir/test/vhost/common/run_fio.py --job-file=/root/$job_fname \
		$([[ -n "$fio_bin" ]] && echo "--fio-bin=$fio_bin") \
		--out=$out $json ${fio_disks%,}
}
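
# Example (illustrative; $testdir, paths and disk names are assumptions): run
# one job file against /dev/sdb on VM 0 and /dev/vda on VM 1, collecting results:
#   run_fio --job-file=$testdir/job.fio --vm=0:/dev/sdb --vm=1:/dev/vda \
#     --out=$VHOST_DIR/fio_results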

# Shutdown or kill any running VM and SPDK APP.
#
function at_app_exit()
{
	local vhost_name

	notice "APP EXITING"
	notice "killing all VMs"
	vm_kill_all
	# Kill vhost application
	notice "killing vhost app"

	for vhost_name in $(ls $TARGET_DIR); do
		vhost_kill $vhost_name
	done

	notice "EXIT DONE"
}

function error_exit()
{
	trap - ERR
	print_backtrace
	set +e
	error "Error on $1 $2"

	at_app_exit
	exit 1
}

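# Typical usage (illustrative): tests install the handler above early on so
# any failure tears everything down:
#   trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR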