#!/usr/bin/env bash
#  SPDX-License-Identifier: BSD-3-Clause
#  Copyright (C) 2017 Intel Corporation
#  All rights reserved.
#
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/vhost/common.sh

dry_run=false
no_shutdown=false
fio_bin=""
remote_fio_bin=""
fio_jobs=""
test_type=spdk_vhost_scsi
reuse_vms=false
vms=()
used_vms=""
x=""
readonly=""
packed=false

function usage() {
	[[ -n $2 ]] && (
		echo "$2"
		echo ""
	)
	echo "Shortcut script for running an automated test"
	echo "Usage: $(basename $1) [OPTIONS]"
	echo
	echo "-h, --help                print help and exit"
	echo "    --test-type=TYPE      Perform specified test:"
	echo "                          virtio - test host virtio-scsi-pci using file as disk image"
	echo "                          kernel_vhost - use kernel driver vhost-scsi"
	echo "                          spdk_vhost_scsi - use spdk vhost scsi"
	echo "                          spdk_vhost_blk - use spdk vhost block"
	echo "-x                        set -x for script debug"
	echo "    --fio-bin=FIO         Use specific fio binary (will be uploaded to VM)"
	echo "    --fio-job=            Fio config to use for test."
	echo "                          All VMs will run the same fio job when FIO executes."
	echo "                          (no unique jobs for specific VMs)"
	echo "    --dry-run             Don't perform any tests; just set up the environment and wait for Enter to terminate"
	echo "    --no-shutdown         Don't shut down at the end; leave the environment running"
	echo "    --vm=NUM[,OS][,DISKS] VM configuration. This parameter may be used more than once:"
	echo "                          NUM - VM number (mandatory)"
	echo "                          OS - path to the VM OS disk image (optional)"
	echo "                          DISKS - paths of the VM test disks/devices (virtio - optional, kernel_vhost - mandatory)"
	echo "    --readonly            Use read-only mode for fio"
	echo "    --packed              Use packed virtqueues (applied only with spdk_vhost_blk)"
	exit 0
}

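# Example invocation (illustrative values only; none of these paths or disk
# names are defaults of this script):
#   ./fio.sh --test-type=spdk_vhost_scsi \
#       --fio-bin=/usr/src/fio/fio \
#       --fio-job=/path/to/job.fio \
#       --vm=0,/path/to/vm_image.qcow2,Nvme0n1p0
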
# Default raw file is an NVMe drive

while getopts 'xh-:' optchar; do
	case "$optchar" in
		-)
			case "$OPTARG" in
				help) usage $0 ;;
				fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
				fio-job=*) fio_job="${OPTARG#*=}" ;;
				dry-run) dry_run=true ;;
				no-shutdown) no_shutdown=true ;;
				test-type=*) test_type="${OPTARG#*=}" ;;
				vm=*) vms+=("${OPTARG#*=}") ;;
				readonly) readonly="--readonly" ;;
				packed) packed=true ;;
				*) usage $0 "Invalid argument '$OPTARG'" ;;
			esac
			;;
		h) usage $0 ;;
		x)
			set -x
			x="-x"
			;;
		*) usage $0 "Invalid argument '$OPTARG'" ;;
	esac
done
shift $((OPTIND - 1))

if [[ ! -r "$fio_job" ]]; then
	fail "no fio job file specified or file is not readable"
fi
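
# Sketch of a minimal fio job file for --fio-job (illustrative only; the
# parameters here are examples, not requirements). The target devices are
# passed to run_fio separately via the --vm=NUM:/dev/... entries collected
# later in this script, so the job file describes only the workload:
#   [global]
#   blocksize=4k
#   iodepth=8
#   rw=randwrite
#   [job0]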

vhosttestinit

trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR

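# Make sure no VMs from a previous run are still running before provisioning
# new ones.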
vm_kill_all

if [[ $test_type =~ "spdk_vhost" ]]; then
	notice "==============="
	notice ""
	notice "running SPDK"
	notice ""
	vhost_run -n 0
	rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
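	# Carve up the bdev layout for the tests: four splits of the NVMe
	# namespace, a set of malloc bdevs, and three two-member RAID0 bdevs
	# built on top of them.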
	$rpc_py bdev_split_create Nvme0n1 4
	$rpc_py bdev_malloc_create -b Malloc0 128 4096
	$rpc_py bdev_malloc_create -b Malloc1 128 4096
	$rpc_py bdev_malloc_create -b Malloc2 64 512
	$rpc_py bdev_malloc_create -b Malloc3 64 512
	$rpc_py bdev_malloc_create -b Malloc4 64 512
	$rpc_py bdev_malloc_create -b Malloc5 64 512
	$rpc_py bdev_malloc_create -b Malloc6 64 512
	$rpc_py bdev_raid_create -n RaidBdev0 -z 128 -r 0 -b "Malloc2 Malloc3"
	$rpc_py bdev_raid_create -n RaidBdev1 -z 128 -r 0 -b "Nvme0n1p2 Malloc4"
	$rpc_py bdev_raid_create -n RaidBdev2 -z 128 -r 0 -b "Malloc5 Malloc6"
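	# vhost.0 and vhost.1 are extra controllers pinned to core 0 (--cpumask
	# 0x1): a SCSI controller with Malloc0 as target 0, and a read-only (-r)
	# block controller backed by Malloc1.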
	$rpc_py vhost_create_scsi_controller --cpumask 0x1 vhost.0
	$rpc_py vhost_scsi_controller_add_target vhost.0 0 Malloc0
	$rpc_py vhost_create_blk_controller --cpumask 0x1 -r vhost.1 Malloc1
	notice ""
fi

notice "==============="
notice ""
notice "Setting up VM"
notice ""

rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"

for vm_conf in "${vms[@]}"; do
	IFS=',' read -ra conf <<< "$vm_conf"
	if [[ -z ${conf[0]} ]] || ! assert_number ${conf[0]}; then
		fail "invalid VM configuration syntax $vm_conf"
	fi

	# Sanity check: make sure the same VM number is not defined twice
	for vm_num in $used_vms; do
		if [[ $vm_num -eq ${conf[0]} ]]; then
			fail "VM$vm_num defined more than once ( $(printf "'%s' " "${vms[@]}"))!"
		fi
	done

	used_vms+=" ${conf[0]}"

	if [[ $test_type =~ "spdk_vhost" ]]; then

		notice "Adding device via RPC ..."

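		# ${conf[2]} holds this VM's disk list separated by ':'; create one
		# vhost controller (naa.<disk>.<vm_num>) per disk.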
		while IFS=':' read -ra disks; do
			for disk in "${disks[@]}"; do
				if [[ $disk == "RaidBdev2" ]]; then
					notice "Creating a lvol store on RaidBdev2 and then a lvol bdev on the lvol store"
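					# -c 4194304 sets the lvstore cluster size to 4 MiB; the
					# lvol bdev below then claims all of the store's free space.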
					ls_guid=$($rpc_py bdev_lvol_create_lvstore RaidBdev2 lvs_0 -c 4194304)
					free_mb=$(get_lvs_free_mb "$ls_guid")
					based_disk=$($rpc_py bdev_lvol_create -u $ls_guid lbd_0 $free_mb)
				else
					based_disk="$disk"
				fi

				if [[ "$test_type" == "spdk_vhost_blk" ]]; then
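					# Strip any '_' suffix so the controller name is derived
					# from the base disk name.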
					disk=${disk%%_*}
					notice "Creating vhost block controller naa.$disk.${conf[0]} with device $based_disk"
					$rpc_py vhost_create_blk_controller naa.$disk.${conf[0]} $based_disk
				else
					notice "Creating controller naa.$disk.${conf[0]}"
					$rpc_py vhost_create_scsi_controller naa.$disk.${conf[0]}

					notice "Adding device (0) to naa.$disk.${conf[0]}"
					$rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
				fi
			done
		done <<< "${conf[2]}"
		unset IFS
		$rpc_py vhost_get_controllers
	fi

	setup_cmd="vm_setup --force=${conf[0]} --disk-type=$test_type"
	[[ -n ${conf[1]} ]] && setup_cmd+=" --os=${conf[1]}"
	[[ -n ${conf[2]} ]] && setup_cmd+=" --disks=${conf[2]}"

	if [[ "$test_type" == "spdk_vhost_blk" ]] && $packed; then
		setup_cmd+=" --packed"
	fi

	$setup_cmd
done

# Run everything
vm_run $used_vms
vm_wait_for_boot 300 $used_vms

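# For SCSI targets, exercise hot-detach/hot-attach while the VMs are up:
# remove target 0 from every controller and immediately re-add it.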
if [[ $test_type == "spdk_vhost_scsi" ]]; then
	for vm_conf in "${vms[@]}"; do
		IFS=',' read -ra conf <<< "$vm_conf"
		while IFS=':' read -ra disks; do
			for disk in "${disks[@]}"; do
				# For RaidBdev2, use the lvol bdev created on top of it earlier.
				if [[ $disk == "RaidBdev2" ]]; then
					based_disk="lvs_0/lbd_0"
				else
					based_disk="$disk"
				fi
				notice "Hotdetach test. Trying to remove the existing device from controller naa.$disk.${conf[0]}"
				$rpc_py vhost_scsi_controller_remove_target naa.$disk.${conf[0]} 0

				sleep 0.1

				notice "Hotattach test. Re-adding device 0 to naa.$disk.${conf[0]}"
				$rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
			done
		done <<< "${conf[2]}"
		unset IFS
	done
fi

sleep 0.1

notice "==============="
notice ""
notice "Testing..."

notice "Running fio jobs ..."

# Check that all VMs have their disks in the same location
DISK=""

fio_disks=""
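# For each VM: set its hostname, start the fio server inside it, discover the
# test devices, and collect them into $fio_disks for a single run_fio call.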
for vm_num in $used_vms; do
	qemu_mask_param="VM_${vm_num}_qemu_mask"

	host_name="VM-$vm_num"
	notice "Setting up hostname: $host_name"
	vm_exec $vm_num "hostname $host_name"
	vm_start_fio_server $fio_bin $readonly $vm_num

	if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
		vm_check_scsi_location $vm_num
		#vm_reset_scsi_devices $vm_num $SCSI_DISK
	elif [[ "$test_type" == "spdk_vhost_blk" ]]; then
		vm_check_blk_location $vm_num
	fi

	fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
done

if $dry_run; then
	read -r -p "Press Enter to kill everything" xx
	sleep 3
	at_app_exit
	exit 0
fi

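# Run the same fio job on all VMs at once; results are collected under
# $VHOST_DIR/fio_results.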
run_fio $fio_bin --job-file="$fio_job" --out="$VHOST_DIR/fio_results" $fio_disks

if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
	for vm_num in $used_vms; do
		vm_reset_scsi_devices $vm_num $SCSI_DISK
	done
fi

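# Unless --no-shutdown was given, tear everything down in order: the VMs, then
# the vhost controllers and lvol objects via RPC, and finally the vhost app.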
if ! $no_shutdown; then
	notice "==============="
	notice "APP EXITING"
	notice "killing all VMs"
	vm_shutdown_all
	notice "waiting 2 seconds to let all VMs die"
	sleep 2
	if [[ $test_type =~ "spdk_vhost" ]]; then
		notice "Removing vhost devices & controllers via RPC ..."
		for vm_conf in "${vms[@]}"; do
			IFS=',' read -ra conf <<< "$vm_conf"

			while IFS=':' read -ra disks; do
				for disk in "${disks[@]}"; do
					disk=${disk%%_*}
					notice "Removing all vhost devices from controller naa.$disk.${conf[0]}"
					if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
						$rpc_py vhost_scsi_controller_remove_target naa.$disk.${conf[0]} 0
					fi

					$rpc_py vhost_delete_controller naa.$disk.${conf[0]}
					if [[ $disk == "RaidBdev2" ]]; then
						notice "Removing lvol bdev and lvol store"
						$rpc_py bdev_lvol_delete lvs_0/lbd_0
						$rpc_py bdev_lvol_delete_lvstore -l lvs_0
					fi
				done
			done <<< "${conf[2]}"
		done
	fi
	notice "Testing done -> shutting down"
	notice "killing vhost app"
	vhost_kill 0

	notice "EXIT DONE"
	notice "==============="
else
	notice "==============="
	notice ""
	notice "Leaving environment working!"
	notice ""
	notice "==============="
fi

vhosttestfini