#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/vhost/common.sh

dry_run=false
no_shutdown=false
fio_bin=""
remote_fio_bin=""
fio_job=""
test_type=spdk_vhost_scsi
reuse_vms=false
vms=()
used_vms=""
x=""
readonly=""
packed=false

function usage() {
	[[ -n $2 ]] && (
		echo "$2"
		echo ""
	)
	echo "Shortcut script for automated vhost fio tests"
	echo "Usage: $(basename $1) [OPTIONS]"
	echo
	echo "-h, --help                print help and exit"
	echo "    --test-type=TYPE      Perform specified test:"
	echo "                          virtio - test host virtio-scsi-pci using file as disk image"
	echo "                          kernel_vhost - use kernel driver vhost-scsi"
	echo "                          spdk_vhost_scsi - use spdk vhost scsi"
	echo "                          spdk_vhost_blk - use spdk vhost block"
	echo "-x                        set -x for script debug"
	echo "    --fio-bin=FIO         Use specific fio binary (will be uploaded to VM)"
	echo "    --fio-job=            Fio config to use for test."
	echo "                          All VMs will run the same fio job when FIO executes."
	echo "                          (no unique jobs for specific VMs)"
	echo "    --dry-run             Don't perform any tests, only start the VMs and wait for Enter to terminate"
	echo "    --no-shutdown         Don't shut down at the end but leave the environment working"
	echo "    --vm=NUM[,OS][,DISKS] VM configuration. This parameter might be used more than once:"
	echo "                          NUM - VM number (mandatory)"
	echo "                          OS - VM os disk path (optional)"
	echo "                          DISKS - VM test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
	echo "    --readonly            Use readonly for fio"
	echo "    --packed              Virtqueue format is packed"
	exit 0
}

# Default raw file is NVMe drive

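# Example invocation (illustrative only; the OS image path and disk name are
# hypothetical and depend on the local setup):
#   ./fio.sh --test-type=spdk_vhost_scsi \
#            --fio-job=$rootdir/test/vhost/common/fio_jobs/default_integrity.job \
#            --vm=0,/home/user/vhost_vm_image.qcow2,Nvme0n1p0
#
# getopts has no built-in long-option support; the '-' entry in the optstring
# below catches "--name[=value]" with OPTARG set to "name[=value]", which the
# inner case statement then dispatches on.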
while getopts 'xh-:' optchar; do
	case "$optchar" in
		-)
			case "$OPTARG" in
				help) usage $0 ;;
				fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
				fio-job=*) fio_job="${OPTARG#*=}" ;;
				dry-run) dry_run=true ;;
				no-shutdown) no_shutdown=true ;;
				test-type=*) test_type="${OPTARG#*=}" ;;
				vm=*) vms+=("${OPTARG#*=}") ;;
				readonly) readonly="--readonly" ;;
				packed) packed=true ;;
				*) usage $0 "Invalid argument '$OPTARG'" ;;
			esac
			;;
		h) usage $0 ;;
		x)
			set -x
			x="-x"
			;;
		*) usage $0 "Invalid argument '$OPTARG'" ;;
	esac
done
shift $((OPTIND - 1))

if [[ ! -r "$fio_job" ]]; then
	fail "no fio job file specified or file '$fio_job' is not readable"
fi

vhosttestinit

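# On any command failure, report the failing function and line, then tear the
# test environment down via error_exit() from the sourced common helpers.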
trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR

vm_kill_all

if [[ $test_type =~ "spdk_vhost" ]]; then
	notice "==============="
	notice ""
	notice "running SPDK"
	notice ""
	vhost_run -n 0
	rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
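	# Build the bdev layout used below: split the NVMe bdev into 4 equal
	# partitions (Nvme0n1p0..Nvme0n1p3), create malloc bdevs (args: size in
	# MiB, block size in bytes) and stripe pairs of them into RAID0 bdevs
	# with a 128 KiB strip size (-z 128).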
	$rpc_py bdev_split_create Nvme0n1 4
	$rpc_py bdev_malloc_create -b Malloc0 128 4096
	$rpc_py bdev_malloc_create -b Malloc1 128 4096
	$rpc_py bdev_malloc_create -b Malloc2 64 512
	$rpc_py bdev_malloc_create -b Malloc3 64 512
	$rpc_py bdev_malloc_create -b Malloc4 64 512
	$rpc_py bdev_malloc_create -b Malloc5 64 512
	$rpc_py bdev_malloc_create -b Malloc6 64 512
	$rpc_py bdev_raid_create -n RaidBdev0 -z 128 -r 0 -b "Malloc2 Malloc3"
	$rpc_py bdev_raid_create -n RaidBdev1 -z 128 -r 0 -b "Nvme0n1p2 Malloc4"
	$rpc_py bdev_raid_create -n RaidBdev2 -z 128 -r 0 -b "Malloc5 Malloc6"
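	# Two standalone controllers pinned to core 0 (--cpumask 0x1); -r exposes
	# the block controller read-only.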
	$rpc_py vhost_create_scsi_controller --cpumask 0x1 vhost.0
	$rpc_py vhost_scsi_controller_add_target vhost.0 0 Malloc0
	$rpc_py vhost_create_blk_controller --cpumask 0x1 -r vhost.1 Malloc1
	notice ""
fi

notice "==============="
notice ""
notice "Setting up VM"
notice ""

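# Redefine rpc_py unconditionally so it is also available for test types that
# skip the spdk_vhost setup block above.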
rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"

for vm_conf in "${vms[@]}"; do
	IFS=',' read -ra conf <<< "$vm_conf"
	if [[ -z ${conf[0]} ]] || ! assert_number ${conf[0]}; then
		fail "invalid VM configuration syntax $vm_conf"
	fi

	# Sanity check that the same VM is not defined twice
	for vm_num in $used_vms; do
		if [[ $vm_num -eq ${conf[0]} ]]; then
			fail "VM$vm_num defined more than once ( $(printf "'%s' " "${vms[@]}"))!"
		fi
	done

	used_vms+=" ${conf[0]}"

	if [[ $test_type =~ "spdk_vhost" ]]; then

		notice "Adding device via RPC ..."

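		# conf[2] (DISKS) is a ':'-separated list; each entry gets its own
		# vhost controller named naa.<disk>.<vm_num>.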
		while IFS=':' read -ra disks; do
			for disk in "${disks[@]}"; do
				if [[ $disk == "RaidBdev2" ]]; then
					notice "Creating a lvol store on RaidBdev2 and then a lvol bdev on the lvol store"
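					# -c 4194304 sets a 4 MiB lvol store cluster size; the lvol
					# bdev then takes all of the store's free space.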
					ls_guid=$($rpc_py bdev_lvol_create_lvstore RaidBdev2 lvs_0 -c 4194304)
					free_mb=$(get_lvs_free_mb "$ls_guid")
					based_disk=$($rpc_py bdev_lvol_create -u $ls_guid lbd_0 $free_mb)
				else
					based_disk="$disk"
				fi

				if [[ "$test_type" == "spdk_vhost_blk" ]]; then
					disk=${disk%%_*}
					notice "Creating vhost block controller naa.$disk.${conf[0]} with device $based_disk"
					$rpc_py vhost_create_blk_controller naa.$disk.${conf[0]} $based_disk
				else
					notice "Creating controller naa.$disk.${conf[0]}"
					$rpc_py vhost_create_scsi_controller naa.$disk.${conf[0]}

					notice "Adding device (0) to naa.$disk.${conf[0]}"
					$rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
				fi
			done
		done <<< "${conf[2]}"
		unset IFS
		$rpc_py vhost_get_controllers
	fi

	setup_cmd="vm_setup --force=${conf[0]} --disk-type=$test_type"
	[[ -n ${conf[1]} ]] && setup_cmd+=" --os=${conf[1]}"
	[[ -n ${conf[2]} ]] && setup_cmd+=" --disks=${conf[2]}"

	if [[ "$test_type" == "spdk_vhost_blk" ]] && $packed; then
		setup_cmd+=" --packed"
	fi

	$setup_cmd
done

# Run everything
vm_run $used_vms
vm_wait_for_boot 300 $used_vms

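# Quick hot-detach/hot-attach sanity check: remove SCSI target 0 from every
# controller and immediately re-add it before fio runs.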
if [[ $test_type == "spdk_vhost_scsi" ]]; then
	for vm_conf in "${vms[@]}"; do
		IFS=',' read -ra conf <<< "$vm_conf"
		while IFS=':' read -ra disks; do
			for disk in "${disks[@]}"; do
				# For RaidBdev2, the lvol bdev created on top of it is used.
				if [[ $disk == "RaidBdev2" ]]; then
					based_disk="lvs_0/lbd_0"
				else
					based_disk="$disk"
				fi
				notice "Hot-detach test. Removing the existing device from controller naa.$disk.${conf[0]}"
				$rpc_py vhost_scsi_controller_remove_target naa.$disk.${conf[0]} 0

				sleep 0.1

				notice "Hot-attach test. Re-adding device 0 to naa.$disk.${conf[0]}"
				$rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
			done
		done <<< "${conf[2]}"
		unset IFS
	done
fi

sleep 0.1

notice "==============="
notice ""
notice "Testing..."

notice "Running fio jobs ..."

# Check that all VMs have their disks in the same location
DISK=""

fio_disks=""
for vm_num in $used_vms; do
	qemu_mask_param="VM_${vm_num}_qemu_mask"

	host_name="VM-$vm_num"
	notice "Setting up hostname: $host_name"
	vm_exec $vm_num "hostname $host_name"
	vm_start_fio_server $fio_bin $readonly $vm_num

	if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
		vm_check_scsi_location $vm_num
		#vm_reset_scsi_devices $vm_num $SCSI_DISK
	elif [[ "$test_type" == "spdk_vhost_blk" ]]; then
		vm_check_blk_location $vm_num
	fi

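	# SCSI_DISK is set by the vm_check_*_location helpers; the unquoted printf
	# argument repeats ':/dev/%s' for every device name in the list.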
	fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
done

if $dry_run; then
	read -r -p "Press Enter to kill everything" xx
	sleep 3
	at_app_exit
	exit 0
fi

run_fio $fio_bin --job-file="$fio_job" --out="$VHOST_DIR/fio_results" $fio_disks

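# Note: SCSI_DISK still holds the device list discovered for the last VM in
# the setup loop above, so every VM is reset with that same list here.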
if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
	for vm_num in $used_vms; do
		vm_reset_scsi_devices $vm_num $SCSI_DISK
	done
fi

if ! $no_shutdown; then
	notice "==============="
	notice "APP EXITING"
	notice "killing all VMs"
	vm_shutdown_all
	notice "waiting 2 seconds to let all VMs die"
	sleep 2
	if [[ $test_type =~ "spdk_vhost" ]]; then
		notice "Removing vhost devices & controllers via RPC ..."
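		# Tear down in reverse order of creation: SCSI targets first, then the
		# controllers, then the lvol bdev/store that were built on RaidBdev2.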
		for vm_conf in "${vms[@]}"; do
			IFS=',' read -ra conf <<< "$vm_conf"

			while IFS=':' read -ra disks; do
				for disk in "${disks[@]}"; do
					disk=${disk%%_*}
					notice "Removing all vhost devices from controller naa.$disk.${conf[0]}"
					if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
						$rpc_py vhost_scsi_controller_remove_target naa.$disk.${conf[0]} 0
					fi

					$rpc_py vhost_delete_controller naa.$disk.${conf[0]}
					if [[ $disk == "RaidBdev2" ]]; then
						notice "Removing lvol bdev and lvol store"
						$rpc_py bdev_lvol_delete lvs_0/lbd_0
						$rpc_py bdev_lvol_delete_lvstore -l lvs_0
					fi
				done
			done <<< "${conf[2]}"
		done
	fi
	notice "Testing done -> shutting down"
	notice "killing vhost app"
	vhost_kill 0

	notice "EXIT DONE"
	notice "==============="
else
	notice "==============="
	notice ""
	notice "Leaving environment working!"
	notice ""
	notice "==============="
fi

vhosttestfini
