xref: /spdk/test/vhost/hotplug/blk_hotremove.sh (revision eb53c23236cccb6b698b7ca70ee783da1c574b5f)
1#  SPDX-License-Identifier: BSD-3-Clause
2#  Copyright (C) 2017 Intel Corporation
3#  All rights reserved.
4#
5
6# Vhost blk hot remove tests
7#
8# Objective
9# The purpose of these tests is to verify that SPDK vhost remains stable during
# hot-remove operations performed on SCSI and BLK controller devices.
# Hot-remove is a scenario where an NVMe device is removed while already in use.
12#
13# Test cases description
14# 1. FIO I/O traffic is run during hot-remove operations.
15#    By default FIO uses default_integrity*.job config files located in
16#    test/vhost/hotplug/fio_jobs directory.
17# 2. FIO mode of operation is random write (randwrite) with verification enabled
18#    which results in also performing read operations.
19# 3. In test cases fio status is checked after every run if any errors occurred.
20
# Build the remote fio command line for the given VMs and stage the job file.
# Arguments: $1 - whitespace-separated list of VM numbers.
# Globals read:    fio_bin, fio_job, tmp_detach_job
# Globals written: run_fio (the complete fio client command), SCSI_DISK
#                  (set by vm_check_blk_location)
# For each VM: copy the base job file, append one [nvme-host<disk>] section per
# detected blk disk, scp it into the VM, and append a --client clause to run_fio.
function prepare_fio_cmd_tc1() {
	print_test_fio_header

	run_fio="$fio_bin --eta=never "
	# $1 is deliberately unquoted: it is a list of VM numbers to split on.
	for vm_num in $1; do
		# Quote the paths so job files with spaces/globs do not break the copy.
		cp "$fio_job" "$tmp_detach_job"
		vm_check_blk_location "$vm_num"
		# SCSI_DISK is deliberately unquoted - it holds a whitespace-separated
		# list of device names filled in by vm_check_blk_location.
		for disk in $SCSI_DISK; do
			echo "[nvme-host$disk]" >> "$tmp_detach_job"
			echo "filename=/dev/$disk" >> "$tmp_detach_job"
		done
		vm_scp "$vm_num" "$tmp_detach_job" 127.0.0.1:/root/default_integrity_2discs.job
		run_fio+="--client=127.0.0.1,$(vm_fio_socket "$vm_num") --remote-config /root/default_integrity_2discs.job "
		rm "$tmp_detach_job"
	done
}
37
# Remove the four vhost-blk controllers created by the test cases
# (two for VM 0, two for VM 1), in creation order.
function vhost_delete_controllers() {
	local ctrl
	for ctrl in naa.Nvme0n1p0.0 naa.Nvme0n1p1.0 naa.Nvme0n1p2.1 naa.Nvme0n1p3.1; do
		$rpc_py vhost_delete_controller "$ctrl"
	done
}
44
45# Vhost blk hot remove test cases
46#
47# Test Case 1
function blk_hotremove_tc1() {
	echo "Blk hotremove test case 1"
	# get_traddr is expected to fill the global 'traddr' with the device's
	# PCI address; clear it first so a stale value is never reused.
	traddr=""
	# 1. Run the command to hot remove NVMe disk.
	get_traddr "Nvme0"
	delete_nvme "Nvme0"
	# 2. If vhost had crashed then tests would stop running
	sleep 1
	# Re-attach the removed device under a new bdev name so the
	# following test cases can use it.
	add_nvme "HotInNvme0" "$traddr"
	sleep 1
}
59
60# Test Case 2
# Hot remove an NVMe device while one VM runs verified fio traffic on it,
# then confirm fio fails both before and after a VM reboot.
function blk_hotremove_tc2() {
	echo "Blk hotremove test case 2"
	# Create four blk controllers; the first is backed by the hotpluggable
	# NVMe namespace, the remaining ones by Malloc bdevs.
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme0n1p0
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
	# Boot both VMs; only VM 0 takes part in this test case.
	vm_run_with_arg "0 1"
	vms_prepare "0"

	# Remember the device's PCI address so it can be re-attached at the end.
	traddr=""
	get_traddr "Nvme0"
	prepare_fio_cmd_tc1 "0"
	# Start verified fio traffic in the background ...
	$run_fio &
	local fio_pid=$!
	sleep 3
	# ... then pull the NVMe device out from under it.
	delete_nvme "HotInNvme0"
	local rc=0
	wait_for_finish $fio_pid || rc=$?
	# fio must have stopped with an error once its device disappeared.
	check_fio_retcode "Blk hotremove test case 2: Iteration 1." 1 $rc

	# Reboot the VM and retry fio against the now-absent device;
	# it must fail again.
	reboot_all_and_prepare "0"
	$run_fio &
	local rc=0
	wait_for_finish $! || rc=$?
	check_fio_retcode "Blk hotremove test case 2: Iteration 2." 1 $rc
	vm_shutdown_all
	vhost_delete_controllers
	# Re-attach the removed device under a new bdev name for the next case.
	add_nvme "HotInNvme1" "$traddr"
	sleep 1
}
101
102# ## Test Case 3
function blk_hotremove_tc3() {
	echo "Blk hotremove test case 3"
	# 1. Use rpc command to create blk controllers. Each VM gets one
	#    controller backed by the hotpluggable NVMe and one by a Malloc bdev.
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme1n1p0
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme1n1p1
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
	# 2. Run two VMs and attach every VM to two blk controllers.
	vm_run_with_arg "0 1"
	vms_prepare "0 1"

	# get_traddr stores the device's PCI address in the global 'traddr'
	# so the device can be re-attached at the end of this test case.
	traddr=""
	get_traddr "Nvme0"
	prepare_fio_cmd_tc1 "0"
	# 3. Run FIO I/O traffic with verification enabled on first NVMe disk.
	$run_fio &
	local last_pid=$!
	sleep 3
	# 4. Run the command to hot remove of first NVMe disk.
	delete_nvme "HotInNvme1"
	local retcode=0
	wait_for_finish $last_pid || retcode=$?
	# 5. Check that fio job run on hot-removed device stopped.
	#    Expected: Fio should return error message and return code != 0.
	check_fio_retcode "Blk hotremove test case 3: Iteration 1." 1 $retcode

	# 6. Reboot VM
	reboot_all_and_prepare "0"
	local retcode=0
	# 7. Run FIO I/O traffic with verification enabled on removed NVMe disk.
	$run_fio &
	wait_for_finish $! || retcode=$?
	# 8. Check that fio job run on hot-removed device stopped.
	#    Expected: Fio should return error message and return code != 0.
	check_fio_retcode "Blk hotremove test case 3: Iteration 2." 1 $retcode
	vm_shutdown_all
	vhost_delete_controllers
	# Re-attach the removed device under a new bdev name for the next case.
	add_nvme "HotInNvme2" "$traddr"
	sleep 1
}
143
144# Test Case 4
function blk_hotremove_tc4() {
	echo "Blk hotremove test case 4"
	# 1. Use rpc command to create blk controllers. Each VM gets one
	#    controller backed by the hotpluggable NVMe and one by a Malloc bdev.
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme2n1p0
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme2n1p1
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
	# 2. Run two VM, attached to blk controllers.
	vm_run_with_arg "0 1"
	vms_prepare "0 1"

	prepare_fio_cmd_tc1 "0"
	# 3. Run FIO I/O traffic on first VM with verification enabled on both NVMe disks.
	$run_fio &
	local last_pid_vm0=$!

	prepare_fio_cmd_tc1 "1"
	# 4. Run FIO I/O traffic on second VM with verification enabled on both NVMe disks.
	$run_fio &
	local last_pid_vm1=$!

	sleep 3
	# Rebuild the global 'run_fio' to target both VMs; this command line is
	# used by the post-reboot iteration below, not by the two fio jobs
	# already running in the background.
	prepare_fio_cmd_tc1 "0 1"
	# 5. Run the command to hot remove of first NVMe disk.
	delete_nvme "HotInNvme2"
	local retcode_vm0=0
	local retcode_vm1=0
	wait_for_finish $last_pid_vm0 || retcode_vm0=$?
	wait_for_finish $last_pid_vm1 || retcode_vm1=$?
	# 6. Check that fio job run on hot-removed device stopped.
	#    Expected: Fio should return error message and return code != 0.
	check_fio_retcode "Blk hotremove test case 4: Iteration 1." 1 $retcode_vm0
	check_fio_retcode "Blk hotremove test case 4: Iteration 2." 1 $retcode_vm1

	# 7. Reboot all VMs.
	reboot_all_and_prepare "0 1"
	# 8. Run FIO I/O traffic with verification enabled on removed NVMe disk.
	$run_fio &
	local retcode=0
	wait_for_finish $! || retcode=$?
	# 9. Check that fio job run on hot-removed device stopped.
	#    Expected: Fio should return error message and return code != 0.
	check_fio_retcode "Blk hotremove test case 4: Iteration 3." 1 $retcode

	vm_shutdown_all
	vhost_delete_controllers
	# Re-attach the removed device (traddr was saved by an earlier test case)
	# under a new bdev name for the next case.
	add_nvme "HotInNvme3" "$traddr"
	sleep 1
}
194
195# Test Case 5
# Hot remove the only NVMe-backed controller of VM 0 while fio runs on it;
# all other controllers are Malloc-backed and unaffected.
function blk_hotremove_tc5() {
	echo "Blk hotremove test case 5"
	# Create the controllers: only VM 0's first controller is backed by the
	# hotpluggable NVMe namespace, everything else by Malloc bdevs.
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme3n1p0
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
	$rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
	# Boot both VMs and prepare them for fio.
	vm_run_with_arg "0 1"
	vms_prepare "0 1"

	prepare_fio_cmd_tc1 "0"
	# Kick off verified fio traffic on VM 0 in the background ...
	$run_fio &
	local fio_pid=$!
	sleep 3
	# ... then hot remove the NVMe device it is writing to.
	delete_nvme "HotInNvme3"
	local rc=0
	wait_for_finish $fio_pid || rc=$?
	# fio must have stopped with an error once its device disappeared.
	check_fio_retcode "Blk hotremove test case 5: Iteration 1." 1 $rc

	# Reboot the VM and retry fio against the now-absent device;
	# it must fail again.
	reboot_all_and_prepare "0"
	local rc=0
	$run_fio &
	wait_for_finish $! || rc=$?
	check_fio_retcode "Blk hotremove test case 5: Iteration 2." 1 $rc
	vm_shutdown_all
	vhost_delete_controllers
	# Re-attach the removed device under a new bdev name.
	add_nvme "HotInNvme4" "$traddr"
	sleep 1
}
234
235vms_setup
236blk_hotremove_tc1
237blk_hotremove_tc2
238blk_hotremove_tc3
239blk_hotremove_tc4
240blk_hotremove_tc5
241