xref: /spdk/test/bdev/bdev_raid.sh (revision 42fd001310188f0635a3953f3b0ea0b33a840902)
1#!/usr/bin/env bash
2#  SPDX-License-Identifier: BSD-3-Clause
3#  Copyright (C) 2019 Intel Corporation
4#  All rights reserved.
5#
6testdir=$(readlink -f $(dirname $0))
7rootdir=$(readlink -f $testdir/../..)
8rpc_server=/var/tmp/spdk-raid.sock
9tmp_file=$SPDK_TEST_STORAGE/raidrandtest
10
11source $rootdir/test/common/autotest_common.sh
12source $testdir/nbd_common.sh
13
14rpc_py="$rootdir/scripts/rpc.py -s $rpc_server"
15
function raid_unmap_data_verify() {
	# Verify that unmap (discard) on the raid bdev behaves like zeroing:
	# write random data to both the nbd device and a shadow file, then
	# discard regions on the device while zeroing the same regions in the
	# file, comparing the two after every step.
	# Arguments: $1 - nbd block device backed by the raid bdev
	#            $2 - RPC server socket path (currently unused in the body)
	# No-op (returns 0) when blkdiscard is not installed.
	if hash blkdiscard; then
		local nbd=$1
		local rpc_server=$2
		local blksize
		# Logical sector size in bytes; field 5 of lsblk's padded output.
		blksize=$(lsblk -o LOG-SEC $nbd | grep -v LOG-SEC | cut -d ' ' -f 5)
		local rw_blk_num=4096
		local rw_len=$((blksize * rw_blk_num))
		# Block offsets and lengths (in blocks) of the regions to discard.
		local unmap_blk_offs=(0 1028 321)
		local unmap_blk_nums=(128 2035 456)
		local unmap_off
		local unmap_len

		# data write
		dd if=/dev/urandom of=$tmp_file bs=$blksize count=$rw_blk_num
		dd if=$tmp_file of=$nbd bs=$blksize count=$rw_blk_num oflag=direct
		blockdev --flushbufs $nbd

		# confirm random data is written correctly in raid0 device
		cmp -b -n $rw_len $tmp_file $nbd

		for ((i = 0; i < ${#unmap_blk_offs[@]}; i++)); do
			unmap_off=$((blksize * ${unmap_blk_offs[$i]}))
			unmap_len=$((blksize * ${unmap_blk_nums[$i]}))

			# data unmap on tmp_file (zero-fill same range, keep file size)
			dd if=/dev/zero of=$tmp_file bs=$blksize seek=${unmap_blk_offs[$i]} count=${unmap_blk_nums[$i]} conv=notrunc

			# data unmap on raid bdev
			blkdiscard -o $unmap_off -l $unmap_len $nbd
			blockdev --flushbufs $nbd

			# data verify after unmap
			cmp -b -n $rw_len $tmp_file $nbd
		done
	fi

	return 0
}
55
function on_error_exit() {
	# ERR-trap handler: stop the SPDK app if one was started, remove the
	# scratch file, dump a backtrace and fail the whole test run.
	[[ -z "${raid_pid:-}" ]] || killprocess $raid_pid

	rm -f $tmp_file
	print_backtrace
	exit 1
}
65
function configure_raid_bdev() {
	# Create two 32 MB / 512-byte-block malloc bdevs and combine them into a
	# RAID bdev named "raid" at the requested level, via a batched RPC file.
	# Arguments: $1 - RAID level passed to bdev_raid_create (e.g. raid0, concat)
	local raid_level=$1

	# rpcs.txt is a regular file: plain -f is enough (was rm -rf), and a
	# truncating redirect makes the stale-content cleanup unnecessary anyway.
	rm -f $testdir/rpcs.txt

	cat <<- EOL > $testdir/rpcs.txt
		bdev_malloc_create 32 512 -b Base_1
		bdev_malloc_create 32 512 -b Base_2
		bdev_raid_create -z 64 -r $raid_level -b "Base_1 Base_2" -n raid
	EOL
	$rpc_py < $testdir/rpcs.txt

	rm -f $testdir/rpcs.txt
}
79
function raid_function_test() {
	# End-to-end I/O test: build a two-disk RAID bdev at the given level,
	# expose it over nbd, run the write/discard/verify pass, and tear down.
	# Arguments: $1 - RAID level (raid0 or concat)
	# Returns 0 on success, 1 on any verification failure.
	local raid_level=$1
	local nbd=/dev/nbd0
	local raid_bdev

	$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 -L bdev_raid &
	raid_pid=$!
	echo "Process raid pid: $raid_pid"
	waitforlisten $raid_pid $rpc_server

	configure_raid_bdev $raid_level
	raid_bdev=$($rpc_py bdev_raid_get_bdevs online | jq -r '.[0]["name"] | select(.)')
	# Use -z with a quoted expansion: the original unquoted [ $raid_bdev = "" ]
	# expanded to the invalid [ = "" ] (a test syntax error, not a clean
	# failure) exactly in the case it was meant to detect.
	if [ -z "$raid_bdev" ]; then
		# Message no longer hard-codes "raid0" -- this path also runs for concat.
		echo "No online raid device (level $raid_level) in SPDK app"
		return 1
	fi

	nbd_start_disks $rpc_server $raid_bdev $nbd
	count=$(nbd_get_count $rpc_server)
	if [ $count -ne 1 ]; then
		return 1
	fi

	raid_unmap_data_verify $nbd $rpc_server

	nbd_stop_disks $rpc_server $nbd
	count=$(nbd_get_count $rpc_server)
	if [ $count -ne 0 ]; then
		return 1
	fi

	killprocess $raid_pid

	return 0
}
115
function verify_raid_bdev_state() {
	# Assert that a raid bdev exists and matches the expected configuration.
	# Arguments: $1 - raid bdev name
	#            $2 - expected state category ("configuring", "online", "offline")
	#            $3 - expected RAID level
	#            $4 - expected strip size in KB
	#            $5 - expected number of operational base bdevs
	# Returns 1 on the first mismatch (which trips the script's ERR trap).
	local raid_bdev_name=$1
	local expected_state=$2
	local raid_level=$3
	local strip_size=$4
	local num_base_bdevs_operational=$5
	local raid_bdev_info
	local num_base_bdevs
	local num_base_bdevs_discovered
	local tmp

	raid_bdev_info=$($rpc_py bdev_raid_get_bdevs all | jq -r ".[] | select(.name == \"$raid_bdev_name\")")

	# Silence xtrace for the jq-heavy checks below.
	# NOTE(review): early returns skip xtrace_restore; failures abort the
	# script through the ERR trap anyway, so the state is never reused.
	xtrace_disable
	if [ -z "$raid_bdev_info" ]; then
		echo "No raid device \"$raid_bdev_name\" in SPDK app"
		return 1
	fi

	# The bdev must be listed under the expected state category...
	raid_bdev_info=$($rpc_py bdev_raid_get_bdevs $expected_state | jq -r ".[] | select(.name == \"$raid_bdev_name\")")
	if [ -z "$raid_bdev_info" ]; then
		echo "$raid_bdev_name is not in $expected_state state"
		return 1
	fi

	# ...and its own "state" field must agree with that category.
	tmp=$(echo $raid_bdev_info | jq -r '.state')
	if [ "$tmp" != $expected_state ]; then
		echo "incorrect state: $tmp, expected: $expected_state"
		return 1
	fi

	tmp=$(echo $raid_bdev_info | jq -r '.raid_level')
	if [ "$tmp" != $raid_level ]; then
		echo "incorrect level: $tmp, expected: $raid_level"
		return 1
	fi

	tmp=$(echo $raid_bdev_info | jq -r '.strip_size_kb')
	if [ "$tmp" != $strip_size ]; then
		echo "incorrect strip size: $tmp, expected: $strip_size"
		return 1
	fi

	# Cross-check the summary counters against the base_bdevs_list contents.
	num_base_bdevs=$(echo $raid_bdev_info | jq -r '[.base_bdevs_list[]] | length')
	tmp=$(echo $raid_bdev_info | jq -r '.num_base_bdevs')
	if [ "$num_base_bdevs" != "$tmp" ]; then
		echo "incorrect num_base_bdevs: $tmp, expected: $num_base_bdevs"
		return 1
	fi

	num_base_bdevs_discovered=$(echo $raid_bdev_info | jq -r '[.base_bdevs_list[] | select(.is_configured)] | length')
	tmp=$(echo $raid_bdev_info | jq -r '.num_base_bdevs_discovered')
	if [ "$num_base_bdevs_discovered" != "$tmp" ]; then
		echo "incorrect num_base_bdevs_discovered: $tmp, expected: $num_base_bdevs_discovered"
		return 1
	fi

	tmp=$(echo $raid_bdev_info | jq -r '.num_base_bdevs_operational')
	if [ "$num_base_bdevs_operational" != "$tmp" ]; then
		echo "incorrect num_base_bdevs_operational $tmp, expected: $num_base_bdevs_operational"
		return 1
	fi

	xtrace_restore
}
181
function verify_raid_bdev_process() {
	# Assert that raid bdev $1's background process matches the expected
	# type ($2) and target base bdev ($3); "none" means no process running.
	local name=$1
	local expected_type=$2
	local expected_target=$3
	local info

	info=$($rpc_py bdev_raid_get_bdevs all | jq -r ".[] | select(.name == \"$name\")")

	[[ $(jq -r '.process.type // "none"' <<< "$info") == "$expected_type" ]]
	[[ $(jq -r '.process.target // "none"' <<< "$info") == "$expected_target" ]]
}
193
function has_redundancy() {
	# Succeed (return 0) only for RAID levels that tolerate losing a base
	# bdev; everything else (raid0, concat, ...) fails.
	if [[ "$1" == "raid1" || "$1" == "raid5f" ]]; then
		return 0
	fi
	return 1
}
200
function raid_state_function_test() {
	# Walk a raid bdev through its lifecycle by creating/deleting its base
	# bdevs one at a time and checking the reported state after each step.
	# Arguments: $1 - RAID level
	#            $2 - number of base bdevs
	#            $3 - true to create the raid bdev with an on-disk superblock
	local raid_level=$1
	local num_base_bdevs=$2
	local superblock=$3
	local raid_bdev
	# Generated names: BaseBdev1 .. BaseBdev<num_base_bdevs>.
	local base_bdevs=($(for ((i = 1; i <= num_base_bdevs; i++)); do echo BaseBdev$i; done))
	local raid_bdev_name="Existed_Raid"
	local strip_size
	local strip_size_create_arg
	local superblock_create_arg

	# raid1 takes no strip size; all other levels use 64 KB strips here.
	if [ $raid_level != "raid1" ]; then
		strip_size=64
		strip_size_create_arg="-z $strip_size"
	else
		strip_size=0
	fi

	if [ $superblock = true ]; then
		superblock_create_arg="-s"
	else
		superblock_create_arg=""
	fi

	$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 -L bdev_raid &
	raid_pid=$!
	echo "Process raid pid: $raid_pid"
	waitforlisten $raid_pid $rpc_server

	# Step1: create a RAID bdev with no base bdevs
	# Expect state: CONFIGURING
	$rpc_py bdev_raid_create $strip_size_create_arg $superblock_create_arg -r $raid_level -b "${base_bdevs[*]}" -n $raid_bdev_name
	verify_raid_bdev_state $raid_bdev_name "configuring" $raid_level $strip_size $num_base_bdevs
	$rpc_py bdev_raid_delete $raid_bdev_name

	# Step2: create one base bdev and add to the RAID bdev
	# Expect state: CONFIGURING
	$rpc_py bdev_raid_create $strip_size_create_arg $superblock_create_arg -r $raid_level -b "${base_bdevs[*]}" -n $raid_bdev_name
	$rpc_py bdev_malloc_create 32 512 -b ${base_bdevs[0]}
	waitforbdev ${base_bdevs[0]}
	verify_raid_bdev_state $raid_bdev_name "configuring" $raid_level $strip_size $num_base_bdevs
	$rpc_py bdev_raid_delete $raid_bdev_name

	if [ $superblock = true ]; then
		# recreate the bdev to remove superblock
		$rpc_py bdev_malloc_delete ${base_bdevs[0]}
		$rpc_py bdev_malloc_create 32 512 -b ${base_bdevs[0]}
		waitforbdev ${base_bdevs[0]}
	fi

	# Step3: create remaining base bdevs and add to the RAID bdev
	# Expect state: ONLINE
	$rpc_py bdev_raid_create $strip_size_create_arg $superblock_create_arg -r $raid_level -b "${base_bdevs[*]}" -n $raid_bdev_name
	for ((i = 1; i < num_base_bdevs; i++)); do
		# Still configuring until the last base bdev appears.
		verify_raid_bdev_state $raid_bdev_name "configuring" $raid_level $strip_size $num_base_bdevs
		$rpc_py bdev_malloc_create 32 512 -b ${base_bdevs[$i]}
		waitforbdev ${base_bdevs[$i]}
	done
	verify_raid_bdev_state $raid_bdev_name "online" $raid_level $strip_size $num_base_bdevs

	# Step4: delete one base bdev from the RAID bdev
	$rpc_py bdev_malloc_delete ${base_bdevs[0]}
	local expected_state
	# Redundant levels survive the loss (stay online); others go offline.
	if ! has_redundancy $raid_level; then
		expected_state="offline"
	else
		expected_state="online"
	fi
	verify_raid_bdev_state $raid_bdev_name $expected_state $raid_level $strip_size $((num_base_bdevs - 1))

	# Step5: delete remaining base bdevs from the RAID bdev
	# Expect state: removed from system
	for ((i = 1; i < num_base_bdevs; i++)); do
		raid_bdev=$($rpc_py bdev_raid_get_bdevs all | jq -r '.[0]["name"]')
		if [ "$raid_bdev" != $raid_bdev_name ]; then
			echo "$raid_bdev_name removed before all base bdevs were deleted"
			return 1
		fi
		$rpc_py bdev_malloc_delete ${base_bdevs[$i]}
	done
	raid_bdev=$($rpc_py bdev_raid_get_bdevs all | jq -r '.[0]["name"] | select(.)')
	if [ -n "$raid_bdev" ]; then
		echo "$raid_bdev_name is not removed"
		return 1
	fi

	killprocess $raid_pid

	return 0
}
291
function raid0_resize_test() {
	# Verify that a raid0 bdev only grows after *all* of its base bdevs have
	# been resized: growing a single base bdev must not change the raid size.
	local blksize=512
	local base_size_mb=32
	local grown_size_mb=$((base_size_mb * 2))
	local blkcnt
	local size_mb

	$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 -L bdev_raid &
	raid_pid=$!
	echo "Process raid pid: $raid_pid"
	waitforlisten $raid_pid $rpc_server

	$rpc_py bdev_null_create Base_1 $base_size_mb $blksize
	$rpc_py bdev_null_create Base_2 $base_size_mb $blksize

	$rpc_py bdev_raid_create -z 64 -r 0 -b "Base_1 Base_2" -n Raid

	# Grow only Base_1; the raid bdev must keep its original size.
	$rpc_py bdev_null_resize Base_1 $grown_size_mb

	blkcnt=$($rpc_py bdev_get_bdevs -b Raid | jq '.[].num_blocks')
	size_mb=$((blkcnt * blksize / 1048576))
	if [ $size_mb != $((base_size_mb * 2)) ]; then
		echo "resize failed"
		return 1
	fi

	# Grow Base_2 as well; now the raid bdev should reflect the new size.
	$rpc_py bdev_null_resize Base_2 $grown_size_mb

	blkcnt=$($rpc_py bdev_get_bdevs -b Raid | jq '.[].num_blocks')
	size_mb=$((blkcnt * blksize / 1048576))
	if [ $size_mb != $((grown_size_mb * 2)) ]; then
		echo "resize failed"
		return 1
	fi

	killprocess $raid_pid

	return 0
}
336
function raid_superblock_test() {
	# Verify on-disk superblock behavior: a raid bdev created with -s must
	# re-assemble automatically (with the same UUID) when its base bdevs
	# reappear, refuse creation over bdevs that still carry a superblock,
	# and, for redundant levels, handle stale superblocks from failed bdevs.
	# Base bdevs are malloc bdevs wrapped in passthru bdevs with fixed UUIDs,
	# so they can be removed and re-added without losing their contents.
	# Arguments: $1 - RAID level
	#            $2 - number of base bdevs
	local raid_level=$1
	local num_base_bdevs=$2
	local base_bdevs_malloc=()
	local base_bdevs_pt=()
	local base_bdevs_pt_uuid=()
	local raid_bdev_name="raid_bdev1"
	local strip_size
	local strip_size_create_arg
	local raid_bdev_uuid
	local raid_bdev

	# raid1 takes no strip size; all other levels use 64 KB strips here.
	if [ $raid_level != "raid1" ]; then
		strip_size=64
		strip_size_create_arg="-z $strip_size"
	else
		strip_size=0
	fi

	"$rootdir/test/app/bdev_svc/bdev_svc" -r $rpc_server -L bdev_raid &
	raid_pid=$!
	waitforlisten $raid_pid $rpc_server

	# Create base bdevs
	for ((i = 1; i <= num_base_bdevs; i++)); do
		local bdev_malloc="malloc$i"
		local bdev_pt="pt$i"
		# Deterministic UUID per slot so re-created passthru bdevs are
		# recognized as the same device by the superblock.
		local bdev_pt_uuid="00000000-0000-0000-0000-00000000000$i"

		base_bdevs_malloc+=($bdev_malloc)
		base_bdevs_pt+=($bdev_pt)
		base_bdevs_pt_uuid+=($bdev_pt_uuid)

		$rpc_py bdev_malloc_create 32 512 -b $bdev_malloc
		$rpc_py bdev_passthru_create -b $bdev_malloc -p $bdev_pt -u $bdev_pt_uuid
	done

	# Create RAID bdev with superblock
	$rpc_py bdev_raid_create $strip_size_create_arg -r $raid_level -b "${base_bdevs_pt[*]}" -n $raid_bdev_name -s
	verify_raid_bdev_state $raid_bdev_name "online" $raid_level $strip_size $num_base_bdevs

	# Get RAID bdev's UUID
	raid_bdev_uuid=$($rpc_py bdev_get_bdevs -b $raid_bdev_name | jq -r '.[] | .uuid')
	if [ -z "$raid_bdev_uuid" ]; then
		return 1
	fi

	# Stop the RAID bdev
	$rpc_py bdev_raid_delete $raid_bdev_name
	raid_bdev=$($rpc_py bdev_raid_get_bdevs all | jq -r '.[]')
	if [ -n "$raid_bdev" ]; then
		return 1
	fi

	# Delete the passthru bdevs
	for i in "${base_bdevs_pt[@]}"; do
		$rpc_py bdev_passthru_delete $i
	done
	if [ "$($rpc_py bdev_get_bdevs | jq -r '[.[] | select(.product_name == "passthru")] | any')" == "true" ]; then
		return 1
	fi

	# Try to create new RAID bdev from malloc bdevs
	# Should fail due to superblock still present on base bdevs
	NOT $rpc_py bdev_raid_create $strip_size_create_arg -r $raid_level -b "${base_bdevs_malloc[*]}" -n $raid_bdev_name

	raid_bdev=$($rpc_py bdev_raid_get_bdevs all | jq -r '.[]')
	if [ -n "$raid_bdev" ]; then
		return 1
	fi

	# Re-add first base bdev
	$rpc_py bdev_passthru_create -b ${base_bdevs_malloc[0]} -p ${base_bdevs_pt[0]} -u ${base_bdevs_pt_uuid[0]}

	# Check if the RAID bdev was assembled from superblock
	verify_raid_bdev_state $raid_bdev_name "configuring" $raid_level $strip_size $num_base_bdevs

	if [ $num_base_bdevs -gt 2 ]; then
		# Re-add the second base bdev and remove it again
		$rpc_py bdev_passthru_create -b ${base_bdevs_malloc[1]} -p ${base_bdevs_pt[1]} -u ${base_bdevs_pt_uuid[1]}
		$rpc_py bdev_passthru_delete ${base_bdevs_pt[1]}
		verify_raid_bdev_state $raid_bdev_name "configuring" $raid_level $strip_size $num_base_bdevs
	fi

	# Re-add remaining base bdevs
	for ((i = 1; i < num_base_bdevs; i++)); do
		$rpc_py bdev_passthru_create -b ${base_bdevs_malloc[$i]} -p ${base_bdevs_pt[$i]} -u ${base_bdevs_pt_uuid[$i]}
	done

	# Check if the RAID bdev is in online state
	verify_raid_bdev_state $raid_bdev_name "online" $raid_level $strip_size $num_base_bdevs

	# Check if the RAID bdev has the same UUID as when first created
	if [ "$($rpc_py bdev_get_bdevs -b $raid_bdev_name | jq -r '.[] | .uuid')" != "$raid_bdev_uuid" ]; then
		return 1
	fi

	# The remaining scenarios remove base bdevs from a running array, which
	# only redundant levels survive.
	if has_redundancy $raid_level; then
		# Delete one base bdev
		$rpc_py bdev_passthru_delete ${base_bdevs_pt[0]}

		# Check if the RAID bdev is in online state (degraded)
		verify_raid_bdev_state $raid_bdev_name "online" $raid_level $strip_size $((num_base_bdevs - 1))

		# Stop the RAID bdev
		$rpc_py bdev_raid_delete $raid_bdev_name
		raid_bdev=$($rpc_py bdev_raid_get_bdevs all | jq -r '.[]')
		if [ -n "$raid_bdev" ]; then
			return 1
		fi

		# Delete remaining base bdevs
		for ((i = 1; i < num_base_bdevs; i++)); do
			$rpc_py bdev_passthru_delete ${base_bdevs_pt[$i]}
		done

		# Re-add base bdevs from the second up to (not including) the last one
		for ((i = 1; i < num_base_bdevs - 1; i++)); do
			$rpc_py bdev_passthru_create -b ${base_bdevs_malloc[$i]} -p ${base_bdevs_pt[$i]} -u ${base_bdevs_pt_uuid[$i]}

			# Check if the RAID bdev is in configuring state
			verify_raid_bdev_state $raid_bdev_name "configuring" $raid_level $strip_size $((num_base_bdevs - 1))
		done

		# Re-add the last base bdev
		i=$((num_base_bdevs - 1))
		$rpc_py bdev_passthru_create -b ${base_bdevs_malloc[$i]} -p ${base_bdevs_pt[$i]} -u ${base_bdevs_pt_uuid[$i]}

		# Check if the RAID bdev is in online state (degraded)
		verify_raid_bdev_state $raid_bdev_name "online" $raid_level $strip_size $((num_base_bdevs - 1))

		if [ $num_base_bdevs -gt 2 ]; then
			# Stop the RAID bdev
			$rpc_py bdev_raid_delete $raid_bdev_name
			raid_bdev=$($rpc_py bdev_raid_get_bdevs all | jq -r '.[]')
			if [ -n "$raid_bdev" ]; then
				return 1
			fi

			# Re-add first base bdev
			# This is the "failed" device and contains the "old" version of the superblock
			$rpc_py bdev_passthru_create -b ${base_bdevs_malloc[0]} -p ${base_bdevs_pt[0]} -u ${base_bdevs_pt_uuid[0]}

			# Check if the RAID bdev is in configuring state
			verify_raid_bdev_state $raid_bdev_name "configuring" $raid_level $strip_size $num_base_bdevs

			# Delete remaining base bdevs
			for ((i = 1; i < num_base_bdevs; i++)); do
				$rpc_py bdev_passthru_delete ${base_bdevs_pt[$i]}
			done

			# Re-add the last base bdev
			i=$((num_base_bdevs - 1))
			$rpc_py bdev_passthru_create -b ${base_bdevs_malloc[$i]} -p ${base_bdevs_pt[$i]} -u ${base_bdevs_pt_uuid[$i]}

			# Check if the RAID bdev is in configuring state
			# This should use the newer superblock version and have n-1 online base bdevs
			verify_raid_bdev_state $raid_bdev_name "configuring" $raid_level $strip_size $((num_base_bdevs - 1))

			# Re-add remaining base bdevs
			for ((i = 1; i < num_base_bdevs - 1; i++)); do
				$rpc_py bdev_passthru_create -b ${base_bdevs_malloc[$i]} -p ${base_bdevs_pt[$i]} -u ${base_bdevs_pt_uuid[$i]}
			done

			# Check if the RAID bdev is in online state (degraded)
			verify_raid_bdev_state $raid_bdev_name "online" $raid_level $strip_size $((num_base_bdevs - 1))
		fi

		# Check if the RAID bdev has the same UUID as when first created
		if [ "$($rpc_py bdev_get_bdevs -b $raid_bdev_name | jq -r '.[] | .uuid')" != "$raid_bdev_uuid" ]; then
			return 1
		fi
	fi

	killprocess $raid_pid

	return 0
}
515
function raid_rebuild_test() {
	# Exercise raid rebuild: create a raid bdev, fill it with data (or run
	# background I/O), remove a base bdev, rebuild onto a slowed-down
	# "spare" bdev, and verify the rebuilt data matches the original.
	# Arguments: $1 - RAID level (rebuild with background I/O only for raid1)
	#            $2 - number of base bdevs
	#            $3 - true to create the raid bdev with an on-disk superblock
	#            $4 - true to run bdevperf background I/O during the rebuild
	local raid_level=$1
	local num_base_bdevs=$2
	local superblock=$3
	local background_io=$4
	local base_bdevs=($(for ((i = 1; i <= num_base_bdevs; i++)); do echo BaseBdev$i; done))
	local raid_bdev_name="raid_bdev1"
	local strip_size
	local create_arg
	local raid_bdev_size
	local data_offset

	if [ $raid_level != "raid1" ]; then
		if [ $background_io = true ]; then
			echo "skipping rebuild test with io for level $raid_level"
			return 1
		fi
		strip_size=64
		create_arg+=" -z $strip_size"
	else
		strip_size=0
	fi

	if [ $superblock = true ]; then
		create_arg+=" -s"
	fi

	"$rootdir/build/examples/bdevperf" -r $rpc_server -T $raid_bdev_name -t 60 -w randrw -M 50 -o 3M -q 2 -U -z -L bdev_raid &
	raid_pid=$!
	waitforlisten $raid_pid $rpc_server

	# Create base bdevs; with a superblock they are wrapped in passthru
	# bdevs so they can be detached and re-attached without data loss.
	for bdev in "${base_bdevs[@]}"; do
		if [ $superblock = true ]; then
			$rpc_py bdev_malloc_create 32 512 -b ${bdev}_malloc
			$rpc_py bdev_passthru_create -b ${bdev}_malloc -p $bdev
		else
			$rpc_py bdev_malloc_create 32 512 -b $bdev
		fi
	done

	# Create spare bdev; the delay bdev slows writes so the rebuild stays
	# in progress long enough to be observed and interrupted.
	$rpc_py bdev_malloc_create 32 512 -b "spare_malloc"
	$rpc_py bdev_delay_create -b "spare_malloc" -d "spare_delay" -r 0 -t 0 -w 100000 -n 100000
	$rpc_py bdev_passthru_create -b "spare_delay" -p "spare"

	# Create RAID bdev
	$rpc_py bdev_raid_create $create_arg -r $raid_level -b "${base_bdevs[*]}" -n $raid_bdev_name
	verify_raid_bdev_state $raid_bdev_name "online" $raid_level $strip_size $num_base_bdevs

	# Get RAID bdev's size
	raid_bdev_size=$($rpc_py bdev_get_bdevs -b $raid_bdev_name | jq -r '.[].num_blocks')

	# Get base bdev's data offset
	data_offset=$($rpc_py bdev_raid_get_bdevs all | jq -r '.[].base_bdevs_list[0].data_offset')

	if [ $background_io = true ]; then
		# Start user I/O
		"$rootdir/examples/bdev/bdevperf/bdevperf.py" -s $rpc_server perform_tests &
	else
		local write_unit_size

		# Write random data to the RAID bdev
		nbd_start_disks $rpc_server $raid_bdev_name /dev/nbd0
		if [ $raid_level = "raid5f" ]; then
			# Align writes to the full stripe so raid5f accepts them.
			write_unit_size=$((strip_size * 2 * (num_base_bdevs - 1)))
			echo $((512 * write_unit_size / 1024)) > /sys/block/nbd0/queue/max_sectors_kb
		else
			write_unit_size=1
		fi
		dd if=/dev/urandom of=/dev/nbd0 bs=$((512 * write_unit_size)) count=$((raid_bdev_size / write_unit_size)) oflag=direct
		nbd_stop_disks $rpc_server /dev/nbd0
	fi

	# Remove one base bdev
	$rpc_py bdev_raid_remove_base_bdev ${base_bdevs[0]}

	# Check if the RAID bdev is in online state (degraded)
	verify_raid_bdev_state $raid_bdev_name "online" $raid_level $strip_size $((num_base_bdevs - 1))

	# Add bdev for rebuild
	$rpc_py bdev_raid_add_base_bdev $raid_bdev_name "spare"
	sleep 1

	# Check if rebuild started
	verify_raid_bdev_process $raid_bdev_name "rebuild" "spare"

	# Remove the rebuild target bdev
	$rpc_py bdev_raid_remove_base_bdev "spare"

	# Check if the RAID bdev is in online state (degraded)
	verify_raid_bdev_state $raid_bdev_name "online" $raid_level $strip_size $((num_base_bdevs - 1))

	# Check if rebuild was stopped
	verify_raid_bdev_process $raid_bdev_name "none" "none"

	# Again, start the rebuild
	$rpc_py bdev_raid_add_base_bdev $raid_bdev_name "spare"
	sleep 1
	verify_raid_bdev_process $raid_bdev_name "rebuild" "spare"

	# Fix: this previously tested the undefined $with_io, so the [ ... ]
	# expression was a syntax error and the re-assembly branch below was
	# silently skipped for every invocation.
	if [ $superblock = true ] && [ $background_io = false ]; then
		# Stop the RAID bdev
		$rpc_py bdev_raid_delete $raid_bdev_name
		[[ $($rpc_py bdev_raid_get_bdevs all | jq 'length') == 0 ]]

		# Remove the passthru base bdevs, then re-add them to assemble the raid bdev again
		for ((i = 0; i < num_base_bdevs; i++)); do
			$rpc_py bdev_passthru_delete ${base_bdevs[$i]}
		done
		for ((i = 0; i < num_base_bdevs; i++)); do
			$rpc_py bdev_passthru_create -b ${base_bdevs[$i]}_malloc -p ${base_bdevs[$i]}
		done

		# Check if the RAID bdev is in online state (degraded)
		verify_raid_bdev_state $raid_bdev_name "online" $raid_level $strip_size $((num_base_bdevs - 1))

		# Check if rebuild is not started
		verify_raid_bdev_process $raid_bdev_name "none" "none"

		# Again, start the rebuild
		$rpc_py bdev_raid_add_base_bdev $raid_bdev_name "spare"
		sleep 1
		verify_raid_bdev_process $raid_bdev_name "rebuild" "spare"
	fi

	local num_base_bdevs_operational=$num_base_bdevs

	if [ $raid_level = "raid1" ] && [ $num_base_bdevs -gt 2 ]; then
		# Remove one more base bdev (not rebuild target)
		$rpc_py bdev_raid_remove_base_bdev ${base_bdevs[1]}

		# Ignore this bdev later when comparing data
		base_bdevs[1]=""
		((num_base_bdevs_operational--))

		# Check if rebuild is still running
		verify_raid_bdev_process $raid_bdev_name "rebuild" "spare"
	fi

	# Wait for rebuild to finish (up to 30 seconds)
	local timeout=$((SECONDS + 30))
	while ((SECONDS < timeout)); do
		if ! verify_raid_bdev_process $raid_bdev_name "rebuild" "spare" > /dev/null; then
			break
		fi
		sleep 1
	done

	# Check if rebuild is not running and the RAID bdev has the correct number of operational devices
	verify_raid_bdev_process $raid_bdev_name "none" "none"
	verify_raid_bdev_state $raid_bdev_name "online" $raid_level $strip_size $num_base_bdevs_operational

	# Stop the RAID bdev
	$rpc_py bdev_raid_delete $raid_bdev_name
	[[ $($rpc_py bdev_raid_get_bdevs all | jq 'length') == 0 ]]

	if [ $background_io = true ]; then
		# Compare data on the rebuilt and other base bdevs
		nbd_start_disks $rpc_server "spare" "/dev/nbd0"
		for bdev in "${base_bdevs[@]:1}"; do
			if [ -z "$bdev" ]; then
				continue
			fi
			nbd_start_disks $rpc_server $bdev "/dev/nbd1"
			# Skip the superblock area: user data starts at data_offset blocks.
			cmp -i $((data_offset * 512)) /dev/nbd0 /dev/nbd1
			nbd_stop_disks $rpc_server "/dev/nbd1"
		done
		nbd_stop_disks $rpc_server "/dev/nbd0"
	else
		# Compare data on the removed and rebuilt base bdevs
		nbd_start_disks $rpc_server "${base_bdevs[0]} spare" "/dev/nbd0 /dev/nbd1"
		cmp -i $((data_offset * 512)) /dev/nbd0 /dev/nbd1
		nbd_stop_disks $rpc_server "/dev/nbd0 /dev/nbd1"
	fi

	if [ $superblock = true ]; then
		# Remove the passthru base bdevs, then re-add them to assemble the raid bdev again
		for bdev in "${base_bdevs[@]}"; do
			if [ -z "$bdev" ]; then
				continue
			fi
			$rpc_py bdev_passthru_delete $bdev
			$rpc_py bdev_passthru_create -b ${bdev}_malloc -p $bdev
		done
		$rpc_py bdev_passthru_delete "spare"
		$rpc_py bdev_passthru_create -b "spare_delay" -p "spare"

		verify_raid_bdev_state $raid_bdev_name "online" $raid_level $strip_size $num_base_bdevs_operational
		verify_raid_bdev_process $raid_bdev_name "none" "none"
		[[ $($rpc_py bdev_raid_get_bdevs all | jq -r '.[].base_bdevs_list[0].name') == "spare" ]]
	fi

	killprocess $raid_pid

	return 0
}
713
# Any failing command aborts the run through the cleanup handler.
trap 'on_error_exit;' ERR

# Basic raid function tests need the nbd kernel module (Linux only).
if [ $(uname -s) = Linux ] && modprobe -n nbd; then
	has_nbd=true
	modprobe nbd
	run_test "raid_function_test_raid0" raid_function_test raid0
	run_test "raid_function_test_concat" raid_function_test concat
fi

run_test "raid0_resize_test" raid0_resize_test

# State-machine and superblock tests for 2-4 base bdevs at each level.
for n in {2..4}; do
	for level in raid0 concat raid1; do
		run_test "raid_state_function_test" raid_state_function_test $level $n false
		run_test "raid_state_function_test_sb" raid_state_function_test $level $n true
		run_test "raid_superblock_test" raid_superblock_test $level $n
	done
done

# Rebuild tests verify data over nbd, so they also require the module.
if [ "$has_nbd" = true ]; then
	for n in 2 4; do
		run_test "raid_rebuild_test" raid_rebuild_test raid1 $n false false
		run_test "raid_rebuild_test_sb" raid_rebuild_test raid1 $n true false
		run_test "raid_rebuild_test_io" raid_rebuild_test raid1 $n false true
		run_test "raid_rebuild_test_sb_io" raid_rebuild_test raid1 $n true true
	done
fi

# raid5f requires at least 3 base bdevs and a build with CONFIG_RAID5F=y.
if [ "$CONFIG_RAID5F" == y ]; then
	for n in {3..4}; do
		run_test "raid5f_state_function_test" raid_state_function_test raid5f $n false
		run_test "raid5f_state_function_test_sb" raid_state_function_test raid5f $n true
		run_test "raid5f_superblock_test" raid_superblock_test raid5f $n
		if [ "$has_nbd" = true ]; then
			run_test "raid5f_rebuild_test" raid_rebuild_test raid5f $n false false
			run_test "raid5f_rebuild_test_sb" raid_rebuild_test raid5f $n true false
		fi
	done
fi

rm -f $tmp_file
755