#!/usr/bin/env bash
#  SPDX-License-Identifier: BSD-3-Clause
#  Copyright (C) 2018 Intel Corporation
#  All rights reserved.
#
source "$rootdir/test/dd/common.sh"
source "$rootdir/test/scheduler/common.sh"

xtrace_disable
map_cpus
xtrace_restore

function discover_bdevs() {
	local rootdir=$1
	local config_file=$2
	local wait_for_spdk_bdev=90
	local rpc_server=/var/tmp/spdk-discover-bdevs.sock

	if [ ! -e $config_file ]; then
		echo "Invalid Configuration File: $config_file"
		return 1
	fi

	# Start the bdev service to query for the list of available
	# bdevs.
	$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 \
		--json $config_file &> /dev/null &
	stubpid=$!
	while ! [ -e /var/run/spdk_bdev0 ]; do
		# If this counter drops to zero, the arithmetic expansion below returns non-zero and errexit aborts the test
		((wait_for_spdk_bdev--))
		sleep 1
	done

	# Get all of the bdevs
	$rootdir/scripts/rpc.py -s "$rpc_server" bdev_get_bdevs

	# Shut down the bdev service
	kill $stubpid
	wait $stubpid
	rm -f /var/run/spdk_bdev0
}
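
# Example usage of discover_bdevs() above; it prints the bdev list as JSON on
# stdout (paths are the ones this script already uses):
#   bdevs=$(discover_bdevs "$rootdir" "$testdir/bdev.conf")
#   jq -r '.[].name' <<< "$bdevs"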

function get_disk_cfg() {
	grep -vP "^\s*#" "$DISKCFG"
}

function create_spdk_bdev_conf() {
	local output
	local disk_cfg
	local bdev_io_cache_size=$1
	local bdev_io_pool_size=$2
	local bdev_json_cfg=()
	local bdev_opts=()
	local i

	disk_cfg=($(get_disk_cfg))

	if [[ -n "$bdev_io_cache_size" ]]; then
		bdev_opts+=("\"bdev_io_cache_size\": $bdev_io_cache_size")
	fi

	if [[ -n "$bdev_io_pool_size" ]]; then
		bdev_opts+=("\"bdev_io_pool_size\": $bdev_io_pool_size")
	fi

	local IFS=","
	if [[ ${#bdev_opts[@]} -gt 0 ]]; then
		bdev_json_cfg+=("$(
			cat <<- JSON
				{
					"method": "bdev_set_options",
					"params": {
						${bdev_opts[*]}
					}
				}
			JSON
		)")
	fi

	for i in "${!disk_cfg[@]}"; do
		bdev_json_cfg+=("$(
			cat <<- JSON
				{
					"method": "bdev_nvme_attach_controller",
					"params": {
						"trtype": "PCIe",
						"name":"Nvme${i}",
						"traddr":"${disk_cfg[i]}"
					}
				}
			JSON
		)")
	done

	local IFS=","
	jq -r '.' <<- JSON > $testdir/bdev.conf
		{
			"subsystems": [
				{
					"subsystem": "bdev",
					"config": [
						${bdev_json_cfg[*]},
						{
							"method": "bdev_wait_for_examine"
						}
					]
				}
			]
		}
	JSON
}
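
# For reference, a hypothetical call like "create_spdk_bdev_conf 256 4096" is
# expected to leave a config in $testdir/bdev.conf roughly of this shape:
#   { "subsystems": [ { "subsystem": "bdev", "config": [
#       { "method": "bdev_set_options", "params": { "bdev_io_cache_size": 256, "bdev_io_pool_size": 4096 } },
#       { "method": "bdev_nvme_attach_controller", "params": { "trtype": "PCIe", "name": "Nvme0", "traddr": "0000:1a:00.0" } },
#       { "method": "bdev_wait_for_examine" } ] } ] }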

function is_bdf_not_mounted() {
	local bdf=$1
	local blkname
	local mountpoints
	blkname=$(ls -l /sys/block/ | grep $bdf | awk '{print $9}')
	mountpoints=$(lsblk /dev/$blkname --output MOUNTPOINT -n | wc -w)
	return $mountpoints
}
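
# is_bdf_not_mounted() returns 0 (success) only when the disk at the given BDF
# has no mounted filesystems, so it can be used as a guard, e.g. with a
# hypothetical address:
#   if is_bdf_not_mounted 0000:1a:00.0; then echo "disk is safe to use"; fi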

function get_cores() {
	parse_cpu_list <(echo "$1")
}

function get_cores_numa_node() {
	local cores=$1
	for core in $cores; do
		lscpu -p=cpu,node | grep "^$core\b" | awk -F ',' '{print $2}'
	done
}

function get_numa_node() {
	local plugin=$1
	local disks=$2
	if [[ "$plugin" =~ "nvme" ]]; then
		for bdf in $disks; do
			local driver
			driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
			# Use this check to omit blocked devices (not bound to driver with setup.sh script)
			if [ "$driver" = "vfio-pci" ] || [ "$driver" = "uio_pci_generic" ]; then
				cat /sys/bus/pci/devices/$bdf/numa_node
			fi
		done
	elif [[ "$plugin" =~ "bdev" ]]; then
		local bdevs
		bdevs=$(discover_bdevs $rootdir $testdir/bdev.conf)
		for name in $disks; do
			local bdev_bdf
			bdev_bdf=$(jq -r ".[] | select(.name==\"$name\").driver_specific.nvme[].pci_address" <<< "$bdevs")
			cat /sys/bus/pci/devices/$bdev_bdf/numa_node
		done
	else
		for name in $disks; do
			cat "/sys/block/$name/device/numa_node"
		done
	fi
}
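
# get_numa_node() prints one NUMA node id per disk, in the order the disks were
# passed in. Hypothetical examples:
#   get_numa_node "nvme" "0000:1a:00.0 0000:b0:00.0"  # sysfs lookup by PCI address
#   get_numa_node "kernel-libaio" "nvme0n1"           # falls through to /sys/block/<name>/device/numa_node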

function get_disks() {
	local plugin=$1
	local disk_cfg=($(get_disk_cfg))

	if [[ "$plugin" =~ "nvme" ]]; then
		# PCI BDF address is enough for nvme-perf and nvme-fio-plugin,
		# so just print them from configuration file
		echo "${disk_cfg[*]}"
	elif [[ "$plugin" =~ "bdev" ]]; then
		# Generate NvmeXn1 bdev name configuration file for bdev-perf
		# and bdev-fio-plugin
		local bdevs
		local disk_no
		disk_no=${#disk_cfg[@]}
		eval echo "Nvme{0..$((disk_no - 1))}n1"
	else
		# Find nvme block devices and only use the ones which
		# are not mounted
		for bdf in "${disk_cfg[@]}"; do
			if is_bdf_not_mounted $bdf; then
				local blkname
				blkname=$(ls -l /sys/block/ | grep $bdf | awk '{print $9}')
				echo $blkname
			fi
		done
	fi
}
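
# get_disks() output depends on the plugin; with a two-disk DISKCFG it would
# look roughly like this (addresses hypothetical):
#   get_disks nvme          # -> "0000:1a:00.0 0000:b0:00.0"
#   get_disks bdev          # -> "Nvme0n1 Nvme1n1"
#   get_disks kernel-libaio # -> unmounted kernel block device names, e.g. "nvme0n1 nvme1n1"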

function get_disks_on_numa() {
	local devs=($1)
	local numas=($2)
	local numa_no=$3
	local disks_on_numa=""
	local i

	for ((i = 0; i < ${#devs[@]}; i++)); do
		if [ ${numas[$i]} = $numa_no ]; then
			disks_on_numa=$((disks_on_numa + 1))
		fi
	done
	echo $disks_on_numa
}
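
# get_disks_on_numa() just counts how many entries in the numa list match the
# requested node, e.g.:
#   get_disks_on_numa "nvme0n1 nvme1n1 nvme2n1" "0 1 0" 0   # prints 2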

function set_potential_poll_threads() {
	local _cpus=("$@") all_fio_cpus=() cpu _cpu poll_thread
	local -g sqpoll_cpu_threads
	local node

	# Collect all siblings in case smt is enabled.
	for cpu in "${_cpus[@]}"; do
		for _cpu in "${!cpu_siblings[cpu]}"; do
			all_fio_cpus[_cpu]=$cpu
		done
	done

	# Move fio's polling thread to a different cpu. In case smt is enabled, we try to move it to a completely
	# different core under the same numa node. Note that we can set sqthread_poll_cpu only to a single cpu -
	# in case NUMJOBS is > 1 we will have multiple poll threads using that particular cpu. FIXME: what would be
	# the potential impact here of spamming poll threads like that? In case of higher numjobs, does it make
	# sense to still use sqthread_poll?
	#
	# Here we build list of all potential candidates to hold sqpoll thread. Lists are per node and should hold
	# all cpus outside of the fio cpus that were requested. So each get_sqthread_poll_cpu() should return a cpu
	# thread which is guaranteed to be outside of the physical core of each of fio cpus (but still bound to
	# the same numa node as the fio cpu selected for given job).
	for cpu in "${cpus[@]}"; do
		[[ -n ${all_fio_cpus[cpu]} ]] && continue
		node=${cpu_node_map[cpu]}
		local -n sqpoll_node=sqpoll_threads_node_$node
		sqpoll_node+=("$cpu")
		sqpoll_cpu_threads[node]=sqpoll_threads_node_$node
	done
}
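
# After this call, sqpoll_cpu_threads[] maps each NUMA node to the name of an
# array (sqpoll_threads_node_<node>) that lists cpus still free for sqpoll use.
# create_fio_config() below invokes it with the fio cpu list, commas converted
# to spaces, e.g.:
#   set_potential_poll_threads ${cores[@]//,/ }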

function get_sqthread_poll_cpu() {
	((${#sqpoll_cpu_threads[@]} > 0)) || return 0

	local node=$1 idx=$2
	local -n node_cpus=${sqpoll_cpu_threads[node]}

	# Default to the highest cpu
	echo "sqthread_poll_cpu=${node_cpus[idx]:-${cpus[-1]}}"
}
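
# get_sqthread_poll_cpu() emits a single "sqthread_poll_cpu=<cpu>" line for the
# fio job, falling back to the last cpu in the global cpus[] array when no
# candidate is left for the requested node/index (values below are hypothetical):
#   get_sqthread_poll_cpu 0 2   # -> e.g. "sqthread_poll_cpu=14"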

function create_fio_config() {
	local disk_no=$1
	local plugin=$2
	local disks=($3)
	local disks_numa=($4)
	local cores=($5)
	local total_disks=${#disks[@]}
	local fio_job_section=()
	local num_cores=${#cores[@]}
	local disks_per_core=$((disk_no / num_cores))
	local disks_per_core_mod=$((disk_no % num_cores))
	local cores_numa
	cores_numa=($(get_cores_numa_node "${cores[*]}"))

	# The following part of this function still leverages global variables a lot.
	# It mixes local variables passed in as arguments with globals, which is messy.
	# TODO: Make variable usage here consistent. Aim for using only local variables
	# and get rid of globals as much as possible.
	desc="\"Test io_plugin=$PLUGIN Blocksize=${BLK_SIZE} Workload=$RW MIX=${MIX} qd=${IODEPTH}\""
	cp "$testdir/config.fio.tmp" "$testdir/config.fio"
	cat <<- EOF >> $testdir/config.fio
		description=$desc

		rw=$RW
		rwmixread=$MIX
		bs=$BLK_SIZE
		runtime=$RUNTIME
		ramp_time=$RAMP_TIME
		numjobs=$NUMJOBS
		log_avg_msec=$SAMPLING_INT
	EOF

	if $GTOD_REDUCE; then
		echo "gtod_reduce=1" >> $testdir/config.fio
	fi

	if [[ $PLUGIN =~ "uring" || $PLUGIN =~ "xnvme" ]]; then
		cat <<- EOF >> $testdir/config.fio
			fixedbufs=1
			hipri=1
			registerfiles=1
			sqthread_poll=1
		EOF
	fi

	if [[ "$IO_BATCH_SUBMIT" -gt 0 ]]; then
		echo "iodepth_batch_submit=$IO_BATCH_SUBMIT" >> $testdir/config.fio
	fi

	if [[ "$IO_BATCH_COMPLETE" -gt 0 ]]; then
		echo "iodepth_batch_complete=$IO_BATCH_COMPLETE" >> $testdir/config.fio
	fi

	# shellcheck disable=SC2068
	if [[ $PLUGIN =~ "uring" || $PLUGIN =~ "xnvme" ]]; then
		set_potential_poll_threads ${cores[@]//,/ }
	fi

	for i in "${!cores[@]}"; do
		local m=0 #Counter of disks per NUMA node
		local n=0 #Counter of all disks in test
		core_numa=${cores_numa[$i]}

		total_disks_per_core=$disks_per_core
		# Check how many "stray" disks are unassigned to CPU cores
		# Assign one disk to current CPU core and subtract it from the total of
		# unassigned disks
		if [[ "$disks_per_core_mod" -gt "0" ]]; then
			total_disks_per_core=$((disks_per_core + 1))
			disks_per_core_mod=$((disks_per_core_mod - 1))
		fi
		# SPDK fio plugin supports submitting/completing I/Os to multiple SSDs from a single thread.
		# Therefore, the per thread queue depth is set to the desired IODEPTH/device X the number of devices per thread.
		QD=$IODEPTH
		if [[ "$NOIOSCALING" == false ]]; then
			QD=$((IODEPTH * total_disks_per_core))
		fi

		if [[ "$FIO_FNAME_STRATEGY" == "group" ]]; then
			fio_job_section+=("")
			fio_job_section+=("[filename${i}]")
			fio_job_section+=("iodepth=$QD")
			fio_job_section+=("cpus_allowed=${cores[$i]} #CPU NUMA Node ${cores_numa[$i]} ($FIO_FNAME_STRATEGY)")
			fio_job_section+=("$(get_sqthread_poll_cpu "${cores_numa[i]}" "$i")")
		fi

		while [[ "$m" -lt "$total_disks_per_core" ]]; do
			# Try to add a disk to the job section if its NUMA node matches the NUMA
			# node of the currently selected CPU
			if [[ "${disks_numa[$n]}" == "$core_numa" ]]; then
				if [[ "$FIO_FNAME_STRATEGY" == "split" ]]; then
					fio_job_section+=("")
					fio_job_section+=("[filename${m}-${cores[$i]}]")
					fio_job_section+=("iodepth=$QD")
					fio_job_section+=("cpus_allowed=${cores[$i]} #CPU NUMA Node ${cores_numa[$i]}")
					fio_job_section+=("$(get_sqthread_poll_cpu "${cores_numa[i]}" "$i")")
				fi

				if [[ "$plugin" == "spdk-plugin-nvme" ]]; then
					fio_job_section+=("filename=trtype=PCIe traddr=${disks[$n]//:/.} ns=1 #NVMe NUMA Node ${disks_numa[$n]}")
				elif [[ "$plugin" == "spdk-plugin-bdev" || "$plugin" == "spdk-plugin-bdev-xnvme" ]]; then
					fio_job_section+=("filename=${disks[$n]} #NVMe NUMA Node ${disks_numa[$n]}")
				elif [[ "$plugin" =~ "kernel" ]]; then
					fio_job_section+=("filename=/dev/${disks[$n]} #NVMe NUMA Node ${disks_numa[$n]} ($FIO_FNAME_STRATEGY)")
				fi
				m=$((m + 1))

				#Mark numa of n'th disk as "x" so it is treated as claimed in next loop iterations
				disks_numa[n]="x"
			fi
			n=$((n + 1))

			# If there are no more disks with the same numa node as the cpu numa node, switch to
			# the other numa node, go back to the start of the loop and try again.
			if [[ $n -ge $total_disks ]]; then
				echo "WARNING! Cannot assign any more NVMes for CPU ${cores[$i]}"
				echo "NVMe assignment for this CPU will be cross-NUMA."
				if [[ "$core_numa" == "1" ]]; then
					core_numa=0
				else
					core_numa=1
				fi
				n=0
			fi
		done
	done

	printf "%s\n" "${fio_job_section[@]}" >> $testdir/config.fio
	echo "INFO: Generated fio configuration file:"
	cat $testdir/config.fio
}
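
# For illustration, with FIO_FNAME_STRATEGY=group a single generated job section
# is expected to look roughly like this (values hypothetical):
#   [filename0]
#   iodepth=256
#   cpus_allowed=0 #CPU NUMA Node 0 (group)
#   filename=trtype=PCIe traddr=0000.1a.00.0 ns=1 #NVMe NUMA Node 0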

function bc() {
	$(type -P bc) -l <<< "scale=3; $1"
}
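
# bc() above is a thin wrapper that forces 3 decimal places, e.g.:
#   bc "100 / 3"   # prints 33.333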

function get_results() {
	local iops bw stdev mean_lat
	local p90_lat p99_lat p99_99_lat
	local mean_slat mean_clat
	local reads_pct
	local writes_pct

	reads_pct=$(bc "$1 / 100")
	writes_pct=$(bc "1 - $reads_pct")

	iops=$(jq -r '.jobs[] | .read.iops + .write.iops' $TMP_RESULT_FILE)
	bw=$(jq -r ".jobs[] | (.read.bw + .write.bw)" $TMP_RESULT_FILE)
	mean_lat=$(jq -r ".jobs[] | (.read.lat_ns.mean * $reads_pct + .write.lat_ns.mean * $writes_pct)/1000" $TMP_RESULT_FILE)
	p90_lat=$(jq -r ".jobs[] | ((.read.clat_ns.percentile.\"90.000000\" // 0) * $reads_pct + (.write.clat_ns.percentile.\"90.000000\" // 0) * $writes_pct)/1000" $TMP_RESULT_FILE)
	p99_lat=$(jq -r ".jobs[] | ((.read.clat_ns.percentile.\"99.000000\" // 0) * $reads_pct + (.write.clat_ns.percentile.\"99.000000\" // 0) * $writes_pct)/1000" $TMP_RESULT_FILE)
	p99_99_lat=$(jq -r ".jobs[] | ((.read.clat_ns.percentile.\"99.990000\" // 0) * $reads_pct + (.write.clat_ns.percentile.\"99.990000\" // 0) * $writes_pct)/1000" $TMP_RESULT_FILE)
	stdev=$(jq -r ".jobs[] | (.read.clat_ns.stddev * $reads_pct + .write.clat_ns.stddev * $writes_pct)/1000" $TMP_RESULT_FILE)
	mean_slat=$(jq -r ".jobs[] | (.read.slat_ns.mean * $reads_pct + .write.slat_ns.mean * $writes_pct)/1000" $TMP_RESULT_FILE)
	mean_clat=$(jq -r ".jobs[] | (.read.clat_ns.mean * $reads_pct + .write.clat_ns.mean * $writes_pct)/1000" $TMP_RESULT_FILE)

	echo "$iops $bw $mean_lat $p90_lat $p99_lat $p99_99_lat $stdev $mean_slat $mean_clat"
}
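
# get_results() takes the read percentage of the workload as its only argument
# and echoes nine space-separated values parsed from the fio JSON output in
# $TMP_RESULT_FILE, in this order (latencies converted from ns to usec):
#   iops bw mean_lat p90_lat p99_lat p99_99_lat stdev mean_slat mean_clat
# Example: get_results 70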

function get_bdevperf_results() {
	local iops
	local bw_MBs
	read -r iops bw_MBs <<< $(grep Total $TMP_RESULT_FILE | tr -s " " | awk -F ":| " '{print $5" "$7}')
	echo "$iops $(bc "$bw_MBs * 1024")"
}

function get_nvmeperf_results() {
	local iops
	local bw_MBs
	local mean_lat_usec
	local max_lat_usec
	local min_lat_usec

	read -r iops bw_MBs mean_lat_usec min_lat_usec max_lat_usec <<< $(tr -s " " < $TMP_RESULT_FILE | grep -oP "(?<=Total : )(.*+)")
	echo "$iops $(bc "$bw_MBs * 1024") $mean_lat_usec $min_lat_usec $max_lat_usec"
}
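
# Both parsers above scrape the summary line of their respective tools. For
# get_nvmeperf_results the "Total :" line of spdk_nvme_perf output is assumed
# to carry IOPS, bandwidth (MiB/s) and average/min/max latency (usec); the
# bandwidth is converted to KiB/s before being echoed back.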

function run_spdk_nvme_fio() {
	local plugin=$1
	echo "** Running fio test, this can take a while, depending on the run-time and ramp-time setting."
	if [[ "$plugin" = "spdk-plugin-nvme" ]]; then
		LD_PRELOAD=$plugin_dir/spdk_nvme $FIO_BIN $testdir/config.fio --output-format=json "${@:2}" --ioengine=spdk
	elif [[ "$plugin" = "spdk-plugin-bdev" || "$plugin" = "spdk-plugin-bdev-xnvme" ]]; then
		LD_PRELOAD=$plugin_dir/spdk_bdev $FIO_BIN $testdir/config.fio --output-format=json "${@:2}" --ioengine=spdk_bdev --spdk_json_conf=$testdir/bdev.conf --spdk_mem=4096
	fi

	sleep 1
}

function run_nvme_fio() {
	echo "** Running fio test, this can take a while, depending on the run-time and ramp-time setting."
	$FIO_BIN $testdir/config.fio --output-format=json "$@"
	sleep 1
}

function run_bdevperf() {
	local bdevperf_rpc
	local bdevperf_pid
	local rpc_socket
	local bpf_script_cmd
	local bpf_script_pid
	local bpf_app_pid
	local main_core_param=""

	bdevperf_rpc="$rootdir/examples/bdev/bdevperf/bdevperf.py"
	rpc_socket="/var/tmp/spdk.sock"

	if [[ -n $MAIN_CORE ]]; then
		main_core_param="-p ${MAIN_CORE}"
	fi

	echo "** Running bdevperf test, this can take a while, depending on the run-time setting."
	$_examples_dir/bdevperf --json $testdir/bdev.conf -q $IODEPTH -o $BLK_SIZE -w $RW -M $MIX -t $RUNTIME -m "[$CPUS_ALLOWED]" -r "$rpc_socket" $main_core_param -z &
	bdevperf_pid=$!
	waitforlisten $bdevperf_pid "$rpc_socket" 500

	if [[ ${#BPFTRACES[@]} -gt 0 ]]; then
		echo "INFO: Enabling BPF Traces ${BPFTRACES[*]}"
		bpf_script_cmd=("$rootdir/scripts/bpftrace.sh")
		bpf_script_cmd+=("$bdevperf_pid")
		for trace in "${BPFTRACES[@]}"; do
			bpf_script_cmd+=("$rootdir/scripts/bpf/$trace")
		done

		BPF_OUTFILE=$TMP_BPF_FILE "${bpf_script_cmd[@]}" &
		bpf_script_pid=$!
		sleep 3
	fi

	PYTHONPATH=$PYTHONPATH:$rootdir/python $bdevperf_rpc -s "$rpc_socket" -t $((RUNTIME + 10)) perform_tests

	# Using "-z" option causes bdevperf to NOT exit automatically after running the test,
	# so we need to stop it ourselves.
	kill -s SIGINT $bdevperf_pid
	wait $bdevperf_pid

	if ((bpf_script_pid)); then
		wait $bpf_script_pid
	fi
	sleep 1
}

function run_nvmeperf() {
	# Prepare -r argument string for nvme perf command
	local r_opt
	local disks

	# Limit the number of disks to $1 if needed
	disks=($(get_disks nvme))
	disks=("${disks[@]:0:$1}")
	r_opt=$(printf -- ' -r "trtype:PCIe traddr:%s"' "${disks[@]}")

	echo "** Running nvme perf test, this can take a while, depending on the run-time setting."

	# Run command in separate shell as this solves quoting issues related to r_opt var
	$SHELL -c "$_app_dir/spdk_nvme_perf $r_opt -q $IODEPTH -o $BLK_SIZE -w $RW -M $MIX -t $RUNTIME -c [$CPUS_ALLOWED]"
	sleep 1
}

function wait_for_nvme_reload() {
	local nvmes=$1

	for disk in $nvmes; do
		cmd="ls /sys/block/$disk/queue/*@(iostats|rq_affinity|nomerges|io_poll_delay)*"
		until $cmd 2> /dev/null; do
			echo "Waiting for full nvme driver reload..."
			sleep 0.5
		done
	done
}

function verify_disk_number() {
	# Check if we have appropriate number of disks to carry out the test
	disks=($(get_disks $PLUGIN))
	if [[ $DISKNO == "ALL" ]] || [[ $DISKNO == "all" ]]; then
		DISKNO=${#disks[@]}
	elif [[ $DISKNO -gt ${#disks[@]} ]] || [[ ! $DISKNO =~ ^[0-9]+$ ]]; then
		echo "error: Requested number of devices ($DISKNO) is not a valid number or is larger than the number of devices found (${#disks[@]})"
		false
	fi
}

function create_spdk_xnvme_bdev_conf() {
	local bdev_io_cache_size=$1 bdev_io_pool_size=$2
	local blocks block_idx io_mechanism=io_uring

	(($#)) && local -A method_bdev_set_options_0

	blocks=($(get_disks))

	if [[ -n $bdev_io_cache_size ]]; then
		method_bdev_set_options_0["bdev_io_cache_size"]=$bdev_io_cache_size
	fi
	if [[ -n $bdev_io_pool_size ]]; then
		method_bdev_set_options_0["bdev_io_pool_size"]=$bdev_io_pool_size
	fi

	for block_idx in "${!blocks[@]}"; do
		local -A method_bdev_xnvme_create_$block_idx
		local -n rpc_ref=method_bdev_xnvme_create_$block_idx
		rpc_ref["filename"]=/dev/${blocks[block_idx]}
		rpc_ref["io_mechanism"]=$io_mechanism
		rpc_ref["name"]=${blocks[block_idx]}
	done
	gen_conf > "$testdir/bdev.conf"
}
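
# gen_conf (provided by test/dd/common.sh, sourced at the top of this file)
# turns the method_* associative arrays declared above into a JSON config; a
# hypothetical result for a single /dev/nvme0n1 would contain one
# "bdev_xnvme_create" entry with the filename, io_mechanism and name taken from
# method_bdev_xnvme_create_0.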

# LVOL support functions
function start_spdk_tgt() {
	$SPDK_BIN_DIR/spdk_tgt -g &
	spdk_tgt_pid=$!

	waitforlisten $spdk_tgt_pid
}

function stop_spdk_tgt() {
	killprocess $spdk_tgt_pid
}

function attach_bdevs() {
	local disk_cfg=($(get_disk_cfg))
	local i
	for i in "${!disk_cfg[@]}"; do
		$rpc_py bdev_nvme_attach_controller -b "Nvme${i}" -t pcie -a "${disk_cfg[i]}"
		echo "Attached NVMe bdev Nvme${i} with BDF ${disk_cfg[i]}"
	done
}

function cleanup_lvol_cfg() {
	local -a lvol_stores
	local -a lvol_bdevs
	local lvol_store lvol_bdev

	echo "Cleanup lvols"
	lvol_stores=($($rpc_py bdev_lvol_get_lvstores | jq -r '.[].uuid'))
	for lvol_store in "${lvol_stores[@]}"; do
		lvol_bdevs=($($rpc_py bdev_lvol_get_lvols -u $lvol_store | jq -r '.[].uuid'))
		for lvol_bdev in "${lvol_bdevs[@]}"; do
			$rpc_py bdev_lvol_delete $lvol_bdev
			echo "lvol bdev $lvol_bdev removed"
		done

		$rpc_py bdev_lvol_delete_lvstore -u $lvol_store
		echo "lvol store $lvol_store removed"
	done
}

function cleanup_lvols() {
	start_spdk_tgt
	attach_bdevs
	cleanup_lvol_cfg
	stop_spdk_tgt
}

function create_lvols() {
	start_spdk_tgt
	attach_bdevs
	cleanup_lvol_cfg

	nvme_bdevs=($($rpc_py bdev_get_bdevs | jq -r '.[].name'))
	for nvme_bdev in "${nvme_bdevs[@]}"; do
		ls_guid=$($rpc_py bdev_lvol_create_lvstore $nvme_bdev lvs_0 --clear-method none)
		echo "Created LVOL Store $ls_guid on Bdev $nvme_bdev"

		free_mb=$(get_lvs_free_mb "$ls_guid")
		lb_name=$($rpc_py bdev_lvol_create -u $ls_guid lbd_0 $free_mb --clear-method none)
		LVOL_BDEVS+=("$lb_name")
		echo "Created LVOL Bdev $lb_name ($free_mb MB) on Lvol Store $ls_guid on Bdev $nvme_bdev"
	done

	stop_spdk_tgt
}