#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $testdir/nbd_common.sh

rpc_py=rpc_cmd
conf_file="$testdir/bdev.json"
# Make sure the configuration is clean
: > "$conf_file"

function cleanup() {
	rm -f "$SPDK_TEST_STORAGE/aiofile"
	rm -f "$SPDK_TEST_STORAGE/spdk-pmem-pool"
	rm -f "$conf_file"

	if [[ $test_type == rbd ]]; then
		rbd_cleanup
	fi

	if [[ "$test_type" = "gpt" ]]; then
		"$rootdir/scripts/setup.sh" reset
		if [[ -b $gpt_nvme ]]; then
			wipefs --all "$gpt_nvme"
		fi
	fi
}

function start_spdk_tgt() {
	"$SPDK_BIN_DIR/spdk_tgt" &
	spdk_tgt_pid=$!
	trap 'killprocess "$spdk_tgt_pid"; exit 1' SIGINT SIGTERM EXIT
	waitforlisten "$spdk_tgt_pid"
}

function setup_bdev_conf() {
	"$rpc_py" <<- RPC
		bdev_split_create Malloc1 2
		bdev_split_create -s 4 Malloc2 8
		bdev_malloc_create -b Malloc0 32 512
		bdev_malloc_create -b Malloc1 32 512
		bdev_malloc_create -b Malloc2 32 512
		bdev_malloc_create -b Malloc3 32 512
		bdev_malloc_create -b Malloc4 32 512
		bdev_malloc_create -b Malloc5 32 512
		bdev_passthru_create -p TestPT -b Malloc3
		bdev_raid_create -n raid0 -z 64 -r 0 -b "Malloc4 Malloc5"
		bdev_set_qos_limit --rw_mbytes_per_sec 100 Malloc3
		bdev_set_qos_limit --rw_ios_per_sec 20000 Malloc0
	RPC
	if [[ $(uname -s) != "FreeBSD" ]]; then
		dd if=/dev/zero of="$SPDK_TEST_STORAGE/aiofile" bs=2048 count=5000
		"$rpc_py" bdev_aio_create "$SPDK_TEST_STORAGE/aiofile" AIO0 2048
	fi
}

function setup_nvme_conf() {
	local json
	mapfile -t json < <("$rootdir/scripts/gen_nvme.sh")
	"$rpc_py" load_subsystem_config -j "'${json[*]}'"
}

function setup_gpt_conf() {
	$rootdir/scripts/setup.sh reset
	# Get nvme devices by following drivers' links towards nvme class
	local nvme_devs=(/sys/bus/pci/drivers/nvme/*/nvme/nvme*/nvme*n*) nvme_dev
	gpt_nvme=""
	# Pick first device which doesn't have any valid partition table
	for nvme_dev in "${nvme_devs[@]}"; do
		dev=/dev/${nvme_dev##*/}
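		# parted fails for disks without a recognised label; only such devices are considered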
		if ! pt=$(parted "$dev" -ms print 2>&1); then
			[[ $pt == *"$dev: unrecognised disk label"* ]] || continue
			gpt_nvme=$dev
			break
		fi
	done
	if [[ -n $gpt_nvme ]]; then
		# Create gpt partition table
		parted -s "$gpt_nvme" mklabel gpt mkpart first '0%' '50%' mkpart second '50%' '100%'
		# Change the partition type GUIDs to the SPDK GUID value
		SPDK_GPT_GUID=$(get_spdk_gpt)
		sgdisk -t "1:$SPDK_GPT_GUID" "$gpt_nvme"
		sgdisk -t "2:$SPDK_GPT_GUID" "$gpt_nvme"
		"$rootdir/scripts/setup.sh"
		"$rpc_py" bdev_get_bdevs
		setup_nvme_conf
	else
		printf 'Did not find any nvme block devices to work with, aborting the test\n' >&2
		"$rootdir/scripts/setup.sh"
		return 1
	fi
}

function setup_crypto_aesni_conf() {
	# Malloc0 and Malloc1 use AESNI
	"$rpc_py" <<- RPC
		bdev_malloc_create -b Malloc0 16 512
		bdev_malloc_create -b Malloc1 16 512
		bdev_crypto_create Malloc0 crypto_ram crypto_aesni_mb 0123456789123456
		bdev_crypto_create Malloc1 crypto_ram2 crypto_aesni_mb 9012345678912345
	RPC
}

function setup_crypto_qat_conf() {
	# Malloc0 will use QAT AES_CBC
	# Malloc1 will use QAT AES_XTS
	"$rpc_py" <<- RPC
		bdev_malloc_create -b Malloc0 16 512
		bdev_malloc_create -b Malloc1 16 512
		bdev_crypto_create Malloc0 crypto_ram crypto_qat 0123456789123456
		bdev_crypto_create -c AES_XTS -k2 0123456789123456 Malloc1 crypto_ram3 crypto_qat 0123456789123456
		bdev_get_bdevs -b Malloc1
	RPC
}

function setup_pmem_conf() {
	if hash pmempool; then
		rm -f "$SPDK_TEST_STORAGE/spdk-pmem-pool"
		pmempool create blk --size=32M 512 "$SPDK_TEST_STORAGE/spdk-pmem-pool"
		"$rpc_py" bdev_pmem_create -n Pmem0 "$SPDK_TEST_STORAGE/spdk-pmem-pool"
	else
		return 1
	fi
}

function setup_rbd_conf() {
	timing_enter rbd_setup
	rbd_setup 127.0.0.1
	timing_exit rbd_setup

	"$rpc_py" bdev_rbd_create -b Ceph0 rbd foo 512
}

function bdev_bounds() {
	$testdir/bdevio/bdevio -w -s $PRE_RESERVED_MEM --json "$conf_file" &
	bdevio_pid=$!
	trap 'cleanup; killprocess $bdevio_pid; exit 1' SIGINT SIGTERM EXIT
	echo "Process bdevio pid: $bdevio_pid"
	waitforlisten $bdevio_pid
	$testdir/bdevio/tests.py perform_tests
	killprocess $bdevio_pid
	trap - SIGINT SIGTERM EXIT
}

function nbd_function_test() {
	if [ $(uname -s) = Linux ] && modprobe -n nbd; then
		local rpc_server=/var/tmp/spdk-nbd.sock
		local conf=$1
		local nbd_all=($(ls /dev/nbd* | grep -v p))
		local bdev_all=($bdevs_name)
		local nbd_num=${#bdev_all[@]}
		if ((nbd_num < 1)); then
			# There should be at least one bdev and one valid nbd device
			return 1
		fi
		if [ ${#nbd_all[@]} -le $nbd_num ]; then
			nbd_num=${#nbd_all[@]}
		fi
		local nbd_list=(${nbd_all[@]:0:$nbd_num})
		local bdev_list=(${bdev_all[@]:0:$nbd_num})

		if [ ! -e $conf ]; then
			return 1
		fi

		modprobe nbd
		$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 --json "$conf" &
		nbd_pid=$!
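		# If the test is interrupted, remove temp files and kill the bdev_svc app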
		trap 'cleanup; killprocess $nbd_pid; exit 1' SIGINT SIGTERM EXIT
		echo "Process nbd pid: $nbd_pid"
		waitforlisten $nbd_pid $rpc_server

		nbd_rpc_start_stop_verify $rpc_server "${bdev_list[*]}"
		nbd_rpc_data_verify $rpc_server "${bdev_list[*]}" "${nbd_list[*]}"

		killprocess $nbd_pid
		trap - SIGINT SIGTERM EXIT
	fi

	return 0
}

function fio_test_suite() {
	# Generate the fio config file given the list of all unclaimed bdevs
	fio_config_gen $testdir/bdev.fio verify AIO
	for b in $(echo $bdevs | jq -r '.name'); do
		echo "[job_$b]" >> $testdir/bdev.fio
		echo "filename=$b" >> $testdir/bdev.fio
	done

	local fio_params="--ioengine=spdk_bdev --iodepth=8 --bs=4k --runtime=10 $testdir/bdev.fio --spdk_json_conf=$conf_file"

	run_test "bdev_fio_rw_verify" fio_bdev $fio_params --spdk_mem=$PRE_RESERVED_MEM \
		--output=$output_dir/blockdev_fio_verify.txt
	rm -f ./*.state
	rm -f $testdir/bdev.fio

	# Generate the fio config file given the list of all unclaimed bdevs that support unmap
	fio_config_gen $testdir/bdev.fio trim
	if [ "$(echo $bdevs | jq -r 'select(.supported_io_types.unmap == true) | .name')" != "" ]; then
		for b in $(echo $bdevs | jq -r 'select(.supported_io_types.unmap == true) | .name'); do
			echo "[job_$b]" >> $testdir/bdev.fio
			echo "filename=$b" >> $testdir/bdev.fio
		done
	else
		rm -f $testdir/bdev.fio
		return 0
	fi

	run_test "bdev_fio_trim" fio_bdev $fio_params --output=$output_dir/blockdev_trim.txt
	rm -f ./*.state
	rm -f $testdir/bdev.fio
}

function get_io_result() {
	local limit_type=$1
	local qos_dev=$2
	local iostat_result
	iostat_result=$($rootdir/scripts/iostat.py -d -i 1 -t $QOS_RUN_TIME | grep $qos_dev | tail -1)
	if [ $limit_type = IOPS ]; then
		iostat_result=$(awk '{print $2}' <<< $iostat_result)
	elif [ $limit_type = BANDWIDTH ]; then
		iostat_result=$(awk '{print $6}' <<< $iostat_result)
	fi

	echo ${iostat_result/.*/}
}

function run_qos_test() {
	local qos_limit=$1
	local qos_result=0

	qos_result=$(get_io_result $2 $3)
	if [ $2 = BANDWIDTH ]; then
		qos_limit=$((qos_limit * 1024))
	fi
	lower_limit=$((qos_limit * 9 / 10))
	upper_limit=$((qos_limit * 11 / 10))

	# QoS realization is related to bytes transferred. It currently has some variation.
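	# Accept results within +/-10% of the configured limit.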
	if [ $qos_result -lt $lower_limit ] || [ $qos_result -gt $upper_limit ]; then
		echo "Failed to limit the io read rate of NULL bdev by qos"
		$rpc_py bdev_malloc_delete $QOS_DEV_1
		$rpc_py bdev_null_delete $QOS_DEV_2
		killprocess $QOS_PID
		exit 1
	fi
}

function qos_function_test() {
	local qos_lower_iops_limit=1000
	local qos_lower_bw_limit=2
	local io_result=0
	local iops_limit=0
	local bw_limit=0

	io_result=$(get_io_result IOPS $QOS_DEV_1)
	# Set the IOPS limit as one quarter of the measured performance without QoS
	iops_limit=$(((io_result / 4) / qos_lower_iops_limit * qos_lower_iops_limit))
	if [ $iops_limit -gt $qos_lower_iops_limit ]; then

		# Run bdevperf with IOPS rate limit on bdev 1
		$rpc_py bdev_set_qos_limit --rw_ios_per_sec $iops_limit $QOS_DEV_1
		run_test "bdev_qos_iops" run_qos_test $iops_limit IOPS $QOS_DEV_1

		# Run bdevperf with bandwidth rate limit on bdev 2
		# Set the bandwidth limit as 1/10 of the measured performance without QoS
		bw_limit=$(get_io_result BANDWIDTH $QOS_DEV_2)
		bw_limit=$((bw_limit / 1024 / 10))
		if [ $bw_limit -lt $qos_lower_bw_limit ]; then
			bw_limit=$qos_lower_bw_limit
		fi
		$rpc_py bdev_set_qos_limit --rw_mbytes_per_sec $bw_limit $QOS_DEV_2
		run_test "bdev_qos_bw" run_qos_test $bw_limit BANDWIDTH $QOS_DEV_2

		# Run bdevperf with additional read only bandwidth rate limit on bdev 1
		$rpc_py bdev_set_qos_limit --r_mbytes_per_sec $qos_lower_bw_limit $QOS_DEV_1
		run_test "bdev_qos_ro_bw" run_qos_test $qos_lower_bw_limit BANDWIDTH $QOS_DEV_1
	else
		echo "Actual IOPS without limiting is too low - exit testing"
	fi
}

function qos_test_suite() {
	# Run bdevperf with QoS disabled first
	"$testdir/bdevperf/bdevperf" -z -m 0x2 -q 256 -o 4096 -w randread -t 60 &
	QOS_PID=$!
	echo "Process qos testing pid: $QOS_PID"
	trap 'cleanup; killprocess $QOS_PID; exit 1' SIGINT SIGTERM EXIT
	waitforlisten $QOS_PID

	$rpc_py bdev_malloc_create -b $QOS_DEV_1 128 512
	waitforbdev $QOS_DEV_1
	$rpc_py bdev_null_create $QOS_DEV_2 128 512
	waitforbdev $QOS_DEV_2

	$rootdir/test/bdev/bdevperf/bdevperf.py perform_tests &
	qos_function_test

	$rpc_py bdev_malloc_delete $QOS_DEV_1
	$rpc_py bdev_null_delete $QOS_DEV_2
	killprocess $QOS_PID
	trap - SIGINT SIGTERM EXIT
}

# Initial bdev creation and configuration
#-----------------------------------------------------
QOS_DEV_1="Malloc_0"
QOS_DEV_2="Null_1"
QOS_RUN_TIME=5

if [ $(uname -s) = Linux ]; then
	# Test dynamic memory management. All hugepages will be reserved at runtime
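	# PRE_RESERVED_MEM is passed to bdevio (-s) and fio (--spdk_mem) to size the upfront hugepage reservation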
	PRE_RESERVED_MEM=0
else
	# Dynamic memory management is not supported on BSD
	PRE_RESERVED_MEM=2048
fi

test_type=${1:-bdev}
start_spdk_tgt
case "$test_type" in
	bdev)
		setup_bdev_conf
		;;
	nvme)
		setup_nvme_conf
		;;
	gpt)
		setup_gpt_conf
		;;
	crypto_aesni)
		setup_crypto_aesni_conf
		;;
	crypto_qat)
		setup_crypto_qat_conf
		;;
	pmem)
		setup_pmem_conf
		;;
	rbd)
		setup_rbd_conf
		;;
	*)
		echo "invalid test name"
		exit 1
		;;
esac

"$rpc_py" bdev_wait_for_examine

# Generate json config and use it throughout all the tests
cat <<- CONF > "$conf_file"
	{"subsystems":[
	$("$rpc_py" save_subsystem_config -n bdev)
	]}
CONF

bdevs=$("$rpc_py" bdev_get_bdevs | jq -r '.[] | select(.claimed == false)')
bdevs_name=$(echo $bdevs | jq -r '.name')
bdev_list=($bdevs_name)
hello_world_bdev=${bdev_list[0]}
trap - SIGINT SIGTERM EXIT
killprocess "$spdk_tgt_pid"
# End bdev configuration
#-----------------------------------------------------

trap "cleanup" SIGINT SIGTERM EXIT

run_test "bdev_hello_world" $SPDK_EXAMPLE_DIR/hello_bdev --json "$conf_file" -b "$hello_world_bdev"
run_test "bdev_bounds" bdev_bounds
run_test "bdev_nbd" nbd_function_test $conf_file "$bdevs_name"
if [[ $CONFIG_FIO_PLUGIN == y ]]; then
	if [ "$test_type" = "nvme" ] || [ "$test_type" = "gpt" ]; then
		# TODO: once we get real multi-ns drives, re-enable this test for NVMe.
		echo "skipping fio tests on NVMe due to multi-ns failures."
	else
		run_test "bdev_fio" fio_test_suite
	fi
else
	echo "FIO not available"
	exit 1
fi

trap "cleanup" SIGINT SIGTERM EXIT

run_test "bdev_verify" $testdir/bdevperf/bdevperf --json "$conf_file" -q 128 -o 4096 -w verify -t 5 -C -m 0x3
run_test "bdev_write_zeroes" $testdir/bdevperf/bdevperf --json "$conf_file" -q 128 -o 4096 -w write_zeroes -t 1

if [[ $test_type == bdev ]]; then
	run_test "bdev_qos" qos_test_suite
fi

# Temporarily disabled - infinite loop
# if [ $RUN_NIGHTLY -eq 1 ]; then
# 	run_test "bdev_reset" $testdir/bdevperf/bdevperf --json "$conf_file" -q 16 -w reset -o 4096 -t 60
# fi

# Bdev and configuration cleanup below this line
#-----------------------------------------------------

trap - SIGINT SIGTERM EXIT
cleanup