#!/usr/bin/env bash
#  SPDX-License-Identifier: BSD-3-Clause
#  Copyright (C) 2019 Intel Corporation
#  All rights reserved.
#
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh

MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512
# connect with 16 I/O queues per controller (nvme-cli -i/--nr-io-queues)
NVME_CONNECT="nvme connect -i 16"

nvmftestinit

nvmfappstart -m 0xF

# create the RDMA transport with an intentionally small SRQ depth (-s 1024)
# and an 8 KiB I/O unit size (-u 8192) so the SRQ is easy to exhaust
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -s 1024

# set up 6 subsystems, each backed by one malloc bdev, and connect to all of them
for i in $(seq 0 5); do
	$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s "SPDK0000000000000${i}"
	$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i
	$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i
	$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
	$NVME_CONNECT "${NVME_HOST[@]}" -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
	waitforblk "nvme${i}n1"
done

# by running 6 different FIO jobs, each with 13 subjobs, we end up with 78 fio threads issuing
# reads to our target at once. This completely overwhelms the target SRQ, but allows us to verify
# that rnr_retry is working even at very high queue depths because the rdma qpair doesn't fail.
# It is normal to see the initiator time out and reconnect while waiting for completions from an
# overwhelmed target, but the connection should come back up and FIO should complete without errors.
$rootdir/scripts/fio-wrapper -p nvmf -i 1048576 -d 128 -t read -r 10 -n 13

sync

# tear down: disconnect the host from each subsystem, then delete the subsystem
for i in $(seq 0 5); do
	nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}"
	waitforserial_disconnect "SPDK0000000000000${i}"
	$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode$i
done

trap - SIGINT SIGTERM EXIT

nvmftestfini
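
# note: one way to confirm the transport really came up with the reduced SRQ depth is to query
# the target before teardown. This is only a sketch, not part of the test flow above; it assumes
# the RDMA transport is in use and that jq is available on the test machine:
#
#   $rpc_py nvmf_get_transports --trtype RDMA | jq '.[0].max_srq_depth'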