#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh

MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512

rpc_py="$rootdir/scripts/rpc.py"

nvmftestinit
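
# start the nvmf target application on four cores (core mask 0xF)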
nvmfappstart -m 0xF

# create the RDMA transport with an intentionally small SRQ depth
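# -u sets the io-unit-size to 8192 bytes; -s caps each shared receive queue at
# 1024 entries (per rpc.py, the SRQ depth is per target thread). The resulting
# settings can be inspected with, e.g., $rpc_py nvmf_get_transports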
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -s 1024

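# create six subsystems, each backed by a 64 MiB malloc bdev, expose them on the
# test transport, and connect to each one with the kernel initiator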
for i in $(seq 0 5); do
	$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK00000000000001
	$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i
	$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i
	$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
	nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" -i 16
	waitforblk "nvme${i}n1"
done
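
# each connect above requested 16 I/O queues (-i 16, i.e. --nr-io-queues), so the six
# controllers hold up to 6 * 16 = 96 I/O qpairs, all fed from SRQs of only 1024 entries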

# by running 6 different FIO jobs, each with 13 subjobs, we end up with 78 fio threads trying to
# read from our target at once. This completely overwhelms the target SRQ, but allows us to verify
# that rnr_retry is working even at very high queue depths, because the RDMA qpair doesn't fail.
# It is normal to see the initiator time out and reconnect while waiting for completions from an
# overwhelmed target, but the connection should come up and FIO should complete without errors.
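# with 78 threads each keeping up to 128 I/Os in flight (-d 128), as many as
# 78 * 128 = 9984 requests can be outstanding at once, roughly ten times the
# 1024-entry SRQ limit set above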
$rootdir/scripts/fio.py -p nvmf -i 1048576 -d 128 -t read -r 10 -n 13

sync
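
# tear down: disconnect the kernel initiator from each controller and delete the subsystems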
for i in $(seq 0 5); do
	nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}"
	$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode$i
done

trap - SIGINT SIGTERM EXIT

nvmftestfini