#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh

if [ "$TEST_TRANSPORT" == "rdma" ]; then
	echo "Skipping tests on RDMA because the rdma stack fails to configure the same IP for host and target."
	exit 0
fi

DISCOVERY_PORT=8009
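# 8009 is the IANA-assigned TCP port for the NVMe-oF discovery service.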
DISCOVERY_NQN=nqn.2014-08.org.nvmexpress.discovery

# NQN prefix to use for subsystem NQNs
NQN=nqn.2016-06.io.spdk:cnode

HOST_NQN=nqn.2021-12.io.spdk:test
HOST_SOCK=/tmp/host.sock

nvmftestinit

# We will start the target as normal, emulating a storage cluster. We will simulate new paths
# to the cluster via multiple listeners with different TCP ports.

nvmfappstart -m 0x2

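# -u 8192 sets the transport's io_unit_size (the size of I/O units handled by the transport) to 8 KiB.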
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
$rpc_py nvmf_subsystem_add_listener $DISCOVERY_NQN -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP \
	-s $DISCOVERY_PORT
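
# For reference, the discovery log served by this listener could also be inspected manually
# with nvme-cli (assumed to be installed; this test does not use it):
#   nvme discover -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $DISCOVERY_PORT

# Create two null bdevs (1000 MiB each, 512-byte block size) to back the namespaces added to
# the subsystem below.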
$rpc_py bdev_null_create null0 1000 512
$rpc_py bdev_null_create null1 1000 512
$rpc_py bdev_wait_for_examine

# Now start the host where the discovery service will run.  For our tests, we will send RPCs to
# the "cluster" to create subsystems, add namespaces, add and remove listeners, and add hosts,
# and then check whether the discovery service has detected the changes and constructed the
# correct subsystem, ctrlr and bdev objects.

$SPDK_BIN_DIR/nvmf_tgt -m 0x1 -r $HOST_SOCK &
hostpid=$!
waitforlisten $hostpid $HOST_SOCK

trap 'process_shm --id $NVMF_APP_SHM_ID; kill $hostpid; nvmftestfini; exit 1' SIGINT SIGTERM EXIT

$rpc_py -s $HOST_SOCK log_set_flag bdev_nvme
$rpc_py -s $HOST_SOCK bdev_nvme_start_discovery -b nvme -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP \
	-s $DISCOVERY_PORT -f ipv4 -q $HOST_NQN
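
# The discovery service connects to the discovery controller, fetches the discovery log page,
# and attaches to any subsystems the host is allowed to see.  Attached controllers are named
# with the -b prefix plus an index, which is why the checks below expect "nvme0" and "nvme0n1".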
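# Returns the host's bdev names as a sorted, space-separated list, e.g. "nvme0n1 nvme0n2".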
function get_bdev_list() {
	$rpc_py -s $HOST_SOCK bdev_get_bdevs | jq -r '.[].name' | sort | xargs
}

function get_subsystem_names() {
	$rpc_py -s $HOST_SOCK bdev_nvme_get_controllers | jq -r '.[].name' | sort | xargs
}

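# Returns the sorted service IDs (ports) of all paths to controller $1,
# e.g. "$NVMF_PORT $NVMF_SECOND_PORT" when two listeners are up.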
function get_subsystem_paths() {
	$rpc_py -s $HOST_SOCK bdev_nvme_get_controllers -n $1 | jq -r '.[].ctrlrs[].trid.trsvcid' | sort -n | xargs
}

function get_discovery_ctrlrs() {
	$rpc_py -s $HOST_SOCK bdev_nvme_get_discovery_info | jq -r '.[].name' | sort | xargs
}

# Note that tests need to call get_notification_count and then check $notification_count,
# because if we use $(get_notification_count), the notify_id gets updated in the subshell.
notify_id=0
function get_notification_count() {
	notification_count=$($rpc_py -s $HOST_SOCK notify_get_notifications -i $notify_id | jq '. | length')
	notify_id=$((notify_id + notification_count))
}
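# Each entry returned by notify_get_notifications carries an "id", "type" and "ctx"; the events
# counted by this test should be bdev_register/bdev_unregister notifications.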

[[ "$(get_subsystem_names)" == "" ]]
[[ "$(get_bdev_list)" == "" ]]

$rpc_py nvmf_create_subsystem ${NQN}0
[[ "$(get_subsystem_names)" == "" ]]
[[ "$(get_bdev_list)" == "" ]]

$rpc_py nvmf_subsystem_add_ns ${NQN}0 null0
[[ "$(get_subsystem_names)" == "" ]]
[[ "$(get_bdev_list)" == "" ]]

# Add a listener for the subsystem.  But since the subsystem was not created with the -a
# (allow any host) option, the discovery host will not be able to see the subsystem until
# its hostnqn is added.
$rpc_py nvmf_subsystem_add_listener ${NQN}0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
[[ "$(get_subsystem_names)" == "" ]]
[[ "$(get_bdev_list)" == "" ]]
get_notification_count
[[ $notification_count == 0 ]]

# Discovery hostnqn is added, so now the host should see the subsystem, with a single path for the
# port of the single listener on the target.
$rpc_py nvmf_subsystem_add_host ${NQN}0 $HOST_NQN
sleep 1 # Wait a bit to make sure the discovery service has a chance to detect the changes
[[ "$(get_subsystem_names)" == "nvme0" ]]
[[ "$(get_bdev_list)" == "nvme0n1" ]]
[[ "$(get_subsystem_paths nvme0)" == "$NVMF_PORT" ]]
get_notification_count
[[ $notification_count == 1 ]]
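# (the single notification should be the bdev_register event for the new nvme0n1 bdev)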

# Adding a namespace isn't a discovery function, but do it here anyway just to confirm that we
# see a new bdev.
$rpc_py nvmf_subsystem_add_ns ${NQN}0 null1
sleep 1 # Wait a bit to make sure the discovery service has a chance to detect the changes
[[ "$(get_bdev_list)" == "nvme0n1 nvme0n2" ]]
get_notification_count
[[ $notification_count == 1 ]]
111# Add a second path to the same subsystem.  This shouldn't change the list of subsystems or bdevs, but
112# we should see a second path on the nvme0 subsystem now.
113$rpc_py nvmf_subsystem_add_listener ${NQN}0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_SECOND_PORT
114sleep 1 # Wait a bit to make sure the discovery service has a chance to detect the changes
115[[ "$(get_subsystem_names)" == "nvme0" ]]
116[[ "$(get_bdev_list)" == "nvme0n1 nvme0n2" ]]
117[[ "$(get_subsystem_paths nvme0)" == "$NVMF_PORT $NVMF_SECOND_PORT" ]]
118get_notification_count
119[[ $notification_count == 0 ]]
120
121# Remove the listener for the first port.  The subsystem and bdevs should stay, but we should see
122# the path to that first port disappear.
123$rpc_py nvmf_subsystem_remove_listener ${NQN}0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
124sleep 1 # Wait a bit to make sure the discovery service has a chance to detect the changes
125[[ "$(get_subsystem_names)" == "nvme0" ]]
126[[ "$(get_bdev_list)" == "nvme0n1 nvme0n2" ]]
127[[ "$(get_subsystem_paths nvme0)" == "$NVMF_SECOND_PORT" ]]
128get_notification_count
129[[ $notification_count == 0 ]]
130
131$rpc_py -s $HOST_SOCK bdev_nvme_stop_discovery -b nvme
132sleep 1 # Wait a bit to make sure the discovery service has a chance to detect the changes
133[[ "$(get_subsystem_names)" == "" ]]
134[[ "$(get_bdev_list)" == "" ]]
135get_notification_count
136[[ $notification_count == 2 ]]
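# (stopping discovery detaches nvme0, so we expect two bdev_unregister events, one per bdev)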

# Make sure that it's not possible to start two discovery services with the same name
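# The -w (--wait-for-attach) flag makes the RPC wait until the discovered subsystems have been
# attached before returning.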
$rpc_py -s $HOST_SOCK bdev_nvme_start_discovery -b nvme -t $TEST_TRANSPORT \
	-a $NVMF_FIRST_TARGET_IP -s $DISCOVERY_PORT -f ipv4 -q $HOST_NQN -w
NOT $rpc_py -s $HOST_SOCK bdev_nvme_start_discovery -b nvme -t $TEST_TRANSPORT \
	-a $NVMF_FIRST_TARGET_IP -s $DISCOVERY_PORT -f ipv4 -q $HOST_NQN -w
[[ $(get_discovery_ctrlrs) == "nvme" ]]
[[ $(get_bdev_list) == "nvme0n1 nvme0n2" ]]

# Make sure that it's also impossible to start a second discovery service using the same trid
NOT $rpc_py -s $HOST_SOCK bdev_nvme_start_discovery -b nvme_second -t $TEST_TRANSPORT \
	-a $NVMF_FIRST_TARGET_IP -s $DISCOVERY_PORT -f ipv4 -q $HOST_NQN -w
[[ $(get_discovery_ctrlrs) == "nvme" ]]
[[ $(get_bdev_list) == "nvme0n1 nvme0n2" ]]

# Try to connect to a non-existent discovery endpoint and verify that it times out
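# -T (--attach-timeout-ms) bounds how long the RPC waits for the attach, so this should give up
# after 3000 ms.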
NOT $rpc_py -s $HOST_SOCK bdev_nvme_start_discovery -b nvme_second -t $TEST_TRANSPORT \
	-a $NVMF_FIRST_TARGET_IP -s $((DISCOVERY_PORT + 1)) -f ipv4 -q $HOST_NQN -T 3000
[[ $(get_discovery_ctrlrs) == "nvme" ]]

trap - SIGINT SIGTERM EXIT

kill $hostpid
nvmftestfini
161