1  /*   SPDX-License-Identifier: BSD-3-Clause
2   *   Copyright (c) Intel Corporation. All rights reserved.
3   *   Copyright (c) 2019, 2021 Mellanox Technologies LTD. All rights reserved.
4   */
5  
6  #include "spdk/stdinc.h"
7  #include "spdk_cunit.h"
8  #include "common/lib/test_env.c"
9  #include "common/lib/test_rdma.c"
10  #include "nvmf/rdma.c"
11  #include "nvmf/transport.c"
12  
13  #define RDMA_UT_UNITS_IN_MAX_IO 16
14  
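/*
 * Transport options shared by the tests below. max_io_size is deliberately set to
 * RDMA_UT_UNITS_IN_MAX_IO io units so that a maximum-sized request maps to exactly that
 * many pool buffers in the SGL parsing tests.
 */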
15  struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
16  	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
17  	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
18  	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
19  	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
20  	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
21  	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
22  	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
23  };
24  
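/*
 * Link-time stubs for symbols that rdma.c and transport.c reference but these tests never
 * exercise. DEFINE_STUB() generates a body that simply returns the given value
 * (DEFINE_STUB_V() a void no-op); where a test needs different behavior, the return value
 * can be overridden at run time with MOCK_SET().
 */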
25  SPDK_LOG_REGISTER_COMPONENT(nvmf)
26  DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
27  		uint64_t size, uint64_t translation), 0);
28  DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
29  		uint64_t size), 0);
30  DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
31  		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
32  DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
33  		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
34  DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
35  	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 0);
36  DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
37  
38  DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
39  		struct spdk_nvmf_ctrlr_data *cdata));
40  DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
41  DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
42  DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
43  		const struct spdk_nvme_transport_id *trid2), 0);
44  DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
45  DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
46  		struct spdk_dif_ctx *dif_ctx), false);
47  DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
48  		enum spdk_nvme_transport_type trtype));
49  DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
50  DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
51  DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
52  DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
53  DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);
54  
55  /* ibv_reg_mr can be a macro, need to undefine it */
56  #ifdef ibv_reg_mr
57  #undef ibv_reg_mr
58  #endif
59  
60  DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
61  struct ibv_mr *
62  ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
63  {
64  	HANDLE_RETURN_MOCK(ibv_reg_mr);
65  	if (length > 0) {
66  		return &g_rdma_mr;
67  	} else {
68  		return NULL;
69  	}
70  }
71  
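/*
 * Minimal ibv_query_qp() replacement: it fails for a NULL qp and otherwise reports
 * IBV_QPS_INIT, except for QPs already in the error state, for which it returns the
 * deliberately out-of-range value 10 so that test_nvmf_rdma_update_ibv_state() can
 * exercise the bad-state path.
 */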
72  int ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
73  		 int attr_mask, struct ibv_qp_init_attr *init_attr)
74  {
75  	if (qp == NULL) {
76  		return -1;
77  	} else {
78  		attr->port_num = 80;
79  
80  		if (qp->state == IBV_QPS_ERR) {
81  			attr->qp_state = 10;
82  		} else {
83  			attr->qp_state = IBV_QPS_INIT;
84  		}
85  
86  		return 0;
87  	}
88  }
89  
90  const char *
91  spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
92  {
93  	switch (trtype) {
94  	case SPDK_NVME_TRANSPORT_PCIE:
95  		return "PCIe";
96  	case SPDK_NVME_TRANSPORT_RDMA:
97  		return "RDMA";
98  	case SPDK_NVME_TRANSPORT_FC:
99  		return "FC";
100  	default:
101  		return NULL;
102  	}
103  }
104  
105  int
106  spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
107  {
108  	int len, i;
109  
110  	if (trstring == NULL) {
111  		return -EINVAL;
112  	}
113  
114  	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
115  	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
116  		return -EINVAL;
117  	}
118  
119  	/* The official trstring stored in the trid is the uppercased version of the input. */
120  	for (i = 0; i < len; i++) {
121  		trid->trstring[i] = toupper(trstring[i]);
122  	}
123  	return 0;
124  }
125  
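/* Return the request to a pristine state between sub-cases so that every assertion only
 * sees the effects of the parse under test. */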
126  static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
127  {
128  	int i;
129  
130  	rdma_req->req.length = 0;
131  	rdma_req->req.data_from_pool = false;
132  	rdma_req->req.data = NULL;
133  	rdma_req->data.wr.num_sge = 0;
134  	rdma_req->data.wr.wr.rdma.remote_addr = 0;
135  	rdma_req->data.wr.wr.rdma.rkey = 0;
136  	rdma_req->offset = 0;
137  	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));
138  
139  	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
140  		rdma_req->req.iov[i].iov_base = 0;
141  		rdma_req->req.iov[i].iov_len = 0;
142  		rdma_req->req.buffers[i] = 0;
143  		rdma_req->data.wr.sg_list[i].addr = 0;
144  		rdma_req->data.wr.sg_list[i].length = 0;
145  		rdma_req->data.wr.sg_list[i].lkey = 0;
146  	}
147  	rdma_req->req.iovcnt = 0;
148  	if (rdma_req->req.stripped_data) {
149  		free(rdma_req->req.stripped_data);
150  		rdma_req->req.stripped_data = NULL;
151  	}
152  }
153  
154  static void
155  test_spdk_nvmf_rdma_request_parse_sgl(void)
156  {
157  	struct spdk_nvmf_rdma_transport rtransport;
158  	struct spdk_nvmf_rdma_device device;
159  	struct spdk_nvmf_rdma_request rdma_req = {};
160  	struct spdk_nvmf_rdma_recv recv;
161  	struct spdk_nvmf_rdma_poll_group group;
162  	struct spdk_nvmf_rdma_qpair rqpair;
163  	struct spdk_nvmf_rdma_poller poller;
164  	union nvmf_c2h_msg cpl;
165  	union nvmf_h2c_msg cmd;
166  	struct spdk_nvme_sgl_descriptor *sgl;
167  	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
168  	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
169  	struct spdk_nvmf_rdma_request_data data;
170  	int rc, i;
171  	uint32_t sgl_length;
172  	uintptr_t aligned_buffer_address;
173  
174  	data.wr.sg_list = data.sgl;
175  	STAILQ_INIT(&group.group.buf_cache);
176  	group.group.buf_cache_size = 0;
177  	group.group.buf_cache_count = 0;
178  	group.group.transport = &rtransport.transport;
179  	poller.group = &group;
180  	rqpair.poller = &poller;
181  	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
182  
183  	sgl = &cmd.nvme_cmd.dptr.sgl1;
184  	rdma_req.recv = &recv;
185  	rdma_req.req.cmd = &cmd;
186  	rdma_req.req.rsp = &cpl;
187  	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
188  	rdma_req.req.qpair = &rqpair.qpair;
189  	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
190  
191  	rtransport.transport.opts = g_rdma_ut_transport_opts;
192  	rtransport.data_wr_pool = NULL;
193  	rtransport.transport.data_buf_pool = NULL;
194  
195  	device.attr.device_cap_flags = 0;
196  	sgl->keyed.key = 0xEEEE;
197  	sgl->address = 0xFFFF;
198  	rdma_req.recv->buf = (void *)0xDDDD;
199  
200  	/* Test 1: sgl type: keyed data block subtype: address */
201  	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
202  	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
203  
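	/* For a keyed data block SGL the transport carves the request into io_unit_size
	 * buffers taken from the pool and emits one RDMA SGE per buffer. spdk_mempool_get
	 * is mocked, so every "buffer" below is the fixed address 0x2000. */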
204  	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
205  	MOCK_SET(spdk_mempool_get, (void *)0x2000);
206  	reset_nvmf_rdma_request(&rdma_req);
207  	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
208  
209  	device.map = (void *)0x0;
210  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
211  	CU_ASSERT(rc == 0);
212  	CU_ASSERT(rdma_req.req.data_from_pool == true);
213  	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
214  	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
215  	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
216  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
217  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
218  	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
219  	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
220  	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
221  	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
222  
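	/* A max-sized I/O spans max_io_size / io_unit_size == RDMA_UT_UNITS_IN_MAX_IO (16)
	 * buffers, i.e. 16 SGEs, which still fit in the request's own WR because
	 * max_send_sge was set to SPDK_NVMF_MAX_SGL_ENTRIES above. */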
223  	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
224  	reset_nvmf_rdma_request(&rdma_req);
225  	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
226  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
227  
228  	CU_ASSERT(rc == 0);
229  	CU_ASSERT(rdma_req.req.data_from_pool == true);
230  	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
231  	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
232  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
233  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
234  	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
235  		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
236  		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
237  		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
238  		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
239  	}
240  
241  	/* Part 3: simple I/O, one SGL larger than the transport max io size */
242  	reset_nvmf_rdma_request(&rdma_req);
243  	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
244  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
245  
246  	CU_ASSERT(rc == -1);
247  
248  	/* Part 4: Pretend there are no buffer pools */
249  	MOCK_SET(spdk_mempool_get, NULL);
250  	reset_nvmf_rdma_request(&rdma_req);
251  	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
252  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
253  
254  	CU_ASSERT(rc == 0);
255  	CU_ASSERT(rdma_req.req.data_from_pool == false);
256  	CU_ASSERT(rdma_req.req.data == NULL);
257  	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
258  	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
259  	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
260  	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
261  	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);
262  
263  	rdma_req.recv->buf = (void *)0xDDDD;
264  	/* Test 2: sgl type: data block subtype: offset (in capsule data) */
265  	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
266  	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
267  
268  	/* Part 1: Normal I/O not exceeding the in capsule data size, no offset */
269  	reset_nvmf_rdma_request(&rdma_req);
270  	sgl->address = 0;
271  	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
272  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
273  
274  	CU_ASSERT(rc == 0);
275  	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
276  	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
277  	CU_ASSERT(rdma_req.req.data_from_pool == false);
278  
279  	/* Part 2: I/O offset + length too large */
280  	reset_nvmf_rdma_request(&rdma_req);
281  	sgl->address = rtransport.transport.opts.in_capsule_data_size;
282  	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
283  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
284  
285  	CU_ASSERT(rc == -1);
286  
287  	/* Part 3: I/O too large */
288  	reset_nvmf_rdma_request(&rdma_req);
289  	sgl->address = 0;
290  	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
291  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
292  
293  	CU_ASSERT(rc == -1);
294  
295  	/* Test 3: Multi SGL */
296  	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
297  	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
298  	sgl->address = 0;
299  	rdma_req.recv->buf = (void *)&sgl_desc;
300  	MOCK_SET(spdk_mempool_get, &data);
301  
302  	/* part 1: 2 segments each with 1 wr. */
303  	reset_nvmf_rdma_request(&rdma_req);
304  	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
305  	for (i = 0; i < 2; i++) {
306  		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
307  		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
308  		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
309  		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
310  		sgl_desc[i].keyed.key = 0x44;
311  	}
312  
313  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
314  
315  	CU_ASSERT(rc == 0);
316  	CU_ASSERT(rdma_req.req.data_from_pool == true);
317  	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
318  	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
319  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
320  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
321  	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
322  	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
323  	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
324  	CU_ASSERT(data.wr.num_sge == 1);
325  	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
326  
327  	/* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
328  	reset_nvmf_rdma_request(&rdma_req);
329  	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
330  	for (i = 0; i < 2; i++) {
331  		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
332  		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
333  		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
334  		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
335  		sgl_desc[i].keyed.key = 0x44;
336  	}
337  
338  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
339  
340  	CU_ASSERT(rc == 0);
341  	CU_ASSERT(rdma_req.req.data_from_pool == true);
342  	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
343  	CU_ASSERT(rdma_req.req.iovcnt == 16);
344  	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
345  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
346  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
347  	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
348  	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
349  	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
350  	CU_ASSERT(data.wr.num_sge == 8);
351  	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
352  
353  	/* part 3: 2 segments, one very large, one very small */
354  	reset_nvmf_rdma_request(&rdma_req);
355  	for (i = 0; i < 2; i++) {
356  		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
357  		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
358  		sgl_desc[i].keyed.key = 0x44;
359  	}
360  
361  	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
362  				   rtransport.transport.opts.io_unit_size / 2;
363  	sgl_desc[0].address = 0x4000;
364  	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
365  	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
366  			      rtransport.transport.opts.io_unit_size / 2;
367  
368  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
369  
370  	CU_ASSERT(rc == 0);
371  	CU_ASSERT(rdma_req.req.data_from_pool == true);
372  	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
373  	CU_ASSERT(rdma_req.req.iovcnt == 16);
374  	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
375  	for (i = 0; i < 15; i++) {
376  		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
377  	}
378  	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
379  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
380  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
381  	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
382  	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
383  	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
384  		  rtransport.transport.opts.io_unit_size / 2);
385  	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
386  	CU_ASSERT(data.wr.num_sge == 1);
387  	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
388  
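	/* The transport rounds each pool buffer up to the data buffer alignment before
	 * carving SGEs out of it; aligned_buffer_address below applies the same
	 * NVMF_DATA_BUFFER_MASK rounding to &data, which the mocked mempool hands out. */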
389  	/* part 4: 2 SGL descriptors, each of length transport buffer / 2;
390  	 * only 1 transport buffer should be allocated */
391  	reset_nvmf_rdma_request(&rdma_req);
392  	aligned_buffer_address = ((uintptr_t)(&data) + NVMF_DATA_BUFFER_MASK) & ~NVMF_DATA_BUFFER_MASK;
393  	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
394  	sgl_length = rtransport.transport.opts.io_unit_size / 2;
395  	for (i = 0; i < 2; i++) {
396  		sgl_desc[i].keyed.length = sgl_length;
397  		sgl_desc[i].address = 0x4000 + i * sgl_length;
398  	}
399  
400  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
401  
402  	CU_ASSERT(rc == 0);
403  	CU_ASSERT(rdma_req.req.data_from_pool == true);
404  	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size);
405  	CU_ASSERT(rdma_req.req.iovcnt == 1);
406  
407  	CU_ASSERT(rdma_req.data.sgl[0].length == sgl_length);
408  	/* spdk_mempool_get is mocked to return the address of the data variable. The same
409  	 * mempool supplies both additional WRs and data buffers, so the data buffer is &data too */
410  	CU_ASSERT(rdma_req.data.sgl[0].addr == aligned_buffer_address);
411  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
412  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
413  	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
414  	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
415  
416  	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
417  	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + sgl_length);
418  	CU_ASSERT(data.sgl[0].length == sgl_length);
419  	CU_ASSERT(data.sgl[0].addr == aligned_buffer_address + sgl_length);
420  	CU_ASSERT(data.wr.num_sge == 1);
421  
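	/* Buffers come from the poll group cache first and only then from the mempool;
	 * parts 1-3 below cover cache-only, mempool-only and mixed allocations. */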
422  	/* Test 4: use PG buffer cache */
423  	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
424  	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
425  	sgl->address = 0xFFFF;
426  	rdma_req.recv->buf = (void *)0xDDDD;
427  	sgl->keyed.key = 0xEEEE;
428  
429  	for (i = 0; i < 4; i++) {
430  		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
431  	}
432  
433  	/* part 1: use the four buffers from the pg cache */
434  	group.group.buf_cache_size = 4;
435  	group.group.buf_cache_count = 4;
436  	MOCK_SET(spdk_mempool_get, (void *)0x2000);
437  	reset_nvmf_rdma_request(&rdma_req);
438  	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
439  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
440  
441  	SPDK_CU_ASSERT_FATAL(rc == 0);
442  	CU_ASSERT(rdma_req.req.data_from_pool == true);
443  	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
444  	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
445  			~NVMF_DATA_BUFFER_MASK));
446  	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
447  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
448  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
449  	CU_ASSERT(group.group.buf_cache_count == 0);
450  	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
451  	for (i = 0; i < 4; i++) {
452  		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
453  		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
454  				~NVMF_DATA_BUFFER_MASK));
455  		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
456  	}
457  
458  	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
459  	reset_nvmf_rdma_request(&rdma_req);
460  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
461  
462  	SPDK_CU_ASSERT_FATAL(rc == 0);
463  	CU_ASSERT(rdma_req.req.data_from_pool == true);
464  	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
465  	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
466  	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
467  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
468  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
469  	CU_ASSERT(group.group.buf_cache_count == 0);
470  	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
471  	for (i = 0; i < 4; i++) {
472  		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
473  		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
474  		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
475  		CU_ASSERT(group.group.buf_cache_count == 0);
476  	}
477  
478  	/* part 3: half and half */
479  	group.group.buf_cache_count = 2;
480  
481  	for (i = 0; i < 2; i++) {
482  		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
483  	}
484  	reset_nvmf_rdma_request(&rdma_req);
485  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
486  
487  	SPDK_CU_ASSERT_FATAL(rc == 0);
488  	CU_ASSERT(rdma_req.req.data_from_pool == true);
489  	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
490  	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
491  			~NVMF_DATA_BUFFER_MASK));
492  	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
493  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
494  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
495  	CU_ASSERT(group.group.buf_cache_count == 0);
496  	for (i = 0; i < 2; i++) {
497  		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
498  		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
499  				~NVMF_DATA_BUFFER_MASK));
500  		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
501  	}
502  	for (i = 2; i < 4; i++) {
503  		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
504  		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
505  		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
506  	}
507  
508  	reset_nvmf_rdma_request(&rdma_req);
509  }
510  
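/*
 * Helpers for the request_process tests: build a minimal recv/request pair around a
 * 1-byte keyed SGL, which is enough for nvmf_rdma_request_process() to walk its state
 * machine without real verbs objects.
 */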
511  static struct spdk_nvmf_rdma_recv *
512  create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
513  {
514  	struct spdk_nvmf_rdma_recv *rdma_recv;
515  	union nvmf_h2c_msg *cmd;
516  	struct spdk_nvme_sgl_descriptor *sgl;
517  
518  	rdma_recv = calloc(1, sizeof(*rdma_recv));
519  	rdma_recv->qpair = rqpair;
520  	cmd = calloc(1, sizeof(*cmd));
521  	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
522  	cmd->nvme_cmd.opc = opc;
523  	sgl = &cmd->nvme_cmd.dptr.sgl1;
524  	sgl->keyed.key = 0xEEEE;
525  	sgl->address = 0xFFFF;
526  	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
527  	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
528  	sgl->keyed.length = 1;
529  
530  	return rdma_recv;
531  }
532  
533  static void
534  free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
535  {
536  	free((void *)rdma_recv->sgl[0].addr);
537  	free(rdma_recv);
538  }
539  
540  static struct spdk_nvmf_rdma_request *
541  create_req(struct spdk_nvmf_rdma_qpair *rqpair,
542  	   struct spdk_nvmf_rdma_recv *rdma_recv)
543  {
544  	struct spdk_nvmf_rdma_request *rdma_req;
545  	union nvmf_c2h_msg *cpl;
546  
547  	rdma_req = calloc(1, sizeof(*rdma_req));
548  	rdma_req->recv = rdma_recv;
549  	rdma_req->req.qpair = &rqpair->qpair;
550  	rdma_req->state = RDMA_REQUEST_STATE_NEW;
551  	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
552  	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
553  	cpl = calloc(1, sizeof(*cpl));
554  	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
555  	rdma_req->req.rsp = cpl;
556  
557  	return rdma_req;
558  }
559  
560  static void
561  free_req(struct spdk_nvmf_rdma_request *rdma_req)
562  {
563  	free((void *)rdma_req->rsp.sgl[0].addr);
564  	free(rdma_req);
565  }
566  
567  static void
568  qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
569  	    struct spdk_nvmf_rdma_poller *poller,
570  	    struct spdk_nvmf_rdma_device *device,
571  	    struct spdk_nvmf_rdma_resources *resources,
572  	    struct spdk_nvmf_transport *transport)
573  {
574  	memset(rqpair, 0, sizeof(*rqpair));
575  	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
576  	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
577  	rqpair->poller = poller;
578  	rqpair->device = device;
579  	rqpair->resources = resources;
580  	rqpair->qpair.qid = 1;
581  	rqpair->ibv_state = IBV_QPS_RTS;
582  	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
583  	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
584  	rqpair->max_send_depth = 16;
585  	rqpair->max_read_depth = 16;
586  	rqpair->qpair.transport = transport;
587  }
588  
589  static void
590  poller_reset(struct spdk_nvmf_rdma_poller *poller,
591  	     struct spdk_nvmf_rdma_poll_group *group)
592  {
593  	memset(poller, 0, sizeof(*poller));
594  	STAILQ_INIT(&poller->qpairs_pending_recv);
595  	STAILQ_INIT(&poller->qpairs_pending_send);
596  	poller->group = group;
597  }
598  
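/*
 * Drive nvmf_rdma_request_process() through the READ, WRITE, batched WRITE+WRITE and
 * invalid-opcode paths, manually advancing the request to the states that a completion
 * handler would normally set.
 */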
599  static void
600  test_spdk_nvmf_rdma_request_process(void)
601  {
602  	struct spdk_nvmf_rdma_transport rtransport = {};
603  	struct spdk_nvmf_rdma_poll_group group = {};
604  	struct spdk_nvmf_rdma_poller poller = {};
605  	struct spdk_nvmf_rdma_device device = {};
606  	struct spdk_nvmf_rdma_resources resources = {};
607  	struct spdk_nvmf_rdma_qpair rqpair = {};
608  	struct spdk_nvmf_rdma_recv *rdma_recv;
609  	struct spdk_nvmf_rdma_request *rdma_req;
610  	bool progress;
611  
612  	STAILQ_INIT(&group.group.buf_cache);
613  	STAILQ_INIT(&group.group.pending_buf_queue);
614  	group.group.buf_cache_size = 0;
615  	group.group.buf_cache_count = 0;
616  	poller_reset(&poller, &group);
617  	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
618  
619  	rtransport.transport.opts = g_rdma_ut_transport_opts;
620  	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
621  	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
622  				  sizeof(struct spdk_nvmf_rdma_request_data),
623  				  0, 0);
624  	MOCK_CLEAR(spdk_mempool_get);
625  
626  	device.attr.device_cap_flags = 0;
627  	device.map = (void *)0x0;
628  
629  	/* Test 1: single SGL READ request */
630  	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
631  	rdma_req = create_req(&rqpair, rdma_recv);
632  	rqpair.current_recv_depth = 1;
633  	/* NEW -> EXECUTING */
634  	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
635  	CU_ASSERT(progress == true);
636  	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
637  	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
638  	/* EXECUTED -> TRANSFERRING_C2H */
639  	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
640  	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
641  	CU_ASSERT(progress == true);
642  	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
643  	CU_ASSERT(rdma_req->recv == NULL);
644  	/* COMPLETED -> FREE */
645  	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
646  	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
647  	CU_ASSERT(progress == true);
648  	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
649  
650  	free_recv(rdma_recv);
651  	free_req(rdma_req);
652  	poller_reset(&poller, &group);
653  	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
654  
655  	/* Test 2: single SGL WRITE request */
656  	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
657  	rdma_req = create_req(&rqpair, rdma_recv);
658  	rqpair.current_recv_depth = 1;
659  	/* NEW -> TRANSFERRING_H2C */
660  	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
661  	CU_ASSERT(progress == true);
662  	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
663  	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
664  	STAILQ_INIT(&poller.qpairs_pending_send);
665  	/* READY_TO_EXECUTE -> EXECUTING */
666  	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
667  	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
668  	CU_ASSERT(progress == true);
669  	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
670  	/* EXECUTED -> COMPLETING */
671  	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
672  	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
673  	CU_ASSERT(progress == true);
674  	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
675  	CU_ASSERT(rdma_req->recv == NULL);
676  	/* COMPLETED -> FREE */
677  	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
678  	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
679  	CU_ASSERT(progress == true);
680  	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
681  
682  	free_recv(rdma_recv);
683  	free_req(rdma_req);
684  	poller_reset(&poller, &group);
685  	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
686  
687  	/* Test 3: WRITE+WRITE ibv_send batching */
688  	{
689  		struct spdk_nvmf_rdma_recv *recv1, *recv2;
690  		struct spdk_nvmf_rdma_request *req1, *req2;
691  		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
692  		req1 = create_req(&rqpair, recv1);
693  		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
694  		req2 = create_req(&rqpair, recv2);
695  
696  		/* WRITE 1: NEW -> TRANSFERRING_H2C */
697  		rqpair.current_recv_depth = 1;
698  		nvmf_rdma_request_process(&rtransport, req1);
699  		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
700  
701  		/* WRITE 2: NEW -> TRANSFERRING_H2C */
702  		rqpair.current_recv_depth = 2;
703  		nvmf_rdma_request_process(&rtransport, req2);
704  		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
705  
706  		STAILQ_INIT(&poller.qpairs_pending_send);
707  
708  		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
709  		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
710  		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
711  		nvmf_rdma_request_process(&rtransport, req1);
712  		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
713  		/* WRITE 1: EXECUTED -> COMPLETING */
714  		req1->state = RDMA_REQUEST_STATE_EXECUTED;
715  		nvmf_rdma_request_process(&rtransport, req1);
716  		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
717  		STAILQ_INIT(&poller.qpairs_pending_send);
718  		/* WRITE 1: COMPLETED -> FREE */
719  		req1->state = RDMA_REQUEST_STATE_COMPLETED;
720  		nvmf_rdma_request_process(&rtransport, req1);
721  		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);
722  
723  		/* Now WRITE 2 has finished reading and completes */
724  		/* and goes through the same state sequence as WRITE 1 */
725  		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
726  		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
727  		nvmf_rdma_request_process(&rtransport, req2);
728  		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
729  		/* WRITE 2: EXECUTED -> COMPLETING */
730  		req2->state = RDMA_REQUEST_STATE_EXECUTED;
731  		nvmf_rdma_request_process(&rtransport, req2);
732  		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
733  		STAILQ_INIT(&poller.qpairs_pending_send);
734  		/* WRITE 2: COMPLETED -> FREE */
735  		req2->state = RDMA_REQUEST_STATE_COMPLETED;
736  		nvmf_rdma_request_process(&rtransport, req2);
737  		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);
738  
739  		free_recv(recv1);
740  		free_req(req1);
741  		free_recv(recv2);
742  		free_req(req2);
743  		poller_reset(&poller, &group);
744  		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
745  	}
746  
747  	/* Test 4: invalid command, check xfer type */
748  	{
749  		struct spdk_nvmf_rdma_recv *rdma_recv_inv;
750  		struct spdk_nvmf_rdma_request *rdma_req_inv;
751  		/* construct an opcode that specifies BIDIRECTIONAL transfer */
752  		uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
753  
754  		rdma_recv_inv = create_recv(&rqpair, opc);
755  		rdma_req_inv = create_req(&rqpair, rdma_recv_inv);
756  
757  		/* NEW -> RDMA_REQUEST_STATE_COMPLETING */
758  		rqpair.current_recv_depth = 1;
759  		progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
760  		CU_ASSERT(progress == true);
761  		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
762  		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
763  		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
764  
765  		/* RDMA_REQUEST_STATE_COMPLETED -> FREE */
766  		rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
767  		nvmf_rdma_request_process(&rtransport, rdma_req_inv);
768  		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);
769  
770  		free_recv(rdma_recv_inv);
771  		free_req(rdma_req_inv);
772  		poller_reset(&poller, &group);
773  		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
774  	}
775  
776  	spdk_mempool_free(rtransport.transport.data_buf_pool);
777  	spdk_mempool_free(rtransport.data_wr_pool);
778  }
779  
780  #define TEST_GROUPS_COUNT 5
781  static void
782  test_nvmf_rdma_get_optimal_poll_group(void)
783  {
784  	struct spdk_nvmf_rdma_transport rtransport = {};
785  	struct spdk_nvmf_transport *transport = &rtransport.transport;
786  	struct spdk_nvmf_rdma_qpair rqpair = {};
787  	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
788  	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
789  	struct spdk_nvmf_transport_poll_group *result;
790  	uint32_t i;
791  
792  	rqpair.qpair.transport = transport;
793  	pthread_mutex_init(&rtransport.lock, NULL);
794  	TAILQ_INIT(&rtransport.poll_groups);
795  
796  	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
797  		groups[i] = nvmf_rdma_poll_group_create(transport, NULL);
798  		CU_ASSERT(groups[i] != NULL);
799  		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
800  		groups[i]->transport = transport;
801  	}
802  	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
803  	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);
804  
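	/* Admin qpairs (qid 0) advance next_admin_pg and I/O qpairs advance next_io_pg
	 * independently, so both pointers should round-robin across the groups. */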
805  	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
806  	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
807  		rqpair.qpair.qid = 0;
808  		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
809  		CU_ASSERT(result == groups[i]);
810  		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
811  		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);
812  
813  		rqpair.qpair.qid = 1;
814  		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
815  		CU_ASSERT(result == groups[i]);
816  		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
817  		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
818  	}
819  	/* wrap around, admin/io pg point to the first pg.
820  	   Destroy all poll groups except the last one */
821  	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
822  		nvmf_rdma_poll_group_destroy(groups[i]);
823  		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
824  		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
825  	}
826  
827  	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
828  	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
829  
830  	/* Check that pointers to the next admin/io poll groups are not changed */
831  	rqpair.qpair.qid = 0;
832  	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
833  	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
834  	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
835  	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
836  
837  	rqpair.qpair.qid = 1;
838  	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
839  	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
840  	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
841  	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
842  
843  	/* Remove the last poll group, check that pointers are NULL */
844  	nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
845  	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
846  	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);
847  
848  	/* Request optimal poll group, result must be NULL */
849  	rqpair.qpair.qid = 0;
850  	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
851  	CU_ASSERT(result == NULL);
852  
853  	rqpair.qpair.qid = 1;
854  	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
855  	CU_ASSERT(result == NULL);
856  
857  	pthread_mutex_destroy(&rtransport.lock);
858  }
859  #undef TEST_GROUPS_COUNT
860  
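/*
 * Same SGL parsing paths as above, but with DIF insert/strip enabled: local buffers hold
 * interleaved 512-byte data blocks and 8-byte metadata, so a single remote data block may
 * be split across several local SGEs or across two pool buffers.
 */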
861  static void
862  test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
863  {
864  	struct spdk_nvmf_rdma_transport rtransport;
865  	struct spdk_nvmf_rdma_device device;
866  	struct spdk_nvmf_rdma_request rdma_req = {};
867  	struct spdk_nvmf_rdma_recv recv;
868  	struct spdk_nvmf_rdma_poll_group group;
869  	struct spdk_nvmf_rdma_qpair rqpair;
870  	struct spdk_nvmf_rdma_poller poller;
871  	union nvmf_c2h_msg cpl;
872  	union nvmf_h2c_msg cmd;
873  	struct spdk_nvme_sgl_descriptor *sgl;
874  	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
875  	char data_buffer[8192];
876  	struct spdk_nvmf_rdma_request_data *data = (struct spdk_nvmf_rdma_request_data *)data_buffer;
877  	char data2_buffer[8192];
878  	struct spdk_nvmf_rdma_request_data *data2 = (struct spdk_nvmf_rdma_request_data *)data2_buffer;
879  	const uint32_t data_bs = 512;
880  	const uint32_t md_size = 8;
881  	int rc, i;
882  	void *aligned_buffer;
883  
884  	data->wr.sg_list = data->sgl;
885  	STAILQ_INIT(&group.group.buf_cache);
886  	group.group.buf_cache_size = 0;
887  	group.group.buf_cache_count = 0;
888  	group.group.transport = &rtransport.transport;
889  	poller.group = &group;
890  	rqpair.poller = &poller;
891  	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
892  
893  	sgl = &cmd.nvme_cmd.dptr.sgl1;
894  	rdma_req.recv = &recv;
895  	rdma_req.req.cmd = &cmd;
896  	rdma_req.req.rsp = &cpl;
897  	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
898  	rdma_req.req.qpair = &rqpair.qpair;
899  	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
900  
901  	rtransport.transport.opts = g_rdma_ut_transport_opts;
902  	rtransport.data_wr_pool = NULL;
903  	rtransport.transport.data_buf_pool = NULL;
904  
905  	device.attr.device_cap_flags = 0;
906  	device.map = NULL;
907  	sgl->keyed.key = 0xEEEE;
908  	sgl->address = 0xFFFF;
909  	rdma_req.recv->buf = (void *)0xDDDD;
910  
911  	/* Test 1: sgl type: keyed data block subtype: address */
912  	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
913  	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
914  
915  	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
916  	MOCK_SET(spdk_mempool_get, (void *)0x2000);
917  	reset_nvmf_rdma_request(&rdma_req);
918  	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
919  			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
920  			  0, 0, 0, 0, 0);
921  	rdma_req.req.dif_enabled = true;
922  	rtransport.transport.opts.io_unit_size = data_bs * 8;
923  	rdma_req.req.qpair->transport = &rtransport.transport;
924  	sgl->keyed.length = data_bs * 4;
925  
926  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
927  
928  	CU_ASSERT(rc == 0);
929  	CU_ASSERT(rdma_req.req.data_from_pool == true);
930  	CU_ASSERT(rdma_req.req.length == data_bs * 4);
931  	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
932  	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
933  	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
934  	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
935  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
936  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
937  	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
938  
939  	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
940  	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rdma_req.req.length);
941  	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
942  
943  	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
944  		block size 512 */
945  	MOCK_SET(spdk_mempool_get, (void *)0x2000);
946  	reset_nvmf_rdma_request(&rdma_req);
947  	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
948  			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
949  			  0, 0, 0, 0, 0);
950  	rdma_req.req.dif_enabled = true;
951  	rtransport.transport.opts.io_unit_size = data_bs * 4;
952  	sgl->keyed.length = data_bs * 4;
953  
954  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
955  
956  	CU_ASSERT(rc == 0);
957  	CU_ASSERT(rdma_req.req.data_from_pool == true);
958  	CU_ASSERT(rdma_req.req.length == data_bs * 4);
959  	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
960  	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
961  	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
962  	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
963  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
964  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
965  	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
966  
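	/* io_unit_size (4 * 512 = 2048) holds three full 520-byte data+metadata blocks plus
	 * 488 bytes of the fourth data block; the remaining 512 - 488 = 24 data bytes spill
	 * into a second buffer, giving the 5 SGEs checked below. */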
967  	for (i = 0; i < 3; ++i) {
968  		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
969  		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
970  		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
971  	}
972  	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
973  	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
974  	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);
975  
976  	/* 2nd buffer consumed */
977  	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
978  	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
979  	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);
980  
981  	/* Part 3: simple I/O, one SGL equal to io unit size, io_unit_size is equal to block size 512 bytes */
982  	MOCK_SET(spdk_mempool_get, (void *)0x2000);
983  	reset_nvmf_rdma_request(&rdma_req);
984  	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
985  			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
986  			  0, 0, 0, 0, 0);
987  	rdma_req.req.dif_enabled = true;
988  	rtransport.transport.opts.io_unit_size = data_bs;
989  	sgl->keyed.length = data_bs;
990  
991  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
992  
993  	CU_ASSERT(rc == 0);
994  	CU_ASSERT(rdma_req.req.data_from_pool == true);
995  	CU_ASSERT(rdma_req.req.length == data_bs);
996  	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
997  	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
998  	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
999  	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
1000  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1001  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1002  	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
1003  
1004  	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
1005  	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
1006  	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
1007  
1008  	CU_ASSERT(rdma_req.req.iovcnt == 2);
1009  	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
1010  	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
1011  	/* 2nd buffer consumed for metadata */
1012  	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
1013  	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);
1014  
1015  	/* Part 4: simple I/O, one SGL equal to io unit size, io_unit_size is aligned with md_size,
1016  	   block size 512 */
1017  	MOCK_SET(spdk_mempool_get, (void *)0x2000);
1018  	reset_nvmf_rdma_request(&rdma_req);
1019  	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1020  			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1021  			  0, 0, 0, 0, 0);
1022  	rdma_req.req.dif_enabled = true;
1023  	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
1024  	sgl->keyed.length = data_bs * 4;
1025  
1026  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1027  
1028  	CU_ASSERT(rc == 0);
1029  	CU_ASSERT(rdma_req.req.data_from_pool == true);
1030  	CU_ASSERT(rdma_req.req.length == data_bs * 4);
1031  	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1032  	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
1033  	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
1034  	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
1035  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1036  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1037  	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
1038  
1039  	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
1040  	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rdma_req.req.length);
1041  	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
1042  
1043  	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
1044  	   block size 512 */
1045  	MOCK_SET(spdk_mempool_get, (void *)0x2000);
1046  	reset_nvmf_rdma_request(&rdma_req);
1047  	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1048  			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1049  			  0, 0, 0, 0, 0);
1050  	rdma_req.req.dif_enabled = true;
1051  	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
1052  	sgl->keyed.length = data_bs * 4;
1053  
1054  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1055  
1056  	CU_ASSERT(rc == 0);
1057  	CU_ASSERT(rdma_req.req.data_from_pool == true);
1058  	CU_ASSERT(rdma_req.req.length == data_bs * 4);
1059  	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1060  	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
1061  	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
1062  	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
1063  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1064  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1065  	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
1066  
1067  	for (i = 0; i < 2; ++i) {
1068  		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
1069  		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs * 2);
1070  	}
1071  
1072  	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
1073  	   block size 512 */
1074  	MOCK_SET(spdk_mempool_get, (void *)0x2000);
1075  	reset_nvmf_rdma_request(&rdma_req);
1076  	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1077  			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1078  			  0, 0, 0, 0, 0);
1079  	rdma_req.req.dif_enabled = true;
1080  	rtransport.transport.opts.io_unit_size = data_bs * 4;
1081  	sgl->keyed.length = data_bs * 6;
1082  
1083  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1084  
1085  	CU_ASSERT(rc == 0);
1086  	CU_ASSERT(rdma_req.req.data_from_pool == true);
1087  	CU_ASSERT(rdma_req.req.length == data_bs * 6);
1088  	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1089  	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
1090  	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
1091  	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
1092  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1093  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1094  	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
1095  
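	/* Same split as in Part 2 for the first buffer. The second buffer starts with the
	 * spilled 24 data bytes plus their 8-byte metadata, so the next data block begins at
	 * offset 24 + 8 = 32 and the one after it at 24 + 8 + 512 + 8 = 552. */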
1096  	for (i = 0; i < 3; ++i) {
1097  		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
1098  		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
1099  		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
1100  	}
1101  	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
1102  	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
1103  	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);
1104  
1105  	/* 2nd IO buffer consumed */
1106  	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
1107  	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
1108  	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);
1109  
1110  	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
1111  	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
1112  	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == RDMA_UT_LKEY);
1113  
1114  	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
1115  	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
1116  	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == RDMA_UT_LKEY);
1117  
1118  	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
1119  	   one WR can hold. Additional WR is chained */
1120  	MOCK_SET(spdk_mempool_get, data2_buffer);
1121  	aligned_buffer = (void *)((uintptr_t)(data2_buffer + NVMF_DATA_BUFFER_MASK) &
1122  				  ~NVMF_DATA_BUFFER_MASK);
1123  	reset_nvmf_rdma_request(&rdma_req);
1124  	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1125  			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1126  			  0, 0, 0, 0, 0);
1127  	rdma_req.req.dif_enabled = true;
1128  	rtransport.transport.opts.io_unit_size = data_bs * 16;
1129  	sgl->keyed.length = data_bs * 16;
1130  
1131  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1132  
1133  	CU_ASSERT(rc == 0);
1134  	CU_ASSERT(rdma_req.req.data_from_pool == true);
1135  	CU_ASSERT(rdma_req.req.length == data_bs * 16);
1136  	CU_ASSERT(rdma_req.req.iovcnt == 2);
1137  	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1138  	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
1139  	CU_ASSERT(rdma_req.req.data == aligned_buffer);
1140  	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
1141  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1142  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1143  
1144  	for (i = 0; i < 15; ++i) {
1145  		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)aligned_buffer + i * (data_bs + md_size));
1146  		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
1147  		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
1148  	}
1149  
1150  	/* 8192 - (512 + 8) * 15 = 392 */
1151  	CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)aligned_buffer + i * (data_bs + md_size));
1152  	CU_ASSERT(rdma_req.data.wr.sg_list[i].length == 392);
1153  	CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
1154  
1155  	/* additional wr from pool */
1156  	CU_ASSERT(rdma_req.data.wr.next == (void *)&data2->wr);
1157  	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
1158  	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);
1159  	/* 2nd IO buffer: the remaining 512 - 392 = 120 data bytes of the last block */
1160  	CU_ASSERT(data2->wr.sg_list[0].addr == (uintptr_t)aligned_buffer);
1161  	CU_ASSERT(data2->wr.sg_list[0].length == 120);
1162  	CU_ASSERT(data2->wr.sg_list[0].lkey == RDMA_UT_LKEY);
1163  
1164  	/* Part 8: simple I/O, data with metadata does not fit into 1 io_buffer */
1165  	MOCK_SET(spdk_mempool_get, (void *)0x2000);
1166  	reset_nvmf_rdma_request(&rdma_req);
1167  	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1168  			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1169  			  0, 0, 0, 0, 0);
1170  	rdma_req.req.dif_enabled = true;
1171  	rtransport.transport.opts.io_unit_size = 516;
1172  	sgl->keyed.length = data_bs * 2;
1173  
1174  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1175  
1176  	CU_ASSERT(rc == 0);
1177  	CU_ASSERT(rdma_req.req.data_from_pool == true);
1178  	CU_ASSERT(rdma_req.req.length == data_bs * 2);
1179  	CU_ASSERT(rdma_req.req.iovcnt == 3);
1180  	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1181  	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
1182  	CU_ASSERT(rdma_req.req.data == (void *)0x2000);
1183  	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
1184  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1185  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1186  
1187  	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
1188  	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
1189  	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
1190  
1191  	/* 2nd IO buffer consumed, offset by 4 bytes because part of the metadata
1192  	 * is located at the beginning of that buffer */
1193  	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
1194  	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
1195  	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == RDMA_UT_LKEY);
1196  
1197  	/* Test 2: Multi SGL */
1198  	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
1199  	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
1200  	sgl->address = 0;
1201  	rdma_req.recv->buf = (void *)&sgl_desc;
1202  	MOCK_SET(spdk_mempool_get, data_buffer);
1203  	aligned_buffer = (void *)((uintptr_t)(data_buffer + NVMF_DATA_BUFFER_MASK) &
1204  				  ~NVMF_DATA_BUFFER_MASK);
1205  
1206  	/* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
1207  	reset_nvmf_rdma_request(&rdma_req);
1208  	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1209  			  SPDK_DIF_TYPE1,
1210  			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
1211  	rdma_req.req.dif_enabled = true;
1212  	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
1213  	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
1214  
1215  	for (i = 0; i < 2; i++) {
1216  		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
1217  		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
1218  		sgl_desc[i].keyed.length = data_bs * 4;
1219  		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
1220  		sgl_desc[i].keyed.key = 0x44;
1221  	}
1222  
1223  	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1224  
1225  	CU_ASSERT(rc == 0);
1226  	CU_ASSERT(rdma_req.req.data_from_pool == true);
1227  	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
1228  	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1229  	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
1230  	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
1231  	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (uintptr_t)(aligned_buffer));
1232  	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs * 4);
1233  
1234  	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
1235  	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
1236  	CU_ASSERT(rdma_req.data.wr.next == &data->wr);
1237  	CU_ASSERT(data->wr.wr.rdma.rkey == 0x44);
1238  	CU_ASSERT(data->wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
1239  	CU_ASSERT(data->wr.num_sge == 1);
1240  	CU_ASSERT(data->wr.sg_list[0].addr == (uintptr_t)(aligned_buffer));
1241  	CU_ASSERT(data->wr.sg_list[0].length == data_bs * 4);
1242  
1243  	CU_ASSERT(data->wr.next == &rdma_req.rsp.wr);
1244  	reset_nvmf_rdma_request(&rdma_req);
1245  }
1246  
1247  static void
1248  test_nvmf_rdma_opts_init(void)
1249  {
1250  	struct spdk_nvmf_transport_opts	opts = {};
1251  
1252  	nvmf_rdma_opts_init(&opts);
1253  	CU_ASSERT(opts.max_queue_depth == SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH);
1254  	CU_ASSERT(opts.max_qpairs_per_ctrlr ==	SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR);
1255  	CU_ASSERT(opts.in_capsule_data_size ==	SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE);
1256  	CU_ASSERT(opts.max_io_size == SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE);
1257  	CU_ASSERT(opts.io_unit_size == SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
1258  	CU_ASSERT(opts.max_aq_depth == SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH);
1259  	CU_ASSERT(opts.num_shared_buffers == SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS);
1260  	CU_ASSERT(opts.buf_cache_size == SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE);
1261  	CU_ASSERT(opts.dif_insert_or_strip == SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP);
1262  	CU_ASSERT(opts.abort_timeout_sec == SPDK_NVMF_RDMA_DEFAULT_ABORT_TIMEOUT_SEC);
1263  	CU_ASSERT(opts.transport_specific == NULL);
1264  }
1265  
1266  static void
1267  test_nvmf_rdma_request_free_data(void)
1268  {
1269  	struct spdk_nvmf_rdma_request rdma_req = {};
1270  	struct spdk_nvmf_rdma_transport rtransport = {};
1271  	struct spdk_nvmf_rdma_request_data *next_request_data = NULL;
1272  
1273  	MOCK_CLEAR(spdk_mempool_get);
1274  	rtransport.data_wr_pool = spdk_mempool_create("spdk_nvmf_rdma_wr_data",
1275  				  SPDK_NVMF_MAX_SGL_ENTRIES,
1276  				  sizeof(struct spdk_nvmf_rdma_request_data),
1277  				  SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
1278  				  SPDK_ENV_SOCKET_ID_ANY);
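	/* The test_env mempool mock tracks its free elements in 'count'; taking one
	 * element below is expected to leave SPDK_NVMF_MAX_SGL_ENTRIES - 1 in the pool. */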
1279  	next_request_data = spdk_mempool_get(rtransport.data_wr_pool);
1280  	SPDK_CU_ASSERT_FATAL(((struct test_mempool *)rtransport.data_wr_pool)->count ==
1281  			     SPDK_NVMF_MAX_SGL_ENTRIES - 1);
1282  	next_request_data->wr.wr_id = 1;
1283  	next_request_data->wr.num_sge = 2;
1284  	next_request_data->wr.next = NULL;
1285  	rdma_req.data.wr.next = &next_request_data->wr;
1286  	rdma_req.data.wr.wr_id = 1;
1287  	rdma_req.data.wr.num_sge = 2;
1288  
1289  	nvmf_rdma_request_free_data(&rdma_req, &rtransport);
1290  	/* Check that next_request_data was returned to the memory pool */
1291  	CU_ASSERT(((struct test_mempool *)rtransport.data_wr_pool)->count == SPDK_NVMF_MAX_SGL_ENTRIES);
1292  	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
1293  
1294  	spdk_mempool_free(rtransport.data_wr_pool);
1295  }
1296  
1297  static void
1298  test_nvmf_rdma_update_ibv_state(void)
1299  {
1300  	struct spdk_nvmf_rdma_qpair rqpair = {};
1301  	struct spdk_rdma_qp rdma_qp = {};
1302  	struct ibv_qp qp = {};
1303  	int rc = 0;
1304  
1305  	rqpair.rdma_qp = &rdma_qp;
1306  
1307  	/* Case 1: Failed to get updated RDMA queue pair state */
1308  	rqpair.ibv_state = IBV_QPS_INIT;
1309  	rqpair.rdma_qp->qp = NULL;
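	/* With a NULL ibv_qp the query should fail, so nvmf_rdma_update_ibv_state() is
	 * expected to return its failure sentinel IBV_QPS_ERR + 1 (i.e. IBV_QPS_UNKNOWN). */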
1310  
1311  	rc = nvmf_rdma_update_ibv_state(&rqpair);
1312  	CU_ASSERT(rc == IBV_QPS_ERR + 1);
1313  
1314  	/* Case 2: Bad state updated */
1315  	rqpair.rdma_qp->qp = &qp;
1316  	qp.state = IBV_QPS_ERR;
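	/* The ibv_query_qp() stub reports an out-of-range state (10) for a QP in
	 * IBV_QPS_ERR, so the bad state should be stored in ibv_state and the
	 * IBV_QPS_ERR + 1 sentinel returned again. */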
1317  	rc = nvmf_rdma_update_ibv_state(&rqpair);
1318  	CU_ASSERT(rqpair.ibv_state == 10);
1319  	CU_ASSERT(rc == IBV_QPS_ERR + 1);
1320  
1321  	/* Case 3: Pass */
1322  	qp.state = IBV_QPS_INIT;
1323  	rc = nvmf_rdma_update_ibv_state(&rqpair);
1324  	CU_ASSERT(rqpair.ibv_state == IBV_QPS_INIT);
1325  	CU_ASSERT(rc == IBV_QPS_INIT);
1326  }
1327  
1328  static void
1329  test_nvmf_rdma_resources_create(void)
1330  {
1331  	static struct spdk_nvmf_rdma_resources *rdma_resource;
1332  	struct spdk_nvmf_rdma_resource_opts opts = {};
1333  	struct spdk_nvmf_rdma_qpair qpair = {};
1334  	struct spdk_nvmf_rdma_recv *recv = NULL;
1335  	struct spdk_nvmf_rdma_request *req = NULL;
1336  	const int DEPTH = 128;
1337  
1338  	opts.max_queue_depth = DEPTH;
1339  	opts.in_capsule_data_size = 4096;
1340  	opts.shared = true;
1341  	opts.qpair = &qpair;
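	/* nvmf_rdma_resources_create() should allocate max_queue_depth (DEPTH) commands,
	 * completions, requests, receives and in-capsule buffers of in_capsule_data_size
	 * bytes each, and pre-wire their WRs and SGLs. */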
1342  
1343  	rdma_resource = nvmf_rdma_resources_create(&opts);
1344  	SPDK_CU_ASSERT_FATAL(rdma_resource != NULL);
1345  	/* Just check the first and last entries */
1346  	recv = &rdma_resource->recvs[0];
1347  	req = &rdma_resource->reqs[0];
1348  	CU_ASSERT(recv->rdma_wr.type == RDMA_WR_TYPE_RECV);
1349  	CU_ASSERT((uintptr_t)recv->buf == (uintptr_t)(rdma_resource->bufs));
1350  	CU_ASSERT(recv->sgl[0].addr == (uintptr_t)&rdma_resource->cmds[0]);
1351  	CU_ASSERT(recv->sgl[0].length == sizeof(rdma_resource->cmds[0]));
1352  	CU_ASSERT(recv->sgl[0].lkey == RDMA_UT_LKEY);
1353  	CU_ASSERT(recv->wr.num_sge == 2);
1354  	CU_ASSERT(recv->wr.wr_id == (uintptr_t)&rdma_resource->recvs[0].rdma_wr);
1355  	CU_ASSERT(recv->wr.sg_list == rdma_resource->recvs[0].sgl);
1356  	CU_ASSERT(req->req.rsp == &rdma_resource->cpls[0]);
1357  	CU_ASSERT(req->rsp.sgl[0].addr == (uintptr_t)&rdma_resource->cpls[0]);
1358  	CU_ASSERT(req->rsp.sgl[0].length == sizeof(rdma_resource->cpls[0]));
1359  	CU_ASSERT(req->rsp.sgl[0].lkey == RDMA_UT_LKEY);
1360  	CU_ASSERT(req->rsp.rdma_wr.type == RDMA_WR_TYPE_SEND);
1361  	CU_ASSERT(req->rsp.wr.wr_id == (uintptr_t)&rdma_resource->reqs[0].rsp.rdma_wr);
1362  	CU_ASSERT(req->rsp.wr.next == NULL);
1363  	CU_ASSERT(req->rsp.wr.opcode == IBV_WR_SEND);
1364  	CU_ASSERT(req->rsp.wr.send_flags == IBV_SEND_SIGNALED);
1365  	CU_ASSERT(req->rsp.wr.sg_list == rdma_resource->reqs[0].rsp.sgl);
1366  	CU_ASSERT(req->rsp.wr.num_sge == NVMF_DEFAULT_RSP_SGE);
1367  	CU_ASSERT(req->data.rdma_wr.type == RDMA_WR_TYPE_DATA);
1368  	CU_ASSERT(req->data.wr.wr_id == (uintptr_t)&rdma_resource->reqs[0].data.rdma_wr);
1369  	CU_ASSERT(req->data.wr.next == NULL);
1370  	CU_ASSERT(req->data.wr.send_flags == IBV_SEND_SIGNALED);
1371  	CU_ASSERT(req->data.wr.sg_list == rdma_resource->reqs[0].data.sgl);
1372  	CU_ASSERT(req->data.wr.num_sge == SPDK_NVMF_MAX_SGL_ENTRIES);
1373  	CU_ASSERT(req->state == RDMA_REQUEST_STATE_FREE);
1374  
1375  	recv = &rdma_resource->recvs[DEPTH - 1];
1376  	req = &rdma_resource->reqs[DEPTH - 1];
1377  	CU_ASSERT(recv->rdma_wr.type == RDMA_WR_TYPE_RECV);
1378  	CU_ASSERT((uintptr_t)recv->buf == (uintptr_t)(rdma_resource->bufs +
1379  			(DEPTH - 1) * 4096));
1380  	CU_ASSERT(recv->sgl[0].addr == (uintptr_t)&rdma_resource->cmds[DEPTH - 1]);
1381  	CU_ASSERT(recv->sgl[0].length == sizeof(rdma_resource->cmds[DEPTH - 1]));
1382  	CU_ASSERT(recv->sgl[0].lkey == RDMA_UT_LKEY);
1383  	CU_ASSERT(recv->wr.num_sge == 2);
1384  	CU_ASSERT(recv->wr.wr_id == (uintptr_t)&rdma_resource->recvs[DEPTH - 1].rdma_wr);
1385  	CU_ASSERT(recv->wr.sg_list == rdma_resource->recvs[DEPTH - 1].sgl);
1386  	CU_ASSERT(req->req.rsp == &rdma_resource->cpls[DEPTH - 1]);
1387  	CU_ASSERT(req->rsp.sgl[0].addr == (uintptr_t)&rdma_resource->cpls[DEPTH - 1]);
1388  	CU_ASSERT(req->rsp.sgl[0].length == sizeof(rdma_resource->cpls[DEPTH - 1]));
1389  	CU_ASSERT(req->rsp.sgl[0].lkey == RDMA_UT_LKEY);
1390  	CU_ASSERT(req->rsp.rdma_wr.type == RDMA_WR_TYPE_SEND);
1391  	CU_ASSERT(req->rsp.wr.wr_id == (uintptr_t)
1392  		  &req->rsp.rdma_wr);
1393  	CU_ASSERT(req->rsp.wr.next == NULL);
1394  	CU_ASSERT(req->rsp.wr.opcode == IBV_WR_SEND);
1395  	CU_ASSERT(req->rsp.wr.send_flags == IBV_SEND_SIGNALED);
1396  	CU_ASSERT(req->rsp.wr.sg_list == rdma_resource->reqs[DEPTH - 1].rsp.sgl);
1397  	CU_ASSERT(req->rsp.wr.num_sge == NVMF_DEFAULT_RSP_SGE);
1398  	CU_ASSERT(req->data.rdma_wr.type == RDMA_WR_TYPE_DATA);
1399  	CU_ASSERT(req->data.wr.wr_id == (uintptr_t)
1400  		  &req->data.rdma_wr);
1401  	CU_ASSERT(req->data.wr.next == NULL);
1402  	CU_ASSERT(req->data.wr.send_flags == IBV_SEND_SIGNALED);
1403  	CU_ASSERT(req->data.wr.sg_list == rdma_resource->reqs[DEPTH - 1].data.sgl);
1404  	CU_ASSERT(req->data.wr.num_sge == SPDK_NVMF_MAX_SGL_ENTRIES);
1405  	CU_ASSERT(req->state == RDMA_REQUEST_STATE_FREE);
1406  
1407  	nvmf_rdma_resources_destroy(rdma_resource);
1408  }
1409  
1410  static void
1411  test_nvmf_rdma_qpair_compare(void)
1412  {
1413  	struct spdk_nvmf_rdma_qpair rqpair1 = {}, rqpair2 = {};
1414  
1415  	rqpair1.qp_num = 0;
1416  	rqpair2.qp_num = UINT32_MAX;
1417  
1418  	CU_ASSERT(nvmf_rdma_qpair_compare(&rqpair1, &rqpair2) < 0);
1419  	CU_ASSERT(nvmf_rdma_qpair_compare(&rqpair2, &rqpair1) > 0);
1420  }
1421  
1422  static void
1423  test_nvmf_rdma_resize_cq(void)
1424  {
1425  	int rc = -1;
1426  	int tnum_wr = 0;
1427  	int tnum_cqe = 0;
1428  	struct spdk_nvmf_rdma_qpair rqpair = {};
1429  	struct spdk_nvmf_rdma_poller rpoller = {};
1430  	struct spdk_nvmf_rdma_device rdevice = {};
1431  	struct ibv_context ircontext = {};
1432  	struct ibv_device idevice = {};
1433  
1434  	rdevice.context = &ircontext;
1435  	rqpair.poller = &rpoller;
1436  	ircontext.device = &idevice;
1437  
1438  	/* Test1: Current capacity supports the required size. */
1439  	rpoller.required_num_wr = 10;
1440  	rpoller.num_cqe = 20;
1441  	rqpair.max_queue_depth = 2;
1442  	tnum_wr = rpoller.required_num_wr;
1443  	tnum_cqe = rpoller.num_cqe;
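	/* required_num_wr grows by MAX_WR_PER_QP(max_queue_depth) (8 with a depth of 2)
	 * to 18, which still fits in the existing 20 CQ entries, so no resize is expected. */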
1444  
1445  	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1446  	CU_ASSERT(rc == 0);
1447  	CU_ASSERT(rpoller.required_num_wr == 10 + MAX_WR_PER_QP(rqpair.max_queue_depth));
1448  	CU_ASSERT(rpoller.required_num_wr > tnum_wr);
1449  	CU_ASSERT(rpoller.num_cqe == tnum_cqe);
1450  
1451  	/* Test2: iWARP doesn't support CQ resize. */
1452  	tnum_wr = rpoller.required_num_wr;
1453  	tnum_cqe = rpoller.num_cqe;
1454  	idevice.transport_type = IBV_TRANSPORT_IWARP;
1455  
1456  	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1457  	CU_ASSERT(rc == -1);
1458  	CU_ASSERT(rpoller.required_num_wr == tnum_wr);
1459  	CU_ASSERT(rpoller.num_cqe == tnum_cqe);
1460  
1461  
1462  	/* Test3: RDMA CQE requirement exceeds device max_cqe limitation. */
1463  	tnum_wr = rpoller.required_num_wr;
1464  	tnum_cqe = rpoller.num_cqe;
1465  	idevice.transport_type = IBV_TRANSPORT_UNKNOWN;
1466  	rdevice.attr.max_cqe = 3;
1467  
1468  	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1469  	CU_ASSERT(rc == -1);
1470  	CU_ASSERT(rpoller.required_num_wr == tnum_wr);
1471  	CU_ASSERT(rpoller.num_cqe == tnum_cqe);
1472  
1473  	/* Test4: RDMA CQ resize failed. */
1474  	tnum_wr = rpoller.required_num_wr;
1475  	tnum_cqe = rpoller.num_cqe;
1476  	idevice.transport_type = IBV_TRANSPORT_IB;
1477  	rdevice.attr.max_cqe = 30;
1478  	MOCK_SET(ibv_resize_cq, -1);
1479  
1480  	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1481  	CU_ASSERT(rc == -1);
1482  	CU_ASSERT(rpoller.required_num_wr == tnum_wr);
1483  	CU_ASSERT(rpoller.num_cqe == tnum_cqe);
1484  
1485  	/* Test5: RDMA CQ resize success. rsize = MIN(MAX(num_cqe * 2, required_num_wr), device->attr.max_cqe). */
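	/* num_cqe is still 20 and required_num_wr becomes 18 + 8 = 26 here, so
	 * rsize = MIN(MAX(20 * 2, 26), 30) = 30, as asserted below. */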
1486  	tnum_wr = rpoller.required_num_wr;
1487  	tnum_cqe = rpoller.num_cqe;
1488  	MOCK_SET(ibv_resize_cq, 0);
1489  
1490  	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1491  	CU_ASSERT(rc == 0);
1492  	CU_ASSERT(rpoller.num_cqe == 30);
1493  	CU_ASSERT(rpoller.required_num_wr == 18 + MAX_WR_PER_QP(rqpair.max_queue_depth));
1494  	CU_ASSERT(rpoller.required_num_wr > tnum_wr);
1495  	CU_ASSERT(rpoller.num_cqe > tnum_cqe);
1496  }
1497  
1498  int main(int argc, char **argv)
1499  {
1500  	CU_pSuite	suite = NULL;
1501  	unsigned int	num_failures;
1502  
1503  	CU_set_error_action(CUEA_ABORT);
1504  	CU_initialize_registry();
1505  
1506  	suite = CU_add_suite("nvmf", NULL, NULL);
1507  
1508  	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
1509  	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
1510  	CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
1511  	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);
1512  	CU_ADD_TEST(suite, test_nvmf_rdma_opts_init);
1513  	CU_ADD_TEST(suite, test_nvmf_rdma_request_free_data);
1514  	CU_ADD_TEST(suite, test_nvmf_rdma_update_ibv_state);
1515  	CU_ADD_TEST(suite, test_nvmf_rdma_resources_create);
1516  	CU_ADD_TEST(suite, test_nvmf_rdma_qpair_compare);
1517  	CU_ADD_TEST(suite, test_nvmf_rdma_resize_cq);
1518  
1519  	CU_basic_set_mode(CU_BRM_VERBOSE);
1520  	CU_basic_run_tests();
1521  	num_failures = CU_get_number_of_failures();
1522  	CU_cleanup_registry();
1523  	return num_failures;
1524  }
1525