/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation. All rights reserved.
 *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */
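
/*
 * Stubs and mocks for the spdk_rdma_provider and spdk_rdma_utils APIs,
 * intended for unit tests that exercise RDMA transport code paths without
 * touching real RDMA hardware or resources.
 */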

#include "spdk/stdinc.h"

#include "spdk_internal/rdma_provider.h"
#include "spdk_internal/rdma_utils.h"
#include "spdk_internal/mock.h"

#define RDMA_UT_LKEY 123
#define RDMA_UT_RKEY 312

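/*
 * Shared global objects: g_spdk_rdma_qp and g_spdk_rdma_srq are the fixed
 * instances handed back by the qp/srq "create" stubs below, so callers get
 * stable, non-NULL pointers without allocating real RDMA resources.
 */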
struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts = {};
struct spdk_rdma_provider_qp g_spdk_rdma_qp = {};
struct spdk_rdma_provider_srq g_spdk_rdma_srq = {};

DEFINE_STUB(spdk_rdma_provider_qp_create, struct spdk_rdma_provider_qp *, (struct rdma_cm_id *cm_id,
		struct spdk_rdma_provider_qp_init_attr *qp_attr), &g_spdk_rdma_qp);
DEFINE_STUB(spdk_rdma_provider_qp_accept, int, (struct spdk_rdma_provider_qp *spdk_rdma_qp,
		struct rdma_conn_param *conn_param), 0);
DEFINE_STUB(spdk_rdma_provider_qp_complete_connect, int,
	    (struct spdk_rdma_provider_qp *spdk_rdma_qp), 0);
DEFINE_STUB_V(spdk_rdma_provider_qp_destroy, (struct spdk_rdma_provider_qp *spdk_rdma_qp));
DEFINE_STUB(spdk_rdma_provider_qp_disconnect, int, (struct spdk_rdma_provider_qp *spdk_rdma_qp), 0);
DEFINE_STUB(spdk_rdma_provider_qp_queue_send_wrs, bool, (struct spdk_rdma_provider_qp *spdk_rdma_qp,
		struct ibv_send_wr *first), true);
DEFINE_STUB(spdk_rdma_provider_qp_flush_send_wrs, int, (struct spdk_rdma_provider_qp *spdk_rdma_qp,
		struct ibv_send_wr **bad_wr), 0);
DEFINE_STUB(spdk_rdma_provider_srq_create, struct spdk_rdma_provider_srq *,
	    (struct spdk_rdma_provider_srq_init_attr *init_attr), &g_spdk_rdma_srq);
DEFINE_STUB(spdk_rdma_provider_srq_destroy, int, (struct spdk_rdma_provider_srq *rdma_srq), 0);
DEFINE_STUB(spdk_rdma_provider_srq_queue_recv_wrs, bool, (struct spdk_rdma_provider_srq *rdma_srq,
		struct ibv_recv_wr *first), true);
DEFINE_STUB(spdk_rdma_provider_srq_flush_recv_wrs, int, (struct spdk_rdma_provider_srq *rdma_srq,
		struct ibv_recv_wr **bad_wr), 0);
DEFINE_STUB(spdk_rdma_provider_qp_queue_recv_wrs, bool, (struct spdk_rdma_provider_qp *spdk_rdma_qp,
		struct ibv_recv_wr *first), true);
DEFINE_STUB(spdk_rdma_provider_qp_flush_recv_wrs, int, (struct spdk_rdma_provider_qp *spdk_rdma_qp,
		struct ibv_recv_wr **bad_wr), 0);
DEFINE_STUB(spdk_rdma_utils_create_mem_map, struct spdk_rdma_utils_mem_map *, (struct ibv_pd *pd,
		struct spdk_nvme_rdma_hooks *hooks, enum spdk_rdma_utils_memory_map_role role), NULL);
DEFINE_STUB_V(spdk_rdma_utils_free_mem_map, (struct spdk_rdma_utils_mem_map **map));
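
/*
 * All of the stubs above return fixed values. A test can change a return value
 * at run time with the helpers from spdk_internal/mock.h, for example
 * (illustrative only):
 *
 *   MOCK_SET(spdk_rdma_provider_qp_accept, -ECONNREFUSED);
 *   ...exercise the error path under test...
 *   MOCK_SET(spdk_rdma_provider_qp_accept, 0);
 */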

/* used to mock out having to split an SGL over a memory region */
size_t g_mr_size;
uint64_t g_mr_next_size;
struct ibv_mr g_rdma_mr = {
	.addr = (void *)0xC0FFEE,
	.lkey = RDMA_UT_LKEY,
	.rkey = RDMA_UT_RKEY
};

DEFINE_RETURN_MOCK(spdk_rdma_utils_get_translation, int);
int
spdk_rdma_utils_get_translation(struct spdk_rdma_utils_mem_map *map, void *address,
				size_t length, struct spdk_rdma_utils_memory_translation *translation)
{
	translation->mr_or_key.mr = &g_rdma_mr;
	translation->translation_type = SPDK_RDMA_UTILS_TRANSLATION_MR;
	HANDLE_RETURN_MOCK(spdk_rdma_utils_get_translation);

	if (g_mr_size && length > g_mr_size) {
		if (g_mr_next_size) {
			g_mr_size = g_mr_next_size;
		}
		return -ERANGE;
	}

	return 0;
}
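
/*
 * Every translation resolves to the single fake g_rdma_mr above, so tests can
 * assert RDMA_UT_LKEY/RDMA_UT_RKEY in the resulting work requests. To drive the
 * -ERANGE "split over a memory region" path, a test could, for example
 * (illustrative only):
 *
 *   g_mr_size = 4096;
 *   g_mr_next_size = 8192;
 *
 * The first translation longer than 4096 bytes then fails with -ERANGE and
 * raises the limit to 8192, so a retried or split request fits on the next call.
 */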

DEFINE_RETURN_MOCK(spdk_rdma_utils_get_pd, struct ibv_pd *);
struct ibv_pd *
spdk_rdma_utils_get_pd(struct ibv_context *context)
{
	HANDLE_RETURN_MOCK(spdk_rdma_utils_get_pd);
	return NULL;
}
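
/*
 * spdk_rdma_utils_get_pd() above returns NULL by default. A test that needs a
 * valid-looking protection domain can supply one before the call under test,
 * e.g. (illustrative only, fake_pd being a test-owned struct ibv_pd):
 *
 *   MOCK_SET(spdk_rdma_utils_get_pd, &fake_pd);
 */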

DEFINE_STUB_V(spdk_rdma_utils_put_pd, (struct ibv_pd *pd));