xref: /spdk/test/common/lib/test_rdma.c (revision 8afdeef3becfe9409cc9e7372bd0bc10e8b7d46d)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2021 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/rdma_provider.h"
10 #include "spdk_internal/rdma_utils.h"
11 #include "spdk_internal/mock.h"
12 
/* Fixed local/remote keys reported by the single UT memory region (g_rdma_mr) */
#define RDMA_UT_LKEY 123
#define RDMA_UT_RKEY 312

/* Global singletons: the qp/srq creation stubs below return pointers to these,
 * so tests share one qp and one srq object with no per-test allocation. */
struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts = {};
struct spdk_rdma_provider_qp g_spdk_rdma_qp = {};
struct spdk_rdma_provider_srq g_spdk_rdma_srq = {};
19 
/*
 * Stubs for the rdma_provider qp/srq API.  Creation stubs hand back the
 * global singletons defined above; connect/accept/flush stubs report
 * success (0/true) so the code under test proceeds on its happy path.
 */
DEFINE_STUB(spdk_rdma_provider_qp_create, struct spdk_rdma_provider_qp *, (struct rdma_cm_id *cm_id,
		struct spdk_rdma_provider_qp_init_attr *qp_attr), &g_spdk_rdma_qp);
DEFINE_STUB(spdk_rdma_provider_qp_accept, int, (struct spdk_rdma_provider_qp *spdk_rdma_qp,
		struct rdma_conn_param *conn_param), 0);
DEFINE_STUB(spdk_rdma_provider_qp_complete_connect, int,
	    (struct spdk_rdma_provider_qp *spdk_rdma_qp), 0);
DEFINE_STUB_V(spdk_rdma_provider_qp_destroy, (struct spdk_rdma_provider_qp *spdk_rdma_qp));
DEFINE_STUB(spdk_rdma_provider_qp_disconnect, int, (struct spdk_rdma_provider_qp *spdk_rdma_qp), 0);
DEFINE_STUB(spdk_rdma_provider_qp_queue_send_wrs, bool, (struct spdk_rdma_provider_qp *spdk_rdma_qp,
		struct ibv_send_wr *first), true);
DEFINE_STUB(spdk_rdma_provider_qp_flush_send_wrs, int, (struct spdk_rdma_provider_qp *spdk_rdma_qp,
		struct ibv_send_wr **bad_wr), 0);
DEFINE_STUB(spdk_rdma_provider_srq_create, struct spdk_rdma_provider_srq *,
	    (struct spdk_rdma_provider_srq_init_attr *init_attr), &g_spdk_rdma_srq);
DEFINE_STUB(spdk_rdma_provider_srq_destroy, int, (struct spdk_rdma_provider_srq *rdma_srq), 0);
DEFINE_STUB(spdk_rdma_provider_srq_queue_recv_wrs, bool, (struct spdk_rdma_provider_srq *rdma_srq,
		struct ibv_recv_wr *first), true);
DEFINE_STUB(spdk_rdma_provider_srq_flush_recv_wrs, int, (struct spdk_rdma_provider_srq *rdma_srq,
		struct ibv_recv_wr **bad_wr), 0);
DEFINE_STUB(spdk_rdma_provider_qp_queue_recv_wrs, bool, (struct spdk_rdma_provider_qp *spdk_rdma_qp,
		struct ibv_recv_wr *first), true);
DEFINE_STUB(spdk_rdma_provider_qp_flush_recv_wrs, int, (struct spdk_rdma_provider_qp *spdk_rdma_qp,
		struct ibv_recv_wr **bad_wr), 0);
43 DEFINE_STUB(spdk_rdma_utils_create_mem_map, struct spdk_rdma_utils_mem_map *, (struct ibv_pd *pd,
44 		struct spdk_nvme_rdma_hooks *hooks, uint32_t access_flags), NULL)
DEFINE_STUB_V(spdk_rdma_utils_free_mem_map, (struct spdk_rdma_utils_mem_map **map));
/* Memory-domain helpers: no real domain exists in UT, so get returns NULL
 * and put succeeds unconditionally. */
DEFINE_STUB(spdk_rdma_utils_get_memory_domain, struct spdk_memory_domain *, (struct ibv_pd *pd),
	    NULL);
DEFINE_STUB(spdk_rdma_utils_put_memory_domain, int, (struct spdk_memory_domain *domain), 0);
49 
/* used to mock out having to split an SGL over a memory region */
size_t g_mr_size;	/* non-zero: translations with length > g_mr_size fail with -ERANGE */
uint64_t g_mr_next_size;	/* non-zero: value g_mr_size is bumped to after the first -ERANGE */
/* The single MR handed out for every successful translation request */
struct ibv_mr g_rdma_mr = {
	.addr = (void *)0xC0FFEE,
	.lkey = RDMA_UT_LKEY,
	.rkey = RDMA_UT_RKEY
};
58 
59 DEFINE_RETURN_MOCK(spdk_rdma_utils_get_translation, int);
60 int
61 spdk_rdma_utils_get_translation(struct spdk_rdma_utils_mem_map *map, void *address,
62 				size_t length, struct spdk_rdma_utils_memory_translation *translation)
63 {
64 	translation->mr_or_key.mr = &g_rdma_mr;
65 	translation->translation_type = SPDK_RDMA_UTILS_TRANSLATION_MR;
66 	HANDLE_RETURN_MOCK(spdk_rdma_utils_get_translation);
67 
68 	if (g_mr_size && length > g_mr_size) {
69 		if (g_mr_next_size) {
70 			g_mr_size = g_mr_next_size;
71 		}
72 		return -ERANGE;
73 	}
74 
75 	return 0;
76 }
77 
/* Returns the mocked protection domain when a test has armed the mock via
 * DEFINE_RETURN_MOCK machinery; otherwise reports no PD (NULL). */
DEFINE_RETURN_MOCK(spdk_rdma_utils_get_pd, struct ibv_pd *);
struct ibv_pd *
spdk_rdma_utils_get_pd(struct ibv_context *context)
{
	HANDLE_RETURN_MOCK(spdk_rdma_utils_get_pd);
	return NULL;
}
85 
/* Releasing a protection domain is a no-op in UT */
DEFINE_STUB_V(spdk_rdma_utils_put_pd, (struct ibv_pd *pd));
87