/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 6WIND S.A.
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_COMMON_MR_H_
#define RTE_PMD_MLX5_COMMON_MR_H_

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_compat.h>
#include <rte_rwlock.h>
#include <rte_bitmap.h>
#include <rte_mbuf.h>
#include <rte_memory.h>

#include "mlx5_glue.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_defs.h"

/* mlx5 PMD MR struct. */
struct mlx5_pmd_mr {
	uint32_t	     lkey;
	void		     *addr;
	size_t		     len;
	void		     *obj;  /* Verbs MR object or DevX umem object. */
	struct mlx5_devx_obj *mkey; /* DevX mkey object. */
};

/**
 * MR operation callback typedefs.
 */
typedef int (*mlx5_reg_mr_t)(void *pd, void *addr, size_t length,
			     struct mlx5_pmd_mr *pmd_mr);
typedef void (*mlx5_dereg_mr_t)(struct mlx5_pmd_mr *pmd_mr);

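/*
 * A minimal sketch of a registration callback pair, assuming plain Verbs
 * (ibv_reg_mr()/ibv_dereg_mr()). The implementations actually provided by
 * this library live in mlx5_common_verbs.c and may use different access
 * flags; "example_reg_mr"/"example_dereg_mr" are hypothetical names.
 *
 *	static int
 *	example_reg_mr(void *pd, void *addr, size_t length,
 *		       struct mlx5_pmd_mr *pmd_mr)
 *	{
 *		struct ibv_mr *mr = ibv_reg_mr(pd, addr, length,
 *					       IBV_ACCESS_LOCAL_WRITE);
 *
 *		if (mr == NULL)
 *			return -1;
 *		pmd_mr->lkey = mr->lkey;
 *		pmd_mr->addr = mr->addr;
 *		pmd_mr->len = mr->length;
 *		pmd_mr->obj = (void *)mr;
 *		return 0;
 *	}
 *
 *	static void
 *	example_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
 *	{
 *		if (pmd_mr->obj != NULL)
 *			(void)ibv_dereg_mr(pmd_mr->obj);
 *		memset(pmd_mr, 0, sizeof(*pmd_mr));
 *	}
 */
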
/* Memory Region object. */
struct mlx5_mr {
	LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
	struct mlx5_pmd_mr pmd_mr; /* PMD memory region. */
	const struct rte_memseg_list *msl;
	int ms_base_idx; /* Start index of msl->memseg_arr[]. */
	int ms_n; /* Number of memsegs in use. */
	uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
	struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs that belong to the MR. */
};

/* Cache entry for a Memory Region. */
struct __rte_packed_begin mr_cache_entry {
	uintptr_t start; /* Start address of MR. */
	uintptr_t end; /* End address of MR. */
	uint32_t lkey; /* rte_cpu_to_be_32(lkey). */
} __rte_packed_end;

/* MR cache table for binary search. */
struct __rte_packed_begin mlx5_mr_btree {
	uint32_t len; /* Number of entries in use. */
	uint32_t size; /* Table capacity (total number of entries). */
	struct mr_cache_entry (*table)[];
} __rte_packed_end;

struct mlx5_common_device;

/* Per-queue MR control descriptor. */
struct __rte_packed_begin mlx5_mr_ctrl {
	uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
	uint32_t cur_gen; /* Generation number saved to flush caches. */
	uint16_t mru; /* Index of last hit entry in top-half cache. */
	uint16_t head; /* Index of the oldest entry in top-half cache. */
	struct mr_cache_entry cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
	struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
} __rte_packed_end;

LIST_HEAD(mlx5_mr_list, mlx5_mr);
LIST_HEAD(mlx5_mempool_reg_list, mlx5_mempool_reg);

/* Global per-device MR cache. */
struct __rte_packed_begin mlx5_mr_share_cache {
	uint32_t dev_gen; /* Generation number to flush local caches. */
	rte_rwlock_t rwlock; /* MR cache lock. */
	rte_rwlock_t mprwlock; /* Mempool registration lock. */
	struct mlx5_mr_btree cache; /* Global MR cache table. */
	struct mlx5_mr_list mr_list; /* Registered MR list. */
	struct mlx5_mr_list mr_free_list; /* Freed MR list. */
	struct mlx5_mempool_reg_list mempool_reg_list; /* Mempool database. */
	mlx5_reg_mr_t reg_mr_cb; /* Callback to reg_mr func. */
	mlx5_dereg_mr_t dereg_mr_cb; /* Callback to dereg_mr func. */
} __rte_packed_end;

/* Multi-Packet RQ buffer header. */
struct __rte_cache_aligned mlx5_mprq_buf {
	struct rte_mempool *mp;
	RTE_ATOMIC(uint16_t) refcnt; /* Atomically accessed refcnt. */
	struct rte_mbuf_ext_shared_info shinfos[];
	/*
	 * Shared information per stride.
	 * More memory is allocated after this array, for the first stride's
	 * head-room and for the stride data.
	 */
};

__rte_internal
void mlx5_mprq_buf_free_cb(void *addr, void *opaque);

/**
 * Get the Memory Pool (MP) of an mbuf. If the mbuf is indirect (cloned),
 * return the pool of the direct mbuf it is attached to instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where the data of the given mbuf is located.
 */
static inline struct rte_mempool *
mlx5_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_CLONED(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}

/**
 * Look up an LKey in the given lookup table by linear search. First check the
 * last-hit entry; on a miss, search the entire array. If found, update the
 * last-hit index and return the LKey.
 *
 * @param lkp_tbl
 *   Pointer to lookup table.
 * @param[in,out] cached_idx
 *   Pointer to last-hit index.
 * @param n
 *   Size of lookup table.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_mr_lookup_lkey(struct mr_cache_entry *lkp_tbl, uint16_t *cached_idx,
		    uint16_t n, uintptr_t addr)
{
	uint16_t idx;

	if (likely(addr >= lkp_tbl[*cached_idx].start &&
		   addr < lkp_tbl[*cached_idx].end))
		return lkp_tbl[*cached_idx].lkey;
	for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
		if (addr >= lkp_tbl[idx].start &&
		    addr < lkp_tbl[idx].end) {
			/* Found. */
			*cached_idx = idx;
			return lkp_tbl[idx].lkey;
		}
	}
	return UINT32_MAX;
}
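
/*
 * Usage sketch (illustrative): looking up an address in a small per-queue
 * array. The function keeps *cached_idx pointing at the last hit, so
 * back-to-back lookups that fall into the same region cost a single range
 * check.
 *
 *	uint16_t mru = 0;
 *	uint32_t lkey;
 *
 *	lkey = mlx5_mr_lookup_lkey(cache, &mru, MLX5_MR_CACHE_N,
 *				   (uintptr_t)buf);
 *	if (lkey == UINT32_MAX)
 *		... no entry covers "buf", take the bottom-half path ...
 */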

__rte_internal
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);

/**
 * Bottom-half of the LKey search on a cache miss. If supported, look up the
 * address in the mempool of the mbuf. Otherwise, search the caches of the
 * legacy mechanism.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mbuf
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
__rte_internal
uint32_t mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf);

/**
 * Query LKey from a packet buffer.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mbuf
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_mr_mb2mr(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf)
{
	uint32_t lkey;

	/* Check the generation number to see if existing MRs have changed. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, (uintptr_t)mbuf->buf_addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take the slower bottom-half on a miss. */
	return mlx5_mr_mb2mr_bh(mr_ctrl, mbuf);
}
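
/*
 * Usage sketch (illustrative, with a hypothetical "txq" descriptor): a PMD
 * datapath typically resolves one LKey per transmitted mbuf and stops the
 * burst when no MR covers the buffer:
 *
 *	for (i = 0; i < nb_pkts; ++i) {
 *		uint32_t lkey = mlx5_mr_mb2mr(&txq->mr_ctrl, pkts[i]);
 *
 *		if (unlikely(lkey == UINT32_MAX))
 *			break;
 *		... fill the data segment with lkey and the buffer address ...
 *	}
 */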

/* mlx5_common_mr.c */

__rte_internal
int mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
		      int socket);
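
/*
 * Usage sketch (illustrative): wiring a per-queue control structure to the
 * per-device generation counter at queue setup time. "priv" and "txq" are
 * hypothetical driver structures.
 *
 *	if (mlx5_mr_ctrl_init(&txq->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
 *			      socket_id) != 0)
 *		return -rte_errno;
 */
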
__rte_internal
void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
__rte_internal
uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
			       struct rte_mempool *mp, uintptr_t addr);
int mlx5_mr_expand_cache(struct mlx5_mr_share_cache *share_cache,
			 uint32_t new_size, int socket);
void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
int mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket);
void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
void mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
			  const char *ibdev_name, const void *addr, size_t len);
int mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
			 struct mlx5_mr *mr);
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr);
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
		   mlx5_reg_mr_t reg_mr_cb);
void mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb);
__rte_internal
uint32_t
mlx5_mr_create(struct mlx5_common_device *cdev,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr);

__rte_internal
uint32_t
mlx5_mr_addr2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr);
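
/*
 * Usage sketch (illustrative): resolving an LKey for an arbitrary virtual
 * address that is not held by an mbuf. The fast top-half cache is tried
 * first; the bottom-half creates or registers the MR on demand.
 *
 *	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
 *				   MLX5_MR_CACHE_N, addr);
 *	if (lkey == UINT32_MAX)
 *		lkey = mlx5_mr_addr2mr_bh(mr_ctrl, addr);
 */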

/* mlx5_common_verbs.c */

__rte_internal
int
mlx5_common_verbs_reg_mr(void *pd, void *addr, size_t length,
			 struct mlx5_pmd_mr *pmd_mr);
__rte_internal
void
mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);

__rte_internal
void
mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb);

__rte_internal
int
mlx5_mr_mempool_register(struct mlx5_common_device *cdev,
			 struct rte_mempool *mp, bool is_extmem);
__rte_internal
int
mlx5_mr_mempool_unregister(struct mlx5_common_device *cdev,
			   struct rte_mempool *mp);

__rte_internal
int
mlx5_mr_mempool_populate_cache(struct mlx5_mr_ctrl *mr_ctrl,
			       struct rte_mempool *mp);
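
/*
 * Usage sketch (illustrative, with hypothetical "priv"/"rxq" structures):
 * the mempool-based registration lifecycle. A driver registers a pool at
 * start, optionally pre-populates a queue's bottom-half cache, and
 * unregisters the pool on stop.
 *
 *	if (mlx5_mr_mempool_register(priv->cdev, mp, false) < 0 &&
 *	    rte_errno != EEXIST)
 *		return -rte_errno;
 *	(void)mlx5_mr_mempool_populate_cache(&rxq->mr_ctrl, mp);
 *	...
 *	(void)mlx5_mr_mempool_unregister(priv->cdev, mp);
 */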

#endif /* RTE_PMD_MLX5_COMMON_MR_H_ */