/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 6WIND S.A.
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_COMMON_MR_H_
#define RTE_PMD_MLX5_COMMON_MR_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_rwlock.h>
#include <rte_bitmap.h>
#include <rte_memory.h>

#include "mlx5_common_mp.h"

/* Size of per-queue MR cache array for linear search. */
#define MLX5_MR_CACHE_N 8
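/* Size of per-queue MR cache table for binary search (bottom half). */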
#define MLX5_MR_BTREE_CACHE_N 256

/* Memory Region object. */
struct mlx5_mr {
	LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
	struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
	const struct rte_memseg_list *msl;
	int ms_base_idx; /* Start index of msl->memseg_arr[]. */
	int ms_n; /* Number of memsegs in use. */
	uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
	struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonging to the MR. */
};
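/*
 * Each MR registers a virtually contiguous range of memsegs taken from a
 * single memseg list; ms_bmp tracks which of the ms_bmp_n slots starting at
 * ms_base_idx are still owned by this MR, so freed segments can be accounted
 * for before the MR itself is released.
 */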
/* Cache entry for Memory Region. */
struct mr_cache_entry {
	uintptr_t start; /* Start address of MR. */
	uintptr_t end; /* End address of MR. */
	uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
} __rte_packed;

/* MR cache table for binary search. */
struct mlx5_mr_btree {
	uint16_t len; /* Number of entries. */
	uint16_t size; /* Table capacity (maximum number of entries). */
	int overflow; /* Mark failure of table expansion. */
	struct mr_cache_entry (*table)[];
} __rte_packed;

/* Per-queue MR control descriptor. */
struct mlx5_mr_ctrl {
	uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
	uint32_t cur_gen; /* Generation number saved to flush caches. */
	uint16_t mru; /* Index of last hit entry in top-half cache. */
	uint16_t head; /* Index of the oldest entry in top-half cache. */
	struct mr_cache_entry cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
	struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
} __rte_packed;
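/*
 * Each queue therefore holds a two-level MR cache: the small cache[] array
 * above is the top half, searched linearly with the most recently used entry
 * first, while cache_bh is the per-queue bottom-half B-tree. When
 * *dev_gen_ptr (the device-wide generation counter) no longer matches
 * cur_gen, the local caches are stale and are emptied with
 * mlx5_mr_flush_local_cache(); see the illustrative sketch at the end of
 * this header.
 */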
LIST_HEAD(mlx5_mr_list, mlx5_mr);

/* Global per-device MR cache. */
struct mlx5_mr_share_cache {
	uint32_t dev_gen; /* Generation number to flush local caches. */
	rte_rwlock_t rwlock; /* MR cache Lock. */
	struct mlx5_mr_btree cache; /* Global MR cache table. */
	struct mlx5_mr_list mr_list; /* Registered MR list. */
	struct mlx5_mr_list mr_free_list; /* Freed MR list. */
} __rte_packed;
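/*
 * dev_gen here is the counter that every per-queue dev_gen_ptr points at: it
 * is advanced when registered memory is released and the global table is
 * rebuilt, which tells the queues to drop their local caches. rwlock
 * protects the global B-tree and the two MR lists.
 */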
/**
 * Look up LKey from given lookup table by linear search. First probe the
 * last-hit entry; if that misses, the entire array is searched. If found,
 * update the last-hit index and return the LKey.
 *
 * @param lkp_tbl
 *   Pointer to lookup table.
 * @param[in,out] cached_idx
 *   Pointer to last-hit index.
 * @param n
 *   Size of lookup table.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX if no match is found.
 */
static __rte_always_inline uint32_t
mlx5_mr_lookup_lkey(struct mr_cache_entry *lkp_tbl, uint16_t *cached_idx,
		    uint16_t n, uintptr_t addr)
{
	uint16_t idx;

	if (likely(addr >= lkp_tbl[*cached_idx].start &&
		   addr < lkp_tbl[*cached_idx].end))
		return lkp_tbl[*cached_idx].lkey;
	for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
		if (addr >= lkp_tbl[idx].start &&
		    addr < lkp_tbl[idx].end) {
			/* Found. */
			*cached_idx = idx;
			return lkp_tbl[idx].lkey;
		}
	}
	return UINT32_MAX;
}
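/*
 * On a miss (UINT32_MAX) callers are expected to take the slower bottom-half
 * path, e.g. mlx5_mr_addr2mr_bh() declared below; an illustrative sketch of
 * that pattern follows the prototypes at the end of this header.
 */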
__rte_experimental
int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
__rte_experimental
void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
__rte_experimental
void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
__rte_experimental
uint32_t mlx5_mr_addr2mr_bh(struct ibv_pd *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en);
__rte_experimental
void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
__rte_experimental
void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
__rte_experimental
void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
__rte_experimental
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
__rte_experimental
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr);
__rte_experimental
uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr);
__rte_experimental
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr);
__rte_experimental
struct mlx5_mr *
mlx5_create_mr_ext(struct ibv_pd *pd, uintptr_t addr, size_t len,
		   int socket_id);
__rte_experimental
uint32_t
mlx5_mr_create_primary(struct ibv_pd *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en);
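/*
 * Illustrative sketch only (a hypothetical helper, not part of this API):
 * how a datapath is typically expected to combine the pieces above, namely
 * the generation check, the top-half linear lookup and the bottom-half
 * fallback. The function name and the compile guard are made up here, so
 * the example stays out of normal builds.
 */
#ifdef MLX5_MR_USAGE_SKETCH
static __rte_always_inline uint32_t
mlx5_mr_addr2mr_sketch(struct ibv_pd *pd, struct mlx5_mp_id *mp_id,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr,
		       unsigned int mr_ext_memseg_en)
{
	uint32_t lkey;

	/* Drop local caches if the device generation has moved on. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Fast path: linear search of the per-queue top-half cache. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Slow path: bottom-half lookup, registering a new MR if needed. */
	return mlx5_mr_addr2mr_bh(pd, mp_id, share_cache, mr_ctrl, addr,
				  mr_ext_memseg_en);
}
#endif /* MLX5_MR_USAGE_SKETCH */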
#endif /* RTE_PMD_MLX5_COMMON_MR_H_ */