/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <stddef.h>

#include <rte_eal_memconfig.h>
#include <rte_eal_paging.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5_glue.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

/* Virtual memory range. */
struct mlx5_range {
	uintptr_t start;
	uintptr_t end;
};

/** Memory region for a mempool. */
struct mlx5_mempool_mr {
	struct mlx5_pmd_mr pmd_mr;
	uint32_t refcnt; /**< Number of mempools sharing this MR. */
};

/* Mempool registration. */
struct mlx5_mempool_reg {
	LIST_ENTRY(mlx5_mempool_reg) next;
	/** Registered mempool, used to designate registrations. */
	struct rte_mempool *mp;
	/** Memory regions for the address ranges of the mempool. */
	struct mlx5_mempool_mr *mrs;
	/** Number of memory regions. */
	unsigned int mrs_n;
};

/**
 * Expand B-tree table to a given size. Must not be called while holding
 * memory_hotplug_lock or share_cache.rwlock due to rte_realloc().
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * The downside of directly using rte_realloc() is that SOCKET_ID_ANY
	 * is used internally if there is no room to expand on the original
	 * socket. Since this is a rare case on a very slow path, it is
	 * acceptable. Initially cache_bh[] is given enough space in practice,
	 * and once expanded, further expansion should never be needed again.
	 */
	mem = mlx5_realloc(bt->table, MLX5_MEM_RTE | MLX5_MEM_ZERO,
			   n * sizeof(struct mr_cache_entry), 0, SOCKET_ID_ANY);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}
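
/*
 * Illustrative sketch (not part of the driver): how a control-path caller
 * can grow a local B-tree before inserting, doubling the size the same way
 * mr_lookup_caches() below does. Guarded out of compilation; the guard macro
 * and helper name are hypothetical.
 */
#ifdef MLX5_MR_DOC_SKETCHES
static void
example_btree_grow(struct mlx5_mr_btree *bt)
{
	/* Double the table when full; failure only degrades to slow path. */
	if (bt->len == bt->size)
		(void)mr_btree_expand(bt, bt->size << 1);
}
#endif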

/**
 * Look up LKey from given B-tree lookup table, store the last index and return
 * searched LKey.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, returns index where it stops
 *   searching so that index can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	MLX5_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
				    lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	MLX5_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}
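
/*
 * Illustrative sketch (not part of the driver): the lookup contract. The
 * sentinel entry at index 0 ({start = 0, lkey = UINT32_MAX}) guarantees the
 * binary search always has a lower bound, and on a miss *idx still points at
 * the slot after which a new entry would be inserted. Guard macro and helper
 * name are hypothetical.
 */
#ifdef MLX5_MR_DOC_SKETCHES
static void
example_btree_lookup(struct mlx5_mr_btree *bt, uintptr_t addr)
{
	uint16_t idx = 0;
	uint32_t lkey = mr_btree_lookup(bt, &idx, addr);

	if (lkey == UINT32_MAX)
		DRV_LOG(DEBUG, "miss, would insert after idx=%u", idx);
	else
		DRV_LOG(DEBUG, "hit, lkey=0x%x at idx=%u", lkey, idx);
}
#endif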

/**
 * Insert an entry to B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	MLX5_ASSERT(bt != NULL);
	MLX5_ASSERT(bt->len <= bt->size);
	MLX5_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	MLX5_ASSERT(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				sizeof(struct mr_cache_entry) * n,
				0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DRV_LOG(DEBUG,
			"failed to allocate memory for btree cache on socket "
			"%d", socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mr_cache_entry) {
		.lkey = UINT32_MAX,
	};
	DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	return 0;
}
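
/*
 * Illustrative sketch (not part of the driver): a minimal init/free lifecycle
 * for a B-tree cache, assuming a hypothetical starting capacity of 256
 * entries. Note the table already contains the sentinel entry after init.
 * Guard macro and helper name are hypothetical.
 */
#ifdef MLX5_MR_DOC_SKETCHES
static int
example_btree_lifecycle(int socket)
{
	struct mlx5_mr_btree bt;

	/* The init assertion requires a zeroed structure. */
	memset(&bt, 0, sizeof(bt));
	if (mlx5_mr_btree_init(&bt, 256, socket) < 0)
		return -rte_errno; /* rte_errno set by init. */
	/* ... insert/lookup entries here ... */
	mlx5_mr_btree_free(&bt);
	return 0;
}
#endif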

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	mlx5_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	int idx;
	struct mr_cache_entry *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mr_cache_entry *entry = &lkp_tbl[idx];

		DRV_LOG(DEBUG, "B-tree(%p)[%u],"
		      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}

/**
 * Find virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have memseg list. */
	if (mr->msl == NULL) {
		MLX5_ASSERT(mr->ms_bmp_n == 1);
		MLX5_ASSERT(mr->ms_n == 1);
		MLX5_ASSERT(base_idx == 0);
		/*
		 * Can't search it from memseg list but get it directly from
		 * pmd_mr as there's only one chunk.
		 */
		entry->start = (uintptr_t)mr->pmd_mr.addr;
		entry->end = (uintptr_t)mr->pmd_mr.addr + mr->pmd_mr.len;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
	}
	return idx;
}
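
/*
 * Illustrative sketch (not part of the driver): the canonical iteration
 * pattern over an MR's chunks, as used by mlx5_mr_insert_cache() and
 * mlx5_mr_lookup_list() below. The returned index advances past the chunk
 * just found; an entry with end == 0 means no further chunk exists. Guard
 * macro and helper name are hypothetical.
 */
#ifdef MLX5_MR_DOC_SKETCHES
static void
example_iterate_chunks(struct mlx5_mr *mr)
{
	unsigned int n;

	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry chunk;

		memset(&chunk, 0, sizeof(chunk));
		/* Find the next contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &chunk, n);
		if (!chunk.end)
			break;
		DRV_LOG(DEBUG, "chunk [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
			chunk.start, chunk.end);
	}
}
#endif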

/**
 * Insert a MR to the global B-tree cache. It may fail when running low on
 * memory. Then, this entry will have to be searched by mr_lookup_list() in
 * mlx5_mr_create() on miss.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr)
{
	unsigned int n;

	DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
		(void *)mr, (void *)share_cache);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * here because of a possible deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up address on the global MR cache.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr)
{
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed since it failed to expand the
	 * B-tree table, it can't have all the existing MRs. Then, the address
	 * has to be searched by traversing the original MR list instead, which
	 * is a very slow path. Otherwise, the global cache is all inclusive.
	 */
	if (!unlikely(share_cache->cache.overflow)) {
		lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*share_cache->cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mlx5_mr_lookup_list(share_cache, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}
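
/*
 * Illustrative sketch (not part of the driver): callers that do not already
 * hold share_cache->rwlock must take it around the cache lookup, as
 * mlx5_mr_create_secondary() below does. Guard macro and helper name are
 * hypothetical.
 */
#ifdef MLX5_MR_DOC_SKETCHES
static uint32_t
example_locked_lookup(struct mlx5_mr_share_cache *share_cache, uintptr_t addr)
{
	struct mr_cache_entry entry;
	uint32_t lkey;

	memset(&entry, 0, sizeof(entry));
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mlx5_mr_lookup_cache(share_cache, &entry, addr);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	return lkey;
}
#endif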

/**
 * Free MR resources. The MR lock must not be held, otherwise a deadlock may
 * occur: rte_free() can raise a memory free event and the callback function
 * would spin on the lock.
 *
 * @param mr
 *   Pointer to MR to free.
 */
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	dereg_mr_cb(&mr->pmd_mr);
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	mlx5_free(mr);
}

void
mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
	/* Flush cache to rebuild. */
	share_cache->cache.len = 1;
	share_cache->cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr)
		if (mlx5_mr_insert_cache(share_cache, mr) < 0)
			return;
}

/**
 * Release resources of detached MR having no online entry.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
static void
mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * MRs can't be freed while holding the lock, because rte_free() could
	 * call the memory free callback function, which would deadlock.
	 */
	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = share_cache->mr_free_list;
	LIST_INIT(&share_cache->mr_free_list);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	}
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called on a secondary process, then a request is sent to
 * the primary process in order to create an MR for the address. As the global
 * MR list is in shared memory, the following LKey lookup should succeed unless
 * the request fails.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Pointer to multi-process ID structure.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_secondary(void *pd __rte_unused,
			 struct mlx5_mp_id *mp_id,
			 struct mlx5_mr_share_cache *share_cache,
			 struct mr_cache_entry *entry, uintptr_t addr,
			 unsigned int mr_ext_memseg_en __rte_unused)
{
	int ret;

	DRV_LOG(DEBUG, "port %u requesting MR creation for address (%p)",
	      mp_id->port_id, (void *)addr);
	ret = mlx5_mp_req_mr_create(mp_id, addr);
	if (ret) {
		DRV_LOG(DEBUG, "Fail to request MR creation for address (%p)",
		      (void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	DRV_LOG(DEBUG, "MR CREATED by primary process for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
	      (void *)addr, entry->start, entry->end, entry->lkey);
	return entry->lkey;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register entire virtually contiguous memory chunk around the address.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en)
{
	struct mr_find_contig_memsegs_data data = {.addr = addr, };
	struct mr_find_contig_memsegs_data data_re;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	int ms_idx_shift = -1;
	uint32_t bmp_size;
	void *bmp_mem;
	uint32_t ms_n;
	uint32_t n;
	size_t len;

	DRV_LOG(DEBUG, "Creating a MR using address (%p)", (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called while holding
	 * either memory_hotplug_lock or share_cache->rwlock. MRs on the free
	 * list have been detached by the memory free event but could not be
	 * released inside the callback due to deadlock. As a result, releasing
	 * resources is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(share_cache);
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register maximum range.
	 * In the best case where mempools are not dynamically recreated and
	 * '--socket-mem' is specified as an EAL option, it is very likely to
	 * have only one MR(LKey) per socket and per hugepage size even though
	 * the system memory is highly fragmented. As the whole memory chunk
	 * will be pinned by the kernel, it can't be reused unless the entire
	 * chunk is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may drop performance if there
	 * are many MRs to lookup on the datapath.
	 */
	if (!mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"Unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX5_ASSERT(data.msl);
	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX5_ASSERT(ms);
	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DRV_LOG(DEBUG, "Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " page_sz=0x%" PRIx64 ", ms_n=%u",
	      (void *)addr, data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE) +
			 bmp_size, RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DRV_LOG(DEBUG, "Unable to allocate memory for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize memseg bitmap. To
	 * see if a memseg of ms_idx in the memseg-list is still valid, check:
	 *	rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DRV_LOG(DEBUG, "Unable to initialize bitmap for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Should recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there's any memory
	 * related calls in a critical path, resource allocation above can't be
	 * locked. If the memory has been changed at this point, try again with
	 * just a single page. If not, go on with the big chunk atomically from
	 * here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DRV_LOG(DEBUG,
			"Unable to find virtually contiguous chunk for address "
			"(%p). rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with a single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		goto alloc_resources;
	}
	MLX5_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&share_cache->rwlock);
	/*
	 * Check the address is really missing. If another thread has already
	 * created one or it is not found due to overflow, abort and return.
	 */
	if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to
		 * low-on-memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&share_cache->cache, entry);
		DRV_LOG(DEBUG, "Found MR for %p on final lookup, abort",
			(void *)addr);
		rte_rwlock_write_unlock(&share_cache->rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mr_cache_entry ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
		    UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create an MR for the memory chunk. Verbs: ibv_reg_mr() can
	 * be called while holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(DEBUG, "Fail to create an MR for address (%p)",
		      (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	MLX5_ASSERT((uintptr_t)mr->pmd_mr.addr == data.start);
	MLX5_ASSERT(mr->pmd_mr.len);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	DRV_LOG(DEBUG, "MR CREATED (%p) for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      (void *)mr, (void *)addr, data.start, data.end,
	      rte_cpu_to_be_32(mr->pmd_mr.lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&share_cache->rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, as this can be called in a datapath, a warning
	 * message per error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
	 * inside.
	 */
	mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	return UINT32_MAX;
}
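
/*
 * Illustrative sketch (not part of the driver): the optimistic
 * allocate/validate/retry pattern that mlx5_mr_create_primary() implements
 * above, reduced to its control flow. Resources are allocated without the
 * memory hotplug lock, the candidate range is then re-validated under
 * rte_mcfg_mem_read_lock(), and on a mismatch the attempt is retried with a
 * single page. All names and steps here are schematic, not a drop-in
 * implementation; guard macro and helper name are hypothetical.
 */
#ifdef MLX5_MR_DOC_SKETCHES
static int
example_validate_retry(uintptr_t addr, struct mr_find_contig_memsegs_data *d)
{
	struct mr_find_contig_memsegs_data re;
	int retried = 0;

retry:
	/* Tracking resources for *d would be allocated here, unlocked. */
	rte_mcfg_mem_read_lock();
	re = *d;
	if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &re)) {
		rte_mcfg_mem_read_unlock();
		return -1; /* The range vanished entirely. */
	}
	if (!retried && (d->start != re.start || d->end != re.end)) {
		/* The range changed under us: retry with a single page. */
		d->start = RTE_ALIGN_FLOOR(addr, d->msl->page_sz);
		d->end = d->start + d->msl->page_sz;
		rte_mcfg_mem_read_unlock();
		retried = 1;
		goto retry;
	}
	/* Registration would proceed here, lock still held, then unlock. */
	rte_mcfg_mem_read_unlock();
	return 0;
}
#endif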

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from the primary and secondary processes.
 *
 * @param pd
 *   Pointer to pd handle of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Pointer to multi-process ID structure.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create(void *pd, struct mlx5_mp_id *mp_id,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr,
	       unsigned int mr_ext_memseg_en)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_mr_create_primary(pd, share_cache, entry,
					     addr, mr_ext_memseg_en);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
					       addr, mr_ext_memseg_en);
		break;
	default:
		break;
	}
	return ret;
}

/**
 * Look up address in the global MR cache table. If not found, create a new MR.
 * Insert the found/created entry to local bottom-half cache table.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Pointer to multi-process ID structure.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
		 struct mlx5_mr_share_cache *share_cache,
		 struct mlx5_mr_ctrl *mr_ctrl,
		 struct mr_cache_entry *entry, uintptr_t addr,
		 unsigned int mr_ext_memseg_en)
{
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint32_t lkey;
	uint16_t idx;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*share_cache->cache.table)[idx];
		rte_rwlock_read_unlock(&share_cache->rwlock);
		/*
		 * Update local cache. Even if it fails, return the found entry
		 * to update top-half cache. Next time, this entry will be found
		 * in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&share_cache->rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
			      mr_ext_memseg_en);
	/*
	 * Update the local cache if a new global MR was successfully created.
	 * Even if it failed to create one, there's no action to take in this
	 * datapath code. As the returned LKey is invalid, this will eventually
	 * make the HW fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on datapath. First search in cache_bh[] and if
 * it misses, search in the global MR cache table and update the new entry to
 * per-queue local caches.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Pointer to multi-process ID structure.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in the local lookup table, search in the global
		 * cache; local cache_bh[] will be updated inside if possible.
		 * Top-half cache entry will also be updated.
		 */
		lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
					repl, addr, mr_ext_memseg_en);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}
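
/*
 * Illustrative sketch (not part of this file): how a datapath top-half
 * typically uses the bottom-half above. The linear cache[] is scanned first
 * (cheap, MLX5_MR_CACHE_N entries, starting from the MRU slot) and only a
 * miss falls through to mlx5_mr_addr2mr_bh(). A minimal sketch; the guard
 * macro and helper name are hypothetical.
 */
#ifdef MLX5_MR_DOC_SKETCHES
static uint32_t
example_addr2mr(void *pd, struct mlx5_mp_id *mp_id,
		struct mlx5_mr_share_cache *share_cache,
		struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr,
		unsigned int mr_ext_memseg_en)
{
	unsigned int i;

	/* Linear search in the top-half cache, starting from MRU. */
	for (i = 0; i < MLX5_MR_CACHE_N; ++i) {
		struct mr_cache_entry *e =
			&mr_ctrl->cache[(mr_ctrl->mru + i) % MLX5_MR_CACHE_N];

		if (addr >= e->start && addr < e->end)
			return e->lkey;
	}
	/* Miss: take the slow bottom-half path. */
	return mlx5_mr_addr2mr_bh(pd, mp_id, share_cache, mr_ctrl, addr,
				  mr_ext_memseg_en);
}
#endif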

/**
 * Release all the created MRs and resources of the global MR cache of a
 * device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;

	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach from MR list and move to free list. */
	mr_next = LIST_FIRST(&share_cache->mr_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
	}
	LIST_INIT(&share_cache->mr_list);
	/* Free global cache. */
	mlx5_mr_btree_free(&share_cache->cache);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(share_cache);
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR local cache.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}
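
/*
 * Illustrative sketch (not part of this file): how datapath code typically
 * reacts to a cache flush broadcast. The device generation number is bumped
 * by the control path (see mlx5_free_mr_by_addr() below); when a queue
 * notices its cached generation is stale, it flushes its local caches before
 * the next lookup. Guard macro and helper name are hypothetical.
 */
#ifdef MLX5_MR_DOC_SKETCHES
static void
example_check_generation(struct mlx5_mr_ctrl *mr_ctrl)
{
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
}
#endif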

/**
 * Creates a memory region for external memory, that is, memory which is not
 * part of the DPDK memory segments.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param addr
 *   Starting virtual address of memory.
 * @param len
 *   Length of memory segment being mapped.
 * @param socket_id
 *   Socket to allocate heap memory for the control structures.
 * @param reg_mr_cb
 *   Callback to register the memory region with the device.
 *
 * @return
 *   Pointer to MR structure on success, NULL otherwise.
 */
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
		   mlx5_reg_mr_t reg_mr_cb)
{
	struct mlx5_mr *mr = NULL;

	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE),
			 RTE_CACHE_LINE_SIZE, socket_id);
	if (mr == NULL)
		return NULL;
	reg_mr_cb(pd, (void *)addr, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(WARNING,
			"Fail to create MR for address (%p)",
			(void *)addr);
		mlx5_free(mr);
		return NULL;
	}
	mr->msl = NULL; /* Mark it as external memory. */
	mr->ms_bmp = NULL;
	mr->ms_n = 1;
	mr->ms_bmp_n = 1;
	DRV_LOG(DEBUG,
		"MR CREATED (%p) for external memory %p:\n"
		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		(void *)mr, (void *)addr,
		addr, addr + len, rte_cpu_to_be_32(mr->pmd_mr.lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	return mr;
}
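
/*
 * Illustrative sketch (not part of the driver): registering an externally
 * allocated buffer as an MR and publishing it so that cache lookups can find
 * it. The reg_mr_cb comes from the per-device share cache; the buffer, its
 * size, the guard macro and the helper name are hypothetical.
 */
#ifdef MLX5_MR_DOC_SKETCHES
static struct mlx5_mr *
example_register_external(void *pd, struct mlx5_mr_share_cache *share_cache,
			  void *buf, size_t size, int socket)
{
	struct mlx5_mr *mr;

	mr = mlx5_create_mr_ext(pd, (uintptr_t)buf, size, socket,
				share_cache->reg_mr_cb);
	if (mr == NULL)
		return NULL;
	/* Publish under the write lock; caller holds no lock here. */
	rte_rwlock_write_lock(&share_cache->rwlock);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	return mr;
}
#endif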
1090b8dc6b0eSVu Pham 
1091b8dc6b0eSVu Pham /**
10922f6c2adbSMichael Baum  * Callback for memory free event. Iterate freed memsegs and check whether it
10932f6c2adbSMichael Baum  * belongs to an existing MR. If found, clear the bit from bitmap of MR. As a
10942f6c2adbSMichael Baum  * result, the MR would be fragmented. If it becomes empty, the MR will be freed
10952f6c2adbSMichael Baum  * later by mlx5_mr_garbage_collect(). Even if this callback is called from a
10962f6c2adbSMichael Baum  * secondary process, the garbage collector will be called in primary process
10972f6c2adbSMichael Baum  * as the secondary process can't call mlx5_mr_create().
10982f6c2adbSMichael Baum  *
10992f6c2adbSMichael Baum  * The global cache must be rebuilt if there's any change and this event has to
11002f6c2adbSMichael Baum  * be propagated to dataplane threads to flush the local caches.
11012f6c2adbSMichael Baum  *
11022f6c2adbSMichael Baum  * @param share_cache
11032f6c2adbSMichael Baum  *   Pointer to a global shared MR cache.
11042f6c2adbSMichael Baum  * @param ibdev_name
11052f6c2adbSMichael Baum  *   Name of ibv device.
11062f6c2adbSMichael Baum  * @param addr
11072f6c2adbSMichael Baum  *   Address of freed memory.
11082f6c2adbSMichael Baum  * @param len
11092f6c2adbSMichael Baum  *   Size of freed memory.
11102f6c2adbSMichael Baum  */
11112f6c2adbSMichael Baum void
11122f6c2adbSMichael Baum mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
11132f6c2adbSMichael Baum 		     const char *ibdev_name, const void *addr, size_t len)
11142f6c2adbSMichael Baum {
11152f6c2adbSMichael Baum 	const struct rte_memseg_list *msl;
11162f6c2adbSMichael Baum 	struct mlx5_mr *mr;
11172f6c2adbSMichael Baum 	int ms_n;
11182f6c2adbSMichael Baum 	int i;
11192f6c2adbSMichael Baum 	int rebuild = 0;
11202f6c2adbSMichael Baum 
11212f6c2adbSMichael Baum 	DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
11222f6c2adbSMichael Baum 		ibdev_name, addr, len);
11232f6c2adbSMichael Baum 	msl = rte_mem_virt2memseg_list(addr);
11242f6c2adbSMichael Baum 	/* addr and len must be page-aligned. */
11252f6c2adbSMichael Baum 	MLX5_ASSERT((uintptr_t)addr ==
11262f6c2adbSMichael Baum 		    RTE_ALIGN((uintptr_t)addr, msl->page_sz));
11272f6c2adbSMichael Baum 	MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
11282f6c2adbSMichael Baum 	ms_n = len / msl->page_sz;
11292f6c2adbSMichael Baum 	rte_rwlock_write_lock(&share_cache->rwlock);
11302f6c2adbSMichael Baum 	/* Clear bits of freed memsegs from MR. */
11312f6c2adbSMichael Baum 	for (i = 0; i < ms_n; ++i) {
11322f6c2adbSMichael Baum 		const struct rte_memseg *ms;
11332f6c2adbSMichael Baum 		struct mr_cache_entry entry;
11342f6c2adbSMichael Baum 		uintptr_t start;
11352f6c2adbSMichael Baum 		int ms_idx;
11362f6c2adbSMichael Baum 		uint32_t pos;
11372f6c2adbSMichael Baum 
11382f6c2adbSMichael Baum 		/* Find MR having this memseg. */
11392f6c2adbSMichael Baum 		start = (uintptr_t)addr + i * msl->page_sz;
11402f6c2adbSMichael Baum 		mr = mlx5_mr_lookup_list(share_cache, &entry, start);
11412f6c2adbSMichael Baum 		if (mr == NULL)
11422f6c2adbSMichael Baum 			continue;
11432f6c2adbSMichael Baum 		MLX5_ASSERT(mr->msl); /* Can't be external memory. */
11442f6c2adbSMichael Baum 		ms = rte_mem_virt2memseg((void *)start, msl);
11452f6c2adbSMichael Baum 		MLX5_ASSERT(ms != NULL);
11462f6c2adbSMichael Baum 		MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
11472f6c2adbSMichael Baum 		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
11482f6c2adbSMichael Baum 		pos = ms_idx - mr->ms_base_idx;
11492f6c2adbSMichael Baum 		MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
11502f6c2adbSMichael Baum 		MLX5_ASSERT(pos < mr->ms_bmp_n);
11512f6c2adbSMichael Baum 		DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
11522f6c2adbSMichael Baum 			ibdev_name, (void *)mr, pos, (void *)start);
11532f6c2adbSMichael Baum 		rte_bitmap_clear(mr->ms_bmp, pos);
11542f6c2adbSMichael Baum 		if (--mr->ms_n == 0) {
11552f6c2adbSMichael Baum 			LIST_REMOVE(mr, mr);
11562f6c2adbSMichael Baum 			LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
11572f6c2adbSMichael Baum 			DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
11582f6c2adbSMichael Baum 				ibdev_name, (void *)mr);
11592f6c2adbSMichael Baum 		}
11602f6c2adbSMichael Baum 		/*
11612f6c2adbSMichael Baum 		 * The MR is fragmented or will be freed. The global cache
11622f6c2adbSMichael Baum 		 * must be rebuilt.
11632f6c2adbSMichael Baum 		 */
11642f6c2adbSMichael Baum 		rebuild = 1;
11652f6c2adbSMichael Baum 	}
11662f6c2adbSMichael Baum 	if (rebuild) {
11672f6c2adbSMichael Baum 		mlx5_mr_rebuild_cache(share_cache);
11682f6c2adbSMichael Baum 		/*
11692f6c2adbSMichael Baum 		 * No explicit wmb is needed after updating dev_gen because
11702f6c2adbSMichael Baum 		 * the store-release semantics of the unlock below provide
11712f6c2adbSMichael Baum 		 * an implicit barrier at the software-visible level.
11722f6c2adbSMichael Baum 		 */
11732f6c2adbSMichael Baum 		++share_cache->dev_gen;
11742f6c2adbSMichael Baum 		DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
11752f6c2adbSMichael Baum 			share_cache->dev_gen);
11762f6c2adbSMichael Baum 	}
11772f6c2adbSMichael Baum 	rte_rwlock_write_unlock(&share_cache->rwlock);
11782f6c2adbSMichael Baum }
11792f6c2adbSMichael Baum 
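/*
 * Illustrative sketch, not part of this file: how a callback like
 * mlx5_free_mr_by_addr() is typically reached. An EAL memory event callback
 * (the callback name, device name and share-cache variable below are assumed
 * for illustration) filters for RTE_MEM_EVENT_FREE and forwards the freed
 * range:
 *
 *   static void
 *   example_mem_event_cb(enum rte_mem_event type, const void *addr,
 *                        size_t len, void *arg)
 *   {
 *           struct mlx5_mr_share_cache *sc = arg;
 *
 *           if (type == RTE_MEM_EVENT_FREE)
 *                   mlx5_free_mr_by_addr(sc, "mlx5_0", addr, len);
 *   }
 *
 *   rte_mem_event_callback_register("EXAMPLE_MEM_EVENT_CB",
 *                                   example_mem_event_cb, &share_cache);
 */
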
11802f6c2adbSMichael Baum /**
1181b8dc6b0eSVu Pham  * Dump all the created MRs and the global cache entries.
1182b8dc6b0eSVu Pham  *
1183b8dc6b0eSVu Pham  * @param share_cache
1184b8dc6b0eSVu Pham  *   Pointer to a global shared MR cache.
1185b8dc6b0eSVu Pham  */
1186b8dc6b0eSVu Pham void
1187b8dc6b0eSVu Pham mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
1188b8dc6b0eSVu Pham {
1189b8dc6b0eSVu Pham #ifdef RTE_LIBRTE_MLX5_DEBUG
1190b8dc6b0eSVu Pham 	struct mlx5_mr *mr;
1191b8dc6b0eSVu Pham 	int mr_n = 0;
1192b8dc6b0eSVu Pham 	int chunk_n = 0;
1193b8dc6b0eSVu Pham 
1194b8dc6b0eSVu Pham 	rte_rwlock_read_lock(&share_cache->rwlock);
1195b8dc6b0eSVu Pham 	/* Iterate all the existing MRs. */
1196b8dc6b0eSVu Pham 	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
1197b8dc6b0eSVu Pham 		unsigned int n;
1198b8dc6b0eSVu Pham 
119987acdcc7SThomas Monjalon 		DRV_LOG(DEBUG, "MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
120056d20677SOphir Munk 		      mr_n++, rte_cpu_to_be_32(mr->pmd_mr.lkey),
1201b8dc6b0eSVu Pham 		      mr->ms_n, mr->ms_bmp_n);
1202b8dc6b0eSVu Pham 		if (mr->ms_n == 0)
1203b8dc6b0eSVu Pham 			continue;
1204b8dc6b0eSVu Pham 		for (n = 0; n < mr->ms_bmp_n; ) {
1205b8dc6b0eSVu Pham 			struct mr_cache_entry ret = { 0, };
1206b8dc6b0eSVu Pham 
1207b8dc6b0eSVu Pham 			n = mr_find_next_chunk(mr, &ret, n);
1208b8dc6b0eSVu Pham 			if (!ret.end)
1209b8dc6b0eSVu Pham 				break;
121087acdcc7SThomas Monjalon 			DRV_LOG(DEBUG,
121187acdcc7SThomas Monjalon 				"  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
1212b8dc6b0eSVu Pham 				chunk_n++, ret.start, ret.end);
1213b8dc6b0eSVu Pham 		}
1214b8dc6b0eSVu Pham 	}
121587acdcc7SThomas Monjalon 	DRV_LOG(DEBUG, "Dumping global cache %p", (void *)share_cache);
1216b8dc6b0eSVu Pham 	mlx5_mr_btree_dump(&share_cache->cache);
1217b8dc6b0eSVu Pham 	rte_rwlock_read_unlock(&share_cache->rwlock);
1218b8dc6b0eSVu Pham #endif
1219b8dc6b0eSVu Pham }
1220690b2a88SDmitry Kozlyuk 
1221690b2a88SDmitry Kozlyuk static int
1222690b2a88SDmitry Kozlyuk mlx5_range_compare_start(const void *lhs, const void *rhs)
1223690b2a88SDmitry Kozlyuk {
1224690b2a88SDmitry Kozlyuk 	const struct mlx5_range *r1 = lhs, *r2 = rhs;
1225690b2a88SDmitry Kozlyuk 
1226690b2a88SDmitry Kozlyuk 	if (r1->start > r2->start)
1227690b2a88SDmitry Kozlyuk 		return 1;
1228690b2a88SDmitry Kozlyuk 	else if (r1->start < r2->start)
1229690b2a88SDmitry Kozlyuk 		return -1;
1230690b2a88SDmitry Kozlyuk 	return 0;
1231690b2a88SDmitry Kozlyuk }
1232690b2a88SDmitry Kozlyuk 
1233690b2a88SDmitry Kozlyuk static void
1234690b2a88SDmitry Kozlyuk mlx5_range_from_mempool_chunk(struct rte_mempool *mp, void *opaque,
1235690b2a88SDmitry Kozlyuk 			      struct rte_mempool_memhdr *memhdr,
1236690b2a88SDmitry Kozlyuk 			      unsigned int idx)
1237690b2a88SDmitry Kozlyuk {
1238690b2a88SDmitry Kozlyuk 	struct mlx5_range *ranges = opaque, *range = &ranges[idx];
1239690b2a88SDmitry Kozlyuk 	uint64_t page_size = rte_mem_page_size();
1240690b2a88SDmitry Kozlyuk 
1241690b2a88SDmitry Kozlyuk 	RTE_SET_USED(mp);
1242690b2a88SDmitry Kozlyuk 	range->start = RTE_ALIGN_FLOOR((uintptr_t)memhdr->addr, page_size);
1243690b2a88SDmitry Kozlyuk 	range->end = RTE_ALIGN_CEIL((uintptr_t)memhdr->addr + memhdr->len, page_size);
1244690b2a88SDmitry Kozlyuk }
1245690b2a88SDmitry Kozlyuk 
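/*
 * Worked example for the helper above, assuming a 4 KiB system page size:
 * a chunk with memhdr->addr = 0x10234 and memhdr->len = 0x100 yields
 *
 *   range->start = RTE_ALIGN_FLOOR(0x10234, 0x1000)          = 0x10000
 *   range->end   = RTE_ALIGN_CEIL(0x10234 + 0x100, 0x1000)   = 0x11000
 *
 * i.e. the smallest page-aligned range [0x10000, 0x11000) covering the chunk.
 */
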
1246690b2a88SDmitry Kozlyuk /**
1247690b2a88SDmitry Kozlyuk  * Get VA-contiguous ranges of the mempool memory.
1248690b2a88SDmitry Kozlyuk  * Each range start and end is aligned to the system page size.
1249690b2a88SDmitry Kozlyuk  *
1250690b2a88SDmitry Kozlyuk  * @param[in] mp
1251690b2a88SDmitry Kozlyuk  *   Analyzed mempool.
1252690b2a88SDmitry Kozlyuk  * @param[out] out
1253690b2a88SDmitry Kozlyuk  *   Receives the ranges, the caller must release them with free().
1254690b2a88SDmitry Kozlyuk  * @param[out] out_n
1255690b2a88SDmitry Kozlyuk  *   Receives the number of @p out elements.
1256690b2a88SDmitry Kozlyuk  *
1257690b2a88SDmitry Kozlyuk  * @return
1258690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure.
1259690b2a88SDmitry Kozlyuk  */
1260690b2a88SDmitry Kozlyuk static int
1261690b2a88SDmitry Kozlyuk mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,
1262690b2a88SDmitry Kozlyuk 			unsigned int *out_n)
1263690b2a88SDmitry Kozlyuk {
1264690b2a88SDmitry Kozlyuk 	struct mlx5_range *chunks;
1265690b2a88SDmitry Kozlyuk 	unsigned int chunks_n = mp->nb_mem_chunks, contig_n, i;
1266690b2a88SDmitry Kozlyuk 
1267690b2a88SDmitry Kozlyuk 	/* Collect page-aligned memory ranges of the mempool. */
1268690b2a88SDmitry Kozlyuk 	chunks = calloc(chunks_n, sizeof(chunks[0]));
1269690b2a88SDmitry Kozlyuk 	if (chunks == NULL)
1270690b2a88SDmitry Kozlyuk 		return -1;
1271690b2a88SDmitry Kozlyuk 	rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, chunks);
1272690b2a88SDmitry Kozlyuk 	/* Merge adjacent chunks and place them at the beginning. */
1273690b2a88SDmitry Kozlyuk 	qsort(chunks, chunks_n, sizeof(chunks[0]), mlx5_range_compare_start);
1274690b2a88SDmitry Kozlyuk 	contig_n = 1;
1275690b2a88SDmitry Kozlyuk 	for (i = 1; i < chunks_n; i++)
1276690b2a88SDmitry Kozlyuk 		if (chunks[i - 1].end != chunks[i].start) {
1277690b2a88SDmitry Kozlyuk 			chunks[contig_n - 1].end = chunks[i - 1].end;
1278690b2a88SDmitry Kozlyuk 			chunks[contig_n] = chunks[i];
1279690b2a88SDmitry Kozlyuk 			contig_n++;
1280690b2a88SDmitry Kozlyuk 		}
1281690b2a88SDmitry Kozlyuk 	/* Close the last contiguous range at the end of the last chunk. */
1282690b2a88SDmitry Kozlyuk 	chunks[contig_n - 1].end = chunks[i - 1].end;
1283690b2a88SDmitry Kozlyuk 	*out = chunks;
1284690b2a88SDmitry Kozlyuk 	*out_n = contig_n;
1285690b2a88SDmitry Kozlyuk 	return 0;
1286690b2a88SDmitry Kozlyuk }
1287690b2a88SDmitry Kozlyuk 
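/*
 * Worked example of the merge above (addresses assumed for illustration):
 * sorted page-aligned chunks [0x1000, 0x2000), [0x2000, 0x3000) and
 * [0x5000, 0x6000) produce two VA-contiguous ranges:
 *
 *   [0x1000, 0x3000)   the first two chunks are adjacent and merge
 *   [0x5000, 0x6000)   the gap before 0x5000 starts a new range
 *
 * Range ends are written lazily: an end is only fixed up when a gap is
 * found or, for the last range, after the loop terminates.
 */
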
1288690b2a88SDmitry Kozlyuk /**
1289690b2a88SDmitry Kozlyuk  * Analyze mempool memory to select memory ranges to register.
1290690b2a88SDmitry Kozlyuk  *
1291690b2a88SDmitry Kozlyuk  * @param[in] mp
1292690b2a88SDmitry Kozlyuk  *   Mempool to analyze.
1293690b2a88SDmitry Kozlyuk  * @param[out] out
1294690b2a88SDmitry Kozlyuk  *   Receives memory ranges to register, aligned to the system page size.
1295690b2a88SDmitry Kozlyuk  *   The caller must release them with free().
1296690b2a88SDmitry Kozlyuk  * @param[out] out_n
1297690b2a88SDmitry Kozlyuk  *   Receives the number of @p out items.
1298690b2a88SDmitry Kozlyuk  * @param[out] share_hugepage
1299690b2a88SDmitry Kozlyuk  *   Receives True if the entire pool resides within a single hugepage.
1300690b2a88SDmitry Kozlyuk  *
1301690b2a88SDmitry Kozlyuk  * @return
1302690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure.
1303690b2a88SDmitry Kozlyuk  */
1304690b2a88SDmitry Kozlyuk static int
1305690b2a88SDmitry Kozlyuk mlx5_mempool_reg_analyze(struct rte_mempool *mp, struct mlx5_range **out,
1306690b2a88SDmitry Kozlyuk 			 unsigned int *out_n, bool *share_hugepage)
1307690b2a88SDmitry Kozlyuk {
1308690b2a88SDmitry Kozlyuk 	struct mlx5_range *ranges = NULL;
1309690b2a88SDmitry Kozlyuk 	unsigned int i, ranges_n = 0;
1310690b2a88SDmitry Kozlyuk 	struct rte_memseg_list *msl;
1311690b2a88SDmitry Kozlyuk 
1312690b2a88SDmitry Kozlyuk 	if (mlx5_get_mempool_ranges(mp, &ranges, &ranges_n) < 0) {
1313690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot get address ranges for mempool %s",
1314690b2a88SDmitry Kozlyuk 			mp->name);
1315690b2a88SDmitry Kozlyuk 		return -1;
1316690b2a88SDmitry Kozlyuk 	}
1317690b2a88SDmitry Kozlyuk 	/* Check if the hugepage of the pool can be shared. */
1318690b2a88SDmitry Kozlyuk 	*share_hugepage = false;
1319690b2a88SDmitry Kozlyuk 	msl = rte_mem_virt2memseg_list((void *)ranges[0].start);
1320690b2a88SDmitry Kozlyuk 	if (msl != NULL) {
1321690b2a88SDmitry Kozlyuk 		uint64_t hugepage_sz = 0;
1322690b2a88SDmitry Kozlyuk 
1323690b2a88SDmitry Kozlyuk 		/* Check that all ranges are on pages of the same size. */
1324690b2a88SDmitry Kozlyuk 		for (i = 0; i < ranges_n; i++) {
1325690b2a88SDmitry Kozlyuk 			msl = rte_mem_virt2memseg_list((void *)ranges[i].start);
1325690b2a88SDmitry Kozlyuk 			if (msl == NULL ||
1325690b2a88SDmitry Kozlyuk 			    (hugepage_sz != 0 && hugepage_sz != msl->page_sz))
1326690b2a88SDmitry Kozlyuk 				break;
1327690b2a88SDmitry Kozlyuk 			hugepage_sz = msl->page_sz;
1328690b2a88SDmitry Kozlyuk 		}
1329690b2a88SDmitry Kozlyuk 		if (i == ranges_n) {
1330690b2a88SDmitry Kozlyuk 			/*
1331690b2a88SDmitry Kozlyuk 			 * If the entire pool is within one hugepage,
1332690b2a88SDmitry Kozlyuk 			 * combine all ranges into one of the hugepage size.
1333690b2a88SDmitry Kozlyuk 			 */
1334690b2a88SDmitry Kozlyuk 			uintptr_t reg_start = ranges[0].start;
1335690b2a88SDmitry Kozlyuk 			uintptr_t reg_end = ranges[ranges_n - 1].end;
1336690b2a88SDmitry Kozlyuk 			uintptr_t hugepage_start =
1337690b2a88SDmitry Kozlyuk 				RTE_ALIGN_FLOOR(reg_start, hugepage_sz);
1338690b2a88SDmitry Kozlyuk 			uintptr_t hugepage_end = hugepage_start + hugepage_sz;
1339690b2a88SDmitry Kozlyuk 			if (reg_end < hugepage_end) {
1340690b2a88SDmitry Kozlyuk 				ranges[0].start = hugepage_start;
1341690b2a88SDmitry Kozlyuk 				ranges[0].end = hugepage_end;
1342690b2a88SDmitry Kozlyuk 				ranges_n = 1;
1343690b2a88SDmitry Kozlyuk 				*share_hugepage = true;
1344690b2a88SDmitry Kozlyuk 			}
1345690b2a88SDmitry Kozlyuk 		}
1346690b2a88SDmitry Kozlyuk 	}
1347690b2a88SDmitry Kozlyuk 	*out = ranges;
1348690b2a88SDmitry Kozlyuk 	*out_n = ranges_n;
1349690b2a88SDmitry Kozlyuk 	return 0;
1350690b2a88SDmitry Kozlyuk }
1351690b2a88SDmitry Kozlyuk 
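/*
 * Example of the hugepage optimization above (addresses and sizes assumed):
 * a mempool with ranges [0x200100000, 0x200140000) and
 * [0x200180000, 0x2001c0000) on 2 MiB pages lies entirely inside the
 * hugepage [0x200000000, 0x200200000). The ranges are therefore collapsed
 * into that single hugepage-sized range and *share_hugepage is set, so one
 * MR can later be shared by every mempool residing in the same hugepage.
 */
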
1352690b2a88SDmitry Kozlyuk /** Create a registration object for the mempool. */
1353690b2a88SDmitry Kozlyuk static struct mlx5_mempool_reg *
1354690b2a88SDmitry Kozlyuk mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n)
1355690b2a88SDmitry Kozlyuk {
1356690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr = NULL;
1357690b2a88SDmitry Kozlyuk 
1358690b2a88SDmitry Kozlyuk 	mpr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1359690b2a88SDmitry Kozlyuk 			  sizeof(*mpr) + mrs_n * sizeof(mpr->mrs[0]),
1360690b2a88SDmitry Kozlyuk 			  RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1361690b2a88SDmitry Kozlyuk 	if (mpr == NULL) {
1362690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot allocate mempool %s registration object",
1363690b2a88SDmitry Kozlyuk 			mp->name);
1364690b2a88SDmitry Kozlyuk 		return NULL;
1365690b2a88SDmitry Kozlyuk 	}
1366690b2a88SDmitry Kozlyuk 	mpr->mp = mp;
1367690b2a88SDmitry Kozlyuk 	mpr->mrs = (struct mlx5_mempool_mr *)(mpr + 1);
1368690b2a88SDmitry Kozlyuk 	mpr->mrs_n = mrs_n;
1369690b2a88SDmitry Kozlyuk 	return mpr;
1370690b2a88SDmitry Kozlyuk }
1371690b2a88SDmitry Kozlyuk 
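/*
 * Usage sketch for the constructor above (the MR count is hypothetical):
 * the header and the MR array live in a single allocation, laid out as
 * [mpr | mrs[0] .. mrs[mrs_n - 1]], so one mlx5_free(mpr) releases both.
 *
 *   struct mlx5_mempool_reg *mpr = mlx5_mempool_reg_create(mp, 2);
 *
 *   if (mpr != NULL)
 *           ...   (fill mpr->mrs[0] and mpr->mrs[1], then publish mpr)
 */
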
1372690b2a88SDmitry Kozlyuk /**
1373690b2a88SDmitry Kozlyuk  * Destroy a mempool registration object.
1374690b2a88SDmitry Kozlyuk  *
1375690b2a88SDmitry Kozlyuk  * @param standalone
1376690b2a88SDmitry Kozlyuk  *   Whether @p mpr owns its MRs exclusively, i.e. they are not shared.
1377690b2a88SDmitry Kozlyuk  */
1378690b2a88SDmitry Kozlyuk static void
1379690b2a88SDmitry Kozlyuk mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,
1380690b2a88SDmitry Kozlyuk 			 struct mlx5_mempool_reg *mpr, bool standalone)
1381690b2a88SDmitry Kozlyuk {
1382690b2a88SDmitry Kozlyuk 	if (standalone) {
1383690b2a88SDmitry Kozlyuk 		unsigned int i;
1384690b2a88SDmitry Kozlyuk 
1385690b2a88SDmitry Kozlyuk 		for (i = 0; i < mpr->mrs_n; i++)
1386690b2a88SDmitry Kozlyuk 			share_cache->dereg_mr_cb(&mpr->mrs[i].pmd_mr);
1387690b2a88SDmitry Kozlyuk 	}
1388690b2a88SDmitry Kozlyuk 	mlx5_free(mpr);
1389690b2a88SDmitry Kozlyuk }
1390690b2a88SDmitry Kozlyuk 
1391690b2a88SDmitry Kozlyuk /** Find registration object of a mempool. */
1392690b2a88SDmitry Kozlyuk static struct mlx5_mempool_reg *
1393690b2a88SDmitry Kozlyuk mlx5_mempool_reg_lookup(struct mlx5_mr_share_cache *share_cache,
1394690b2a88SDmitry Kozlyuk 			struct rte_mempool *mp)
1395690b2a88SDmitry Kozlyuk {
1396690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
1397690b2a88SDmitry Kozlyuk 
1398690b2a88SDmitry Kozlyuk 	LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
1399690b2a88SDmitry Kozlyuk 		if (mpr->mp == mp)
1400690b2a88SDmitry Kozlyuk 			break;
1401690b2a88SDmitry Kozlyuk 	return mpr;
1402690b2a88SDmitry Kozlyuk }
1403690b2a88SDmitry Kozlyuk 
1404690b2a88SDmitry Kozlyuk /** Increment reference counters of MRs used in the registration. */
1405690b2a88SDmitry Kozlyuk static void
1406690b2a88SDmitry Kozlyuk mlx5_mempool_reg_attach(struct mlx5_mempool_reg *mpr)
1407690b2a88SDmitry Kozlyuk {
1408690b2a88SDmitry Kozlyuk 	unsigned int i;
1409690b2a88SDmitry Kozlyuk 
1410690b2a88SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++)
1411690b2a88SDmitry Kozlyuk 		__atomic_add_fetch(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
1412690b2a88SDmitry Kozlyuk }
1413690b2a88SDmitry Kozlyuk 
1414690b2a88SDmitry Kozlyuk /**
1415690b2a88SDmitry Kozlyuk  * Decrement reference counters of MRs used in the registration.
1416690b2a88SDmitry Kozlyuk  *
1417690b2a88SDmitry Kozlyuk  * @return True if no more references to @p mpr MRs exist, False otherwise.
1418690b2a88SDmitry Kozlyuk  */
1419690b2a88SDmitry Kozlyuk static bool
1420690b2a88SDmitry Kozlyuk mlx5_mempool_reg_detach(struct mlx5_mempool_reg *mpr)
1421690b2a88SDmitry Kozlyuk {
1422690b2a88SDmitry Kozlyuk 	unsigned int i;
1423690b2a88SDmitry Kozlyuk 	bool ret = false;
1424690b2a88SDmitry Kozlyuk 
1425690b2a88SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++)
1426690b2a88SDmitry Kozlyuk 		ret |= __atomic_sub_fetch(&mpr->mrs[i].refcnt, 1,
1427690b2a88SDmitry Kozlyuk 					  __ATOMIC_RELAXED) == 0;
1428690b2a88SDmitry Kozlyuk 	return ret;
1429690b2a88SDmitry Kozlyuk }
1430690b2a88SDmitry Kozlyuk 
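/*
 * Reference counting illustration for the pair above (hypothetical
 * sequence): two registrations sharing one MR set each call
 * mlx5_mempool_reg_attach(), giving refcnt == 2. The first
 * mlx5_mempool_reg_detach() drops it to 1 and returns false (the MRs must
 * stay); the second drops it to 0 and returns true, telling the caller to
 * pass standalone == true to mlx5_mempool_reg_destroy() so the MRs are
 * deregistered exactly once.
 */
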
1431690b2a88SDmitry Kozlyuk static int
1432690b2a88SDmitry Kozlyuk mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
1433690b2a88SDmitry Kozlyuk 				 void *pd, struct rte_mempool *mp)
1434690b2a88SDmitry Kozlyuk {
1435690b2a88SDmitry Kozlyuk 	struct mlx5_range *ranges = NULL;
1436690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr, *new_mpr;
1437690b2a88SDmitry Kozlyuk 	unsigned int i, ranges_n;
1438690b2a88SDmitry Kozlyuk 	bool share_hugepage;
1439690b2a88SDmitry Kozlyuk 	int ret = -1;
1440690b2a88SDmitry Kozlyuk 
1441690b2a88SDmitry Kozlyuk 	/* Early check to avoid unnecessary creation of MRs. */
1442690b2a88SDmitry Kozlyuk 	rte_rwlock_read_lock(&share_cache->rwlock);
1443690b2a88SDmitry Kozlyuk 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1444690b2a88SDmitry Kozlyuk 	rte_rwlock_read_unlock(&share_cache->rwlock);
1445690b2a88SDmitry Kozlyuk 	if (mpr != NULL) {
1446690b2a88SDmitry Kozlyuk 		DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
1447690b2a88SDmitry Kozlyuk 			mp->name, pd);
1448690b2a88SDmitry Kozlyuk 		rte_errno = EEXIST;
1449690b2a88SDmitry Kozlyuk 		goto exit;
1450690b2a88SDmitry Kozlyuk 	}
1451690b2a88SDmitry Kozlyuk 	if (mlx5_mempool_reg_analyze(mp, &ranges, &ranges_n,
1452690b2a88SDmitry Kozlyuk 				     &share_hugepage) < 0) {
1453690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot get mempool %s memory ranges", mp->name);
1454690b2a88SDmitry Kozlyuk 		rte_errno = ENOMEM;
1455690b2a88SDmitry Kozlyuk 		goto exit;
1456690b2a88SDmitry Kozlyuk 	}
1457690b2a88SDmitry Kozlyuk 	new_mpr = mlx5_mempool_reg_create(mp, ranges_n);
1458690b2a88SDmitry Kozlyuk 	if (new_mpr == NULL) {
1459690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR,
1460690b2a88SDmitry Kozlyuk 			"Cannot create a registration object for mempool %s in PD %p",
1461690b2a88SDmitry Kozlyuk 			mp->name, pd);
1462690b2a88SDmitry Kozlyuk 		rte_errno = ENOMEM;
1463690b2a88SDmitry Kozlyuk 		goto exit;
1464690b2a88SDmitry Kozlyuk 	}
1465690b2a88SDmitry Kozlyuk 	/*
1466690b2a88SDmitry Kozlyuk 	 * If the entire mempool fits in a single hugepage, the MR for this
1467690b2a88SDmitry Kozlyuk 	 * hugepage can be shared across mempools that also fit in it.
1468690b2a88SDmitry Kozlyuk 	 */
1469690b2a88SDmitry Kozlyuk 	if (share_hugepage) {
1470690b2a88SDmitry Kozlyuk 		rte_rwlock_write_lock(&share_cache->rwlock);
1471690b2a88SDmitry Kozlyuk 		LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next) {
1472690b2a88SDmitry Kozlyuk 			if (mpr->mrs[0].pmd_mr.addr == (void *)ranges[0].start)
1473690b2a88SDmitry Kozlyuk 				break;
1474690b2a88SDmitry Kozlyuk 		}
1475690b2a88SDmitry Kozlyuk 		if (mpr != NULL) {
1476690b2a88SDmitry Kozlyuk 			new_mpr->mrs = mpr->mrs;
1477690b2a88SDmitry Kozlyuk 			mlx5_mempool_reg_attach(new_mpr);
1478690b2a88SDmitry Kozlyuk 			LIST_INSERT_HEAD(&share_cache->mempool_reg_list,
1479690b2a88SDmitry Kozlyuk 					 new_mpr, next);
1480690b2a88SDmitry Kozlyuk 		}
1481690b2a88SDmitry Kozlyuk 		rte_rwlock_write_unlock(&share_cache->rwlock);
1482690b2a88SDmitry Kozlyuk 		if (mpr != NULL) {
1483690b2a88SDmitry Kozlyuk 			DRV_LOG(DEBUG, "Shared MR %#x in PD %p for mempool %s with mempool %s",
1484690b2a88SDmitry Kozlyuk 				mpr->mrs[0].pmd_mr.lkey, pd, mp->name,
1485690b2a88SDmitry Kozlyuk 				mpr->mp->name);
1486690b2a88SDmitry Kozlyuk 			ret = 0;
1487690b2a88SDmitry Kozlyuk 			goto exit;
1488690b2a88SDmitry Kozlyuk 		}
1489690b2a88SDmitry Kozlyuk 	}
1490690b2a88SDmitry Kozlyuk 	for (i = 0; i < ranges_n; i++) {
1491690b2a88SDmitry Kozlyuk 		struct mlx5_mempool_mr *mr = &new_mpr->mrs[i];
1492690b2a88SDmitry Kozlyuk 		const struct mlx5_range *range = &ranges[i];
1493690b2a88SDmitry Kozlyuk 		size_t len = range->end - range->start;
1494690b2a88SDmitry Kozlyuk 
1495690b2a88SDmitry Kozlyuk 		if (share_cache->reg_mr_cb(pd, (void *)range->start, len,
1496690b2a88SDmitry Kozlyuk 		    &mr->pmd_mr) < 0) {
1497690b2a88SDmitry Kozlyuk 			DRV_LOG(ERR,
1498690b2a88SDmitry Kozlyuk 				"Failed to create an MR in PD %p for address range "
1499690b2a88SDmitry Kozlyuk 				"[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
1500690b2a88SDmitry Kozlyuk 				pd, range->start, range->end, len, mp->name);
1501690b2a88SDmitry Kozlyuk 			break;
1502690b2a88SDmitry Kozlyuk 		}
1503690b2a88SDmitry Kozlyuk 		DRV_LOG(DEBUG,
1504690b2a88SDmitry Kozlyuk 			"Created a new MR %#x in PD %p for address range "
1505690b2a88SDmitry Kozlyuk 			"[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
1506690b2a88SDmitry Kozlyuk 			mr->pmd_mr.lkey, pd, range->start, range->end, len,
1507690b2a88SDmitry Kozlyuk 			mp->name);
1508690b2a88SDmitry Kozlyuk 	}
1509690b2a88SDmitry Kozlyuk 	if (i != ranges_n) {
1510690b2a88SDmitry Kozlyuk 		mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
1511690b2a88SDmitry Kozlyuk 		rte_errno = EINVAL;
1512690b2a88SDmitry Kozlyuk 		goto exit;
1513690b2a88SDmitry Kozlyuk 	}
1514690b2a88SDmitry Kozlyuk 	/* Concurrent registration is not supposed to happen. */
1515690b2a88SDmitry Kozlyuk 	rte_rwlock_write_lock(&share_cache->rwlock);
1516690b2a88SDmitry Kozlyuk 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1517690b2a88SDmitry Kozlyuk 	if (mpr == NULL) {
1518690b2a88SDmitry Kozlyuk 		mlx5_mempool_reg_attach(new_mpr);
1519690b2a88SDmitry Kozlyuk 		LIST_INSERT_HEAD(&share_cache->mempool_reg_list,
1520690b2a88SDmitry Kozlyuk 				 new_mpr, next);
1521690b2a88SDmitry Kozlyuk 		ret = 0;
1522690b2a88SDmitry Kozlyuk 	}
1523690b2a88SDmitry Kozlyuk 	rte_rwlock_write_unlock(&share_cache->rwlock);
1524690b2a88SDmitry Kozlyuk 	if (mpr != NULL) {
1525690b2a88SDmitry Kozlyuk 		DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
1526690b2a88SDmitry Kozlyuk 			mp->name, pd);
1527690b2a88SDmitry Kozlyuk 		mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
1528690b2a88SDmitry Kozlyuk 		rte_errno = EEXIST;
1529690b2a88SDmitry Kozlyuk 		goto exit;
1530690b2a88SDmitry Kozlyuk 	}
1531690b2a88SDmitry Kozlyuk exit:
1532690b2a88SDmitry Kozlyuk 	free(ranges);
1533690b2a88SDmitry Kozlyuk 	return ret;
1534690b2a88SDmitry Kozlyuk }
1535690b2a88SDmitry Kozlyuk 
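/*
 * The primary-process registration above follows a check-create-recheck
 * pattern (hypothetical timeline): two threads may both miss the early
 * lookup and both create MRs outside the lock, but only the first one to
 * take the write lock inserts its registration; the loser finds it on the
 * recheck, destroys its own MRs and reports EEXIST. MR creation work can
 * thus be duplicated, the registration itself never is.
 */
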
1536690b2a88SDmitry Kozlyuk static int
1537690b2a88SDmitry Kozlyuk mlx5_mr_mempool_register_secondary(struct mlx5_mr_share_cache *share_cache,
1538690b2a88SDmitry Kozlyuk 				   void *pd, struct rte_mempool *mp,
1539690b2a88SDmitry Kozlyuk 				   struct mlx5_mp_id *mp_id)
1540690b2a88SDmitry Kozlyuk {
1541690b2a88SDmitry Kozlyuk 	if (mp_id == NULL) {
1542690b2a88SDmitry Kozlyuk 		rte_errno = EINVAL;
1543690b2a88SDmitry Kozlyuk 		return -1;
1544690b2a88SDmitry Kozlyuk 	}
1545690b2a88SDmitry Kozlyuk 	return mlx5_mp_req_mempool_reg(mp_id, share_cache, pd, mp, true);
1546690b2a88SDmitry Kozlyuk }
1547690b2a88SDmitry Kozlyuk 
1548690b2a88SDmitry Kozlyuk /**
1549690b2a88SDmitry Kozlyuk  * Register the memory of a mempool in the protection domain.
1550690b2a88SDmitry Kozlyuk  *
1551690b2a88SDmitry Kozlyuk  * @param share_cache
1552690b2a88SDmitry Kozlyuk  *   Shared MR cache of the protection domain.
1553690b2a88SDmitry Kozlyuk  * @param pd
1554690b2a88SDmitry Kozlyuk  *   Protection domain object.
1555690b2a88SDmitry Kozlyuk  * @param mp
1556690b2a88SDmitry Kozlyuk  *   Mempool to register.
1557690b2a88SDmitry Kozlyuk  * @param mp_id
1558690b2a88SDmitry Kozlyuk  *   Multi-process identifier, may be NULL for the primary process.
1559690b2a88SDmitry Kozlyuk  *
1560690b2a88SDmitry Kozlyuk  * @return
1561690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure and rte_errno is set.
1562690b2a88SDmitry Kozlyuk  */
1563690b2a88SDmitry Kozlyuk int
1564690b2a88SDmitry Kozlyuk mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,
1565690b2a88SDmitry Kozlyuk 			 struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
1566690b2a88SDmitry Kozlyuk {
1567*c47d7b90SAndrew Rybchenko 	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
1568690b2a88SDmitry Kozlyuk 		return 0;
1569690b2a88SDmitry Kozlyuk 	switch (rte_eal_process_type()) {
1570690b2a88SDmitry Kozlyuk 	case RTE_PROC_PRIMARY:
1571690b2a88SDmitry Kozlyuk 		return mlx5_mr_mempool_register_primary(share_cache, pd, mp);
1572690b2a88SDmitry Kozlyuk 	case RTE_PROC_SECONDARY:
1573690b2a88SDmitry Kozlyuk 		return mlx5_mr_mempool_register_secondary(share_cache, pd, mp,
1574690b2a88SDmitry Kozlyuk 							  mp_id);
1575690b2a88SDmitry Kozlyuk 	default:
1576690b2a88SDmitry Kozlyuk 		return -1;
1577690b2a88SDmitry Kozlyuk 	}
1578690b2a88SDmitry Kozlyuk }
1579690b2a88SDmitry Kozlyuk 
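/*
 * Usage sketch for the entry point above (the shared context variables are
 * assumed): a PMD registers a Rx mempool once per protection domain and
 * tolerates repeated registration.
 *
 *   if (mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp, mp_id) < 0 &&
 *       rte_errno != EEXIST)
 *           DRV_LOG(ERR, "Cannot register mempool %s: %s",
 *                   mp->name, rte_strerror(rte_errno));
 */
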
1580690b2a88SDmitry Kozlyuk static int
1581690b2a88SDmitry Kozlyuk mlx5_mr_mempool_unregister_primary(struct mlx5_mr_share_cache *share_cache,
1582690b2a88SDmitry Kozlyuk 				   struct rte_mempool *mp)
1583690b2a88SDmitry Kozlyuk {
1584690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
1585690b2a88SDmitry Kozlyuk 	bool standalone = false;
1586690b2a88SDmitry Kozlyuk 
1587690b2a88SDmitry Kozlyuk 	rte_rwlock_write_lock(&share_cache->rwlock);
1588690b2a88SDmitry Kozlyuk 	LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
1589690b2a88SDmitry Kozlyuk 		if (mpr->mp == mp) {
1590690b2a88SDmitry Kozlyuk 			LIST_REMOVE(mpr, next);
1591690b2a88SDmitry Kozlyuk 			standalone = mlx5_mempool_reg_detach(mpr);
1592690b2a88SDmitry Kozlyuk 			if (standalone)
1593690b2a88SDmitry Kozlyuk 				/*
1594690b2a88SDmitry Kozlyuk 				 * The unlock operation below provides a memory
1595690b2a88SDmitry Kozlyuk 				 * barrier due to its store-release semantics.
1596690b2a88SDmitry Kozlyuk 				 */
1597690b2a88SDmitry Kozlyuk 				++share_cache->dev_gen;
1598690b2a88SDmitry Kozlyuk 			break;
1599690b2a88SDmitry Kozlyuk 		}
1600690b2a88SDmitry Kozlyuk 	rte_rwlock_write_unlock(&share_cache->rwlock);
1601690b2a88SDmitry Kozlyuk 	if (mpr == NULL) {
1602690b2a88SDmitry Kozlyuk 		rte_errno = ENOENT;
1603690b2a88SDmitry Kozlyuk 		return -1;
1604690b2a88SDmitry Kozlyuk 	}
1605690b2a88SDmitry Kozlyuk 	mlx5_mempool_reg_destroy(share_cache, mpr, standalone);
1606690b2a88SDmitry Kozlyuk 	return 0;
1607690b2a88SDmitry Kozlyuk }
1608690b2a88SDmitry Kozlyuk 
1609690b2a88SDmitry Kozlyuk static int
1610690b2a88SDmitry Kozlyuk mlx5_mr_mempool_unregister_secondary(struct mlx5_mr_share_cache *share_cache,
1611690b2a88SDmitry Kozlyuk 				     struct rte_mempool *mp,
1612690b2a88SDmitry Kozlyuk 				     struct mlx5_mp_id *mp_id)
1613690b2a88SDmitry Kozlyuk {
1614690b2a88SDmitry Kozlyuk 	if (mp_id == NULL) {
1615690b2a88SDmitry Kozlyuk 		rte_errno = EINVAL;
1616690b2a88SDmitry Kozlyuk 		return -1;
1617690b2a88SDmitry Kozlyuk 	}
1618690b2a88SDmitry Kozlyuk 	return mlx5_mp_req_mempool_reg(mp_id, share_cache, NULL, mp, false);
1619690b2a88SDmitry Kozlyuk }
1620690b2a88SDmitry Kozlyuk 
1621690b2a88SDmitry Kozlyuk /**
1622690b2a88SDmitry Kozlyuk  * Unregister the memory of a mempool from the protection domain.
1623690b2a88SDmitry Kozlyuk  *
1624690b2a88SDmitry Kozlyuk  * @param share_cache
1625690b2a88SDmitry Kozlyuk  *   Shared MR cache of the protection domain.
1626690b2a88SDmitry Kozlyuk  * @param mp
1627690b2a88SDmitry Kozlyuk  *   Mempool to unregister.
1628690b2a88SDmitry Kozlyuk  * @param mp_id
1629690b2a88SDmitry Kozlyuk  *   Multi-process identifier, may be NULL for the primary process.
1630690b2a88SDmitry Kozlyuk  *
1631690b2a88SDmitry Kozlyuk  * @return
1632690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure and rte_errno is set.
1633690b2a88SDmitry Kozlyuk  */
1634690b2a88SDmitry Kozlyuk int
1635690b2a88SDmitry Kozlyuk mlx5_mr_mempool_unregister(struct mlx5_mr_share_cache *share_cache,
1636690b2a88SDmitry Kozlyuk 			   struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
1637690b2a88SDmitry Kozlyuk {
1638*c47d7b90SAndrew Rybchenko 	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
1639690b2a88SDmitry Kozlyuk 		return 0;
1640690b2a88SDmitry Kozlyuk 	switch (rte_eal_process_type()) {
1641690b2a88SDmitry Kozlyuk 	case RTE_PROC_PRIMARY:
1642690b2a88SDmitry Kozlyuk 		return mlx5_mr_mempool_unregister_primary(share_cache, mp);
1643690b2a88SDmitry Kozlyuk 	case RTE_PROC_SECONDARY:
1644690b2a88SDmitry Kozlyuk 		return mlx5_mr_mempool_unregister_secondary(share_cache, mp,
1645690b2a88SDmitry Kozlyuk 							    mp_id);
1646690b2a88SDmitry Kozlyuk 	default:
1647690b2a88SDmitry Kozlyuk 		return -1;
1648690b2a88SDmitry Kozlyuk 	}
1649690b2a88SDmitry Kozlyuk }
1650690b2a88SDmitry Kozlyuk 
1651690b2a88SDmitry Kozlyuk /**
1652690b2a88SDmitry Kozlyuk  * Look up an MR key by an address in a registered mempool.
1653690b2a88SDmitry Kozlyuk  *
1654690b2a88SDmitry Kozlyuk  * @param mpr
1655690b2a88SDmitry Kozlyuk  *   Mempool registration object.
1656690b2a88SDmitry Kozlyuk  * @param addr
1657690b2a88SDmitry Kozlyuk  *   Address within the mempool.
1658690b2a88SDmitry Kozlyuk  * @param entry
1659690b2a88SDmitry Kozlyuk  *   Bottom-half cache entry to fill.
1660690b2a88SDmitry Kozlyuk  *
1661690b2a88SDmitry Kozlyuk  * @return
1662690b2a88SDmitry Kozlyuk  *   MR key or UINT32_MAX on failure, which can only happen
1663690b2a88SDmitry Kozlyuk  *   if the address is not from within the mempool.
1664690b2a88SDmitry Kozlyuk  */
1665690b2a88SDmitry Kozlyuk static uint32_t
1666690b2a88SDmitry Kozlyuk mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr,
1667690b2a88SDmitry Kozlyuk 			 struct mr_cache_entry *entry)
1668690b2a88SDmitry Kozlyuk {
1669690b2a88SDmitry Kozlyuk 	uint32_t lkey = UINT32_MAX;
1670690b2a88SDmitry Kozlyuk 	unsigned int i;
1671690b2a88SDmitry Kozlyuk 
1672690b2a88SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++) {
1673690b2a88SDmitry Kozlyuk 		const struct mlx5_pmd_mr *mr = &mpr->mrs[i].pmd_mr;
1674690b2a88SDmitry Kozlyuk 		uintptr_t mr_addr = (uintptr_t)mr->addr;
1675690b2a88SDmitry Kozlyuk 
1676690b2a88SDmitry Kozlyuk 		if (mr_addr <= addr && addr < mr_addr + mr->len) {
1677690b2a88SDmitry Kozlyuk 			lkey = rte_cpu_to_be_32(mr->lkey);
1678690b2a88SDmitry Kozlyuk 			entry->start = mr_addr;
1679690b2a88SDmitry Kozlyuk 			entry->end = mr_addr + mr->len;
1680690b2a88SDmitry Kozlyuk 			entry->lkey = lkey;
1681690b2a88SDmitry Kozlyuk 			break;
1682690b2a88SDmitry Kozlyuk 		}
1683690b2a88SDmitry Kozlyuk 	}
1684690b2a88SDmitry Kozlyuk 	return lkey;
1685690b2a88SDmitry Kozlyuk }
1686690b2a88SDmitry Kozlyuk 
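/*
 * Example of the lookup above (addresses assumed): with mrs[0] covering
 * [0x1000, 0x3000) and mrs[1] covering [0x5000, 0x6000), addr == 0x5800
 * fails the bounds check on mrs[0] and matches mrs[1], so *entry receives
 * [0x5000, 0x6000) and the big-endian lkey. An address outside all MRs
 * leaves the result at UINT32_MAX.
 */
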
1687690b2a88SDmitry Kozlyuk /**
1688690b2a88SDmitry Kozlyuk  * Update bottom-half cache from the list of mempool registrations.
1689690b2a88SDmitry Kozlyuk  *
1690690b2a88SDmitry Kozlyuk  * @param share_cache
1691690b2a88SDmitry Kozlyuk  *   Pointer to a global shared MR cache.
1692690b2a88SDmitry Kozlyuk  * @param mr_ctrl
1693690b2a88SDmitry Kozlyuk  *   Per-queue MR control handle.
1694690b2a88SDmitry Kozlyuk  * @param entry
1695690b2a88SDmitry Kozlyuk  *   Pointer to an entry in the bottom-half cache to update
1696690b2a88SDmitry Kozlyuk  *   with the MR lkey looked up.
1697690b2a88SDmitry Kozlyuk  * @param mp
1698690b2a88SDmitry Kozlyuk  *   Mempool containing the address.
1699690b2a88SDmitry Kozlyuk  * @param addr
1700690b2a88SDmitry Kozlyuk  *   Address to lookup.
1701690b2a88SDmitry Kozlyuk  * @return
1702690b2a88SDmitry Kozlyuk  *   MR lkey on success, UINT32_MAX on failure.
1703690b2a88SDmitry Kozlyuk  */
1704690b2a88SDmitry Kozlyuk static uint32_t
1705690b2a88SDmitry Kozlyuk mlx5_lookup_mempool_regs(struct mlx5_mr_share_cache *share_cache,
1706690b2a88SDmitry Kozlyuk 			 struct mlx5_mr_ctrl *mr_ctrl,
1707690b2a88SDmitry Kozlyuk 			 struct mr_cache_entry *entry,
1708690b2a88SDmitry Kozlyuk 			 struct rte_mempool *mp, uintptr_t addr)
1709690b2a88SDmitry Kozlyuk {
1710690b2a88SDmitry Kozlyuk 	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
1711690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
1712690b2a88SDmitry Kozlyuk 	uint32_t lkey = UINT32_MAX;
1713690b2a88SDmitry Kozlyuk 
1714690b2a88SDmitry Kozlyuk 	/* If local cache table is full, try to double it. */
1715690b2a88SDmitry Kozlyuk 	if (unlikely(bt->len == bt->size))
1716690b2a88SDmitry Kozlyuk 		mr_btree_expand(bt, bt->size << 1);
1717690b2a88SDmitry Kozlyuk 	/* Look up in mempool registrations. */
1718690b2a88SDmitry Kozlyuk 	rte_rwlock_read_lock(&share_cache->rwlock);
1719690b2a88SDmitry Kozlyuk 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1720690b2a88SDmitry Kozlyuk 	if (mpr != NULL)
1721690b2a88SDmitry Kozlyuk 		lkey = mlx5_mempool_reg_addr2mr(mpr, addr, entry);
1722690b2a88SDmitry Kozlyuk 	rte_rwlock_read_unlock(&share_cache->rwlock);
1723690b2a88SDmitry Kozlyuk 	/*
1724690b2a88SDmitry Kozlyuk 	 * Update local cache. Even if it fails, return the found entry
1725690b2a88SDmitry Kozlyuk 	 * to update top-half cache. Next time, this entry will be found
1726690b2a88SDmitry Kozlyuk 	 * in the global cache.
1727690b2a88SDmitry Kozlyuk 	 */
1728690b2a88SDmitry Kozlyuk 	if (lkey != UINT32_MAX)
1729690b2a88SDmitry Kozlyuk 		mr_btree_insert(bt, entry);
1730690b2a88SDmitry Kozlyuk 	return lkey;
1731690b2a88SDmitry Kozlyuk }
1732690b2a88SDmitry Kozlyuk 
1733690b2a88SDmitry Kozlyuk /**
1734690b2a88SDmitry Kozlyuk  * Bottom-half lookup for the address from the mempool.
1735690b2a88SDmitry Kozlyuk  *
1736690b2a88SDmitry Kozlyuk  * @param share_cache
1737690b2a88SDmitry Kozlyuk  *   Pointer to a global shared MR cache.
1738690b2a88SDmitry Kozlyuk  * @param mr_ctrl
1739690b2a88SDmitry Kozlyuk  *   Per-queue MR control handle.
1740690b2a88SDmitry Kozlyuk  * @param mp
1741690b2a88SDmitry Kozlyuk  *   Mempool containing the address.
1742690b2a88SDmitry Kozlyuk  * @param addr
1743690b2a88SDmitry Kozlyuk  *   Address to lookup.
1744690b2a88SDmitry Kozlyuk  * @return
1745690b2a88SDmitry Kozlyuk  *   MR lkey on success, UINT32_MAX on failure.
1746690b2a88SDmitry Kozlyuk  */
1747690b2a88SDmitry Kozlyuk uint32_t
1748690b2a88SDmitry Kozlyuk mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
1749690b2a88SDmitry Kozlyuk 		      struct mlx5_mr_ctrl *mr_ctrl,
1750690b2a88SDmitry Kozlyuk 		      struct rte_mempool *mp, uintptr_t addr)
1751690b2a88SDmitry Kozlyuk {
1752690b2a88SDmitry Kozlyuk 	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
1753690b2a88SDmitry Kozlyuk 	uint32_t lkey;
1754690b2a88SDmitry Kozlyuk 	uint16_t bh_idx = 0;
1755690b2a88SDmitry Kozlyuk 
1756690b2a88SDmitry Kozlyuk 	/* Binary-search MR translation table. */
1757690b2a88SDmitry Kozlyuk 	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
1758690b2a88SDmitry Kozlyuk 	/* Update top-half cache. */
1759690b2a88SDmitry Kozlyuk 	if (likely(lkey != UINT32_MAX)) {
1760690b2a88SDmitry Kozlyuk 		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
1761690b2a88SDmitry Kozlyuk 	} else {
1762690b2a88SDmitry Kozlyuk 		lkey = mlx5_lookup_mempool_regs(share_cache, mr_ctrl, repl,
1763690b2a88SDmitry Kozlyuk 						mp, addr);
1764690b2a88SDmitry Kozlyuk 		/* Can only fail if the address is not from the mempool. */
1765690b2a88SDmitry Kozlyuk 		if (unlikely(lkey == UINT32_MAX))
1766690b2a88SDmitry Kozlyuk 			return UINT32_MAX;
1767690b2a88SDmitry Kozlyuk 	}
1768690b2a88SDmitry Kozlyuk 	/* Update the most recently used entry. */
1769690b2a88SDmitry Kozlyuk 	mr_ctrl->mru = mr_ctrl->head;
1770690b2a88SDmitry Kozlyuk 	/* Point to the next victim, the oldest. */
1771690b2a88SDmitry Kozlyuk 	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
1772690b2a88SDmitry Kozlyuk 	return lkey;
1773690b2a88SDmitry Kozlyuk }
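
/*
 * Datapath usage sketch (the mbuf and per-queue control variables are
 * assumed): resolving the lkey for a buffer address on a top-half miss.
 *
 *   uintptr_t addr = (uintptr_t)rte_pktmbuf_mtod(mbuf, void *);
 *   uint32_t lkey = mlx5_mr_mempool2mr_bh(share_cache, mr_ctrl,
 *                                         mbuf->pool, addr);
 *
 *   if (unlikely(lkey == UINT32_MAX))
 *           return -1;   (the address is not from the registered mempool)
 */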
1774