/* xref: /dpdk/drivers/common/mlx5/mlx5_common_mr.c (revision 7297d2cdecce71aeaf33d915db1659d6dea0bad6) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <stddef.h>

#include <rte_eal_memconfig.h>
#include <rte_eal_paging.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5_glue.h"
#include "mlx5_common.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
#include "mlx5_common_os.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

/* Virtual memory range. */
struct mlx5_range {
	uintptr_t start;
	uintptr_t end;
};

/** Memory region for a mempool. */
struct mlx5_mempool_mr {
	struct mlx5_pmd_mr pmd_mr;
	uint32_t refcnt; /**< Number of mempools sharing this MR. */
};

/* Mempool registration. */
struct mlx5_mempool_reg {
	LIST_ENTRY(mlx5_mempool_reg) next;
	/** Registered mempool, used to designate registrations. */
	struct rte_mempool *mp;
	/** Memory regions for the address ranges of the mempool. */
	struct mlx5_mempool_mr *mrs;
	/** Number of memory regions. */
	unsigned int mrs_n;
};

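/**
 * Mempool free callback for Multi-Packet RQ (MPRQ) buffers (editor's note:
 * this comment was added for documentation; it describes the code below).
 * The buffer is returned to its mempool either when it is still exclusively
 * owned (refcnt == 1) or when the last attached mbuf drops its reference;
 * in the latter case the reference count is reset to 1 for reuse.
 *
 * @param addr
 *   Unused buffer address.
 * @param opaque
 *   Pointer to the mlx5_mprq_buf being freed.
 */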
void
mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
{
	struct mlx5_mprq_buf *buf = opaque;

	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
		rte_mempool_put(buf->mp, buf);
	} else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
					       __ATOMIC_RELAXED) == 0)) {
		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
		rte_mempool_put(buf->mp, buf);
	}
}

/**
 * Expand B-tree table to a given size. Must not be called while holding
 * memory_hotplug_lock or share_cache.rwlock because rte_realloc() is used.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * The downside of directly using rte_realloc() is that SOCKET_ID_ANY
	 * is used internally if there is no room to expand in place. Because
	 * this is a rare case on a very slow path, it is acceptable.
	 * Initially cache_bh[] is given practically enough space, and once it
	 * has been expanded, further expansion should never be needed again.
	 */
	mem = mlx5_realloc(bt->table, MLX5_MEM_RTE | MLX5_MEM_ZERO,
			   n * sizeof(struct mr_cache_entry), 0, SOCKET_ID_ANY);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up the LKey in a given B-tree lookup table, store the last index, and
 * return the LKey found.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, the index where the search
 *   stopped is returned, so that it can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	MLX5_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
				    lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	MLX5_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}
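
/*
 * Illustrative example (editor's sketch, not part of the driver): with a
 * table { [0, 0) lkey=UINT32_MAX, [0x1000, 0x3000) lkey=0x11,
 * [0x5000, 0x6000) lkey=0x22 }, looking up 0x2000 returns 0x11 with
 * *idx == 1, while looking up 0x4000 returns UINT32_MAX, also with
 * *idx == 1, i.e. the slot holding the greatest start below the key, after
 * which a new entry covering 0x4000 would be placed by mr_btree_insert()
 * below.
 */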

/**
 * Insert an entry into the B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	MLX5_ASSERT(bt != NULL);
	MLX5_ASSERT(bt->len <= bt->size);
	MLX5_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	MLX5_ASSERT(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				sizeof(struct mr_cache_entry) * n,
				0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DRV_LOG(DEBUG,
			"failed to allocate memory for btree cache on socket "
			"%d", socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mr_cache_entry) {
		.lkey = UINT32_MAX,
	};
	DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	return 0;
}
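
/*
 * Illustrative note (editor's sketch): a freshly initialized table holds
 * only the sentinel entry { start = 0, end = 0, lkey = UINT32_MAX }, so the
 * first lookup always lands on index 0 and misses, and every real entry is
 * inserted after the sentinel.
 */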

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	mlx5_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	int idx;
	struct mr_cache_entry *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mr_cache_entry *entry = &lkp_tbl[idx];

		DRV_LOG(DEBUG, "B-tree(%p)[%u],"
		      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}

/**
 * Initialize per-queue MR control descriptor.
 *
 * @param mr_ctrl
 *   Pointer to MR control structure.
 * @param cdev
 *   Pointer to the mlx5 device structure.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, struct mlx5_common_device *cdev,
		  int socket)
{
	if (mr_ctrl == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	mr_ctrl->cdev = cdev;
	/* Save pointer of global generation number to check memory event. */
	mr_ctrl->dev_gen_ptr = &cdev->mr_scache.dev_gen;
	/* Initialize B-tree and allocate memory for bottom-half cache table. */
	return mlx5_mr_btree_init(&mr_ctrl->cache_bh, MLX5_MR_BTREE_CACHE_N,
				  socket);
}
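
/*
 * Usage sketch (editor's note, hypothetical names): a PMD typically calls
 * this once per queue at setup time, e.g.
 *
 *	if (mlx5_mr_ctrl_init(&rxq->mr_ctrl, cdev, socket) < 0)
 *		return -rte_errno;
 *
 * and afterwards resolves LKeys on the datapath through the per-queue
 * top-half/bottom-half caches filled by mlx5_mr_addr2mr_bh() below.
 */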

/**
 * Find virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have memseg list. */
	if (mr->msl == NULL) {
		MLX5_ASSERT(mr->ms_bmp_n == 1);
		MLX5_ASSERT(mr->ms_n == 1);
		MLX5_ASSERT(base_idx == 0);
		/*
		 * Can't search it from memseg list but get it directly from
		 * pmd_mr as there's only one chunk.
		 */
		entry->start = (uintptr_t)mr->pmd_mr.addr;
		entry->end = (uintptr_t)mr->pmd_mr.addr + mr->pmd_mr.len;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
	}
	return idx;
}
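
/*
 * Illustrative example (editor's sketch): for a 4-memseg MR whose bitmap is
 * 1,1,0,1, the first call with base_idx = 0 fills the entry with the chunk
 * covering memsegs 0-1 and returns 2; calling again with that index skips
 * the cleared bit and returns 4 with the single-memseg chunk at index 3.
 * Callers loop until the returned index reaches ms_bmp_n or the entry stays
 * empty.
 */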

/**
 * Insert an MR into the global B-tree cache. It may fail when the system is
 * low on memory. Then, this entry will have to be searched by
 * mlx5_mr_lookup_list() in mlx5_mr_create() on miss.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr)
{
	unsigned int n;

	DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
		(void *)mr, (void *)share_cache);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * because of deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up address in the global MR cache.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr)
{
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed since it failed to expand the
	 * B-tree table, it can't have all the existing MRs. Then, the address
	 * has to be searched by traversing the original MR list instead, which
	 * is a very slow path. Otherwise, the global cache is all inclusive.
	 */
	if (!unlikely(share_cache->cache.overflow)) {
		lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*share_cache->cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mlx5_mr_lookup_list(share_cache, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}

/**
 * Free MR resources. The MR lock must not be held to avoid a deadlock:
 * rte_free() can raise a memory free event, and the callback function would
 * spin on the lock.
 *
 * @param mr
 *   Pointer to MR to free.
 * @param dereg_mr_cb
 *   Callback to deregister the underlying pmd_mr.
 */
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	dereg_mr_cb(&mr->pmd_mr);
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	mlx5_free(mr);
}

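/**
 * Rebuild the global B-tree cache from the original MR list (editor's note:
 * this comment was added for documentation; it describes the code below).
 * The cache is flushed down to the sentinel entry and repopulated from every
 * existing MR.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */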
void
mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
	/* Flush cache to rebuild. */
	share_cache->cache.len = 1;
	share_cache->cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr)
		if (mlx5_mr_insert_cache(share_cache, mr) < 0)
			return;
}

/**
 * Release resources of detached MR having no online entry.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
static void
mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * An MR can't be freed while holding the lock because rte_free()
	 * could call the memory free callback function, which would be a
	 * deadlock situation.
	 */
	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = share_cache->mr_free_list;
	LIST_INIT(&share_cache->mr_free_list);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	}
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called on a secondary process, then a request is sent to
 * the primary process in order to create an MR for the address. As the global
 * MR list is in shared memory, the subsequent LKey lookup should succeed
 * unless the request fails.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_secondary(struct mlx5_common_device *cdev,
			 struct mlx5_mr_share_cache *share_cache,
			 struct mr_cache_entry *entry, uintptr_t addr)
{
	int ret;

	DRV_LOG(DEBUG, "Requesting MR creation for address (%p)", (void *)addr);
	ret = mlx5_mp_req_mr_create(cdev, addr);
	if (ret) {
		DRV_LOG(DEBUG, "Fail to request MR creation for address (%p)",
			(void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	DRV_LOG(DEBUG, "MR CREATED by primary process for %p:\n"
		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
		(void *)addr, entry->start, entry->end, entry->lkey);
	return entry->lkey;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register entire virtually contiguous memory chunk around the address.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en)
{
	struct mr_find_contig_memsegs_data data = {.addr = addr, };
	struct mr_find_contig_memsegs_data data_re;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	int ms_idx_shift = -1;
	uint32_t bmp_size;
	void *bmp_mem;
	uint32_t ms_n;
	uint32_t n;
	size_t len;

	DRV_LOG(DEBUG, "Creating an MR using address (%p)", (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called with holding either
	 * memory_hotplug_lock or share_cache->rwlock. MRs on the free list have
	 * been detached by the memory free event but couldn't be released
	 * inside the callback due to deadlock. As a result, releasing resources
	 * is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(share_cache);
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register maximum range.
	 * In the best case where mempools are not dynamically recreated and
	 * '--socket-mem' is specified as an EAL option, it is very likely to
	 * have only one MR(LKey) per socket and per hugepage size even
	 * though the system memory is highly fragmented. As the whole memory
	 * chunk will be pinned by the kernel, it can't be reused unless the
	 * entire chunk is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may drop performance if there
	 * are many MRs to lookup on the datapath.
	 */
	if (!mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"Unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX5_ASSERT(data.msl);
	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX5_ASSERT(ms);
	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DRV_LOG(DEBUG, "Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " page_sz=0x%" PRIx64 ", ms_n=%u",
	      (void *)addr, data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE) +
			 bmp_size, RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DRV_LOG(DEBUG, "Unable to allocate memory for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize memseg bitmap. To
	 * see if a memseg of ms_idx in the memseg-list is still valid, check:
	 *	rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DRV_LOG(DEBUG, "Unable to initialize bitmap for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Should recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there's any memory
	 * related calls in a critical path, resource allocation above can't be
	 * locked. If the memory has been changed at this point, try again with
	 * just single page. If not, go on with the big chunk atomically from
	 * here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DRV_LOG(DEBUG,
			"Unable to find virtually contiguous chunk for address "
			"(%p). rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		goto alloc_resources;
	}
	MLX5_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&share_cache->rwlock);
	/*
	 * Check the address is really missing. If other thread already created
	 * one or it is not found due to overflow, abort and return.
	 */
	if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to
		 * low-on-memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&share_cache->cache, entry);
		DRV_LOG(DEBUG, "Found MR for %p on final lookup, abort",
			(void *)addr);
		rte_rwlock_write_unlock(&share_cache->rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mr_cache_entry ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
		    UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create an MR for the memory chunk. Verbs: ibv_reg_mr() can
	 * be called with holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(DEBUG, "Fail to create an MR for address (%p)",
		      (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	MLX5_ASSERT((uintptr_t)mr->pmd_mr.addr == data.start);
	MLX5_ASSERT(mr->pmd_mr.len);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	DRV_LOG(DEBUG, "MR CREATED (%p) for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      (void *)mr, (void *)addr, data.start, data.end,
	      rte_cpu_to_be_32(mr->pmd_mr.lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&share_cache->rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, as this can be called in a datapath, a warning
	 * message per error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
	 * inside.
	 */
	mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	return UINT32_MAX;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from both primary and secondary processes.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_create(struct mlx5_common_device *cdev,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_mr_create_primary(cdev->pd, share_cache, entry, addr,
					     cdev->config.mr_ext_memseg_en);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mr_create_secondary(cdev, share_cache, entry, addr);
		break;
	default:
		break;
	}
	return ret;
}
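
/*
 * Usage sketch (editor's note): with no locks held, a caller can resolve a
 * missing address roughly as follows, assuming "cdev" is an initialized
 * mlx5_common_device:
 *
 *	struct mr_cache_entry entry;
 *	uint32_t lkey = mlx5_mr_create(cdev, &cdev->mr_scache, &entry, addr);
 *
 * where lkey == UINT32_MAX indicates failure with rte_errno set. In this
 * file it is invoked from mr_lookup_caches() below on a cache miss.
 */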

/**
 * Look up address in the global MR cache table. If not found, create a new MR.
 * Insert the found/created entry to local bottom-half cache table.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_lookup_caches(struct mlx5_mr_ctrl *mr_ctrl,
		 struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr_share_cache *share_cache = &mr_ctrl->cdev->mr_scache;
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint32_t lkey;
	uint16_t idx;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*share_cache->cache.table)[idx];
		rte_rwlock_read_unlock(&share_cache->rwlock);
		/*
		 * Update local cache. Even if it fails, return the found entry
		 * to update top-half cache. Next time, this entry will be found
		 * in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&share_cache->rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(mr_ctrl->cdev, share_cache, entry, addr);
	/*
	 * Update the local cache if a new global MR was successfully created.
	 * Even if creation failed, there is no action to take in this datapath
	 * code: the returned LKey is invalid, which will eventually make the
	 * HW fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on datapath. First search in cache_bh[] and if
 * it misses, search in the global MR cache table and update the new entry to
 * per-queue local caches.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_mr_addr2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in local lookup table, search in the global cache
		 * and local cache_bh[] will be updated inside if possible.
		 * Top-half cache entry will also be updated.
		 */
		lkey = mr_lookup_caches(mr_ctrl, repl, addr);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}
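
/*
 * Illustrative note (editor's sketch): the top-half cache is a small
 * circular array of MLX5_MR_CACHE_N entries. Each bottom-half resolution
 * overwrites the slot at "head" (the current victim), marks it most recently
 * used, and advances "head" with wraparound, so repeated misses evict
 * entries in FIFO order while "mru" always points at the last hit.
 */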

/**
 * Release all the created MRs and resources of the global MR cache of a
 * device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;

	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach from MR list and move to free list. */
	mr_next = LIST_FIRST(&share_cache->mr_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
	}
	LIST_INIT(&share_cache->mr_list);
	/* Free global cache. */
	mlx5_mr_btree_free(&share_cache->cache);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(share_cache);
}
/**
 * Initialize global MR cache of a device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket)
{
	/* Set the reg_mr and dereg_mr callback functions. */
	mlx5_os_set_reg_mr_cb(&share_cache->reg_mr_cb,
			      &share_cache->dereg_mr_cb);
	rte_rwlock_init(&share_cache->rwlock);
	rte_rwlock_init(&share_cache->mprwlock);
	share_cache->mp_cb_registered = 0;
	/* Initialize B-tree and allocate memory for global MR cache table. */
	return mlx5_mr_btree_init(&share_cache->cache,
				  MLX5_MR_BTREE_CACHE_N * 2, socket);
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR local cache.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}

/**
 * Creates a memory region for external memory, that is, memory which is not
 * part of the DPDK memory segments.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param addr
 *   Starting virtual address of memory.
 * @param len
 *   Length of memory segment being mapped.
 * @param socket_id
 *   Socket to allocate heap memory for the control structures.
 * @param reg_mr_cb
 *   Callback to register the underlying pmd_mr.
 *
 * @return
 *   Pointer to MR structure on success, NULL otherwise.
 */
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
		   mlx5_reg_mr_t reg_mr_cb)
{
	struct mlx5_mr *mr = NULL;

	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE),
			 RTE_CACHE_LINE_SIZE, socket_id);
	if (mr == NULL)
		return NULL;
	reg_mr_cb(pd, (void *)addr, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(WARNING,
			"Fail to create MR for address (%p)",
			(void *)addr);
		mlx5_free(mr);
		return NULL;
	}
	mr->msl = NULL; /* Mark it is external memory. */
	mr->ms_bmp = NULL;
	mr->ms_n = 1;
	mr->ms_bmp_n = 1;
	DRV_LOG(DEBUG,
		"MR CREATED (%p) for external memory %p:\n"
		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		(void *)mr, (void *)addr,
		addr, addr + len, rte_cpu_to_be_32(mr->pmd_mr.lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	return mr;
}
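
/*
 * Usage sketch (editor's note, hypothetical buffer): registering a chunk of
 * externally allocated memory, assuming "share_cache" is the device's global
 * MR cache:
 *
 *	void *buf = malloc(len);
 *	struct mlx5_mr *mr = mlx5_create_mr_ext(pd, (uintptr_t)buf, len,
 *						SOCKET_ID_ANY,
 *						share_cache->reg_mr_cb);
 *
 * The returned MR has msl == NULL, so mr_find_next_chunk() above reports it
 * as a single chunk taken straight from pmd_mr.
 */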
1139b8dc6b0eSVu Pham 
1140b8dc6b0eSVu Pham /**
11412f6c2adbSMichael Baum  * Callback for memory free event. Iterate freed memsegs and check whether each
11422f6c2adbSMichael Baum  * belongs to an existing MR. If found, clear the corresponding bit in the MR's
11432f6c2adbSMichael Baum  * bitmap. As a result, the MR becomes fragmented. If it becomes empty, the MR
11442f6c2adbSMichael Baum  * will be freed later by mlx5_mr_garbage_collect(). Even if this callback is
11452f6c2adbSMichael Baum  * called from a secondary process, the garbage collector runs in the primary
11462f6c2adbSMichael Baum  * process, as a secondary process can't call mlx5_mr_create().
11472f6c2adbSMichael Baum  *
11482f6c2adbSMichael Baum  * If anything changed, the global cache must be rebuilt and this event
11492f6c2adbSMichael Baum  * propagated to data-plane threads so that they flush their local caches.
11502f6c2adbSMichael Baum  *
11512f6c2adbSMichael Baum  * @param share_cache
11522f6c2adbSMichael Baum  *   Pointer to a global shared MR cache.
11532f6c2adbSMichael Baum  * @param ibdev_name
11542f6c2adbSMichael Baum  *   Name of ibv device.
11552f6c2adbSMichael Baum  * @param addr
11562f6c2adbSMichael Baum  *   Address of freed memory.
11572f6c2adbSMichael Baum  * @param len
11582f6c2adbSMichael Baum  *   Size of freed memory.
11592f6c2adbSMichael Baum  */
11602f6c2adbSMichael Baum void
11612f6c2adbSMichael Baum mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
11622f6c2adbSMichael Baum 		     const char *ibdev_name, const void *addr, size_t len)
11632f6c2adbSMichael Baum {
11642f6c2adbSMichael Baum 	const struct rte_memseg_list *msl;
11652f6c2adbSMichael Baum 	struct mlx5_mr *mr;
11662f6c2adbSMichael Baum 	int ms_n;
11672f6c2adbSMichael Baum 	int i;
11682f6c2adbSMichael Baum 	int rebuild = 0;
11692f6c2adbSMichael Baum 
11702f6c2adbSMichael Baum 	DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
11712f6c2adbSMichael Baum 		ibdev_name, addr, len);
11722f6c2adbSMichael Baum 	msl = rte_mem_virt2memseg_list(addr);
11732f6c2adbSMichael Baum 	/* addr and len must be page-aligned. */
11742f6c2adbSMichael Baum 	MLX5_ASSERT((uintptr_t)addr ==
11752f6c2adbSMichael Baum 		    RTE_ALIGN((uintptr_t)addr, msl->page_sz));
11762f6c2adbSMichael Baum 	MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
11772f6c2adbSMichael Baum 	ms_n = len / msl->page_sz;
11782f6c2adbSMichael Baum 	rte_rwlock_write_lock(&share_cache->rwlock);
11792f6c2adbSMichael Baum 	/* Clear bits of freed memsegs from MR. */
11802f6c2adbSMichael Baum 	for (i = 0; i < ms_n; ++i) {
11812f6c2adbSMichael Baum 		const struct rte_memseg *ms;
11822f6c2adbSMichael Baum 		struct mr_cache_entry entry;
11832f6c2adbSMichael Baum 		uintptr_t start;
11842f6c2adbSMichael Baum 		int ms_idx;
11852f6c2adbSMichael Baum 		uint32_t pos;
11862f6c2adbSMichael Baum 
11872f6c2adbSMichael Baum 		/* Find MR having this memseg. */
11882f6c2adbSMichael Baum 		start = (uintptr_t)addr + i * msl->page_sz;
11892f6c2adbSMichael Baum 		mr = mlx5_mr_lookup_list(share_cache, &entry, start);
11902f6c2adbSMichael Baum 		if (mr == NULL)
11912f6c2adbSMichael Baum 			continue;
11922f6c2adbSMichael Baum 		MLX5_ASSERT(mr->msl); /* Can't be external memory. */
11932f6c2adbSMichael Baum 		ms = rte_mem_virt2memseg((void *)start, msl);
11942f6c2adbSMichael Baum 		MLX5_ASSERT(ms != NULL);
11952f6c2adbSMichael Baum 		MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
11962f6c2adbSMichael Baum 		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
11972f6c2adbSMichael Baum 		pos = ms_idx - mr->ms_base_idx;
11982f6c2adbSMichael Baum 		MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
11992f6c2adbSMichael Baum 		MLX5_ASSERT(pos < mr->ms_bmp_n);
12002f6c2adbSMichael Baum 		DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
12012f6c2adbSMichael Baum 			ibdev_name, (void *)mr, pos, (void *)start);
12022f6c2adbSMichael Baum 		rte_bitmap_clear(mr->ms_bmp, pos);
12032f6c2adbSMichael Baum 		if (--mr->ms_n == 0) {
12042f6c2adbSMichael Baum 			LIST_REMOVE(mr, mr);
12052f6c2adbSMichael Baum 			LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
12062f6c2adbSMichael Baum 			DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
12072f6c2adbSMichael Baum 				ibdev_name, (void *)mr);
12082f6c2adbSMichael Baum 		}
12092f6c2adbSMichael Baum 		/*
12102f6c2adbSMichael Baum 		 * MR is fragmented or will be freed. The global cache must
12112f6c2adbSMichael Baum 		 * be rebuilt.
12122f6c2adbSMichael Baum 		 */
12132f6c2adbSMichael Baum 		rebuild = 1;
12142f6c2adbSMichael Baum 	}
12152f6c2adbSMichael Baum 	if (rebuild) {
12162f6c2adbSMichael Baum 		mlx5_mr_rebuild_cache(share_cache);
12172f6c2adbSMichael Baum 		/*
12182f6c2adbSMichael Baum 		 * No explicit wmb is needed after updating dev_gen due to
12192f6c2adbSMichael Baum 		 * store-release ordering in unlock that provides the
12202f6c2adbSMichael Baum 		 * implicit barrier at the software visible level.
12212f6c2adbSMichael Baum 		 */
12222f6c2adbSMichael Baum 		++share_cache->dev_gen;
12232f6c2adbSMichael Baum 		DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
12242f6c2adbSMichael Baum 			share_cache->dev_gen);
12252f6c2adbSMichael Baum 	}
12262f6c2adbSMichael Baum 	rte_rwlock_write_unlock(&share_cache->rwlock);
12272f6c2adbSMichael Baum }
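
/*
 * Sketch of the intended call site (an assumption based on the comment
 * above): an EAL memory event callback forwards each freed range here.
 * Only the EAL types and mlx5_free_mr_by_addr() are real; the callback
 * itself and the device name are hypothetical.
 */
static void
my_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		size_t len, void *arg)
{
	struct mlx5_mr_share_cache *share_cache = arg;

	if (event_type == RTE_MEM_EVENT_FREE)
		mlx5_free_mr_by_addr(share_cache, "mlx5_0", addr, len);
}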
12282f6c2adbSMichael Baum 
12292f6c2adbSMichael Baum /**
1230b8dc6b0eSVu Pham  * Dump all the created MRs and the global cache entries.
1231b8dc6b0eSVu Pham  *
1232fc59a1ecSMichael Baum  * @param share_cache
1233fc59a1ecSMichael Baum  *   Pointer to a global shared MR cache.
1234b8dc6b0eSVu Pham  */
1235b8dc6b0eSVu Pham void
1236b8dc6b0eSVu Pham mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
1237b8dc6b0eSVu Pham {
1238b8dc6b0eSVu Pham #ifdef RTE_LIBRTE_MLX5_DEBUG
1239b8dc6b0eSVu Pham 	struct mlx5_mr *mr;
1240b8dc6b0eSVu Pham 	int mr_n = 0;
1241b8dc6b0eSVu Pham 	int chunk_n = 0;
1242b8dc6b0eSVu Pham 
1243b8dc6b0eSVu Pham 	rte_rwlock_read_lock(&share_cache->rwlock);
1244b8dc6b0eSVu Pham 	/* Iterate all the existing MRs. */
1245b8dc6b0eSVu Pham 	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
1246b8dc6b0eSVu Pham 		unsigned int n;
1247b8dc6b0eSVu Pham 
124887acdcc7SThomas Monjalon 		DRV_LOG(DEBUG, "MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
124956d20677SOphir Munk 		      mr_n++, rte_cpu_to_be_32(mr->pmd_mr.lkey),
1250b8dc6b0eSVu Pham 		      mr->ms_n, mr->ms_bmp_n);
1251b8dc6b0eSVu Pham 		if (mr->ms_n == 0)
1252b8dc6b0eSVu Pham 			continue;
1253b8dc6b0eSVu Pham 		for (n = 0; n < mr->ms_bmp_n; ) {
1254b8dc6b0eSVu Pham 			struct mr_cache_entry ret = { 0, };
1255b8dc6b0eSVu Pham 
1256b8dc6b0eSVu Pham 			n = mr_find_next_chunk(mr, &ret, n);
1257b8dc6b0eSVu Pham 			if (!ret.end)
1258b8dc6b0eSVu Pham 				break;
125987acdcc7SThomas Monjalon 			DRV_LOG(DEBUG,
126087acdcc7SThomas Monjalon 				"  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
1261b8dc6b0eSVu Pham 				chunk_n++, ret.start, ret.end);
1262b8dc6b0eSVu Pham 		}
1263b8dc6b0eSVu Pham 	}
126487acdcc7SThomas Monjalon 	DRV_LOG(DEBUG, "Dumping global cache %p", (void *)share_cache);
1265b8dc6b0eSVu Pham 	mlx5_mr_btree_dump(&share_cache->cache);
1266b8dc6b0eSVu Pham 	rte_rwlock_read_unlock(&share_cache->rwlock);
1267b8dc6b0eSVu Pham #endif
1268b8dc6b0eSVu Pham }
1269690b2a88SDmitry Kozlyuk 
1270690b2a88SDmitry Kozlyuk static int
1271690b2a88SDmitry Kozlyuk mlx5_range_compare_start(const void *lhs, const void *rhs)
1272690b2a88SDmitry Kozlyuk {
1273690b2a88SDmitry Kozlyuk 	const struct mlx5_range *r1 = lhs, *r2 = rhs;
1274690b2a88SDmitry Kozlyuk 
1275690b2a88SDmitry Kozlyuk 	if (r1->start > r2->start)
1276690b2a88SDmitry Kozlyuk 		return 1;
1277690b2a88SDmitry Kozlyuk 	else if (r1->start < r2->start)
1278690b2a88SDmitry Kozlyuk 		return -1;
1279690b2a88SDmitry Kozlyuk 	return 0;
1280690b2a88SDmitry Kozlyuk }
1281690b2a88SDmitry Kozlyuk 
1282690b2a88SDmitry Kozlyuk static void
1283690b2a88SDmitry Kozlyuk mlx5_range_from_mempool_chunk(struct rte_mempool *mp, void *opaque,
1284690b2a88SDmitry Kozlyuk 			      struct rte_mempool_memhdr *memhdr,
1285690b2a88SDmitry Kozlyuk 			      unsigned int idx)
1286690b2a88SDmitry Kozlyuk {
1287690b2a88SDmitry Kozlyuk 	struct mlx5_range *ranges = opaque, *range = &ranges[idx];
1288690b2a88SDmitry Kozlyuk 	uint64_t page_size = rte_mem_page_size();
1289690b2a88SDmitry Kozlyuk 
1290690b2a88SDmitry Kozlyuk 	RTE_SET_USED(mp);
1291690b2a88SDmitry Kozlyuk 	range->start = RTE_ALIGN_FLOOR((uintptr_t)memhdr->addr, page_size);
1292690b2a88SDmitry Kozlyuk 	range->end = RTE_ALIGN_CEIL(range->start + memhdr->len, page_size);
1293690b2a88SDmitry Kozlyuk }
1294690b2a88SDmitry Kozlyuk 
1295690b2a88SDmitry Kozlyuk /**
1296*7297d2cdSDmitry Kozlyuk  * Collect page-aligned memory ranges of the mempool.
1297*7297d2cdSDmitry Kozlyuk  */
1298*7297d2cdSDmitry Kozlyuk static int
1299*7297d2cdSDmitry Kozlyuk mlx5_mempool_get_chunks(struct rte_mempool *mp, struct mlx5_range **out,
1300*7297d2cdSDmitry Kozlyuk 			unsigned int *out_n)
1301*7297d2cdSDmitry Kozlyuk {
1302*7297d2cdSDmitry Kozlyuk 	struct mlx5_range *chunks;
1303*7297d2cdSDmitry Kozlyuk 	unsigned int n;
1304*7297d2cdSDmitry Kozlyuk 
1305*7297d2cdSDmitry Kozlyuk 	n = mp->nb_mem_chunks;
1306*7297d2cdSDmitry Kozlyuk 	chunks = calloc(n, sizeof(chunks[0]));
1307*7297d2cdSDmitry Kozlyuk 	if (chunks == NULL)
1308*7297d2cdSDmitry Kozlyuk 		return -1;
1309*7297d2cdSDmitry Kozlyuk 	rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, chunks);
1310*7297d2cdSDmitry Kozlyuk 	*out = chunks;
1311*7297d2cdSDmitry Kozlyuk 	*out_n = n;
1312*7297d2cdSDmitry Kozlyuk 	return 0;
1313*7297d2cdSDmitry Kozlyuk }
1314*7297d2cdSDmitry Kozlyuk 
1315*7297d2cdSDmitry Kozlyuk struct mlx5_mempool_get_extmem_data {
1316*7297d2cdSDmitry Kozlyuk 	struct mlx5_range *heap;
1317*7297d2cdSDmitry Kozlyuk 	unsigned int heap_size;
1318*7297d2cdSDmitry Kozlyuk 	int ret;
1319*7297d2cdSDmitry Kozlyuk };
1320*7297d2cdSDmitry Kozlyuk 
1321*7297d2cdSDmitry Kozlyuk static void
1322*7297d2cdSDmitry Kozlyuk mlx5_mempool_get_extmem_cb(struct rte_mempool *mp, void *opaque,
1323*7297d2cdSDmitry Kozlyuk 			   void *obj, unsigned int obj_idx)
1324*7297d2cdSDmitry Kozlyuk {
1325*7297d2cdSDmitry Kozlyuk 	struct mlx5_mempool_get_extmem_data *data = opaque;
1326*7297d2cdSDmitry Kozlyuk 	struct rte_mbuf *mbuf = obj;
1327*7297d2cdSDmitry Kozlyuk 	uintptr_t addr = (uintptr_t)mbuf->buf_addr;
1328*7297d2cdSDmitry Kozlyuk 	struct mlx5_range *seg, *heap;
1329*7297d2cdSDmitry Kozlyuk 	struct rte_memseg_list *msl;
1330*7297d2cdSDmitry Kozlyuk 	size_t page_size;
1331*7297d2cdSDmitry Kozlyuk 	uintptr_t page_start;
1332*7297d2cdSDmitry Kozlyuk 	unsigned int pos = 0, len = data->heap_size, delta;
1333*7297d2cdSDmitry Kozlyuk 
1334*7297d2cdSDmitry Kozlyuk 	RTE_SET_USED(mp);
1335*7297d2cdSDmitry Kozlyuk 	RTE_SET_USED(obj_idx);
1336*7297d2cdSDmitry Kozlyuk 	if (data->ret < 0)
1337*7297d2cdSDmitry Kozlyuk 		return;
1338*7297d2cdSDmitry Kozlyuk 	/* Binary search for an already visited page. */
1339*7297d2cdSDmitry Kozlyuk 	while (len > 1) {
1340*7297d2cdSDmitry Kozlyuk 		delta = len / 2;
1341*7297d2cdSDmitry Kozlyuk 		if (addr < data->heap[pos + delta].start) {
1342*7297d2cdSDmitry Kozlyuk 			len = delta;
1343*7297d2cdSDmitry Kozlyuk 		} else {
1344*7297d2cdSDmitry Kozlyuk 			pos += delta;
1345*7297d2cdSDmitry Kozlyuk 			len -= delta;
1346*7297d2cdSDmitry Kozlyuk 		}
1347*7297d2cdSDmitry Kozlyuk 	}
1348*7297d2cdSDmitry Kozlyuk 	if (data->heap != NULL) {
1349*7297d2cdSDmitry Kozlyuk 		seg = &data->heap[pos];
1350*7297d2cdSDmitry Kozlyuk 		if (seg->start <= addr && addr < seg->end)
1351*7297d2cdSDmitry Kozlyuk 			return;
1352*7297d2cdSDmitry Kozlyuk 	}
1353*7297d2cdSDmitry Kozlyuk 	/* Determine the page boundaries and remember them. */
1354*7297d2cdSDmitry Kozlyuk 	heap = realloc(data->heap, sizeof(heap[0]) * (data->heap_size + 1));
1355*7297d2cdSDmitry Kozlyuk 	if (heap == NULL) {
1356*7297d2cdSDmitry Kozlyuk 		free(data->heap);
1357*7297d2cdSDmitry Kozlyuk 		data->heap = NULL;
1358*7297d2cdSDmitry Kozlyuk 		data->ret = -1;
1359*7297d2cdSDmitry Kozlyuk 		return;
1360*7297d2cdSDmitry Kozlyuk 	}
1361*7297d2cdSDmitry Kozlyuk 	data->heap = heap;
1362*7297d2cdSDmitry Kozlyuk 	data->heap_size++;
1363*7297d2cdSDmitry Kozlyuk 	seg = &heap[data->heap_size - 1];
1364*7297d2cdSDmitry Kozlyuk 	msl = rte_mem_virt2memseg_list((void *)addr);
1365*7297d2cdSDmitry Kozlyuk 	page_size = msl != NULL ? msl->page_sz : rte_mem_page_size();
1366*7297d2cdSDmitry Kozlyuk 	page_start = RTE_PTR_ALIGN_FLOOR(addr, page_size);
1367*7297d2cdSDmitry Kozlyuk 	seg->start = page_start;
1368*7297d2cdSDmitry Kozlyuk 	seg->end = page_start + page_size;
1369*7297d2cdSDmitry Kozlyuk 	/* Maintain the heap order. */
1370*7297d2cdSDmitry Kozlyuk 	qsort(data->heap, data->heap_size, sizeof(heap[0]),
1371*7297d2cdSDmitry Kozlyuk 	      mlx5_range_compare_start);
1372*7297d2cdSDmitry Kozlyuk }
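
/*
 * Worked example for the callback above (illustrative addresses,
 * 4 KiB pages): with visited pages {[0x1000, 0x2000), [0x5000, 0x6000)},
 * an mbuf buffer at 0x5800 resolves to pos == 1 and returns early;
 * a buffer at 0x3100 misses, appends [0x3000, 0x4000), and the final
 * qsort() restores ascending order of the heap.
 */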
1373*7297d2cdSDmitry Kozlyuk 
1374*7297d2cdSDmitry Kozlyuk /**
1375*7297d2cdSDmitry Kozlyuk  * Recover the pages backing external memory as precisely as possible
1376*7297d2cdSDmitry Kozlyuk  * for a mempool created with RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF.
1377*7297d2cdSDmitry Kozlyuk  * Pages are stored in a sorted heap for efficient search, since there
1378*7297d2cdSDmitry Kozlyuk  * may be many mbufs.
1378*7297d2cdSDmitry Kozlyuk  */
1379*7297d2cdSDmitry Kozlyuk static int
1380*7297d2cdSDmitry Kozlyuk mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range **out,
1381*7297d2cdSDmitry Kozlyuk 			unsigned int *out_n)
1382*7297d2cdSDmitry Kozlyuk {
1383*7297d2cdSDmitry Kozlyuk 	struct mlx5_mempool_get_extmem_data data;
1384*7297d2cdSDmitry Kozlyuk 
1385*7297d2cdSDmitry Kozlyuk 	memset(&data, 0, sizeof(data));
1386*7297d2cdSDmitry Kozlyuk 	rte_mempool_obj_iter(mp, mlx5_mempool_get_extmem_cb, &data);
1387*7297d2cdSDmitry Kozlyuk 	if (data.ret < 0)
1388*7297d2cdSDmitry Kozlyuk 		return -1;
1389*7297d2cdSDmitry Kozlyuk 	*out = data.heap;
1390*7297d2cdSDmitry Kozlyuk 	*out_n = data.heap_size;
1391*7297d2cdSDmitry Kozlyuk 	return 0;
1392*7297d2cdSDmitry Kozlyuk }
1393*7297d2cdSDmitry Kozlyuk 
1394*7297d2cdSDmitry Kozlyuk /**
1395690b2a88SDmitry Kozlyuk  * Get VA-contiguous ranges of the mempool memory.
1396690b2a88SDmitry Kozlyuk  * Each range start and end is aligned to the system page size.
1397690b2a88SDmitry Kozlyuk  *
1398690b2a88SDmitry Kozlyuk  * @param[in] mp
1399690b2a88SDmitry Kozlyuk  *   Analyzed mempool.
1400690b2a88SDmitry Kozlyuk  * @param[out] out
1401690b2a88SDmitry Kozlyuk  *   Receives the ranges, caller must release it with free().
1402690b2a88SDmitry Kozlyuk  * @param[out] out_n
1403690b2a88SDmitry Kozlyuk  *   Receives the number of @p out elements.
1404690b2a88SDmitry Kozlyuk  *
1405690b2a88SDmitry Kozlyuk  * @return
1406690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure.
1407690b2a88SDmitry Kozlyuk  */
1408690b2a88SDmitry Kozlyuk static int
1409690b2a88SDmitry Kozlyuk mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,
1410690b2a88SDmitry Kozlyuk 			unsigned int *out_n)
1411690b2a88SDmitry Kozlyuk {
1412690b2a88SDmitry Kozlyuk 	struct mlx5_range *chunks;
1413*7297d2cdSDmitry Kozlyuk 	unsigned int chunks_n, contig_n, i;
1414*7297d2cdSDmitry Kozlyuk 	int ret;
1415690b2a88SDmitry Kozlyuk 
1416*7297d2cdSDmitry Kozlyuk 	/* Collect the pool's underlying memory. */
1417*7297d2cdSDmitry Kozlyuk 	ret = (rte_pktmbuf_priv_flags(mp) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) ?
1418*7297d2cdSDmitry Kozlyuk 	      mlx5_mempool_get_extmem(mp, &chunks, &chunks_n) :
1419*7297d2cdSDmitry Kozlyuk 	      mlx5_mempool_get_chunks(mp, &chunks, &chunks_n);
1420*7297d2cdSDmitry Kozlyuk 	if (ret < 0)
1421*7297d2cdSDmitry Kozlyuk 		return ret;
1422690b2a88SDmitry Kozlyuk 	/* Merge adjacent chunks and place them at the beginning. */
1423690b2a88SDmitry Kozlyuk 	qsort(chunks, chunks_n, sizeof(chunks[0]), mlx5_range_compare_start);
1424690b2a88SDmitry Kozlyuk 	contig_n = 1;
1425690b2a88SDmitry Kozlyuk 	for (i = 1; i < chunks_n; i++)
1426690b2a88SDmitry Kozlyuk 		if (chunks[i - 1].end != chunks[i].start) {
1427690b2a88SDmitry Kozlyuk 			chunks[contig_n - 1].end = chunks[i - 1].end;
1428690b2a88SDmitry Kozlyuk 			chunks[contig_n] = chunks[i];
1429690b2a88SDmitry Kozlyuk 			contig_n++;
1430690b2a88SDmitry Kozlyuk 		}
1431690b2a88SDmitry Kozlyuk 	/* Extend the last contiguous chunk to the end of the mempool. */
1432690b2a88SDmitry Kozlyuk 	chunks[contig_n - 1].end = chunks[i - 1].end;
1433690b2a88SDmitry Kozlyuk 	*out = chunks;
1434690b2a88SDmitry Kozlyuk 	*out_n = contig_n;
1435690b2a88SDmitry Kozlyuk 	return 0;
1436690b2a88SDmitry Kozlyuk }
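
/*
 * Worked example for the merge above (illustrative addresses): sorted
 * page-aligned chunks [0x1000, 0x2000), [0x2000, 0x3000), [0x5000, 0x6000)
 * collapse into the two VA-contiguous ranges [0x1000, 0x3000) and
 * [0x5000, 0x6000), so contig_n == 2.
 */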
1437690b2a88SDmitry Kozlyuk 
1438690b2a88SDmitry Kozlyuk /**
1439690b2a88SDmitry Kozlyuk  * Analyze mempool memory to select memory ranges to register.
1440690b2a88SDmitry Kozlyuk  *
1441690b2a88SDmitry Kozlyuk  * @param[in] mp
1442690b2a88SDmitry Kozlyuk  *   Mempool to analyze.
1443690b2a88SDmitry Kozlyuk  * @param[out] out
1444690b2a88SDmitry Kozlyuk  *   Receives memory ranges to register, aligned to the system page size.
1445690b2a88SDmitry Kozlyuk  *   The caller must release them with free().
1446690b2a88SDmitry Kozlyuk  * @param[out] out_n
1447690b2a88SDmitry Kozlyuk  *   Receives the number of @p out items.
1448690b2a88SDmitry Kozlyuk  * @param[out] share_hugepage
1449690b2a88SDmitry Kozlyuk  *   Receives True if the entire pool resides within a single hugepage.
1450690b2a88SDmitry Kozlyuk  *
1451690b2a88SDmitry Kozlyuk  * @return
1452690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure.
1453690b2a88SDmitry Kozlyuk  */
1454690b2a88SDmitry Kozlyuk static int
1455690b2a88SDmitry Kozlyuk mlx5_mempool_reg_analyze(struct rte_mempool *mp, struct mlx5_range **out,
1456690b2a88SDmitry Kozlyuk 			 unsigned int *out_n, bool *share_hugepage)
1457690b2a88SDmitry Kozlyuk {
1458690b2a88SDmitry Kozlyuk 	struct mlx5_range *ranges = NULL;
1459690b2a88SDmitry Kozlyuk 	unsigned int i, ranges_n = 0;
1460690b2a88SDmitry Kozlyuk 	struct rte_memseg_list *msl;
1461690b2a88SDmitry Kozlyuk 
1462690b2a88SDmitry Kozlyuk 	if (mlx5_get_mempool_ranges(mp, &ranges, &ranges_n) < 0) {
1463690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot get address ranges for mempool %s",
1464690b2a88SDmitry Kozlyuk 			mp->name);
1465690b2a88SDmitry Kozlyuk 		return -1;
1466690b2a88SDmitry Kozlyuk 	}
1467690b2a88SDmitry Kozlyuk 	/* Check if the hugepage of the pool can be shared. */
1468690b2a88SDmitry Kozlyuk 	*share_hugepage = false;
1469690b2a88SDmitry Kozlyuk 	msl = rte_mem_virt2memseg_list((void *)ranges[0].start);
1470690b2a88SDmitry Kozlyuk 	if (msl != NULL) {
1471690b2a88SDmitry Kozlyuk 		uint64_t hugepage_sz = 0;
1472690b2a88SDmitry Kozlyuk 
1473690b2a88SDmitry Kozlyuk 		/* Check that all ranges are on pages of the same size. */
1474690b2a88SDmitry Kozlyuk 		for (i = 0; i < ranges_n; i++) {
1475690b2a88SDmitry Kozlyuk 			if (hugepage_sz != 0 && hugepage_sz != msl->page_sz)
1476690b2a88SDmitry Kozlyuk 				break;
1477690b2a88SDmitry Kozlyuk 			hugepage_sz = msl->page_sz;
1478690b2a88SDmitry Kozlyuk 		}
1479690b2a88SDmitry Kozlyuk 		if (i == ranges_n) {
1480690b2a88SDmitry Kozlyuk 			/*
1481690b2a88SDmitry Kozlyuk 			 * If the entire pool is within one hugepage,
1482690b2a88SDmitry Kozlyuk 			 * combine all ranges into one of the hugepage size.
1483690b2a88SDmitry Kozlyuk 			 */
1484690b2a88SDmitry Kozlyuk 			uintptr_t reg_start = ranges[0].start;
1485690b2a88SDmitry Kozlyuk 			uintptr_t reg_end = ranges[ranges_n - 1].end;
1486690b2a88SDmitry Kozlyuk 			uintptr_t hugepage_start =
1487690b2a88SDmitry Kozlyuk 				RTE_ALIGN_FLOOR(reg_start, hugepage_sz);
1488690b2a88SDmitry Kozlyuk 			uintptr_t hugepage_end = hugepage_start + hugepage_sz;
1489690b2a88SDmitry Kozlyuk 			if (reg_end < hugepage_end) {
1490690b2a88SDmitry Kozlyuk 				ranges[0].start = hugepage_start;
1491690b2a88SDmitry Kozlyuk 				ranges[0].end = hugepage_end;
1492690b2a88SDmitry Kozlyuk 				ranges_n = 1;
1493690b2a88SDmitry Kozlyuk 				*share_hugepage = true;
1494690b2a88SDmitry Kozlyuk 			}
1495690b2a88SDmitry Kozlyuk 		}
1496690b2a88SDmitry Kozlyuk 	}
1497690b2a88SDmitry Kozlyuk 	*out = ranges;
1498690b2a88SDmitry Kozlyuk 	*out_n = ranges_n;
1499690b2a88SDmitry Kozlyuk 	return 0;
1500690b2a88SDmitry Kozlyuk }
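
/*
 * Worked example (illustrative): a pool whose single range
 * [0x200000 + 0x1000, 0x200000 + 0x42000) sits on one 2 MiB hugepage is
 * widened to [0x200000, 0x400000), ranges_n becomes 1, and
 * *share_hugepage is set so the MR can be shared with other pools
 * residing in the same hugepage.
 */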
1501690b2a88SDmitry Kozlyuk 
1502690b2a88SDmitry Kozlyuk /** Create a registration object for the mempool. */
1503690b2a88SDmitry Kozlyuk static struct mlx5_mempool_reg *
1504690b2a88SDmitry Kozlyuk mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n)
1505690b2a88SDmitry Kozlyuk {
1506690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr = NULL;
1507690b2a88SDmitry Kozlyuk 
1508690b2a88SDmitry Kozlyuk 	mpr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1509690b2a88SDmitry Kozlyuk 			  sizeof(*mpr) + mrs_n * sizeof(mpr->mrs[0]),
1510690b2a88SDmitry Kozlyuk 			  RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1511690b2a88SDmitry Kozlyuk 	if (mpr == NULL) {
1512690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot allocate mempool %s registration object",
1513690b2a88SDmitry Kozlyuk 			mp->name);
1514690b2a88SDmitry Kozlyuk 		return NULL;
1515690b2a88SDmitry Kozlyuk 	}
1516690b2a88SDmitry Kozlyuk 	mpr->mp = mp;
1517690b2a88SDmitry Kozlyuk 	mpr->mrs = (struct mlx5_mempool_mr *)(mpr + 1);
1518690b2a88SDmitry Kozlyuk 	mpr->mrs_n = mrs_n;
1519690b2a88SDmitry Kozlyuk 	return mpr;
1520690b2a88SDmitry Kozlyuk }
1521690b2a88SDmitry Kozlyuk 
1522690b2a88SDmitry Kozlyuk /**
1523690b2a88SDmitry Kozlyuk  * Destroy a mempool registration object.
1524690b2a88SDmitry Kozlyuk  *
1525690b2a88SDmitry Kozlyuk  * @param standalone
1526690b2a88SDmitry Kozlyuk  *   Whether @p mpr owns its MRs exclusively, i.e. they are not shared.
1527690b2a88SDmitry Kozlyuk  */
1528690b2a88SDmitry Kozlyuk static void
1529690b2a88SDmitry Kozlyuk mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,
1530690b2a88SDmitry Kozlyuk 			 struct mlx5_mempool_reg *mpr, bool standalone)
1531690b2a88SDmitry Kozlyuk {
1532690b2a88SDmitry Kozlyuk 	if (standalone) {
1533690b2a88SDmitry Kozlyuk 		unsigned int i;
1534690b2a88SDmitry Kozlyuk 
1535690b2a88SDmitry Kozlyuk 		for (i = 0; i < mpr->mrs_n; i++)
1536690b2a88SDmitry Kozlyuk 			share_cache->dereg_mr_cb(&mpr->mrs[i].pmd_mr);
1537690b2a88SDmitry Kozlyuk 	}
1538690b2a88SDmitry Kozlyuk 	mlx5_free(mpr);
1539690b2a88SDmitry Kozlyuk }
1540690b2a88SDmitry Kozlyuk 
1541690b2a88SDmitry Kozlyuk /** Find registration object of a mempool. */
1542690b2a88SDmitry Kozlyuk static struct mlx5_mempool_reg *
1543690b2a88SDmitry Kozlyuk mlx5_mempool_reg_lookup(struct mlx5_mr_share_cache *share_cache,
1544690b2a88SDmitry Kozlyuk 			struct rte_mempool *mp)
1545690b2a88SDmitry Kozlyuk {
1546690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
1547690b2a88SDmitry Kozlyuk 
1548690b2a88SDmitry Kozlyuk 	LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
1549690b2a88SDmitry Kozlyuk 		if (mpr->mp == mp)
1550690b2a88SDmitry Kozlyuk 			break;
1551690b2a88SDmitry Kozlyuk 	return mpr;
1552690b2a88SDmitry Kozlyuk }
1553690b2a88SDmitry Kozlyuk 
1554690b2a88SDmitry Kozlyuk /** Increment reference counters of MRs used in the registration. */
1555690b2a88SDmitry Kozlyuk static void
1556690b2a88SDmitry Kozlyuk mlx5_mempool_reg_attach(struct mlx5_mempool_reg *mpr)
1557690b2a88SDmitry Kozlyuk {
1558690b2a88SDmitry Kozlyuk 	unsigned int i;
1559690b2a88SDmitry Kozlyuk 
1560690b2a88SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++)
1561690b2a88SDmitry Kozlyuk 		__atomic_add_fetch(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
1562690b2a88SDmitry Kozlyuk }
1563690b2a88SDmitry Kozlyuk 
1564690b2a88SDmitry Kozlyuk /**
1565690b2a88SDmitry Kozlyuk  * Decrement reference counters of MRs used in the registration.
1566690b2a88SDmitry Kozlyuk  *
1567690b2a88SDmitry Kozlyuk  * @return True if no more references to @p mpr MRs exist, False otherwise.
1568690b2a88SDmitry Kozlyuk  */
1569690b2a88SDmitry Kozlyuk static bool
1570690b2a88SDmitry Kozlyuk mlx5_mempool_reg_detach(struct mlx5_mempool_reg *mpr)
1571690b2a88SDmitry Kozlyuk {
1572690b2a88SDmitry Kozlyuk 	unsigned int i;
1573690b2a88SDmitry Kozlyuk 	bool ret = false;
1574690b2a88SDmitry Kozlyuk 
1575690b2a88SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++)
1576690b2a88SDmitry Kozlyuk 		ret |= __atomic_sub_fetch(&mpr->mrs[i].refcnt, 1,
1577690b2a88SDmitry Kozlyuk 					  __ATOMIC_RELAXED) == 0;
1578690b2a88SDmitry Kozlyuk 	return ret;
1579690b2a88SDmitry Kozlyuk }
1580690b2a88SDmitry Kozlyuk 
1581690b2a88SDmitry Kozlyuk static int
1582690b2a88SDmitry Kozlyuk mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
1583690b2a88SDmitry Kozlyuk 				 void *pd, struct rte_mempool *mp)
1584690b2a88SDmitry Kozlyuk {
1585690b2a88SDmitry Kozlyuk 	struct mlx5_range *ranges = NULL;
1586690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr, *new_mpr;
1587690b2a88SDmitry Kozlyuk 	unsigned int i, ranges_n;
1588690b2a88SDmitry Kozlyuk 	bool share_hugepage;
1589690b2a88SDmitry Kozlyuk 	int ret = -1;
1590690b2a88SDmitry Kozlyuk 
1591690b2a88SDmitry Kozlyuk 	/* Early check to avoid unnecessary creation of MRs. */
1592690b2a88SDmitry Kozlyuk 	rte_rwlock_read_lock(&share_cache->rwlock);
1593690b2a88SDmitry Kozlyuk 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1594690b2a88SDmitry Kozlyuk 	rte_rwlock_read_unlock(&share_cache->rwlock);
1595690b2a88SDmitry Kozlyuk 	if (mpr != NULL) {
1596690b2a88SDmitry Kozlyuk 		DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
1597690b2a88SDmitry Kozlyuk 			mp->name, pd);
1598690b2a88SDmitry Kozlyuk 		rte_errno = EEXIST;
1599690b2a88SDmitry Kozlyuk 		goto exit;
1600690b2a88SDmitry Kozlyuk 	}
1601690b2a88SDmitry Kozlyuk 	if (mlx5_mempool_reg_analyze(mp, &ranges, &ranges_n,
1602690b2a88SDmitry Kozlyuk 				     &share_hugepage) < 0) {
1603690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot get mempool %s memory ranges", mp->name);
1604690b2a88SDmitry Kozlyuk 		rte_errno = ENOMEM;
1605690b2a88SDmitry Kozlyuk 		goto exit;
1606690b2a88SDmitry Kozlyuk 	}
1607690b2a88SDmitry Kozlyuk 	new_mpr = mlx5_mempool_reg_create(mp, ranges_n);
1608690b2a88SDmitry Kozlyuk 	if (new_mpr == NULL) {
1609690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR,
1610690b2a88SDmitry Kozlyuk 			"Cannot create a registration object for mempool %s in PD %p",
1611690b2a88SDmitry Kozlyuk 			mp->name, pd);
1612690b2a88SDmitry Kozlyuk 		rte_errno = ENOMEM;
1613690b2a88SDmitry Kozlyuk 		goto exit;
1614690b2a88SDmitry Kozlyuk 	}
1615690b2a88SDmitry Kozlyuk 	/*
1616690b2a88SDmitry Kozlyuk 	 * If the entire mempool fits in a single hugepage, the MR for this
1617690b2a88SDmitry Kozlyuk 	 * hugepage can be shared across mempools that also fit in it.
1618690b2a88SDmitry Kozlyuk 	 */
1619690b2a88SDmitry Kozlyuk 	if (share_hugepage) {
1620690b2a88SDmitry Kozlyuk 		rte_rwlock_write_lock(&share_cache->rwlock);
1621690b2a88SDmitry Kozlyuk 		LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next) {
1622690b2a88SDmitry Kozlyuk 			if (mpr->mrs[0].pmd_mr.addr == (void *)ranges[0].start)
1623690b2a88SDmitry Kozlyuk 				break;
1624690b2a88SDmitry Kozlyuk 		}
1625690b2a88SDmitry Kozlyuk 		if (mpr != NULL) {
1626690b2a88SDmitry Kozlyuk 			new_mpr->mrs = mpr->mrs;
1627690b2a88SDmitry Kozlyuk 			mlx5_mempool_reg_attach(new_mpr);
1628690b2a88SDmitry Kozlyuk 			LIST_INSERT_HEAD(&share_cache->mempool_reg_list,
1629690b2a88SDmitry Kozlyuk 					 new_mpr, next);
1630690b2a88SDmitry Kozlyuk 		}
1631690b2a88SDmitry Kozlyuk 		rte_rwlock_write_unlock(&share_cache->rwlock);
1632690b2a88SDmitry Kozlyuk 		if (mpr != NULL) {
1633690b2a88SDmitry Kozlyuk 			DRV_LOG(DEBUG, "Shared MR %#x in PD %p for mempool %s with mempool %s",
1634690b2a88SDmitry Kozlyuk 				mpr->mrs[0].pmd_mr.lkey, pd, mp->name,
1635690b2a88SDmitry Kozlyuk 				mpr->mp->name);
1636690b2a88SDmitry Kozlyuk 			ret = 0;
1637690b2a88SDmitry Kozlyuk 			goto exit;
1638690b2a88SDmitry Kozlyuk 		}
1639690b2a88SDmitry Kozlyuk 	}
1640690b2a88SDmitry Kozlyuk 	for (i = 0; i < ranges_n; i++) {
1641690b2a88SDmitry Kozlyuk 		struct mlx5_mempool_mr *mr = &new_mpr->mrs[i];
1642690b2a88SDmitry Kozlyuk 		const struct mlx5_range *range = &ranges[i];
1643690b2a88SDmitry Kozlyuk 		size_t len = range->end - range->start;
1644690b2a88SDmitry Kozlyuk 
1645690b2a88SDmitry Kozlyuk 		if (share_cache->reg_mr_cb(pd, (void *)range->start, len,
1646690b2a88SDmitry Kozlyuk 		    &mr->pmd_mr) < 0) {
1647690b2a88SDmitry Kozlyuk 			DRV_LOG(ERR,
1648690b2a88SDmitry Kozlyuk 				"Failed to create an MR in PD %p for address range "
1649690b2a88SDmitry Kozlyuk 				"[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
1650690b2a88SDmitry Kozlyuk 				pd, range->start, range->end, len, mp->name);
1651690b2a88SDmitry Kozlyuk 			break;
1652690b2a88SDmitry Kozlyuk 		}
1653690b2a88SDmitry Kozlyuk 		DRV_LOG(DEBUG,
1654690b2a88SDmitry Kozlyuk 			"Created a new MR %#x in PD %p for address range "
1655690b2a88SDmitry Kozlyuk 			"[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
1656690b2a88SDmitry Kozlyuk 			mr->pmd_mr.lkey, pd, range->start, range->end, len,
1657690b2a88SDmitry Kozlyuk 			mp->name);
1658690b2a88SDmitry Kozlyuk 	}
1659690b2a88SDmitry Kozlyuk 	if (i != ranges_n) {
1660690b2a88SDmitry Kozlyuk 		mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
1661690b2a88SDmitry Kozlyuk 		rte_errno = EINVAL;
1662690b2a88SDmitry Kozlyuk 		goto exit;
1663690b2a88SDmitry Kozlyuk 	}
1664690b2a88SDmitry Kozlyuk 	/* Concurrent registration is not supposed to happen. */
1665690b2a88SDmitry Kozlyuk 	rte_rwlock_write_lock(&share_cache->rwlock);
1666690b2a88SDmitry Kozlyuk 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1667690b2a88SDmitry Kozlyuk 	if (mpr == NULL) {
1668690b2a88SDmitry Kozlyuk 		mlx5_mempool_reg_attach(new_mpr);
1669fc59a1ecSMichael Baum 		LIST_INSERT_HEAD(&share_cache->mempool_reg_list, new_mpr, next);
1670690b2a88SDmitry Kozlyuk 		ret = 0;
1671690b2a88SDmitry Kozlyuk 	}
1672690b2a88SDmitry Kozlyuk 	rte_rwlock_write_unlock(&share_cache->rwlock);
1673690b2a88SDmitry Kozlyuk 	if (mpr != NULL) {
1674690b2a88SDmitry Kozlyuk 		DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
1675690b2a88SDmitry Kozlyuk 			mp->name, pd);
1676690b2a88SDmitry Kozlyuk 		mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
1677690b2a88SDmitry Kozlyuk 		rte_errno = EEXIST;
1678690b2a88SDmitry Kozlyuk 		goto exit;
1679690b2a88SDmitry Kozlyuk 	}
1680690b2a88SDmitry Kozlyuk exit:
1681690b2a88SDmitry Kozlyuk 	free(ranges);
1682690b2a88SDmitry Kozlyuk 	return ret;
1683690b2a88SDmitry Kozlyuk }
1684690b2a88SDmitry Kozlyuk 
1685690b2a88SDmitry Kozlyuk static int
168620489176SMichael Baum mlx5_mr_mempool_register_secondary(struct mlx5_common_device *cdev,
168720489176SMichael Baum 				   struct rte_mempool *mp)
1688690b2a88SDmitry Kozlyuk {
168920489176SMichael Baum 	return mlx5_mp_req_mempool_reg(cdev, mp, true);
1690690b2a88SDmitry Kozlyuk }
1691690b2a88SDmitry Kozlyuk 
1692690b2a88SDmitry Kozlyuk /**
1693690b2a88SDmitry Kozlyuk  * Register the memory of a mempool in the protection domain.
1694690b2a88SDmitry Kozlyuk  *
169520489176SMichael Baum  * @param cdev
169620489176SMichael Baum  *   Pointer to the mlx5 common device.
1697690b2a88SDmitry Kozlyuk  * @param mp
1698690b2a88SDmitry Kozlyuk  *   Mempool to register.
1699690b2a88SDmitry Kozlyuk  *
1700690b2a88SDmitry Kozlyuk  * @return
1701690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure and rte_errno is set.
1702690b2a88SDmitry Kozlyuk  */
1703690b2a88SDmitry Kozlyuk int
170420489176SMichael Baum mlx5_mr_mempool_register(struct mlx5_common_device *cdev,
170520489176SMichael Baum 			 struct rte_mempool *mp)
1706690b2a88SDmitry Kozlyuk {
1707c47d7b90SAndrew Rybchenko 	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
1708690b2a88SDmitry Kozlyuk 		return 0;
1709690b2a88SDmitry Kozlyuk 	switch (rte_eal_process_type()) {
1710690b2a88SDmitry Kozlyuk 	case RTE_PROC_PRIMARY:
171120489176SMichael Baum 		return mlx5_mr_mempool_register_primary(&cdev->mr_scache,
171220489176SMichael Baum 							cdev->pd, mp);
1713690b2a88SDmitry Kozlyuk 	case RTE_PROC_SECONDARY:
171420489176SMichael Baum 		return mlx5_mr_mempool_register_secondary(cdev, mp);
1715690b2a88SDmitry Kozlyuk 	default:
1716690b2a88SDmitry Kozlyuk 		return -1;
1717690b2a88SDmitry Kozlyuk 	}
1718690b2a88SDmitry Kozlyuk }
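
/*
 * Usage sketch (hypothetical): register a freshly created pktmbuf pool
 * with a device. "my_cdev" is an assumption; EEXIST is not an error
 * because the pool may already be registered for this PD.
 */
static int
my_setup_pool(struct mlx5_common_device *my_cdev, struct rte_mempool **out)
{
	struct rte_mempool *mp;

	mp = rte_pktmbuf_pool_create("my_pool", 4096, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
	if (mp == NULL)
		return -rte_errno;
	if (mlx5_mr_mempool_register(my_cdev, mp) < 0 &&
	    rte_errno != EEXIST) {
		rte_mempool_free(mp);
		return -rte_errno;
	}
	*out = mp;
	return 0;
}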
1719690b2a88SDmitry Kozlyuk 
1720690b2a88SDmitry Kozlyuk static int
1721690b2a88SDmitry Kozlyuk mlx5_mr_mempool_unregister_primary(struct mlx5_mr_share_cache *share_cache,
1722690b2a88SDmitry Kozlyuk 				   struct rte_mempool *mp)
1723690b2a88SDmitry Kozlyuk {
1724690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
1725690b2a88SDmitry Kozlyuk 	bool standalone = false;
1726690b2a88SDmitry Kozlyuk 
1727690b2a88SDmitry Kozlyuk 	rte_rwlock_write_lock(&share_cache->rwlock);
1728690b2a88SDmitry Kozlyuk 	LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
1729690b2a88SDmitry Kozlyuk 		if (mpr->mp == mp) {
1730690b2a88SDmitry Kozlyuk 			LIST_REMOVE(mpr, next);
1731690b2a88SDmitry Kozlyuk 			standalone = mlx5_mempool_reg_detach(mpr);
1732690b2a88SDmitry Kozlyuk 			if (standalone)
1733690b2a88SDmitry Kozlyuk 				/*
1734690b2a88SDmitry Kozlyuk 				 * The unlock operation below provides a memory
1735690b2a88SDmitry Kozlyuk 				 * barrier due to its store-release semantics.
1736690b2a88SDmitry Kozlyuk 				 */
1737690b2a88SDmitry Kozlyuk 				++share_cache->dev_gen;
1738690b2a88SDmitry Kozlyuk 			break;
1739690b2a88SDmitry Kozlyuk 		}
1740690b2a88SDmitry Kozlyuk 	rte_rwlock_write_unlock(&share_cache->rwlock);
1741690b2a88SDmitry Kozlyuk 	if (mpr == NULL) {
1742690b2a88SDmitry Kozlyuk 		rte_errno = ENOENT;
1743690b2a88SDmitry Kozlyuk 		return -1;
1744690b2a88SDmitry Kozlyuk 	}
1745690b2a88SDmitry Kozlyuk 	mlx5_mempool_reg_destroy(share_cache, mpr, standalone);
1746690b2a88SDmitry Kozlyuk 	return 0;
1747690b2a88SDmitry Kozlyuk }
1748690b2a88SDmitry Kozlyuk 
1749690b2a88SDmitry Kozlyuk static int
175020489176SMichael Baum mlx5_mr_mempool_unregister_secondary(struct mlx5_common_device *cdev,
175120489176SMichael Baum 				     struct rte_mempool *mp)
1752690b2a88SDmitry Kozlyuk {
175320489176SMichael Baum 	return mlx5_mp_req_mempool_reg(cdev, mp, false);
1754690b2a88SDmitry Kozlyuk }
1755690b2a88SDmitry Kozlyuk 
1756690b2a88SDmitry Kozlyuk /**
1757690b2a88SDmitry Kozlyuk  * Unregister the memory of a mempool from the protection domain.
1758690b2a88SDmitry Kozlyuk  *
175920489176SMichael Baum  * @param cdev
176020489176SMichael Baum  *   Pointer to the mlx5 common device.
1761690b2a88SDmitry Kozlyuk  * @param mp
1762690b2a88SDmitry Kozlyuk  *   Mempool to unregister.
1763690b2a88SDmitry Kozlyuk  *
1764690b2a88SDmitry Kozlyuk  * @return
1765690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure and rte_errno is set.
1766690b2a88SDmitry Kozlyuk  */
1767690b2a88SDmitry Kozlyuk int
176820489176SMichael Baum mlx5_mr_mempool_unregister(struct mlx5_common_device *cdev,
176920489176SMichael Baum 			   struct rte_mempool *mp)
1770690b2a88SDmitry Kozlyuk {
1771c47d7b90SAndrew Rybchenko 	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
1772690b2a88SDmitry Kozlyuk 		return 0;
1773690b2a88SDmitry Kozlyuk 	switch (rte_eal_process_type()) {
1774690b2a88SDmitry Kozlyuk 	case RTE_PROC_PRIMARY:
177520489176SMichael Baum 		return mlx5_mr_mempool_unregister_primary(&cdev->mr_scache, mp);
1776690b2a88SDmitry Kozlyuk 	case RTE_PROC_SECONDARY:
177720489176SMichael Baum 		return mlx5_mr_mempool_unregister_secondary(cdev, mp);
1778690b2a88SDmitry Kozlyuk 	default:
1779690b2a88SDmitry Kozlyuk 		return -1;
1780690b2a88SDmitry Kozlyuk 	}
1781690b2a88SDmitry Kozlyuk }
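
/*
 * Matching teardown sketch (hypothetical): unregister before freeing
 * the mempool; ENOENT means the pool was never registered and is not
 * treated as fatal here.
 */
static void
my_teardown_pool(struct mlx5_common_device *my_cdev, struct rte_mempool *mp)
{
	if (mlx5_mr_mempool_unregister(my_cdev, mp) < 0 &&
	    rte_errno != ENOENT)
		DRV_LOG(WARNING, "Cannot unregister mempool %s", mp->name);
	rte_mempool_free(mp);
}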
1782690b2a88SDmitry Kozlyuk 
1783690b2a88SDmitry Kozlyuk /**
1784690b2a88SDmitry Kozlyuk  * Look up an MR key by address in a registered mempool.
1785690b2a88SDmitry Kozlyuk  *
1786690b2a88SDmitry Kozlyuk  * @param mpr
1787690b2a88SDmitry Kozlyuk  *   Mempool registration object.
1788690b2a88SDmitry Kozlyuk  * @param addr
1789690b2a88SDmitry Kozlyuk  *   Address within the mempool.
1790690b2a88SDmitry Kozlyuk  * @param entry
1791690b2a88SDmitry Kozlyuk  *   Bottom-half cache entry to fill.
1792690b2a88SDmitry Kozlyuk  *
1793690b2a88SDmitry Kozlyuk  * @return
1794690b2a88SDmitry Kozlyuk  *   MR key or UINT32_MAX on failure, which can only happen
1795690b2a88SDmitry Kozlyuk  *   if the address is not from within the mempool.
1796690b2a88SDmitry Kozlyuk  */
1797690b2a88SDmitry Kozlyuk static uint32_t
1798690b2a88SDmitry Kozlyuk mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr,
1799690b2a88SDmitry Kozlyuk 			 struct mr_cache_entry *entry)
1800690b2a88SDmitry Kozlyuk {
1801690b2a88SDmitry Kozlyuk 	uint32_t lkey = UINT32_MAX;
1802690b2a88SDmitry Kozlyuk 	unsigned int i;
1803690b2a88SDmitry Kozlyuk 
1804690b2a88SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++) {
1805690b2a88SDmitry Kozlyuk 		const struct mlx5_pmd_mr *mr = &mpr->mrs[i].pmd_mr;
1806690b2a88SDmitry Kozlyuk 		uintptr_t mr_addr = (uintptr_t)mr->addr;
1807690b2a88SDmitry Kozlyuk 
1808690b2a88SDmitry Kozlyuk 		if (mr_addr <= addr) {
1809690b2a88SDmitry Kozlyuk 			lkey = rte_cpu_to_be_32(mr->lkey);
1810690b2a88SDmitry Kozlyuk 			entry->start = mr_addr;
1811690b2a88SDmitry Kozlyuk 			entry->end = mr_addr + mr->len;
1812690b2a88SDmitry Kozlyuk 			entry->lkey = lkey;
1813690b2a88SDmitry Kozlyuk 			break;
1814690b2a88SDmitry Kozlyuk 		}
1815690b2a88SDmitry Kozlyuk 	}
1816690b2a88SDmitry Kozlyuk 	return lkey;
1817690b2a88SDmitry Kozlyuk }
1818690b2a88SDmitry Kozlyuk 
1819690b2a88SDmitry Kozlyuk /**
1820690b2a88SDmitry Kozlyuk  * Update bottom-half cache from the list of mempool registrations.
1821690b2a88SDmitry Kozlyuk  *
1822690b2a88SDmitry Kozlyuk  * @param share_cache
1823690b2a88SDmitry Kozlyuk  *   Pointer to a global shared MR cache.
1824690b2a88SDmitry Kozlyuk  * @param mr_ctrl
1825690b2a88SDmitry Kozlyuk  *   Per-queue MR control handle.
1826690b2a88SDmitry Kozlyuk  * @param entry
1827690b2a88SDmitry Kozlyuk  *   Pointer to an entry in the bottom-half cache to update
1828690b2a88SDmitry Kozlyuk  *   with the MR lkey looked up.
1829690b2a88SDmitry Kozlyuk  * @param mp
1830690b2a88SDmitry Kozlyuk  *   Mempool containing the address.
1831690b2a88SDmitry Kozlyuk  * @param addr
1832690b2a88SDmitry Kozlyuk  *   Address to lookup.
1833690b2a88SDmitry Kozlyuk  * @return
1834690b2a88SDmitry Kozlyuk  *   MR lkey on success, UINT32_MAX on failure.
1835690b2a88SDmitry Kozlyuk  */
1836690b2a88SDmitry Kozlyuk static uint32_t
1837690b2a88SDmitry Kozlyuk mlx5_lookup_mempool_regs(struct mlx5_mr_share_cache *share_cache,
1838690b2a88SDmitry Kozlyuk 			 struct mlx5_mr_ctrl *mr_ctrl,
1839690b2a88SDmitry Kozlyuk 			 struct mr_cache_entry *entry,
1840690b2a88SDmitry Kozlyuk 			 struct rte_mempool *mp, uintptr_t addr)
1841690b2a88SDmitry Kozlyuk {
1842690b2a88SDmitry Kozlyuk 	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
1843690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
1844690b2a88SDmitry Kozlyuk 	uint32_t lkey = UINT32_MAX;
1845690b2a88SDmitry Kozlyuk 
1846690b2a88SDmitry Kozlyuk 	/* If local cache table is full, try to double it. */
1847690b2a88SDmitry Kozlyuk 	if (unlikely(bt->len == bt->size))
1848690b2a88SDmitry Kozlyuk 		mr_btree_expand(bt, bt->size << 1);
1849690b2a88SDmitry Kozlyuk 	/* Look up in mempool registrations. */
1850690b2a88SDmitry Kozlyuk 	rte_rwlock_read_lock(&share_cache->rwlock);
1851690b2a88SDmitry Kozlyuk 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1852690b2a88SDmitry Kozlyuk 	if (mpr != NULL)
1853690b2a88SDmitry Kozlyuk 		lkey = mlx5_mempool_reg_addr2mr(mpr, addr, entry);
1854690b2a88SDmitry Kozlyuk 	rte_rwlock_read_unlock(&share_cache->rwlock);
1855690b2a88SDmitry Kozlyuk 	/*
1856690b2a88SDmitry Kozlyuk 	 * Update local cache. Even if it fails, return the found entry
1857690b2a88SDmitry Kozlyuk 	 * to update top-half cache. Next time, this entry will be found
1858690b2a88SDmitry Kozlyuk 	 * in the global cache.
1859690b2a88SDmitry Kozlyuk 	 */
1860690b2a88SDmitry Kozlyuk 	if (lkey != UINT32_MAX)
1861690b2a88SDmitry Kozlyuk 		mr_btree_insert(bt, entry);
1862690b2a88SDmitry Kozlyuk 	return lkey;
1863690b2a88SDmitry Kozlyuk }
1864690b2a88SDmitry Kozlyuk 
1865690b2a88SDmitry Kozlyuk /**
1866690b2a88SDmitry Kozlyuk  * Bottom-half lookup for the address from the mempool.
1867690b2a88SDmitry Kozlyuk  *
1868690b2a88SDmitry Kozlyuk  * @param share_cache
1869690b2a88SDmitry Kozlyuk  *   Pointer to a global shared MR cache.
1870690b2a88SDmitry Kozlyuk  * @param mr_ctrl
1871690b2a88SDmitry Kozlyuk  *   Per-queue MR control handle.
1872690b2a88SDmitry Kozlyuk  * @param mp
1873690b2a88SDmitry Kozlyuk  *   Mempool containing the address.
1874690b2a88SDmitry Kozlyuk  * @param addr
1875690b2a88SDmitry Kozlyuk  *   Address to lookup.
1876690b2a88SDmitry Kozlyuk  * @return
1877690b2a88SDmitry Kozlyuk  *   MR lkey on success, UINT32_MAX on failure.
1878690b2a88SDmitry Kozlyuk  */
1879690b2a88SDmitry Kozlyuk uint32_t
1880690b2a88SDmitry Kozlyuk mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
1881690b2a88SDmitry Kozlyuk 		      struct mlx5_mr_ctrl *mr_ctrl,
1882690b2a88SDmitry Kozlyuk 		      struct rte_mempool *mp, uintptr_t addr)
1883690b2a88SDmitry Kozlyuk {
1884690b2a88SDmitry Kozlyuk 	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
1885690b2a88SDmitry Kozlyuk 	uint32_t lkey;
1886690b2a88SDmitry Kozlyuk 	uint16_t bh_idx = 0;
1887690b2a88SDmitry Kozlyuk 
1888690b2a88SDmitry Kozlyuk 	/* Binary-search MR translation table. */
1889690b2a88SDmitry Kozlyuk 	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
1890690b2a88SDmitry Kozlyuk 	/* Update top-half cache. */
1891690b2a88SDmitry Kozlyuk 	if (likely(lkey != UINT32_MAX)) {
1892690b2a88SDmitry Kozlyuk 		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
1893690b2a88SDmitry Kozlyuk 	} else {
1894690b2a88SDmitry Kozlyuk 		lkey = mlx5_lookup_mempool_regs(share_cache, mr_ctrl, repl,
1895690b2a88SDmitry Kozlyuk 						mp, addr);
1896690b2a88SDmitry Kozlyuk 		/* Can only fail if the address is not from the mempool. */
1897690b2a88SDmitry Kozlyuk 		if (unlikely(lkey == UINT32_MAX))
1898690b2a88SDmitry Kozlyuk 			return UINT32_MAX;
1899690b2a88SDmitry Kozlyuk 	}
1900690b2a88SDmitry Kozlyuk 	/* Update the most recently used entry. */
1901690b2a88SDmitry Kozlyuk 	mr_ctrl->mru = mr_ctrl->head;
1902690b2a88SDmitry Kozlyuk 	/* Point to the next victim, the oldest. */
1903690b2a88SDmitry Kozlyuk 	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
1904690b2a88SDmitry Kozlyuk 	return lkey;
1905690b2a88SDmitry Kozlyuk }
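
/*
 * Sketch of the top half that fronts this bottom half (an assumption
 * mirroring the linear-cache pattern in this file): scan the small
 * per-queue array first and fall back here only on a miss.
 */
static __rte_always_inline uint32_t
my_mempool2mr(struct mlx5_mr_share_cache *share_cache,
	      struct mlx5_mr_ctrl *mr_ctrl, struct rte_mempool *mp,
	      uintptr_t addr)
{
	uint16_t i;

	for (i = 0; i < MLX5_MR_CACHE_N; i++) {
		struct mr_cache_entry *e = &mr_ctrl->cache[i];

		if (e->start <= addr && addr < e->end)
			return e->lkey; /* Top-half hit. */
	}
	return mlx5_mr_mempool2mr_bh(share_cache, mr_ctrl, mp, addr);
}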
1906fb690f71SMichael Baum 
19076a4e4385SMichael Baum uint32_t
190820489176SMichael Baum mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
1909fc59a1ecSMichael Baum {
1910fc59a1ecSMichael Baum 	uint32_t lkey;
1911fc59a1ecSMichael Baum 	uintptr_t addr = (uintptr_t)mb->buf_addr;
1912334ed198SMichael Baum 	struct mlx5_common_device *cdev = mr_ctrl->cdev;
1913fc59a1ecSMichael Baum 
1914fc59a1ecSMichael Baum 	if (cdev->config.mr_mempool_reg_en) {
1915fc59a1ecSMichael Baum 		struct rte_mempool *mp = NULL;
1916fc59a1ecSMichael Baum 		struct mlx5_mprq_buf *buf;
1917fc59a1ecSMichael Baum 
1918fc59a1ecSMichael Baum 		if (!RTE_MBUF_HAS_EXTBUF(mb)) {
1919fc59a1ecSMichael Baum 			mp = mlx5_mb2mp(mb);
1920fc59a1ecSMichael Baum 		} else if (mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
1921fc59a1ecSMichael Baum 			/* Recover MPRQ mempool. */
1922fc59a1ecSMichael Baum 			buf = mb->shinfo->fcb_opaque;
1923fc59a1ecSMichael Baum 			mp = buf->mp;
1924fc59a1ecSMichael Baum 		}
1925fc59a1ecSMichael Baum 		if (mp != NULL) {
1926fc59a1ecSMichael Baum 			lkey = mlx5_mr_mempool2mr_bh(&cdev->mr_scache,
1927fc59a1ecSMichael Baum 						     mr_ctrl, mp, addr);
1928fc59a1ecSMichael Baum 			/*
1929fc59a1ecSMichael Baum 			 * Lookup can only fail on invalid input, e.g. "addr"
1930fc59a1ecSMichael Baum 			 * is not from "mp" or "mp" has RTE_MEMPOOL_F_NON_IO set.
1931fc59a1ecSMichael Baum 			 */
1932fc59a1ecSMichael Baum 			if (lkey != UINT32_MAX)
1933fc59a1ecSMichael Baum 				return lkey;
1934fc59a1ecSMichael Baum 		}
1935fc59a1ecSMichael Baum 		/* Fallback for generic mechanism in corner cases. */
1936fc59a1ecSMichael Baum 	}
193720489176SMichael Baum 	return mlx5_mr_addr2mr_bh(mr_ctrl, addr);
1938fc59a1ecSMichael Baum }
1939