/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <stddef.h>

#include <rte_eal_memconfig.h>
#include <rte_eal_paging.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5_glue.h"
#include "mlx5_common.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
#include "mlx5_common_os.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

/* Virtual memory range. */
struct mlx5_range {
	uintptr_t start;
	uintptr_t end;
};

/** Memory region for a mempool. */
struct mlx5_mempool_mr {
	struct mlx5_pmd_mr pmd_mr;
	uint32_t refcnt; /**< Number of mempools sharing this MR. */
};

/* Mempool registration. */
struct mlx5_mempool_reg {
	LIST_ENTRY(mlx5_mempool_reg) next;
	/** Registered mempool, used to designate registrations. */
	struct rte_mempool *mp;
	/** Memory regions for the address ranges of the mempool. */
	struct mlx5_mempool_mr *mrs;
	/** Number of memory regions. */
	unsigned int mrs_n;
	/** Whether the MRs were created for external pinned memory. */
	bool is_extmem;
};

void
mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
{
	struct mlx5_mprq_buf *buf = opaque;

	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
		rte_mempool_put(buf->mp, buf);
	} else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
					       __ATOMIC_RELAXED) == 0)) {
		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
		rte_mempool_put(buf->mp, buf);
	}
}
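/*
 * Note on the reference counting above: refcnt == 1 is the "free" state, so
 * the last owner can return the buffer to the mempool directly. Otherwise the
 * count is decremented, and the thread that brings it to 0 restores the
 * "free" state (refcnt = 1) before returning the buffer to the mempool.
 */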

/**
 * Expand the B-tree table to a given size. Must not be called while holding
 * memory_hotplug_lock or share_cache.rwlock because of rte_realloc().
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * The downside of directly using rte_realloc() is that SOCKET_ID_ANY
	 * is used inside if there is no room to expand. Because this is a
	 * rare case and part of a very slow path, it is acceptable.
	 * Initially cache_bh[] is given practically enough space, and once it
	 * has been expanded, expansion should never be needed again.
	 */
	mem = mlx5_realloc(bt->table, MLX5_MEM_RTE | MLX5_MEM_ZERO,
			   n * sizeof(struct mr_cache_entry), 0, SOCKET_ID_ANY);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up an LKey in a given B-tree lookup table, store the last index, and
 * return the LKey found.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, returns the index where the
 *   search stopped so that it can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	MLX5_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX5_ASSERT(bt->len == 0 || (lkp_tbl[0].start == 0 &&
				     lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	MLX5_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}
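/*
 * Illustration of the lookup above: entries are kept sorted by 'start' with
 * the sentinel { start = 0, lkey = UINT32_MAX } at index 0, so the binary
 * search converges on the entry with the greatest 'start' <= addr. The final
 * check against 'end' rejects an addr that falls into a gap between cached
 * ranges.
 */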

/**
 * Insert an entry to B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	MLX5_ASSERT(bt != NULL);
	MLX5_ASSERT(bt->len <= bt->size);
	MLX5_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}
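/*
 * Note on the insertion above: the table stays sorted by 'start'. The new
 * entry goes right after the lookup position and the tail of the array is
 * shifted up by memmove(). On overflow only the flag is raised here; the
 * table cannot be expanded in this context (see mr_btree_expand() and its
 * locking constraints).
 */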

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	MLX5_ASSERT(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				sizeof(struct mr_cache_entry) * n,
				0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DRV_LOG(DEBUG,
			"failed to allocate memory for btree cache on socket "
			"%d", socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mr_cache_entry) {
		.lkey = UINT32_MAX,
	};
	DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	return 0;
}
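/*
 * A minimal lifecycle sketch of the B-tree cache, for illustration only (the
 * variable names are hypothetical, not taken from a real caller):
 *
 *	struct mlx5_mr_btree bt;
 *	struct mr_cache_entry e = { .start = a, .end = a + len, .lkey = k };
 *	uint16_t idx;
 *	uint32_t lkey;
 *
 *	memset(&bt, 0, sizeof(bt));
 *	if (mlx5_mr_btree_init(&bt, MLX5_MR_BTREE_CACHE_N,
 *			       SOCKET_ID_ANY) == 0) {
 *		mr_btree_insert(&bt, &e);
 *		lkey = mr_btree_lookup(&bt, &idx, a);
 *		mlx5_mr_btree_free(&bt);
 *	}
 */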

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	mlx5_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	int idx;
	struct mr_cache_entry *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mr_cache_entry *entry = &lkp_tbl[idx];

		DRV_LOG(DEBUG, "B-tree(%p)[%u],"
		      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}

/**
 * Initialize per-queue MR control descriptor.
 *
 * @param mr_ctrl
 *   Pointer to MR control structure.
 * @param dev_gen_ptr
 *   Pointer to generation number of global cache.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
		  int socket)
{
	if (mr_ctrl == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Save pointer of global generation number to check memory event. */
	mr_ctrl->dev_gen_ptr = dev_gen_ptr;
	/* Initialize B-tree and allocate memory for bottom-half cache table. */
	return mlx5_mr_btree_init(&mr_ctrl->cache_bh, MLX5_MR_BTREE_CACHE_N,
				  socket);
}

/**
 * Find a virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have memseg list. */
	if (mr->msl == NULL) {
		MLX5_ASSERT(mr->ms_bmp_n == 1);
		MLX5_ASSERT(mr->ms_n == 1);
		MLX5_ASSERT(base_idx == 0);
		/*
		 * Can't search it from memseg list but get it directly from
		 * pmd_mr as there's only one chunk.
		 */
		entry->start = (uintptr_t)mr->pmd_mr.addr;
		entry->end = (uintptr_t)mr->pmd_mr.addr + mr->pmd_mr.len;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
	}
	return idx;
}
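/*
 * A chunk, as found above, is a maximal run of set bits in ms_bmp, i.e. a
 * maximal range of still-valid, virtually contiguous memsegs. Callers restart
 * the scan from the returned index until it reaches ms_bmp_n, as in the
 * insertion and lookup loops below.
 */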

/**
 * Insert an MR to the global B-tree cache. It may fail when the cache is low
 * on memory; then this entry will have to be found again by
 * mlx5_mr_lookup_list() in mlx5_mr_create() on a miss.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr)
{
	unsigned int n;

	DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
		(void *)mr, (void *)share_cache);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * because of deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up address on global MR cache.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr)
{
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed since it failed to expand the
	 * B-tree table, it can't have all the existing MRs. Then, the address
	 * has to be searched by traversing the original MR list instead, which
	 * is a very slow path. Otherwise, the global cache is all inclusive.
	 */
	if (!unlikely(share_cache->cache.overflow)) {
		lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*share_cache->cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mlx5_mr_lookup_list(share_cache, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}

/**
 * Free MR resources. The MR lock must not be held to avoid a deadlock:
 * rte_free() can raise a memory free event and the callback function would
 * spin on the lock.
 *
 * @param mr
 *   Pointer to MR to free.
 */
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	dereg_mr_cb(&mr->pmd_mr);
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	mlx5_free(mr);
}

void
mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
	/* Flush cache to rebuild. */
	share_cache->cache.len = 1;
	share_cache->cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr)
		if (mlx5_mr_insert_cache(share_cache, mr) < 0)
			return;
}
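/*
 * Note: the rebuild above resets cache.len to 1 rather than 0, which
 * preserves the sentinel entry placed at index 0 by mlx5_mr_btree_init() and
 * relied upon by the binary search in mr_btree_lookup().
 */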

/**
 * Release resources of detached MR having no online entry.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
static void
mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * An MR can't be freed while holding the lock because rte_free()
	 * could invoke the memory free callback function, which would be a
	 * deadlock situation.
	 */
	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = share_cache->mr_free_list;
	LIST_INIT(&share_cache->mr_free_list);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	}
}
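/*
 * Garbage collection is opportunistic by design: it runs from the MR creation
 * slow path (see mlx5_mr_create_primary() below) rather than from the memory
 * event callback, where freeing the MRs would self-deadlock on the lock.
 */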

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called from a secondary process; a request is then sent
 * to the primary process in order to create an MR for the address. As the
 * global MR list is in shared memory, the following LKey lookup should
 * succeed unless the request fails.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_secondary(struct mlx5_common_device *cdev,
			 struct mlx5_mr_share_cache *share_cache,
			 struct mr_cache_entry *entry, uintptr_t addr)
{
	int ret;

	DRV_LOG(DEBUG, "Requesting MR creation for address (%p)", (void *)addr);
	ret = mlx5_mp_req_mr_create(cdev, addr);
	if (ret) {
		DRV_LOG(DEBUG, "Failed to request MR creation for address (%p)",
			(void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	DRV_LOG(DEBUG, "MR CREATED by primary process for %p:\n"
		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
		(void *)addr, entry->start, entry->end, entry->lkey);
	return entry->lkey;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register entire virtually contiguous memory chunk around the address.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en)
{
	struct mr_find_contig_memsegs_data data = {.addr = addr, };
	struct mr_find_contig_memsegs_data data_re;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	int ms_idx_shift = -1;
	uint32_t bmp_size;
	void *bmp_mem;
	uint32_t ms_n;
	uint32_t n;
	size_t len;

	DRV_LOG(DEBUG, "Creating an MR using address (%p)", (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called while holding
	 * either memory_hotplug_lock or share_cache->rwlock. MRs on the free
	 * list have been detached by the memory free event but couldn't be
	 * released inside the callback due to deadlock. As a result, releasing
	 * resources is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(share_cache);
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register maximum range.
	 * In the best case where mempools are not dynamically recreated and
	 * '--socket-mem' is specified as an EAL option, it is very likely to
	 * have only one MR(LKey) per socket and per hugepage size even though
	 * the system memory is highly fragmented. As the whole memory chunk
	 * will be pinned by kernel, it can't be reused unless the entire chunk
	 * is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may drop performance if there
	 * are many MRs to lookup on the datapath.
	 */
	if (!mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"Unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX5_ASSERT(data.msl);
	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX5_ASSERT(ms);
	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DRV_LOG(DEBUG, "Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " page_sz=0x%" PRIx64 ", ms_n=%u",
	      (void *)addr, data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE) +
			 bmp_size, RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DRV_LOG(DEBUG, "Unable to allocate memory for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize memseg bitmap. To
	 * see if a memseg of ms_idx in the memseg-list is still valid, check:
	 *	rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DRV_LOG(DEBUG, "Unable to initialize bitmap for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Should recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there are any
	 * memory-related calls in a critical path, resource allocation above
	 * can't be locked. If the memory has been changed at this point, try
	 * again with just a single page. If not, go on with the big chunk
	 * atomically from here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DRV_LOG(DEBUG,
			"Unable to find virtually contiguous chunk for address "
			"(%p). rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		goto alloc_resources;
	}
	MLX5_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&share_cache->rwlock);
	/*
	 * Check that the address is really missing. If another thread already
	 * created one or it is not found due to overflow, abort and return.
	 */
	if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to
		 * low-on-memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&share_cache->cache, entry);
		DRV_LOG(DEBUG, "Found MR for %p on final lookup, abort",
			(void *)addr);
		rte_rwlock_write_unlock(&share_cache->rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mr_cache_entry ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
		    UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create an MR for the memory chunk. Verbs: ibv_reg_mr() can
	 * be called while holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(DEBUG, "Failed to create an MR for address (%p)",
		      (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	MLX5_ASSERT((uintptr_t)mr->pmd_mr.addr == data.start);
	MLX5_ASSERT(mr->pmd_mr.len);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	DRV_LOG(DEBUG, "MR CREATED (%p) for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      (void *)mr, (void *)addr, data.start, data.end,
	      rte_cpu_to_be_32(mr->pmd_mr.lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&share_cache->rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, as this can be called in a datapath, a warning
	 * message per error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
	 * inside.
	 */
	mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	return UINT32_MAX;
}
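/*
 * Locking order in mlx5_mr_create_primary() above: the EAL memory read lock
 * (rte_mcfg_mem_read_lock()) is taken first to freeze the memseg layout, then
 * share_cache->rwlock for the cache update, and both are dropped in reverse
 * order. All allocations happen before either lock is taken, and the layout
 * is re-validated afterwards (data vs. data_re) to close the race window.
 */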

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from both primary and secondary processes.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_create(struct mlx5_common_device *cdev,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_mr_create_primary(cdev->pd, share_cache, entry, addr,
					     cdev->config.mr_ext_memseg_en);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mr_create_secondary(cdev, share_cache, entry, addr);
		break;
	default:
		break;
	}
	return ret;
}

/**
 * Look up address in the global MR cache table. If not found, create a new MR.
 * Insert the found/created entry to local bottom-half cache table.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_lookup_caches(struct mlx5_mr_ctrl *mr_ctrl,
		 struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr_share_cache *share_cache =
		container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
			     dev_gen);
	struct mlx5_common_device *cdev =
		container_of(share_cache, struct mlx5_common_device, mr_scache);
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint32_t lkey;
	uint16_t idx;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*share_cache->cache.table)[idx];
		rte_rwlock_read_unlock(&share_cache->rwlock);
		/*
		 * Update local cache. Even if it fails, return the found entry
		 * to update top-half cache. Next time, this entry will be found
		 * in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&share_cache->rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(cdev, share_cache, entry, addr);
	/*
	 * Update the local cache if successfully created a new global MR. Even
	 * if failed to create one, there's no action to take in this datapath
	 * code. As returning LKey is invalid, this will eventually make HW
	 * fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on datapath. First search in cache_bh[] and if
 * it misses, search in the global MR cache table and update the new entry to
 * per-queue local caches.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_mr_addr2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in local lookup table, search in the global cache
		 * and local cache_bh[] will be updated inside if possible.
		 * Top-half cache entry will also be updated.
		 */
		lkey = mr_lookup_caches(mr_ctrl, repl, addr);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}
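/*
 * Per-queue caching is two-level: the top half is the small cache[] array of
 * MLX5_MR_CACHE_N entries scanned linearly by the inline datapath helpers in
 * the corresponding header, replaced in least-recently-used fashion via the
 * head/mru indices; cache_bh is the per-queue B-tree consulted above on a
 * top-half miss, before falling back to the global cache and, ultimately, to
 * MR creation.
 */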

/**
 * Release all the created MRs and resources of the global MR cache of a
 * device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;

	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach from MR list and move to free list. */
	mr_next = LIST_FIRST(&share_cache->mr_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
	}
	LIST_INIT(&share_cache->mr_list);
	/* Free global cache. */
	mlx5_mr_btree_free(&share_cache->cache);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(share_cache);
}

/**
 * Initialize global MR cache of a device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket)
{
	/* Set the reg_mr and dereg_mr callback functions */
	mlx5_os_set_reg_mr_cb(&share_cache->reg_mr_cb,
			      &share_cache->dereg_mr_cb);
	rte_rwlock_init(&share_cache->rwlock);
	rte_rwlock_init(&share_cache->mprwlock);
	share_cache->mp_cb_registered = 0;
	/* Initialize B-tree and allocate memory for global MR cache table. */
	return mlx5_mr_btree_init(&share_cache->cache,
				  MLX5_MR_BTREE_CACHE_N * 2, socket);
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR local cache.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}
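/*
 * Generation numbers tie the flush above to global invalidation: the global
 * side bumps dev_gen when MRs are invalidated (typically from the memory
 * event path), and datapath code compares its private cur_gen against
 * *dev_gen_ptr to decide when this flush must run, so stale LKeys do not
 * survive a cache rebuild.
 */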

/**
 * Creates a memory region for external memory, that is memory which is not
 * part of the DPDK memory segments.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param addr
 *   Starting virtual address of memory.
 * @param len
 *   Length of memory segment being mapped.
 * @param socket_id
 *   Socket to allocate heap memory for the control structures.
 *
 * @return
 *   Pointer to MR structure on success, NULL otherwise.
 */
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
		   mlx5_reg_mr_t reg_mr_cb)
{
	struct mlx5_mr *mr = NULL;

	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE),
			 RTE_CACHE_LINE_SIZE, socket_id);
	if (mr == NULL)
		return NULL;
	reg_mr_cb(pd, (void *)addr, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(WARNING,
			"Failed to create MR for address (%p)",
			(void *)addr);
		mlx5_free(mr);
		return NULL;
	}
1131b8dc6b0eSVu Pham 	mr->msl = NULL; /* Mark it is external memory. */
1132b8dc6b0eSVu Pham 	mr->ms_bmp = NULL;
1133b8dc6b0eSVu Pham 	mr->ms_n = 1;
1134b8dc6b0eSVu Pham 	mr->ms_bmp_n = 1;
1135b8dc6b0eSVu Pham 	DRV_LOG(DEBUG,
1136b8dc6b0eSVu Pham 		"MR CREATED (%p) for external memory %p:\n"
1137b8dc6b0eSVu Pham 		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
1138b8dc6b0eSVu Pham 		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
1139b8dc6b0eSVu Pham 		(void *)mr, (void *)addr,
114056d20677SOphir Munk 		addr, addr + len, rte_cpu_to_be_32(mr->pmd_mr.lkey),
1141b8dc6b0eSVu Pham 		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
1142b8dc6b0eSVu Pham 	return mr;
1143b8dc6b0eSVu Pham }
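
/*
 * A minimal usage sketch (hypothetical caller): registering an
 * application-provided buffer. In real callers the PD and the
 * registration callback come from the common device (cdev->pd and
 * mr_scache.reg_mr_cb); the names used here are illustrative only.
 */
static __rte_unused struct mlx5_mr *
example_register_extmem(void *pd, void *buf, size_t size,
			mlx5_reg_mr_t reg_mr_cb)
{
	/* NULL means reg_mr_cb failed; nothing is left allocated. */
	return mlx5_create_mr_ext(pd, (uintptr_t)buf, size, SOCKET_ID_ANY,
				  reg_mr_cb);
}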
1144b8dc6b0eSVu Pham 
1145b8dc6b0eSVu Pham /**
11462f6c2adbSMichael Baum  * Callback for memory free event. Iterate over the freed memsegs and check
11472f6c2adbSMichael Baum  * whether each belongs to an existing MR. If so, clear the corresponding bit
11482f6c2adbSMichael Baum  * in the MR's bitmap. As a result, the MR becomes fragmented. If it becomes
11492f6c2adbSMichael Baum  * empty, the MR will be freed later by mlx5_mr_garbage_collect(). Even if this
11502f6c2adbSMichael Baum  * callback is called from a secondary process, the garbage collector will run
11512f6c2adbSMichael Baum  * in the primary process, as a secondary process can't call mlx5_mr_create().
11522f6c2adbSMichael Baum  *
11532f6c2adbSMichael Baum  * The global cache must be rebuilt if there's any change and this event has to
11542f6c2adbSMichael Baum  * be propagated to dataplane threads to flush the local caches.
11552f6c2adbSMichael Baum  *
11562f6c2adbSMichael Baum  * @param share_cache
11572f6c2adbSMichael Baum  *   Pointer to a global shared MR cache.
11582f6c2adbSMichael Baum  * @param ibdev_name
11592f6c2adbSMichael Baum  *   Name of ibv device.
11602f6c2adbSMichael Baum  * @param addr
11612f6c2adbSMichael Baum  *   Address of freed memory.
11622f6c2adbSMichael Baum  * @param len
11632f6c2adbSMichael Baum  *   Size of freed memory.
11642f6c2adbSMichael Baum  */
11652f6c2adbSMichael Baum void
11662f6c2adbSMichael Baum mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
11672f6c2adbSMichael Baum 		     const char *ibdev_name, const void *addr, size_t len)
11682f6c2adbSMichael Baum {
11692f6c2adbSMichael Baum 	const struct rte_memseg_list *msl;
11702f6c2adbSMichael Baum 	struct mlx5_mr *mr;
11712f6c2adbSMichael Baum 	int ms_n;
11722f6c2adbSMichael Baum 	int i;
11732f6c2adbSMichael Baum 	int rebuild = 0;
11742f6c2adbSMichael Baum 
11752f6c2adbSMichael Baum 	DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
11762f6c2adbSMichael Baum 		ibdev_name, addr, len);
11772f6c2adbSMichael Baum 	msl = rte_mem_virt2memseg_list(addr);
11782f6c2adbSMichael Baum 	/* addr and len must be page-aligned. */
11792f6c2adbSMichael Baum 	MLX5_ASSERT((uintptr_t)addr ==
11802f6c2adbSMichael Baum 		    RTE_ALIGN((uintptr_t)addr, msl->page_sz));
11812f6c2adbSMichael Baum 	MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
11822f6c2adbSMichael Baum 	ms_n = len / msl->page_sz;
11832f6c2adbSMichael Baum 	rte_rwlock_write_lock(&share_cache->rwlock);
11842f6c2adbSMichael Baum 	/* Clear bits of freed memsegs from MR. */
11852f6c2adbSMichael Baum 	for (i = 0; i < ms_n; ++i) {
11862f6c2adbSMichael Baum 		const struct rte_memseg *ms;
11872f6c2adbSMichael Baum 		struct mr_cache_entry entry;
11882f6c2adbSMichael Baum 		uintptr_t start;
11892f6c2adbSMichael Baum 		int ms_idx;
11902f6c2adbSMichael Baum 		uint32_t pos;
11912f6c2adbSMichael Baum 
11922f6c2adbSMichael Baum 		/* Find MR having this memseg. */
11932f6c2adbSMichael Baum 		start = (uintptr_t)addr + i * msl->page_sz;
11942f6c2adbSMichael Baum 		mr = mlx5_mr_lookup_list(share_cache, &entry, start);
11952f6c2adbSMichael Baum 		if (mr == NULL)
11962f6c2adbSMichael Baum 			continue;
11972f6c2adbSMichael Baum 		MLX5_ASSERT(mr->msl); /* Can't be external memory. */
11982f6c2adbSMichael Baum 		ms = rte_mem_virt2memseg((void *)start, msl);
11992f6c2adbSMichael Baum 		MLX5_ASSERT(ms != NULL);
12002f6c2adbSMichael Baum 		MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
12012f6c2adbSMichael Baum 		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
12022f6c2adbSMichael Baum 		pos = ms_idx - mr->ms_base_idx;
12032f6c2adbSMichael Baum 		MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
12042f6c2adbSMichael Baum 		MLX5_ASSERT(pos < mr->ms_bmp_n);
12052f6c2adbSMichael Baum 		DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
12062f6c2adbSMichael Baum 			ibdev_name, (void *)mr, pos, (void *)start);
12072f6c2adbSMichael Baum 		rte_bitmap_clear(mr->ms_bmp, pos);
12082f6c2adbSMichael Baum 		if (--mr->ms_n == 0) {
12092f6c2adbSMichael Baum 			LIST_REMOVE(mr, mr);
12102f6c2adbSMichael Baum 			LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
12112f6c2adbSMichael Baum 			DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
12122f6c2adbSMichael Baum 				ibdev_name, (void *)mr);
12132f6c2adbSMichael Baum 		}
12142f6c2adbSMichael Baum 		/*
12152f6c2adbSMichael Baum 		 * The MR is fragmented or will be freed; the global cache
12162f6c2adbSMichael Baum 		 * must be rebuilt.
12172f6c2adbSMichael Baum 		 */
12182f6c2adbSMichael Baum 		rebuild = 1;
12192f6c2adbSMichael Baum 	}
12202f6c2adbSMichael Baum 	if (rebuild) {
12212f6c2adbSMichael Baum 		mlx5_mr_rebuild_cache(share_cache);
12222f6c2adbSMichael Baum 		/*
12232f6c2adbSMichael Baum 		 * No explicit wmb is needed after updating dev_gen due to
12242f6c2adbSMichael Baum 		 * store-release ordering in unlock that provides the
12252f6c2adbSMichael Baum 		 * implicit barrier at the software visible level.
12262f6c2adbSMichael Baum 		 */
12272f6c2adbSMichael Baum 		++share_cache->dev_gen;
12282f6c2adbSMichael Baum 		DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
12292f6c2adbSMichael Baum 			share_cache->dev_gen);
12302f6c2adbSMichael Baum 	}
12312f6c2adbSMichael Baum 	rte_rwlock_write_unlock(&share_cache->rwlock);
12322f6c2adbSMichael Baum }
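
/*
 * A sketch of the expected caller context (hypothetical helper): the EAL
 * memory event callback in the PMD walks the freed region and notifies
 * every shared cache. The device name "mlx5_0" is illustrative only.
 */
static __rte_unused void
example_mem_free_event(struct mlx5_mr_share_cache *share_cache,
		       const void *addr, size_t len)
{
	mlx5_free_mr_by_addr(share_cache, "mlx5_0", addr, len);
}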
12332f6c2adbSMichael Baum 
12342f6c2adbSMichael Baum /**
1235b8dc6b0eSVu Pham  * Dump all the created MRs and the global cache entries.
1236b8dc6b0eSVu Pham  *
1237fc59a1ecSMichael Baum  * @param share_cache
1238fc59a1ecSMichael Baum  *   Pointer to a global shared MR cache.
1239b8dc6b0eSVu Pham  */
1240b8dc6b0eSVu Pham void
1241b8dc6b0eSVu Pham mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
1242b8dc6b0eSVu Pham {
1243b8dc6b0eSVu Pham #ifdef RTE_LIBRTE_MLX5_DEBUG
1244b8dc6b0eSVu Pham 	struct mlx5_mr *mr;
1245b8dc6b0eSVu Pham 	int mr_n = 0;
1246b8dc6b0eSVu Pham 	int chunk_n = 0;
1247b8dc6b0eSVu Pham 
1248b8dc6b0eSVu Pham 	rte_rwlock_read_lock(&share_cache->rwlock);
1249b8dc6b0eSVu Pham 	/* Iterate all the existing MRs. */
1250b8dc6b0eSVu Pham 	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
1251b8dc6b0eSVu Pham 		unsigned int n;
1252b8dc6b0eSVu Pham 
125387acdcc7SThomas Monjalon 		DRV_LOG(DEBUG, "MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
125456d20677SOphir Munk 		      mr_n++, rte_cpu_to_be_32(mr->pmd_mr.lkey),
1255b8dc6b0eSVu Pham 		      mr->ms_n, mr->ms_bmp_n);
1256b8dc6b0eSVu Pham 		if (mr->ms_n == 0)
1257b8dc6b0eSVu Pham 			continue;
1258b8dc6b0eSVu Pham 		for (n = 0; n < mr->ms_bmp_n; ) {
1259b8dc6b0eSVu Pham 			struct mr_cache_entry ret = { 0, };
1260b8dc6b0eSVu Pham 
1261b8dc6b0eSVu Pham 			n = mr_find_next_chunk(mr, &ret, n);
1262b8dc6b0eSVu Pham 			if (!ret.end)
1263b8dc6b0eSVu Pham 				break;
126487acdcc7SThomas Monjalon 			DRV_LOG(DEBUG,
126587acdcc7SThomas Monjalon 				"  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
1266b8dc6b0eSVu Pham 				chunk_n++, ret.start, ret.end);
1267b8dc6b0eSVu Pham 		}
1268b8dc6b0eSVu Pham 	}
126987acdcc7SThomas Monjalon 	DRV_LOG(DEBUG, "Dumping global cache %p", (void *)share_cache);
1270b8dc6b0eSVu Pham 	mlx5_mr_btree_dump(&share_cache->cache);
1271b8dc6b0eSVu Pham 	rte_rwlock_read_unlock(&share_cache->rwlock);
1272b8dc6b0eSVu Pham #endif
1273b8dc6b0eSVu Pham }
1274690b2a88SDmitry Kozlyuk 
1275690b2a88SDmitry Kozlyuk static int
1276690b2a88SDmitry Kozlyuk mlx5_range_compare_start(const void *lhs, const void *rhs)
1277690b2a88SDmitry Kozlyuk {
1278690b2a88SDmitry Kozlyuk 	const struct mlx5_range *r1 = lhs, *r2 = rhs;
1279690b2a88SDmitry Kozlyuk 
1280690b2a88SDmitry Kozlyuk 	if (r1->start > r2->start)
1281690b2a88SDmitry Kozlyuk 		return 1;
1282690b2a88SDmitry Kozlyuk 	else if (r1->start < r2->start)
1283690b2a88SDmitry Kozlyuk 		return -1;
1284690b2a88SDmitry Kozlyuk 	return 0;
1285690b2a88SDmitry Kozlyuk }
1286690b2a88SDmitry Kozlyuk 
1287690b2a88SDmitry Kozlyuk static void
1288690b2a88SDmitry Kozlyuk mlx5_range_from_mempool_chunk(struct rte_mempool *mp, void *opaque,
1289690b2a88SDmitry Kozlyuk 			      struct rte_mempool_memhdr *memhdr,
1290690b2a88SDmitry Kozlyuk 			      unsigned int idx)
1291690b2a88SDmitry Kozlyuk {
1292690b2a88SDmitry Kozlyuk 	struct mlx5_range *ranges = opaque, *range = &ranges[idx];
1293690b2a88SDmitry Kozlyuk 	uint64_t page_size = rte_mem_page_size();
1294690b2a88SDmitry Kozlyuk 
1295690b2a88SDmitry Kozlyuk 	RTE_SET_USED(mp);
1296690b2a88SDmitry Kozlyuk 	range->start = RTE_ALIGN_FLOOR((uintptr_t)memhdr->addr, page_size);
1297690b2a88SDmitry Kozlyuk 	range->end = RTE_ALIGN_CEIL(range->start + memhdr->len, page_size);
1298690b2a88SDmitry Kozlyuk }
1299690b2a88SDmitry Kozlyuk 
1300690b2a88SDmitry Kozlyuk /**
13017297d2cdSDmitry Kozlyuk  * Collect page-aligned memory ranges of the mempool.
13027297d2cdSDmitry Kozlyuk  */
13037297d2cdSDmitry Kozlyuk static int
13047297d2cdSDmitry Kozlyuk mlx5_mempool_get_chunks(struct rte_mempool *mp, struct mlx5_range **out,
13057297d2cdSDmitry Kozlyuk 			unsigned int *out_n)
13067297d2cdSDmitry Kozlyuk {
13077297d2cdSDmitry Kozlyuk 	unsigned int n;
13087297d2cdSDmitry Kozlyuk 
1309e4c402afSDmitry Kozlyuk 	DRV_LOG(DEBUG, "Collecting chunks of regular mempool %s", mp->name);
13107297d2cdSDmitry Kozlyuk 	n = mp->nb_mem_chunks;
131108ac0358SDmitry Kozlyuk 	*out = calloc(n, sizeof(**out));
131208ac0358SDmitry Kozlyuk 	if (*out == NULL)
13137297d2cdSDmitry Kozlyuk 		return -1;
131408ac0358SDmitry Kozlyuk 	rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, *out);
13157297d2cdSDmitry Kozlyuk 	*out_n = n;
13167297d2cdSDmitry Kozlyuk 	return 0;
13177297d2cdSDmitry Kozlyuk }
13187297d2cdSDmitry Kozlyuk 
13197297d2cdSDmitry Kozlyuk struct mlx5_mempool_get_extmem_data {
13207297d2cdSDmitry Kozlyuk 	struct mlx5_range *heap;
13217297d2cdSDmitry Kozlyuk 	unsigned int heap_size;
13227297d2cdSDmitry Kozlyuk 	int ret;
13237297d2cdSDmitry Kozlyuk };
13247297d2cdSDmitry Kozlyuk 
13257297d2cdSDmitry Kozlyuk static void
13267297d2cdSDmitry Kozlyuk mlx5_mempool_get_extmem_cb(struct rte_mempool *mp, void *opaque,
13277297d2cdSDmitry Kozlyuk 			   void *obj, unsigned int obj_idx)
13287297d2cdSDmitry Kozlyuk {
13297297d2cdSDmitry Kozlyuk 	struct mlx5_mempool_get_extmem_data *data = opaque;
13307297d2cdSDmitry Kozlyuk 	struct rte_mbuf *mbuf = obj;
13317297d2cdSDmitry Kozlyuk 	uintptr_t addr = (uintptr_t)mbuf->buf_addr;
13327297d2cdSDmitry Kozlyuk 	struct mlx5_range *seg, *heap;
13337297d2cdSDmitry Kozlyuk 	struct rte_memseg_list *msl;
13347297d2cdSDmitry Kozlyuk 	size_t page_size;
13357297d2cdSDmitry Kozlyuk 	uintptr_t page_start;
13367297d2cdSDmitry Kozlyuk 	unsigned int pos = 0, len = data->heap_size, delta;
13377297d2cdSDmitry Kozlyuk 
13387297d2cdSDmitry Kozlyuk 	RTE_SET_USED(mp);
13397297d2cdSDmitry Kozlyuk 	RTE_SET_USED(obj_idx);
13407297d2cdSDmitry Kozlyuk 	if (data->ret < 0)
13417297d2cdSDmitry Kozlyuk 		return;
13427297d2cdSDmitry Kozlyuk 	/* Binary search for an already visited page. */
13437297d2cdSDmitry Kozlyuk 	while (len > 1) {
13447297d2cdSDmitry Kozlyuk 		delta = len / 2;
13457297d2cdSDmitry Kozlyuk 		if (addr < data->heap[pos + delta].start) {
13467297d2cdSDmitry Kozlyuk 			len = delta;
13477297d2cdSDmitry Kozlyuk 		} else {
13487297d2cdSDmitry Kozlyuk 			pos += delta;
13497297d2cdSDmitry Kozlyuk 			len -= delta;
13507297d2cdSDmitry Kozlyuk 		}
13517297d2cdSDmitry Kozlyuk 	}
13527297d2cdSDmitry Kozlyuk 	if (data->heap != NULL) {
13537297d2cdSDmitry Kozlyuk 		seg = &data->heap[pos];
13547297d2cdSDmitry Kozlyuk 		if (seg->start <= addr && addr < seg->end)
13557297d2cdSDmitry Kozlyuk 			return;
13567297d2cdSDmitry Kozlyuk 	}
13577297d2cdSDmitry Kozlyuk 	/* Determine the page boundaries and remember them. */
13587297d2cdSDmitry Kozlyuk 	heap = realloc(data->heap, sizeof(heap[0]) * (data->heap_size + 1));
13597297d2cdSDmitry Kozlyuk 	if (heap == NULL) {
13607297d2cdSDmitry Kozlyuk 		free(data->heap);
13617297d2cdSDmitry Kozlyuk 		data->heap = NULL;
13627297d2cdSDmitry Kozlyuk 		data->ret = -1;
13637297d2cdSDmitry Kozlyuk 		return;
13647297d2cdSDmitry Kozlyuk 	}
13657297d2cdSDmitry Kozlyuk 	data->heap = heap;
13667297d2cdSDmitry Kozlyuk 	data->heap_size++;
13677297d2cdSDmitry Kozlyuk 	seg = &heap[data->heap_size - 1];
13687297d2cdSDmitry Kozlyuk 	msl = rte_mem_virt2memseg_list((void *)addr);
13697297d2cdSDmitry Kozlyuk 	page_size = msl != NULL ? msl->page_sz : rte_mem_page_size();
13707297d2cdSDmitry Kozlyuk 	page_start = RTE_PTR_ALIGN_FLOOR(addr, page_size);
13717297d2cdSDmitry Kozlyuk 	seg->start = page_start;
13727297d2cdSDmitry Kozlyuk 	seg->end = page_start + page_size;
13737297d2cdSDmitry Kozlyuk 	/* Maintain the heap order. */
13747297d2cdSDmitry Kozlyuk 	qsort(data->heap, data->heap_size, sizeof(heap[0]),
13757297d2cdSDmitry Kozlyuk 	      mlx5_range_compare_start);
13767297d2cdSDmitry Kozlyuk }
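
/*
 * Worked example of the search above (hypothetical heap of 4 KiB pages):
 * with page starts {0x1000, 0x3000, 0x7000} and addr = 0x3500, the loop
 * narrows pos to 1, the segment [0x3000, 0x4000) contains addr, and the
 * callback returns early without growing the heap.
 */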
13777297d2cdSDmitry Kozlyuk 
13787297d2cdSDmitry Kozlyuk /**
13797297d2cdSDmitry Kozlyuk  * Recover, as closely as possible, the pages of external memory backing
13807297d2cdSDmitry Kozlyuk  * a mempool created with RTE_PKTMBUF_POOL_PINNED_EXT_BUF. Pages are kept
13817297d2cdSDmitry Kozlyuk  * in a sorted array for efficient binary search, as mbufs are numerous.
13827297d2cdSDmitry Kozlyuk  */
13837297d2cdSDmitry Kozlyuk static int
13847297d2cdSDmitry Kozlyuk mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range **out,
13857297d2cdSDmitry Kozlyuk 			unsigned int *out_n)
13867297d2cdSDmitry Kozlyuk {
13877297d2cdSDmitry Kozlyuk 	struct mlx5_mempool_get_extmem_data data;
13887297d2cdSDmitry Kozlyuk 
1389e4c402afSDmitry Kozlyuk 	DRV_LOG(DEBUG, "Recovering external pinned pages of mempool %s",
1390e4c402afSDmitry Kozlyuk 		mp->name);
13917297d2cdSDmitry Kozlyuk 	memset(&data, 0, sizeof(data));
13927297d2cdSDmitry Kozlyuk 	rte_mempool_obj_iter(mp, mlx5_mempool_get_extmem_cb, &data);
13937297d2cdSDmitry Kozlyuk 	*out = data.heap;
13947297d2cdSDmitry Kozlyuk 	*out_n = data.heap_size;
139508ac0358SDmitry Kozlyuk 	return data.ret;
13967297d2cdSDmitry Kozlyuk }
13977297d2cdSDmitry Kozlyuk 
13987297d2cdSDmitry Kozlyuk /**
1399690b2a88SDmitry Kozlyuk  * Get VA-contiguous ranges of the mempool memory.
1400690b2a88SDmitry Kozlyuk  * Each range start and end is aligned to the system page size.
1401690b2a88SDmitry Kozlyuk  *
1402690b2a88SDmitry Kozlyuk  * @param[in] mp
1403690b2a88SDmitry Kozlyuk  *   Analyzed mempool.
140408ac0358SDmitry Kozlyuk  * @param[in] is_extmem
140508ac0358SDmitry Kozlyuk  *   Whether the pool contains only external pinned buffers.
1406690b2a88SDmitry Kozlyuk  * @param[out] out
1407690b2a88SDmitry Kozlyuk  *   Receives the ranges, caller must release it with free().
140808ac0358SDmitry Kozlyuk  * @param[out] out_n
1409690b2a88SDmitry Kozlyuk  *   Receives the number of @p out elements.
1410690b2a88SDmitry Kozlyuk  *
1411690b2a88SDmitry Kozlyuk  * @return
1412690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure.
1413690b2a88SDmitry Kozlyuk  */
1414690b2a88SDmitry Kozlyuk static int
141508ac0358SDmitry Kozlyuk mlx5_get_mempool_ranges(struct rte_mempool *mp, bool is_extmem,
141608ac0358SDmitry Kozlyuk 			struct mlx5_range **out, unsigned int *out_n)
1417690b2a88SDmitry Kozlyuk {
1418690b2a88SDmitry Kozlyuk 	struct mlx5_range *chunks;
14197297d2cdSDmitry Kozlyuk 	unsigned int chunks_n, contig_n, i;
14207297d2cdSDmitry Kozlyuk 	int ret;
1421690b2a88SDmitry Kozlyuk 
14227297d2cdSDmitry Kozlyuk 	/* Collect the pool underlying memory. */
142308ac0358SDmitry Kozlyuk 	ret = is_extmem ? mlx5_mempool_get_extmem(mp, &chunks, &chunks_n) :
14247297d2cdSDmitry Kozlyuk 			  mlx5_mempool_get_chunks(mp, &chunks, &chunks_n);
14257297d2cdSDmitry Kozlyuk 	if (ret < 0)
14267297d2cdSDmitry Kozlyuk 		return ret;
1427690b2a88SDmitry Kozlyuk 	/* Merge adjacent chunks and place them at the beginning. */
1428690b2a88SDmitry Kozlyuk 	qsort(chunks, chunks_n, sizeof(chunks[0]), mlx5_range_compare_start);
1429690b2a88SDmitry Kozlyuk 	contig_n = 1;
1430690b2a88SDmitry Kozlyuk 	for (i = 1; i < chunks_n; i++)
1431690b2a88SDmitry Kozlyuk 		if (chunks[i - 1].end != chunks[i].start) {
1432690b2a88SDmitry Kozlyuk 			chunks[contig_n - 1].end = chunks[i - 1].end;
1433690b2a88SDmitry Kozlyuk 			chunks[contig_n] = chunks[i];
1434690b2a88SDmitry Kozlyuk 			contig_n++;
1435690b2a88SDmitry Kozlyuk 		}
1436690b2a88SDmitry Kozlyuk 	/* Extend the last contiguous chunk to the end of the mempool. */
1437690b2a88SDmitry Kozlyuk 	chunks[contig_n - 1].end = chunks[i - 1].end;
1438690b2a88SDmitry Kozlyuk 	*out = chunks;
1439690b2a88SDmitry Kozlyuk 	*out_n = contig_n;
1440690b2a88SDmitry Kozlyuk 	return 0;
1441690b2a88SDmitry Kozlyuk }
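
/*
 * Worked example of the merge above (hypothetical page-aligned ranges):
 * sorted input [0x1000, 0x2000), [0x2000, 0x3000), [0x5000, 0x6000)
 * collapses into [0x1000, 0x3000) and [0x5000, 0x6000), so two MRs are
 * created instead of three.
 */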
1442690b2a88SDmitry Kozlyuk 
1443690b2a88SDmitry Kozlyuk /**
1444690b2a88SDmitry Kozlyuk  * Analyze mempool memory to select memory ranges to register.
1445690b2a88SDmitry Kozlyuk  *
1446690b2a88SDmitry Kozlyuk  * @param[in] mp
1447690b2a88SDmitry Kozlyuk  *   Mempool to analyze.
144808ac0358SDmitry Kozlyuk  * @param[in] is_extmem
144908ac0358SDmitry Kozlyuk  *   Whether the pool contains only external pinned buffers.
1450690b2a88SDmitry Kozlyuk  * @param[out] out
1451690b2a88SDmitry Kozlyuk  *   Receives memory ranges to register, aligned to the system page size.
1452690b2a88SDmitry Kozlyuk  *   The caller must release them with free().
1453690b2a88SDmitry Kozlyuk  * @param[out] out_n
1454690b2a88SDmitry Kozlyuk  *   Receives the number of @p out items.
1455690b2a88SDmitry Kozlyuk  * @param[out] share_hugepage
1456690b2a88SDmitry Kozlyuk  *   Receives true if the entire pool resides within a single hugepage.
1457690b2a88SDmitry Kozlyuk  *
1458690b2a88SDmitry Kozlyuk  * @return
1459690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure.
1460690b2a88SDmitry Kozlyuk  */
1461690b2a88SDmitry Kozlyuk static int
146208ac0358SDmitry Kozlyuk mlx5_mempool_reg_analyze(struct rte_mempool *mp, bool is_extmem,
146308ac0358SDmitry Kozlyuk 			 struct mlx5_range **out, unsigned int *out_n,
146408ac0358SDmitry Kozlyuk 			 bool *share_hugepage)
1465690b2a88SDmitry Kozlyuk {
1466690b2a88SDmitry Kozlyuk 	struct mlx5_range *ranges = NULL;
1467690b2a88SDmitry Kozlyuk 	unsigned int i, ranges_n = 0;
1468690b2a88SDmitry Kozlyuk 	struct rte_memseg_list *msl;
1469690b2a88SDmitry Kozlyuk 
147008ac0358SDmitry Kozlyuk 	if (mlx5_get_mempool_ranges(mp, is_extmem, &ranges, &ranges_n) < 0) {
1471690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot get address ranges for mempool %s",
1472690b2a88SDmitry Kozlyuk 			mp->name);
1473690b2a88SDmitry Kozlyuk 		return -1;
1474690b2a88SDmitry Kozlyuk 	}
1475690b2a88SDmitry Kozlyuk 	/* Check if the hugepage of the pool can be shared. */
1476690b2a88SDmitry Kozlyuk 	*share_hugepage = false;
1477690b2a88SDmitry Kozlyuk 	msl = rte_mem_virt2memseg_list((void *)ranges[0].start);
1478690b2a88SDmitry Kozlyuk 	if (msl != NULL) {
1479690b2a88SDmitry Kozlyuk 		uint64_t hugepage_sz = 0;
1480690b2a88SDmitry Kozlyuk 
1481690b2a88SDmitry Kozlyuk 		/* Check that all ranges are on pages of the same size. */
1482690b2a88SDmitry Kozlyuk 		for (i = 0; i < ranges_n; i++) {
1483690b2a88SDmitry Kozlyuk 			if (hugepage_sz != 0 && hugepage_sz != msl->page_sz)
1484690b2a88SDmitry Kozlyuk 				break;
1485690b2a88SDmitry Kozlyuk 			hugepage_sz = msl->page_sz;
1486690b2a88SDmitry Kozlyuk 		}
1487690b2a88SDmitry Kozlyuk 		if (i == ranges_n) {
1488690b2a88SDmitry Kozlyuk 			/*
1489690b2a88SDmitry Kozlyuk 			 * If the entire pool is within one hugepage,
1490690b2a88SDmitry Kozlyuk 			 * combine all ranges into one of the hugepage size.
1491690b2a88SDmitry Kozlyuk 			 */
1492690b2a88SDmitry Kozlyuk 			uintptr_t reg_start = ranges[0].start;
1493690b2a88SDmitry Kozlyuk 			uintptr_t reg_end = ranges[ranges_n - 1].end;
1494690b2a88SDmitry Kozlyuk 			uintptr_t hugepage_start =
1495690b2a88SDmitry Kozlyuk 				RTE_ALIGN_FLOOR(reg_start, hugepage_sz);
1496690b2a88SDmitry Kozlyuk 			uintptr_t hugepage_end = hugepage_start + hugepage_sz;
1497690b2a88SDmitry Kozlyuk 			if (reg_end < hugepage_end) {
1498690b2a88SDmitry Kozlyuk 				ranges[0].start = hugepage_start;
1499690b2a88SDmitry Kozlyuk 				ranges[0].end = hugepage_end;
1500690b2a88SDmitry Kozlyuk 				ranges_n = 1;
1501690b2a88SDmitry Kozlyuk 				*share_hugepage = true;
1502690b2a88SDmitry Kozlyuk 			}
1503690b2a88SDmitry Kozlyuk 		}
1504690b2a88SDmitry Kozlyuk 	}
1505690b2a88SDmitry Kozlyuk 	*out = ranges;
1506690b2a88SDmitry Kozlyuk 	*out_n = ranges_n;
1507690b2a88SDmitry Kozlyuk 	return 0;
1508690b2a88SDmitry Kozlyuk }
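
/*
 * Example of the single-hugepage shortcut above (hypothetical addresses,
 * a 2 MiB hugepage at 0x200000): ranges [0x210000, 0x280000) and
 * [0x300000, 0x380000) both lie inside [0x200000, 0x400000), so they are
 * replaced by that one hugepage-sized range and *share_hugepage is true.
 */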
1509690b2a88SDmitry Kozlyuk 
1510690b2a88SDmitry Kozlyuk /** Create a registration object for the mempool. */
1511690b2a88SDmitry Kozlyuk static struct mlx5_mempool_reg *
151208ac0358SDmitry Kozlyuk mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n,
151308ac0358SDmitry Kozlyuk 			bool is_extmem)
1514690b2a88SDmitry Kozlyuk {
1515690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr = NULL;
1516690b2a88SDmitry Kozlyuk 
1517690b2a88SDmitry Kozlyuk 	mpr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
15188947eebcSBing Zhao 			  sizeof(struct mlx5_mempool_reg),
1519690b2a88SDmitry Kozlyuk 			  RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1520690b2a88SDmitry Kozlyuk 	if (mpr == NULL) {
1521690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot allocate mempool %s registration object",
1522690b2a88SDmitry Kozlyuk 			mp->name);
1523690b2a88SDmitry Kozlyuk 		return NULL;
1524690b2a88SDmitry Kozlyuk 	}
15258947eebcSBing Zhao 	mpr->mrs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
15268947eebcSBing Zhao 			       mrs_n * sizeof(struct mlx5_mempool_mr),
15278947eebcSBing Zhao 			       RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
15288947eebcSBing Zhao 	if (!mpr->mrs) {
15298947eebcSBing Zhao 		DRV_LOG(ERR, "Cannot allocate mempool %s registration MRs",
15308947eebcSBing Zhao 			mp->name);
15318947eebcSBing Zhao 		mlx5_free(mpr);
15328947eebcSBing Zhao 		return NULL;
15338947eebcSBing Zhao 	}
1534690b2a88SDmitry Kozlyuk 	mpr->mp = mp;
1535690b2a88SDmitry Kozlyuk 	mpr->mrs_n = mrs_n;
153608ac0358SDmitry Kozlyuk 	mpr->is_extmem = is_extmem;
1537690b2a88SDmitry Kozlyuk 	return mpr;
1538690b2a88SDmitry Kozlyuk }
1539690b2a88SDmitry Kozlyuk 
1540690b2a88SDmitry Kozlyuk /**
1541690b2a88SDmitry Kozlyuk  * Destroy a mempool registration object.
1542690b2a88SDmitry Kozlyuk  *
1543690b2a88SDmitry Kozlyuk  * @param standalone
15447be78d02SJosh Soref  *   Whether @p mpr owns its MRs exclusively, i.e. they are not shared.
1545690b2a88SDmitry Kozlyuk  */
1546690b2a88SDmitry Kozlyuk static void
1547690b2a88SDmitry Kozlyuk mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,
1548690b2a88SDmitry Kozlyuk 			 struct mlx5_mempool_reg *mpr, bool standalone)
1549690b2a88SDmitry Kozlyuk {
1550690b2a88SDmitry Kozlyuk 	if (standalone) {
1551690b2a88SDmitry Kozlyuk 		unsigned int i;
1552690b2a88SDmitry Kozlyuk 
1553690b2a88SDmitry Kozlyuk 		for (i = 0; i < mpr->mrs_n; i++)
1554690b2a88SDmitry Kozlyuk 			share_cache->dereg_mr_cb(&mpr->mrs[i].pmd_mr);
15558947eebcSBing Zhao 		mlx5_free(mpr->mrs);
1556690b2a88SDmitry Kozlyuk 	}
1557690b2a88SDmitry Kozlyuk 	mlx5_free(mpr);
1558690b2a88SDmitry Kozlyuk }
1559690b2a88SDmitry Kozlyuk 
1560690b2a88SDmitry Kozlyuk /** Find registration object of a mempool. */
1561690b2a88SDmitry Kozlyuk static struct mlx5_mempool_reg *
1562690b2a88SDmitry Kozlyuk mlx5_mempool_reg_lookup(struct mlx5_mr_share_cache *share_cache,
1563690b2a88SDmitry Kozlyuk 			struct rte_mempool *mp)
1564690b2a88SDmitry Kozlyuk {
1565690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
1566690b2a88SDmitry Kozlyuk 
1567690b2a88SDmitry Kozlyuk 	LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
1568690b2a88SDmitry Kozlyuk 		if (mpr->mp == mp)
1569690b2a88SDmitry Kozlyuk 			break;
1570690b2a88SDmitry Kozlyuk 	return mpr;
1571690b2a88SDmitry Kozlyuk }
1572690b2a88SDmitry Kozlyuk 
1573690b2a88SDmitry Kozlyuk /** Increment reference counters of MRs used in the registration. */
1574690b2a88SDmitry Kozlyuk static void
1575690b2a88SDmitry Kozlyuk mlx5_mempool_reg_attach(struct mlx5_mempool_reg *mpr)
1576690b2a88SDmitry Kozlyuk {
1577690b2a88SDmitry Kozlyuk 	unsigned int i;
1578690b2a88SDmitry Kozlyuk 
1579690b2a88SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++)
1580690b2a88SDmitry Kozlyuk 		__atomic_add_fetch(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
1581690b2a88SDmitry Kozlyuk }
1582690b2a88SDmitry Kozlyuk 
1583690b2a88SDmitry Kozlyuk /**
1584690b2a88SDmitry Kozlyuk  * Decrement reference counters of MRs used in the registration.
1585690b2a88SDmitry Kozlyuk  *
1586690b2a88SDmitry Kozlyuk  * @return True if no more references to @p mpr MRs exist, False otherwise.
1587690b2a88SDmitry Kozlyuk  */
1588690b2a88SDmitry Kozlyuk static bool
1589690b2a88SDmitry Kozlyuk mlx5_mempool_reg_detach(struct mlx5_mempool_reg *mpr)
1590690b2a88SDmitry Kozlyuk {
1591690b2a88SDmitry Kozlyuk 	unsigned int i;
1592690b2a88SDmitry Kozlyuk 	bool ret = false;
1593690b2a88SDmitry Kozlyuk 
1594690b2a88SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++)
1595690b2a88SDmitry Kozlyuk 		ret |= __atomic_sub_fetch(&mpr->mrs[i].refcnt, 1,
1596690b2a88SDmitry Kozlyuk 					  __ATOMIC_RELAXED) == 0;
1597690b2a88SDmitry Kozlyuk 	return ret;
1598690b2a88SDmitry Kozlyuk }
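
/*
 * Example of the sharing these counters enable (hypothetical mempools):
 * if pools A and B both fit in one hugepage, B's registration reuses A's
 * MR and raises its refcnt to 2. Detaching A then reports the MR as still
 * referenced, so it is destroyed only when B is detached as well.
 */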
1599690b2a88SDmitry Kozlyuk 
1600690b2a88SDmitry Kozlyuk static int
1601690b2a88SDmitry Kozlyuk mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
160208ac0358SDmitry Kozlyuk 				 void *pd, struct rte_mempool *mp,
160308ac0358SDmitry Kozlyuk 				 bool is_extmem)
1604690b2a88SDmitry Kozlyuk {
1605690b2a88SDmitry Kozlyuk 	struct mlx5_range *ranges = NULL;
160608ac0358SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr, *old_mpr, *new_mpr;
1607690b2a88SDmitry Kozlyuk 	unsigned int i, ranges_n;
160808ac0358SDmitry Kozlyuk 	bool share_hugepage, standalone = false;
1609690b2a88SDmitry Kozlyuk 	int ret = -1;
1610690b2a88SDmitry Kozlyuk 
1611690b2a88SDmitry Kozlyuk 	/* Early check to avoid unnecessary creation of MRs. */
1612690b2a88SDmitry Kozlyuk 	rte_rwlock_read_lock(&share_cache->rwlock);
161308ac0358SDmitry Kozlyuk 	old_mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1614690b2a88SDmitry Kozlyuk 	rte_rwlock_read_unlock(&share_cache->rwlock);
161508ac0358SDmitry Kozlyuk 	if (old_mpr != NULL && (!is_extmem || old_mpr->is_extmem)) {
1616690b2a88SDmitry Kozlyuk 		DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
1617690b2a88SDmitry Kozlyuk 			mp->name, pd);
1618690b2a88SDmitry Kozlyuk 		rte_errno = EEXIST;
1619690b2a88SDmitry Kozlyuk 		goto exit;
1620690b2a88SDmitry Kozlyuk 	}
162108ac0358SDmitry Kozlyuk 	if (mlx5_mempool_reg_analyze(mp, is_extmem, &ranges, &ranges_n,
1622690b2a88SDmitry Kozlyuk 				     &share_hugepage) < 0) {
1623690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot get mempool %s memory ranges", mp->name);
1624690b2a88SDmitry Kozlyuk 		rte_errno = ENOMEM;
1625690b2a88SDmitry Kozlyuk 		goto exit;
1626690b2a88SDmitry Kozlyuk 	}
162708ac0358SDmitry Kozlyuk 	new_mpr = mlx5_mempool_reg_create(mp, ranges_n, is_extmem);
1628690b2a88SDmitry Kozlyuk 	if (new_mpr == NULL) {
1629690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR,
1630690b2a88SDmitry Kozlyuk 			"Cannot create a registration object for mempool %s in PD %p",
1631690b2a88SDmitry Kozlyuk 			mp->name, pd);
1632690b2a88SDmitry Kozlyuk 		rte_errno = ENOMEM;
1633690b2a88SDmitry Kozlyuk 		goto exit;
1634690b2a88SDmitry Kozlyuk 	}
1635690b2a88SDmitry Kozlyuk 	/*
1636690b2a88SDmitry Kozlyuk 	 * If the entire mempool fits in a single hugepage, the MR for this
1637690b2a88SDmitry Kozlyuk 	 * hugepage can be shared across mempools that also fit in it.
1638690b2a88SDmitry Kozlyuk 	 */
1639690b2a88SDmitry Kozlyuk 	if (share_hugepage) {
1640690b2a88SDmitry Kozlyuk 		rte_rwlock_write_lock(&share_cache->rwlock);
1641690b2a88SDmitry Kozlyuk 		LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next) {
1642690b2a88SDmitry Kozlyuk 			if (mpr->mrs[0].pmd_mr.addr == (void *)ranges[0].start)
1643690b2a88SDmitry Kozlyuk 				break;
1644690b2a88SDmitry Kozlyuk 		}
1645690b2a88SDmitry Kozlyuk 		if (mpr != NULL) {
1646690b2a88SDmitry Kozlyuk 			new_mpr->mrs = mpr->mrs;
1647690b2a88SDmitry Kozlyuk 			mlx5_mempool_reg_attach(new_mpr);
1648690b2a88SDmitry Kozlyuk 			LIST_INSERT_HEAD(&share_cache->mempool_reg_list,
1649690b2a88SDmitry Kozlyuk 					 new_mpr, next);
1650690b2a88SDmitry Kozlyuk 		}
1651690b2a88SDmitry Kozlyuk 		rte_rwlock_write_unlock(&share_cache->rwlock);
1652690b2a88SDmitry Kozlyuk 		if (mpr != NULL) {
1653690b2a88SDmitry Kozlyuk 			DRV_LOG(DEBUG, "Shared MR %#x in PD %p for mempool %s with mempool %s",
1654690b2a88SDmitry Kozlyuk 				mpr->mrs[0].pmd_mr.lkey, pd, mp->name,
1655690b2a88SDmitry Kozlyuk 				mpr->mp->name);
1656690b2a88SDmitry Kozlyuk 			ret = 0;
1657690b2a88SDmitry Kozlyuk 			goto exit;
1658690b2a88SDmitry Kozlyuk 		}
1659690b2a88SDmitry Kozlyuk 	}
1660690b2a88SDmitry Kozlyuk 	for (i = 0; i < ranges_n; i++) {
1661690b2a88SDmitry Kozlyuk 		struct mlx5_mempool_mr *mr = &new_mpr->mrs[i];
1662690b2a88SDmitry Kozlyuk 		const struct mlx5_range *range = &ranges[i];
1663690b2a88SDmitry Kozlyuk 		size_t len = range->end - range->start;
1664690b2a88SDmitry Kozlyuk 
1665690b2a88SDmitry Kozlyuk 		if (share_cache->reg_mr_cb(pd, (void *)range->start, len,
1666690b2a88SDmitry Kozlyuk 		    &mr->pmd_mr) < 0) {
1667690b2a88SDmitry Kozlyuk 			DRV_LOG(ERR,
1668690b2a88SDmitry Kozlyuk 				"Failed to create an MR in PD %p for address range "
1669690b2a88SDmitry Kozlyuk 				"[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
1670690b2a88SDmitry Kozlyuk 				pd, range->start, range->end, len, mp->name);
1671690b2a88SDmitry Kozlyuk 			break;
1672690b2a88SDmitry Kozlyuk 		}
1673690b2a88SDmitry Kozlyuk 		DRV_LOG(DEBUG,
1674690b2a88SDmitry Kozlyuk 			"Created a new MR %#x in PD %p for address range "
1675690b2a88SDmitry Kozlyuk 			"[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
1676690b2a88SDmitry Kozlyuk 			mr->pmd_mr.lkey, pd, range->start, range->end, len,
1677690b2a88SDmitry Kozlyuk 			mp->name);
1678690b2a88SDmitry Kozlyuk 	}
1679690b2a88SDmitry Kozlyuk 	if (i != ranges_n) {
1680690b2a88SDmitry Kozlyuk 		mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
1681690b2a88SDmitry Kozlyuk 		rte_errno = EINVAL;
1682690b2a88SDmitry Kozlyuk 		goto exit;
1683690b2a88SDmitry Kozlyuk 	}
1684690b2a88SDmitry Kozlyuk 	/* Concurrent registration is not supposed to happen. */
1685690b2a88SDmitry Kozlyuk 	rte_rwlock_write_lock(&share_cache->rwlock);
1686690b2a88SDmitry Kozlyuk 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
168708ac0358SDmitry Kozlyuk 	if (mpr == old_mpr && old_mpr != NULL) {
168808ac0358SDmitry Kozlyuk 		LIST_REMOVE(old_mpr, next);
168908ac0358SDmitry Kozlyuk 		standalone = mlx5_mempool_reg_detach(mpr);
169008ac0358SDmitry Kozlyuk 		/* No need to flush the cache: old MRs cannot be in use. */
169108ac0358SDmitry Kozlyuk 		mpr = NULL;
169208ac0358SDmitry Kozlyuk 	}
1693690b2a88SDmitry Kozlyuk 	if (mpr == NULL) {
1694690b2a88SDmitry Kozlyuk 		mlx5_mempool_reg_attach(new_mpr);
1695fc59a1ecSMichael Baum 		LIST_INSERT_HEAD(&share_cache->mempool_reg_list, new_mpr, next);
1696690b2a88SDmitry Kozlyuk 		ret = 0;
1697690b2a88SDmitry Kozlyuk 	}
1698690b2a88SDmitry Kozlyuk 	rte_rwlock_write_unlock(&share_cache->rwlock);
1699690b2a88SDmitry Kozlyuk 	if (mpr != NULL) {
1700690b2a88SDmitry Kozlyuk 		DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
1701690b2a88SDmitry Kozlyuk 			mp->name, pd);
1702690b2a88SDmitry Kozlyuk 		mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
1703690b2a88SDmitry Kozlyuk 		rte_errno = EEXIST;
1704690b2a88SDmitry Kozlyuk 		goto exit;
170508ac0358SDmitry Kozlyuk 	} else if (old_mpr != NULL) {
170608ac0358SDmitry Kozlyuk 		DRV_LOG(DEBUG, "Mempool %s registration for PD %p updated for external memory",
170708ac0358SDmitry Kozlyuk 			mp->name, pd);
170808ac0358SDmitry Kozlyuk 		mlx5_mempool_reg_destroy(share_cache, old_mpr, standalone);
1709690b2a88SDmitry Kozlyuk 	}
1710690b2a88SDmitry Kozlyuk exit:
1711690b2a88SDmitry Kozlyuk 	free(ranges);
1712690b2a88SDmitry Kozlyuk 	return ret;
1713690b2a88SDmitry Kozlyuk }
1714690b2a88SDmitry Kozlyuk 
1715690b2a88SDmitry Kozlyuk static int
171620489176SMichael Baum mlx5_mr_mempool_register_secondary(struct mlx5_common_device *cdev,
171708ac0358SDmitry Kozlyuk 				   struct rte_mempool *mp, bool is_extmem)
1718690b2a88SDmitry Kozlyuk {
171908ac0358SDmitry Kozlyuk 	return mlx5_mp_req_mempool_reg(cdev, mp, true, is_extmem);
1720690b2a88SDmitry Kozlyuk }
1721690b2a88SDmitry Kozlyuk 
1722690b2a88SDmitry Kozlyuk /**
1723690b2a88SDmitry Kozlyuk  * Register the memory of a mempool in the protection domain.
1724690b2a88SDmitry Kozlyuk  *
172520489176SMichael Baum  * @param cdev
172620489176SMichael Baum  *   Pointer to the mlx5 common device.
1727690b2a88SDmitry Kozlyuk  * @param mp
1728690b2a88SDmitry Kozlyuk  *   Mempool to register.
1729690b2a88SDmitry Kozlyuk  *
1730690b2a88SDmitry Kozlyuk  * @return
1731690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure and rte_errno is set.
1732690b2a88SDmitry Kozlyuk  */
1733690b2a88SDmitry Kozlyuk int
173420489176SMichael Baum mlx5_mr_mempool_register(struct mlx5_common_device *cdev,
173508ac0358SDmitry Kozlyuk 			 struct rte_mempool *mp, bool is_extmem)
1736690b2a88SDmitry Kozlyuk {
1737c47d7b90SAndrew Rybchenko 	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
1738690b2a88SDmitry Kozlyuk 		return 0;
1739690b2a88SDmitry Kozlyuk 	switch (rte_eal_process_type()) {
1740690b2a88SDmitry Kozlyuk 	case RTE_PROC_PRIMARY:
174120489176SMichael Baum 		return mlx5_mr_mempool_register_primary(&cdev->mr_scache,
174208ac0358SDmitry Kozlyuk 							cdev->pd, mp,
174308ac0358SDmitry Kozlyuk 							is_extmem);
1744690b2a88SDmitry Kozlyuk 	case RTE_PROC_SECONDARY:
174508ac0358SDmitry Kozlyuk 		return mlx5_mr_mempool_register_secondary(cdev, mp, is_extmem);
1746690b2a88SDmitry Kozlyuk 	default:
1747690b2a88SDmitry Kozlyuk 		return -1;
1748690b2a88SDmitry Kozlyuk 	}
1749690b2a88SDmitry Kozlyuk }
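
/*
 * A minimal registration sketch (hypothetical helper): register a pool of
 * regular DPDK memory at device start, tolerating repeated registration
 * of the same pool.
 */
static __rte_unused int
example_register_mempool(struct mlx5_common_device *cdev,
			 struct rte_mempool *mp)
{
	/* is_extmem == false: the pool is not external pinned memory. */
	if (mlx5_mr_mempool_register(cdev, mp, false) < 0 &&
	    rte_errno != EEXIST)
		return -1;
	return 0;
}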
1750690b2a88SDmitry Kozlyuk 
1751690b2a88SDmitry Kozlyuk static int
1752690b2a88SDmitry Kozlyuk mlx5_mr_mempool_unregister_primary(struct mlx5_mr_share_cache *share_cache,
1753690b2a88SDmitry Kozlyuk 				   struct rte_mempool *mp)
1754690b2a88SDmitry Kozlyuk {
1755690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
1756690b2a88SDmitry Kozlyuk 	bool standalone = false;
1757690b2a88SDmitry Kozlyuk 
1758690b2a88SDmitry Kozlyuk 	rte_rwlock_write_lock(&share_cache->rwlock);
1759690b2a88SDmitry Kozlyuk 	LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
1760690b2a88SDmitry Kozlyuk 		if (mpr->mp == mp) {
1761690b2a88SDmitry Kozlyuk 			LIST_REMOVE(mpr, next);
1762690b2a88SDmitry Kozlyuk 			standalone = mlx5_mempool_reg_detach(mpr);
1763690b2a88SDmitry Kozlyuk 			if (standalone)
1764690b2a88SDmitry Kozlyuk 				/*
1765690b2a88SDmitry Kozlyuk 				 * The unlock operation below provides a memory
1766690b2a88SDmitry Kozlyuk 				 * barrier due to its store-release semantics.
1767690b2a88SDmitry Kozlyuk 				 */
1768690b2a88SDmitry Kozlyuk 				++share_cache->dev_gen;
1769690b2a88SDmitry Kozlyuk 			break;
1770690b2a88SDmitry Kozlyuk 		}
1771690b2a88SDmitry Kozlyuk 	rte_rwlock_write_unlock(&share_cache->rwlock);
1772690b2a88SDmitry Kozlyuk 	if (mpr == NULL) {
1773690b2a88SDmitry Kozlyuk 		rte_errno = ENOENT;
1774690b2a88SDmitry Kozlyuk 		return -1;
1775690b2a88SDmitry Kozlyuk 	}
1776690b2a88SDmitry Kozlyuk 	mlx5_mempool_reg_destroy(share_cache, mpr, standalone);
1777690b2a88SDmitry Kozlyuk 	return 0;
1778690b2a88SDmitry Kozlyuk }
1779690b2a88SDmitry Kozlyuk 
1780690b2a88SDmitry Kozlyuk static int
178120489176SMichael Baum mlx5_mr_mempool_unregister_secondary(struct mlx5_common_device *cdev,
178220489176SMichael Baum 				     struct rte_mempool *mp)
1783690b2a88SDmitry Kozlyuk {
178408ac0358SDmitry Kozlyuk 	return mlx5_mp_req_mempool_reg(cdev, mp, false, false /* is_extmem */);
1785690b2a88SDmitry Kozlyuk }
1786690b2a88SDmitry Kozlyuk 
1787690b2a88SDmitry Kozlyuk /**
1788690b2a88SDmitry Kozlyuk  * Unregister the memory of a mempool from the protection domain.
1789690b2a88SDmitry Kozlyuk  *
179020489176SMichael Baum  * @param cdev
179120489176SMichael Baum  *   Pointer to the mlx5 common device.
1792690b2a88SDmitry Kozlyuk  * @param mp
1793690b2a88SDmitry Kozlyuk  *   Mempool to unregister.
1794690b2a88SDmitry Kozlyuk  *
1795690b2a88SDmitry Kozlyuk  * @return
1796690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure and rte_errno is set.
1797690b2a88SDmitry Kozlyuk  */
1798690b2a88SDmitry Kozlyuk int
179920489176SMichael Baum mlx5_mr_mempool_unregister(struct mlx5_common_device *cdev,
180020489176SMichael Baum 			   struct rte_mempool *mp)
1801690b2a88SDmitry Kozlyuk {
1802c47d7b90SAndrew Rybchenko 	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
1803690b2a88SDmitry Kozlyuk 		return 0;
1804690b2a88SDmitry Kozlyuk 	switch (rte_eal_process_type()) {
1805690b2a88SDmitry Kozlyuk 	case RTE_PROC_PRIMARY:
180620489176SMichael Baum 		return mlx5_mr_mempool_unregister_primary(&cdev->mr_scache, mp);
1807690b2a88SDmitry Kozlyuk 	case RTE_PROC_SECONDARY:
180820489176SMichael Baum 		return mlx5_mr_mempool_unregister_secondary(cdev, mp);
1809690b2a88SDmitry Kozlyuk 	default:
1810690b2a88SDmitry Kozlyuk 		return -1;
1811690b2a88SDmitry Kozlyuk 	}
1812690b2a88SDmitry Kozlyuk }
1813690b2a88SDmitry Kozlyuk 
1814690b2a88SDmitry Kozlyuk /**
1815690b2a88SDmitry Kozlyuk  * Look up an MR key by address in a registered mempool.
1816690b2a88SDmitry Kozlyuk  *
1817690b2a88SDmitry Kozlyuk  * @param mpr
1818690b2a88SDmitry Kozlyuk  *   Mempool registration object.
1819690b2a88SDmitry Kozlyuk  * @param addr
1820690b2a88SDmitry Kozlyuk  *   Address within the mempool.
1821690b2a88SDmitry Kozlyuk  * @param entry
1822690b2a88SDmitry Kozlyuk  *   Bottom-half cache entry to fill.
1823690b2a88SDmitry Kozlyuk  *
1824690b2a88SDmitry Kozlyuk  * @return
1825690b2a88SDmitry Kozlyuk  *   MR key or UINT32_MAX on failure, which can only happen
1826690b2a88SDmitry Kozlyuk  *   if the address is not from within the mempool.
1827690b2a88SDmitry Kozlyuk  */
1828690b2a88SDmitry Kozlyuk static uint32_t
1829690b2a88SDmitry Kozlyuk mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr,
1830690b2a88SDmitry Kozlyuk 			 struct mr_cache_entry *entry)
1831690b2a88SDmitry Kozlyuk {
1832690b2a88SDmitry Kozlyuk 	uint32_t lkey = UINT32_MAX;
1833690b2a88SDmitry Kozlyuk 	unsigned int i;
1834690b2a88SDmitry Kozlyuk 
1835690b2a88SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++) {
1836690b2a88SDmitry Kozlyuk 		const struct mlx5_pmd_mr *mr = &mpr->mrs[i].pmd_mr;
1837*2eb92b0fSDmitry Kozlyuk 		uintptr_t mr_start = (uintptr_t)mr->addr;
1838*2eb92b0fSDmitry Kozlyuk 		uintptr_t mr_end = mr_start + mr->len;
1839690b2a88SDmitry Kozlyuk 
1840*2eb92b0fSDmitry Kozlyuk 		if (mr_start <= addr && addr < mr_end) {
1841690b2a88SDmitry Kozlyuk 			lkey = rte_cpu_to_be_32(mr->lkey);
1842*2eb92b0fSDmitry Kozlyuk 			entry->start = mr_start;
1843*2eb92b0fSDmitry Kozlyuk 			entry->end = mr_end;
1844690b2a88SDmitry Kozlyuk 			entry->lkey = lkey;
1845690b2a88SDmitry Kozlyuk 			break;
1846690b2a88SDmitry Kozlyuk 		}
1847690b2a88SDmitry Kozlyuk 	}
1848690b2a88SDmitry Kozlyuk 	return lkey;
1849690b2a88SDmitry Kozlyuk }
1850690b2a88SDmitry Kozlyuk 
1851690b2a88SDmitry Kozlyuk /**
1852690b2a88SDmitry Kozlyuk  * Update bottom-half cache from the list of mempool registrations.
1853690b2a88SDmitry Kozlyuk  *
1854690b2a88SDmitry Kozlyuk  * @param mr_ctrl
1855690b2a88SDmitry Kozlyuk  *   Per-queue MR control handle.
1856690b2a88SDmitry Kozlyuk  * @param entry
1857690b2a88SDmitry Kozlyuk  *   Pointer to an entry in the bottom-half cache to update
1858690b2a88SDmitry Kozlyuk  *   with the MR lkey looked up.
1859690b2a88SDmitry Kozlyuk  * @param mp
1860690b2a88SDmitry Kozlyuk  *   Mempool containing the address.
1861690b2a88SDmitry Kozlyuk  * @param addr
1862690b2a88SDmitry Kozlyuk  *   Address to lookup.
1863690b2a88SDmitry Kozlyuk  * @return
1864690b2a88SDmitry Kozlyuk  *   MR lkey on success, UINT32_MAX on failure.
1865690b2a88SDmitry Kozlyuk  */
1866690b2a88SDmitry Kozlyuk static uint32_t
186771304b5cSMichael Baum mlx5_lookup_mempool_regs(struct mlx5_mr_ctrl *mr_ctrl,
1868690b2a88SDmitry Kozlyuk 			 struct mr_cache_entry *entry,
1869690b2a88SDmitry Kozlyuk 			 struct rte_mempool *mp, uintptr_t addr)
1870690b2a88SDmitry Kozlyuk {
187171304b5cSMichael Baum 	struct mlx5_mr_share_cache *share_cache =
187271304b5cSMichael Baum 		container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
187371304b5cSMichael Baum 			     dev_gen);
1874690b2a88SDmitry Kozlyuk 	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
1875690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
1876690b2a88SDmitry Kozlyuk 	uint32_t lkey = UINT32_MAX;
1877690b2a88SDmitry Kozlyuk 
1878690b2a88SDmitry Kozlyuk 	/* If local cache table is full, try to double it. */
1879690b2a88SDmitry Kozlyuk 	if (unlikely(bt->len == bt->size))
1880690b2a88SDmitry Kozlyuk 		mr_btree_expand(bt, bt->size << 1);
1881690b2a88SDmitry Kozlyuk 	/* Look up in mempool registrations. */
1882690b2a88SDmitry Kozlyuk 	rte_rwlock_read_lock(&share_cache->rwlock);
1883690b2a88SDmitry Kozlyuk 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1884690b2a88SDmitry Kozlyuk 	if (mpr != NULL)
1885690b2a88SDmitry Kozlyuk 		lkey = mlx5_mempool_reg_addr2mr(mpr, addr, entry);
1886690b2a88SDmitry Kozlyuk 	rte_rwlock_read_unlock(&share_cache->rwlock);
1887690b2a88SDmitry Kozlyuk 	/*
1888690b2a88SDmitry Kozlyuk 	 * Update local cache. Even if it fails, return the found entry
1889690b2a88SDmitry Kozlyuk 	 * to update top-half cache. Next time, this entry will be found
1890690b2a88SDmitry Kozlyuk 	 * in the global cache.
1891690b2a88SDmitry Kozlyuk 	 */
1892690b2a88SDmitry Kozlyuk 	if (lkey != UINT32_MAX)
1893690b2a88SDmitry Kozlyuk 		mr_btree_insert(bt, entry);
1894690b2a88SDmitry Kozlyuk 	return lkey;
1895690b2a88SDmitry Kozlyuk }
1896690b2a88SDmitry Kozlyuk 
1897690b2a88SDmitry Kozlyuk /**
189808ac0358SDmitry Kozlyuk  * Populate the cache with LKeys of all MRs used by the mempool.
189908ac0358SDmitry Kozlyuk  * It is intended for Rx mempools, which are registered in advance.
190008ac0358SDmitry Kozlyuk  *
190108ac0358SDmitry Kozlyuk  * @param mr_ctrl
190208ac0358SDmitry Kozlyuk  *  Per-queue MR control handle.
190308ac0358SDmitry Kozlyuk  * @param mp
190408ac0358SDmitry Kozlyuk  *  Registered memory pool.
190508ac0358SDmitry Kozlyuk  *
190608ac0358SDmitry Kozlyuk  * @return
190708ac0358SDmitry Kozlyuk  *  0 on success, (-1) on failure and rte_errno is set.
190808ac0358SDmitry Kozlyuk  */
190908ac0358SDmitry Kozlyuk int
191008ac0358SDmitry Kozlyuk mlx5_mr_mempool_populate_cache(struct mlx5_mr_ctrl *mr_ctrl,
191108ac0358SDmitry Kozlyuk 			       struct rte_mempool *mp)
191208ac0358SDmitry Kozlyuk {
191308ac0358SDmitry Kozlyuk 	struct mlx5_mr_share_cache *share_cache =
191408ac0358SDmitry Kozlyuk 		container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
191508ac0358SDmitry Kozlyuk 			     dev_gen);
191608ac0358SDmitry Kozlyuk 	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
191708ac0358SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
191808ac0358SDmitry Kozlyuk 	unsigned int i;
191908ac0358SDmitry Kozlyuk 
192008ac0358SDmitry Kozlyuk 	/*
192108ac0358SDmitry Kozlyuk 	 * Registration is valid after the lock is released,
192208ac0358SDmitry Kozlyuk 	 * because the function is called after the mempool is registered.
192308ac0358SDmitry Kozlyuk 	 */
192408ac0358SDmitry Kozlyuk 	rte_rwlock_read_lock(&share_cache->rwlock);
192508ac0358SDmitry Kozlyuk 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
192608ac0358SDmitry Kozlyuk 	rte_rwlock_read_unlock(&share_cache->rwlock);
192708ac0358SDmitry Kozlyuk 	if (mpr == NULL) {
192808ac0358SDmitry Kozlyuk 		DRV_LOG(ERR, "Mempool %s is not registered", mp->name);
192908ac0358SDmitry Kozlyuk 		rte_errno = ENOENT;
193008ac0358SDmitry Kozlyuk 		return -1;
193108ac0358SDmitry Kozlyuk 	}
193208ac0358SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++) {
193308ac0358SDmitry Kozlyuk 		struct mlx5_mempool_mr *mr = &mpr->mrs[i];
193408ac0358SDmitry Kozlyuk 		struct mr_cache_entry entry;
193508ac0358SDmitry Kozlyuk 		uint32_t lkey;
193608ac0358SDmitry Kozlyuk 		uint16_t idx;
193708ac0358SDmitry Kozlyuk 
193808ac0358SDmitry Kozlyuk 		lkey = mr_btree_lookup(bt, &idx, (uintptr_t)mr->pmd_mr.addr);
193908ac0358SDmitry Kozlyuk 		if (lkey != UINT32_MAX)
194008ac0358SDmitry Kozlyuk 			continue;
194108ac0358SDmitry Kozlyuk 		if (bt->len == bt->size)
194208ac0358SDmitry Kozlyuk 			mr_btree_expand(bt, bt->size << 1);
194308ac0358SDmitry Kozlyuk 		entry.start = (uintptr_t)mr->pmd_mr.addr;
194408ac0358SDmitry Kozlyuk 		entry.end = entry.start + mr->pmd_mr.len;
194508ac0358SDmitry Kozlyuk 		entry.lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
194608ac0358SDmitry Kozlyuk 		if (mr_btree_insert(bt, &entry) < 0) {
194708ac0358SDmitry Kozlyuk 			DRV_LOG(ERR, "Cannot insert cache entry for mempool %s MR %08x",
194808ac0358SDmitry Kozlyuk 				mp->name, entry.lkey);
194908ac0358SDmitry Kozlyuk 			rte_errno = EINVAL;
195008ac0358SDmitry Kozlyuk 			return -1;
195108ac0358SDmitry Kozlyuk 		}
195208ac0358SDmitry Kozlyuk 	}
195308ac0358SDmitry Kozlyuk 	return 0;
195408ac0358SDmitry Kozlyuk }
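
/*
 * Sketch of the intended call order at Rx queue setup (hypothetical
 * helper): register the mempool first, then pre-populate the per-queue
 * cache so that the first received packets skip the bottom-half lookup.
 */
static __rte_unused int
example_prime_rxq_cache(struct mlx5_common_device *cdev,
			struct mlx5_mr_ctrl *mr_ctrl,
			struct rte_mempool *mp)
{
	if (mlx5_mr_mempool_register(cdev, mp, false) < 0 &&
	    rte_errno != EEXIST)
		return -1;
	return mlx5_mr_mempool_populate_cache(mr_ctrl, mp);
}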
195508ac0358SDmitry Kozlyuk 
195608ac0358SDmitry Kozlyuk /**
1957690b2a88SDmitry Kozlyuk  * Bottom-half lookup for the address from the mempool.
1958690b2a88SDmitry Kozlyuk  *
1959690b2a88SDmitry Kozlyuk  * @param mr_ctrl
1960690b2a88SDmitry Kozlyuk  *   Per-queue MR control handle.
1961690b2a88SDmitry Kozlyuk  * @param mp
1962690b2a88SDmitry Kozlyuk  *   Mempool containing the address.
1963690b2a88SDmitry Kozlyuk  * @param addr
1964690b2a88SDmitry Kozlyuk  *   Address to lookup.
1965690b2a88SDmitry Kozlyuk  * @return
1966690b2a88SDmitry Kozlyuk  *   MR lkey on success, UINT32_MAX on failure.
1967690b2a88SDmitry Kozlyuk  */
1968690b2a88SDmitry Kozlyuk uint32_t
196971304b5cSMichael Baum mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
1970690b2a88SDmitry Kozlyuk 		      struct rte_mempool *mp, uintptr_t addr)
1971690b2a88SDmitry Kozlyuk {
1972690b2a88SDmitry Kozlyuk 	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
1973690b2a88SDmitry Kozlyuk 	uint32_t lkey;
1974690b2a88SDmitry Kozlyuk 	uint16_t bh_idx = 0;
1975690b2a88SDmitry Kozlyuk 
1976690b2a88SDmitry Kozlyuk 	/* Binary-search MR translation table. */
1977690b2a88SDmitry Kozlyuk 	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
1978690b2a88SDmitry Kozlyuk 	/* Update top-half cache. */
1979690b2a88SDmitry Kozlyuk 	if (likely(lkey != UINT32_MAX)) {
1980690b2a88SDmitry Kozlyuk 		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
1981690b2a88SDmitry Kozlyuk 	} else {
198271304b5cSMichael Baum 		lkey = mlx5_lookup_mempool_regs(mr_ctrl, repl, mp, addr);
1983690b2a88SDmitry Kozlyuk 		/* Can only fail if the address is not from the mempool. */
1984690b2a88SDmitry Kozlyuk 		if (unlikely(lkey == UINT32_MAX))
1985690b2a88SDmitry Kozlyuk 			return UINT32_MAX;
1986690b2a88SDmitry Kozlyuk 	}
1987690b2a88SDmitry Kozlyuk 	/* Update the most recently used entry. */
1988690b2a88SDmitry Kozlyuk 	mr_ctrl->mru = mr_ctrl->head;
1989690b2a88SDmitry Kozlyuk 	/* Point to the next victim, the oldest. */
1990690b2a88SDmitry Kozlyuk 	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
1991690b2a88SDmitry Kozlyuk 	return lkey;
1992690b2a88SDmitry Kozlyuk }
1993fb690f71SMichael Baum 
19946a4e4385SMichael Baum uint32_t
199520489176SMichael Baum mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
1996fc59a1ecSMichael Baum {
199708ac0358SDmitry Kozlyuk 	struct rte_mempool *mp;
199808ac0358SDmitry Kozlyuk 	struct mlx5_mprq_buf *buf;
1999fc59a1ecSMichael Baum 	uint32_t lkey;
2000fc59a1ecSMichael Baum 	uintptr_t addr = (uintptr_t)mb->buf_addr;
200171304b5cSMichael Baum 	struct mlx5_mr_share_cache *share_cache =
200271304b5cSMichael Baum 		container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
200371304b5cSMichael Baum 			     dev_gen);
200471304b5cSMichael Baum 	struct mlx5_common_device *cdev =
200571304b5cSMichael Baum 		container_of(share_cache, struct mlx5_common_device, mr_scache);
200663625c5dSDmitry Kozlyuk 	bool external, mprq, pinned = false;
2007fc59a1ecSMichael Baum 
2008fc59a1ecSMichael Baum 	/* Recover MPRQ mempool. */
200963625c5dSDmitry Kozlyuk 	external = RTE_MBUF_HAS_EXTBUF(mb);
201063625c5dSDmitry Kozlyuk 	if (external && mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
201163625c5dSDmitry Kozlyuk 		mprq = true;
2012fc59a1ecSMichael Baum 		buf = mb->shinfo->fcb_opaque;
2013fc59a1ecSMichael Baum 		mp = buf->mp;
201408ac0358SDmitry Kozlyuk 	} else {
201563625c5dSDmitry Kozlyuk 		mprq = false;
201608ac0358SDmitry Kozlyuk 		mp = mlx5_mb2mp(mb);
201763625c5dSDmitry Kozlyuk 		pinned = rte_pktmbuf_priv_flags(mp) &
201863625c5dSDmitry Kozlyuk 			 RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
2019fc59a1ecSMichael Baum 	}
202063625c5dSDmitry Kozlyuk 	if (!external || mprq || pinned) {
202171304b5cSMichael Baum 		lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
2022fc59a1ecSMichael Baum 		if (lkey != UINT32_MAX)
2023fc59a1ecSMichael Baum 			return lkey;
202463625c5dSDmitry Kozlyuk 		/* MPRQ is always registered. */
202563625c5dSDmitry Kozlyuk 		MLX5_ASSERT(!mprq);
202663625c5dSDmitry Kozlyuk 	}
202708ac0358SDmitry Kozlyuk 	/* Register pinned external memory if the mempool is not used for Rx. */
202863625c5dSDmitry Kozlyuk 	if (cdev->config.mr_mempool_reg_en && pinned) {
202908ac0358SDmitry Kozlyuk 		if (mlx5_mr_mempool_register(cdev, mp, true) < 0)
203008ac0358SDmitry Kozlyuk 			return UINT32_MAX;
203108ac0358SDmitry Kozlyuk 		lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
203208ac0358SDmitry Kozlyuk 		MLX5_ASSERT(lkey != UINT32_MAX);
203308ac0358SDmitry Kozlyuk 		return lkey;
2034fc59a1ecSMichael Baum 	}
203508ac0358SDmitry Kozlyuk 	/* Fallback to generic mechanism in corner cases. */
203620489176SMichael Baum 	return mlx5_mr_addr2mr_bh(mr_ctrl, addr);
2037fc59a1ecSMichael Baum }
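
/*
 * Datapath sketch (hypothetical helper): the inline top half in
 * mlx5_common_mr.h is assumed to check the linear per-queue cache first,
 * so only a miss reaches this bottom half.
 */
static __rte_unused uint32_t
example_mb2lkey(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
{
	uint32_t lkey = mlx5_mr_mb2mr_bh(mr_ctrl, mb);

	/* UINT32_MAX means no MR covers the mbuf data address. */
	if (unlikely(lkey == UINT32_MAX))
		DRV_LOG(WARNING, "No MR covers mbuf address %p",
			mb->buf_addr);
	return lkey;
}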
2038