xref: /dpdk/drivers/common/mlx5/mlx5_common_mr.c (revision a5d06c90067b6c0c2facb9614f9b10b2a1f54ffc)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <stddef.h>

#include <rte_eal_memconfig.h>
#include <rte_eal_paging.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5_glue.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

/* Virtual memory range. */
struct mlx5_range {
	uintptr_t start;
	uintptr_t end;
};

/** Memory region for a mempool. */
struct mlx5_mempool_mr {
	struct mlx5_pmd_mr pmd_mr;
	uint32_t refcnt; /**< Number of mempools sharing this MR. */
};

/* Mempool registration. */
struct mlx5_mempool_reg {
	LIST_ENTRY(mlx5_mempool_reg) next;
	/** Registered mempool, used to designate registrations. */
	struct rte_mempool *mp;
	/** Memory regions for the address ranges of the mempool. */
	struct mlx5_mempool_mr *mrs;
	/** Number of memory regions. */
	unsigned int mrs_n;
};

/**
 * Expand B-tree table to a given size. Can't be called while holding
 * memory_hotplug_lock or share_cache.rwlock due to rte_realloc().
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is
	 * used inside if there's no room to expand. Because this is a quite
	 * rare case and part of a very slow path, it is acceptable.
	 * Initially cache_bh[] is given practically enough space, and once
	 * it has been expanded, expansion shouldn't be needed again.
	 */
	mem = mlx5_realloc(bt->table, MLX5_MEM_RTE | MLX5_MEM_ZERO,
			   n * sizeof(struct mr_cache_entry), 0, SOCKET_ID_ANY);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}
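
/*
 * Note: the only caller in this file grows the table geometrically; the
 * pattern, taken from mr_lookup_caches() below, is:
 *
 *	if (unlikely(bt->len == bt->size))
 *		mr_btree_expand(bt, bt->size << 1);
 */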

/**
 * Look up an LKey in the given B-tree lookup table, store the last index and
 * return the LKey found.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, it returns the index where the
 *   search stopped, so that the index can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	MLX5_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
				    lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	MLX5_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}
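
/*
 * Illustrative example (not part of the original code): with the sentinel at
 * index 0 and one real entry [0x100000, 0x200000) with lkey=0x11,
 * mr_btree_lookup(bt, &idx, 0x150000) converges on base=1 and returns 0x11,
 * while mr_btree_lookup(bt, &idx, 0x250000) also stops at idx=1 but returns
 * UINT32_MAX because the address is past that entry's end.
 */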

/**
 * Insert an entry into the B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	MLX5_ASSERT(bt != NULL);
	MLX5_ASSERT(bt->len <= bt->size);
	MLX5_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If the table is full, return an error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}
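
/*
 * Note: once bt->overflow is set above, the table no longer holds all MRs.
 * mlx5_mr_lookup_cache() detects this and falls back to scanning the
 * original MR list until the cache is flushed and rebuilt by
 * mlx5_mr_rebuild_cache().
 */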

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	MLX5_ASSERT(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				sizeof(struct mr_cache_entry) * n,
				0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DRV_LOG(DEBUG,
			"failed to allocate memory for btree cache on socket "
			"%d", socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mr_cache_entry) {
		.lkey = UINT32_MAX,
	};
	DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	return 0;
}

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	mlx5_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	int idx;
	struct mr_cache_entry *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mr_cache_entry *entry = &lkp_tbl[idx];

		DRV_LOG(DEBUG, "B-tree(%p)[%u],"
		      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}

/**
 * Initialize per-queue MR control descriptor.
 *
 * @param mr_ctrl
 *   Pointer to MR control structure.
 * @param dev_gen_ptr
 *   Pointer to generation number of global cache.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
		  int socket)
{
	if (mr_ctrl == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Save pointer of global generation number to check memory event. */
	mr_ctrl->dev_gen_ptr = dev_gen_ptr;
	/* Initialize B-tree and allocate memory for bottom-half cache table. */
	return mlx5_mr_btree_init(&mr_ctrl->cache_bh, MLX5_MR_BTREE_CACHE_N,
				  socket);
}
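
/*
 * Usage sketch (illustrative; "rxq" and the generation-counter field name
 * are assumptions, not taken from this file): a queue created on a given
 * NUMA socket wires its MR control structure to the global cache's
 * generation number at setup time:
 *
 *	mlx5_mr_ctrl_init(&rxq->mr_ctrl, &share_cache->dev_gen, socket);
 */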

/**
 * Find virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have a memseg list. */
	if (mr->msl == NULL) {
		MLX5_ASSERT(mr->ms_bmp_n == 1);
		MLX5_ASSERT(mr->ms_n == 1);
		MLX5_ASSERT(base_idx == 0);
		/*
		 * Can't search it from the memseg list but get it directly
		 * from pmd_mr as there's only one chunk.
		 */
		entry->start = (uintptr_t)mr->pmd_mr.addr;
		entry->end = (uintptr_t)mr->pmd_mr.addr + mr->pmd_mr.len;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
	}
	return idx;
}
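
/*
 * Illustrative example (not part of the original code): for an MR whose
 * memseg bitmap is {1, 1, 0, 1}, the first call with base_idx=0 fills
 * "entry" with the chunk covering memsegs 0-1 and returns 2; calling again
 * with index 2 yields the chunk for memseg 3 and returns 4, which ends the
 * iteration.
 */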

/**
 * Insert an MR into the global B-tree cache. It may fail if memory is low.
 * Then, this entry will have to be searched by mlx5_mr_lookup_list() in
 * mlx5_mr_create() on miss.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr)
{
	unsigned int n;

	DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
		(void *)mr, (void *)share_cache);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * because of a potential deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}
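
/*
 * Note: this list walk is linear in the number of MRs and their chunks. It
 * is the slow path that mlx5_mr_lookup_cache() below falls back to while the
 * B-tree cache is in the overflowed state.
 */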

/**
 * Look up address in the global MR cache.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr)
{
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed since it failed to expand the
	 * B-tree table, it can't have all the existing MRs. Then, the address
	 * has to be searched by traversing the original MR list instead, which
	 * is a very slow path. Otherwise, the global cache is all inclusive.
	 */
	if (!unlikely(share_cache->cache.overflow)) {
		lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*share_cache->cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mlx5_mr_lookup_list(share_cache, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}

/**
 * Free MR resources. The MR lock must not be held to avoid a deadlock:
 * rte_free() can raise a memory free event and the callback function would
 * spin on the lock.
 *
 * @param mr
 *   Pointer to MR to free.
 * @param dereg_mr_cb
 *   Callback to deregister the underlying memory region.
 */
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	dereg_mr_cb(&mr->pmd_mr);
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	mlx5_free(mr);
}

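/**
 * Flush the global B-tree cache of a device and rebuild it from the original
 * MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */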
void
mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
	/* Flush cache to rebuild. */
	share_cache->cache.len = 1;
	share_cache->cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr)
		if (mlx5_mr_insert_cache(share_cache, mr) < 0)
			return;
}

/**
 * Release resources of detached MRs having no online entry.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
static void
mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * An MR can't be freed while holding the lock because rte_free()
	 * could call the memory free callback function, which would be a
	 * deadlock situation.
	 */
	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = share_cache->mr_free_list;
	LIST_INIT(&share_cache->mr_free_list);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	}
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called from a secondary process; a request is then sent
 * to the primary process in order to create an MR for the address. As the
 * global MR list is in shared memory, the following LKey lookup should
 * succeed unless the request fails.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, may be NULL for the primary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_secondary(void *pd __rte_unused,
			 struct mlx5_mp_id *mp_id,
			 struct mlx5_mr_share_cache *share_cache,
			 struct mr_cache_entry *entry, uintptr_t addr,
			 unsigned int mr_ext_memseg_en __rte_unused)
{
	int ret;

	DRV_LOG(DEBUG, "port %u requesting MR creation for address (%p)",
	      mp_id->port_id, (void *)addr);
	ret = mlx5_mp_req_mr_create(mp_id, addr);
	if (ret) {
		DRV_LOG(DEBUG, "Failed to request MR creation for address (%p)",
		      (void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	DRV_LOG(DEBUG, "MR CREATED by primary process for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
	      (void *)addr, entry->start, entry->end, entry->lkey);
	return entry->lkey;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register entire virtually contiguous memory chunk around the address.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en)
{
	struct mr_find_contig_memsegs_data data = {.addr = addr, };
	struct mr_find_contig_memsegs_data data_re;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	int ms_idx_shift = -1;
	uint32_t bmp_size;
	void *bmp_mem;
	uint32_t ms_n;
	uint32_t n;
	size_t len;

	DRV_LOG(DEBUG, "Creating an MR using address (%p)", (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called while holding
	 * either memory_hotplug_lock or share_cache->rwlock. MRs on the free
	 * list have been detached by the memory free event but couldn't be
	 * released inside the callback due to deadlock. As a result, releasing
	 * resources is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(share_cache);
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register the maximum
	 * range. In the best case where mempools are not dynamically recreated
	 * and '--socket-mem' is specified as an EAL option, it is very likely
	 * to have only one MR(LKey) per socket and per hugepage size even
	 * though the system memory is highly fragmented. As the whole memory
	 * chunk will be pinned by the kernel, it can't be reused unless the
	 * entire chunk is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may drop performance if there
	 * are many MRs to look up on the datapath.
	 */
	if (!mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"Unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX5_ASSERT(data.msl);
	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX5_ASSERT(ms);
	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DRV_LOG(DEBUG, "Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " page_sz=0x%" PRIx64 ", ms_n=%u",
	      (void *)addr, data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE) +
			 bmp_size, RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DRV_LOG(DEBUG, "Unable to allocate memory for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize memseg bitmap. To
	 * see if a memseg of ms_idx in the memseg-list is still valid, check:
	 *	rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DRV_LOG(DEBUG, "Unable to initialize bitmap for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Should recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there's any memory
	 * related calls in a critical path, resource allocation above can't be
	 * locked. If the memory has been changed at this point, try again with
	 * just a single page. If not, go on with the big chunk atomically from
	 * here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DRV_LOG(DEBUG,
			"Unable to find virtually contiguous chunk for address "
			"(%p). rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with a single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		goto alloc_resources;
	}
	MLX5_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&share_cache->rwlock);
	/*
	 * Check that the address is really missing. If another thread already
	 * created one or it is not found due to overflow, abort and return.
	 */
	if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to
		 * low-on-memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&share_cache->cache, entry);
		DRV_LOG(DEBUG, "Found MR for %p on final lookup, abort",
			(void *)addr);
		rte_rwlock_write_unlock(&share_cache->rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mr_cache_entry ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
		    UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create an MR for the memory chunk. Verbs: ibv_reg_mr() can
	 * be called while holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern(), which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(DEBUG, "Failed to create an MR for address (%p)",
		      (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	MLX5_ASSERT((uintptr_t)mr->pmd_mr.addr == data.start);
	MLX5_ASSERT(mr->pmd_mr.len);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	DRV_LOG(DEBUG, "MR CREATED (%p) for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      (void *)mr, (void *)addr, data.start, data.end,
	      rte_cpu_to_be_32(mr->pmd_mr.lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&share_cache->rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, as this can be called in a datapath, a warning
	 * message per error is preferable to failing hard. Must be unlocked
	 * before calling rte_free() because mlx5_mr_mem_event_free_cb() can
	 * be called inside.
	 */
	mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	return UINT32_MAX;
}
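
/*
 * Locking-order note (derived from the code above): rte_mcfg_mem_read_lock()
 * is always taken before share_cache->rwlock, and both are dropped before
 * mlx5_mr_free() so that mlx5_mr_mem_event_free_cb() can run without
 * deadlocking.
 */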

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from the primary or a secondary process.
 *
 * @param pd
 *   Pointer to pd handle of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, may be NULL for the primary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create(void *pd, struct mlx5_mp_id *mp_id,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr,
	       unsigned int mr_ext_memseg_en)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_mr_create_primary(pd, share_cache, entry,
					     addr, mr_ext_memseg_en);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
					       addr, mr_ext_memseg_en);
		break;
	default:
		break;
	}
	return ret;
}

/**
 * Look up address in the global MR cache table. If not found, create a new
 * MR. Insert the found/created entry into the local bottom-half cache table.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, may be NULL for the primary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
		 struct mlx5_mr_share_cache *share_cache,
		 struct mlx5_mr_ctrl *mr_ctrl,
		 struct mr_cache_entry *entry, uintptr_t addr,
		 unsigned int mr_ext_memseg_en)
{
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint32_t lkey;
	uint16_t idx;

	/* If the local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*share_cache->cache.table)[idx];
		rte_rwlock_read_unlock(&share_cache->rwlock);
		/*
		 * Update local cache. Even if it fails, return the found entry
		 * to update top-half cache. Next time, this entry will be found
		 * in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&share_cache->rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
			      mr_ext_memseg_en);
	/*
	 * Update the local cache if a new global MR was successfully created.
	 * Even if it failed, there's no action to take in this datapath code.
	 * As the returned LKey is invalid, this will eventually make the HW
	 * fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on datapath. First search in cache_bh[] and on a
 * miss, search in the global MR cache table and update the new entry to the
 * per-queue local caches.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, may be NULL for the primary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in the local lookup table, search in the global
		 * cache; local cache_bh[] will be updated inside if possible.
		 * The top-half cache entry will also be updated.
		 */
		lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
					repl, addr, mr_ext_memseg_en);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}
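
/*
 * Recap (summary comment, not original code): the lookup hierarchy is
 * per-queue linear top-half cache[] -> per-queue bottom-half B-tree
 * cache_bh -> global share_cache B-tree -> original MR list -> MR creation
 * via mlx5_mr_create(). A miss at one level is resolved at the next and the
 * result is back-filled on the way out.
 */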

/**
 * Release all the created MRs and resources on the global MR cache of a
 * device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;

	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach from MR list and move to free list. */
	mr_next = LIST_FIRST(&share_cache->mr_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
	}
	LIST_INIT(&share_cache->mr_list);
	/* Free global cache. */
	mlx5_mr_btree_free(&share_cache->cache);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(share_cache);
}

/**
 * Initialize global MR cache of a device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket)
{
	/* Set the reg_mr and dereg_mr callback functions */
	mlx5_os_set_reg_mr_cb(&share_cache->reg_mr_cb,
			      &share_cache->dereg_mr_cb);
	rte_rwlock_init(&share_cache->rwlock);
	/* Initialize B-tree and allocate memory for global MR cache table. */
	return mlx5_mr_btree_init(&share_cache->cache,
				  MLX5_MR_BTREE_CACHE_N * 2, socket);
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR local cache.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}
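
/*
 * Note (inferred from this file, not stated explicitly): the global cache
 * bumps the generation number that dev_gen_ptr points at whenever it
 * changes; a queue whose cur_gen lags behind *dev_gen_ptr is expected to
 * call this flush so stale LKeys are dropped before the next bottom-half
 * lookup.
 */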
1106b8dc6b0eSVu Pham 
1107b8dc6b0eSVu Pham /**
1108b8dc6b0eSVu Pham  * Creates a memory region for external memory, that is memory which is not
1109b8dc6b0eSVu Pham  * part of the DPDK memory segments.
1110b8dc6b0eSVu Pham  *
1111b8dc6b0eSVu Pham  * @param pd
1112c4685016SOphir Munk  *   Pointer to pd of a device (net, regex, vdpa,...).
1113b8dc6b0eSVu Pham  * @param addr
1114b8dc6b0eSVu Pham  *   Starting virtual address of memory.
1115b8dc6b0eSVu Pham  * @param len
1116b8dc6b0eSVu Pham  *   Length of memory segment being mapped.
1117b8dc6b0eSVu Pham  * @param socked_id
1118b8dc6b0eSVu Pham  *   Socket to allocate heap memory for the control structures.
1119b8dc6b0eSVu Pham  *
1120b8dc6b0eSVu Pham  * @return
1121b8dc6b0eSVu Pham  *   Pointer to MR structure on success, NULL otherwise.
1122b8dc6b0eSVu Pham  */
1123b8dc6b0eSVu Pham struct mlx5_mr *
1124d5ed8aa9SOphir Munk mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
1125d5ed8aa9SOphir Munk 		   mlx5_reg_mr_t reg_mr_cb)
1126b8dc6b0eSVu Pham {
1127b8dc6b0eSVu Pham 	struct mlx5_mr *mr = NULL;
1128b8dc6b0eSVu Pham 
1129fd970a54SSuanming Mou 	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1130fd970a54SSuanming Mou 			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE),
1131b8dc6b0eSVu Pham 			 RTE_CACHE_LINE_SIZE, socket_id);
1132b8dc6b0eSVu Pham 	if (mr == NULL)
1133b8dc6b0eSVu Pham 		return NULL;
1134d5ed8aa9SOphir Munk 	reg_mr_cb(pd, (void *)addr, len, &mr->pmd_mr);
113558a17853SOphir Munk 	if (mr->pmd_mr.obj == NULL) {
1136b8dc6b0eSVu Pham 		DRV_LOG(WARNING,
113756d20677SOphir Munk 			"Failed to create MR for address (%p)",
1138b8dc6b0eSVu Pham 			(void *)addr);
1139fd970a54SSuanming Mou 		mlx5_free(mr);
1140b8dc6b0eSVu Pham 		return NULL;
1141b8dc6b0eSVu Pham 	}
1142b8dc6b0eSVu Pham 	mr->msl = NULL; /* Mark it as external memory. */
1143b8dc6b0eSVu Pham 	mr->ms_bmp = NULL;
1144b8dc6b0eSVu Pham 	mr->ms_n = 1;
1145b8dc6b0eSVu Pham 	mr->ms_bmp_n = 1;
1146b8dc6b0eSVu Pham 	DRV_LOG(DEBUG,
1147b8dc6b0eSVu Pham 		"MR CREATED (%p) for external memory %p:\n"
1148b8dc6b0eSVu Pham 		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
1149b8dc6b0eSVu Pham 		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
1150b8dc6b0eSVu Pham 		(void *)mr, (void *)addr,
115156d20677SOphir Munk 		addr, addr + len, rte_cpu_to_be_32(mr->pmd_mr.lkey),
1152b8dc6b0eSVu Pham 		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
1153b8dc6b0eSVu Pham 	return mr;
1154b8dc6b0eSVu Pham }
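
/*
 * Sketch of registering an externally allocated buffer (compiled out).
 * Assumptions: "pd" and "reg_mr_cb" come from an already initialized
 * device; insertion of the returned MR into the shared MR list is left
 * to the caller, as the individual drivers do.
 */
#ifdef MLX5_MR_USAGE_SKETCH
static struct mlx5_mr *
example_register_external_buf(void *pd, void *buf, size_t len,
			      mlx5_reg_mr_t reg_mr_cb)
{
	return mlx5_create_mr_ext(pd, (uintptr_t)buf, len, SOCKET_ID_ANY,
				  reg_mr_cb); /* NULL on failure. */
}
#endif /* MLX5_MR_USAGE_SKETCH */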
1155b8dc6b0eSVu Pham 
1156b8dc6b0eSVu Pham /**
11572f6c2adbSMichael Baum  * Callback for memory free event. Iterate freed memsegs and check whether
11582f6c2adbSMichael Baum  * each one belongs to an existing MR. If found, clear the corresponding bit
11592f6c2adbSMichael Baum  * in the MR's bitmap, fragmenting the MR. If the MR becomes empty, it will be
11602f6c2adbSMichael Baum  * freed later by mlx5_mr_garbage_collect(). Even if this callback is called
11612f6c2adbSMichael Baum  * from a secondary process, the garbage collector runs in the primary
11622f6c2adbSMichael Baum  * process, because a secondary process cannot call mlx5_mr_create().
11632f6c2adbSMichael Baum  *
11642f6c2adbSMichael Baum  * The global cache must be rebuilt if there's any change and this event has to
11652f6c2adbSMichael Baum  * be propagated to dataplane threads to flush the local caches.
11662f6c2adbSMichael Baum  *
11672f6c2adbSMichael Baum  * @param share_cache
11682f6c2adbSMichael Baum  *   Pointer to a global shared MR cache.
11692f6c2adbSMichael Baum  * @param ibdev_name
11702f6c2adbSMichael Baum  *   Name of ibv device.
11712f6c2adbSMichael Baum  * @param addr
11722f6c2adbSMichael Baum  *   Address of freed memory.
11732f6c2adbSMichael Baum  * @param len
11742f6c2adbSMichael Baum  *   Size of freed memory.
11752f6c2adbSMichael Baum  */
11762f6c2adbSMichael Baum void
11772f6c2adbSMichael Baum mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
11782f6c2adbSMichael Baum 		     const char *ibdev_name, const void *addr, size_t len)
11792f6c2adbSMichael Baum {
11802f6c2adbSMichael Baum 	const struct rte_memseg_list *msl;
11812f6c2adbSMichael Baum 	struct mlx5_mr *mr;
11822f6c2adbSMichael Baum 	int ms_n;
11832f6c2adbSMichael Baum 	int i;
11842f6c2adbSMichael Baum 	int rebuild = 0;
11852f6c2adbSMichael Baum 
11862f6c2adbSMichael Baum 	DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
11872f6c2adbSMichael Baum 		ibdev_name, addr, len);
11882f6c2adbSMichael Baum 	msl = rte_mem_virt2memseg_list(addr);
11892f6c2adbSMichael Baum 	/* addr and len must be page-aligned. */
11902f6c2adbSMichael Baum 	MLX5_ASSERT((uintptr_t)addr ==
11912f6c2adbSMichael Baum 		    RTE_ALIGN((uintptr_t)addr, msl->page_sz));
11922f6c2adbSMichael Baum 	MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
11932f6c2adbSMichael Baum 	ms_n = len / msl->page_sz;
11942f6c2adbSMichael Baum 	rte_rwlock_write_lock(&share_cache->rwlock);
11952f6c2adbSMichael Baum 	/* Clear bits of freed memsegs from MR. */
11962f6c2adbSMichael Baum 	for (i = 0; i < ms_n; ++i) {
11972f6c2adbSMichael Baum 		const struct rte_memseg *ms;
11982f6c2adbSMichael Baum 		struct mr_cache_entry entry;
11992f6c2adbSMichael Baum 		uintptr_t start;
12002f6c2adbSMichael Baum 		int ms_idx;
12012f6c2adbSMichael Baum 		uint32_t pos;
12022f6c2adbSMichael Baum 
12032f6c2adbSMichael Baum 		/* Find MR having this memseg. */
12042f6c2adbSMichael Baum 		start = (uintptr_t)addr + i * msl->page_sz;
12052f6c2adbSMichael Baum 		mr = mlx5_mr_lookup_list(share_cache, &entry, start);
12062f6c2adbSMichael Baum 		if (mr == NULL)
12072f6c2adbSMichael Baum 			continue;
12082f6c2adbSMichael Baum 		MLX5_ASSERT(mr->msl); /* Can't be external memory. */
12092f6c2adbSMichael Baum 		ms = rte_mem_virt2memseg((void *)start, msl);
12102f6c2adbSMichael Baum 		MLX5_ASSERT(ms != NULL);
12112f6c2adbSMichael Baum 		MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
12122f6c2adbSMichael Baum 		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
12132f6c2adbSMichael Baum 		pos = ms_idx - mr->ms_base_idx;
12142f6c2adbSMichael Baum 		MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
12152f6c2adbSMichael Baum 		MLX5_ASSERT(pos < mr->ms_bmp_n);
12162f6c2adbSMichael Baum 		DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
12172f6c2adbSMichael Baum 			ibdev_name, (void *)mr, pos, (void *)start);
12182f6c2adbSMichael Baum 		rte_bitmap_clear(mr->ms_bmp, pos);
12192f6c2adbSMichael Baum 		if (--mr->ms_n == 0) {
12202f6c2adbSMichael Baum 			LIST_REMOVE(mr, mr);
12212f6c2adbSMichael Baum 			LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
12222f6c2adbSMichael Baum 			DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
12232f6c2adbSMichael Baum 				ibdev_name, (void *)mr);
12242f6c2adbSMichael Baum 		}
12252f6c2adbSMichael Baum 		/*
12262f6c2adbSMichael Baum 		 * The MR is fragmented or will be freed; the global cache
12272f6c2adbSMichael Baum 		 * must be rebuilt.
12282f6c2adbSMichael Baum 		 */
12292f6c2adbSMichael Baum 		rebuild = 1;
12302f6c2adbSMichael Baum 	}
12312f6c2adbSMichael Baum 	if (rebuild) {
12322f6c2adbSMichael Baum 		mlx5_mr_rebuild_cache(share_cache);
12332f6c2adbSMichael Baum 		/*
12342f6c2adbSMichael Baum 		 * No explicit wmb is needed after updating dev_gen due to
12352f6c2adbSMichael Baum 		 * store-release ordering in unlock that provides the
12362f6c2adbSMichael Baum 		 * implicit barrier at the software visible level.
12372f6c2adbSMichael Baum 		 */
12382f6c2adbSMichael Baum 		++share_cache->dev_gen;
12392f6c2adbSMichael Baum 		DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
12402f6c2adbSMichael Baum 			share_cache->dev_gen);
12412f6c2adbSMichael Baum 	}
12422f6c2adbSMichael Baum 	rte_rwlock_write_unlock(&share_cache->rwlock);
12432f6c2adbSMichael Baum }
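
/*
 * Sketch of hooking the handler above into an EAL memory event callback
 * (compiled out). The device name "mlx5_0" and the wiring are illustrative
 * assumptions; the real driver dispatches from its shared device contexts.
 */
#ifdef MLX5_MR_USAGE_SKETCH
static void
example_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg)
{
	struct mlx5_mr_share_cache *sc = arg;

	if (event_type == RTE_MEM_EVENT_FREE)
		mlx5_free_mr_by_addr(sc, "mlx5_0", addr, len);
	/* RTE_MEM_EVENT_ALLOC needs no action: MRs are created on demand. */
}
/* Registered once with rte_mem_event_callback_register("example",
 * example_mem_event_cb, sc).
 */
#endif /* MLX5_MR_USAGE_SKETCH */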
12442f6c2adbSMichael Baum 
12452f6c2adbSMichael Baum /**
1246b8dc6b0eSVu Pham  * Dump all the created MRs and the global cache entries.
1247b8dc6b0eSVu Pham  *
1248b8dc6b0eSVu Pham  * @param share_cache
1249b8dc6b0eSVu Pham  *   Pointer to a global shared MR cache.
1250b8dc6b0eSVu Pham  */
1251b8dc6b0eSVu Pham void
1252b8dc6b0eSVu Pham mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
1253b8dc6b0eSVu Pham {
1254b8dc6b0eSVu Pham #ifdef RTE_LIBRTE_MLX5_DEBUG
1255b8dc6b0eSVu Pham 	struct mlx5_mr *mr;
1256b8dc6b0eSVu Pham 	int mr_n = 0;
1257b8dc6b0eSVu Pham 	int chunk_n = 0;
1258b8dc6b0eSVu Pham 
1259b8dc6b0eSVu Pham 	rte_rwlock_read_lock(&share_cache->rwlock);
1260b8dc6b0eSVu Pham 	/* Iterate all the existing MRs. */
1261b8dc6b0eSVu Pham 	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
1262b8dc6b0eSVu Pham 		unsigned int n;
1263b8dc6b0eSVu Pham 
126487acdcc7SThomas Monjalon 		DRV_LOG(DEBUG, "MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
126556d20677SOphir Munk 		      mr_n++, rte_cpu_to_be_32(mr->pmd_mr.lkey),
1266b8dc6b0eSVu Pham 		      mr->ms_n, mr->ms_bmp_n);
1267b8dc6b0eSVu Pham 		if (mr->ms_n == 0)
1268b8dc6b0eSVu Pham 			continue;
1269b8dc6b0eSVu Pham 		for (n = 0; n < mr->ms_bmp_n; ) {
1270b8dc6b0eSVu Pham 			struct mr_cache_entry ret = { 0, };
1271b8dc6b0eSVu Pham 
1272b8dc6b0eSVu Pham 			n = mr_find_next_chunk(mr, &ret, n);
1273b8dc6b0eSVu Pham 			if (!ret.end)
1274b8dc6b0eSVu Pham 				break;
127587acdcc7SThomas Monjalon 			DRV_LOG(DEBUG,
127687acdcc7SThomas Monjalon 				"  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
1277b8dc6b0eSVu Pham 				chunk_n++, ret.start, ret.end);
1278b8dc6b0eSVu Pham 		}
1279b8dc6b0eSVu Pham 	}
128087acdcc7SThomas Monjalon 	DRV_LOG(DEBUG, "Dumping global cache %p", (void *)share_cache);
1281b8dc6b0eSVu Pham 	mlx5_mr_btree_dump(&share_cache->cache);
1282b8dc6b0eSVu Pham 	rte_rwlock_read_unlock(&share_cache->rwlock);
1283b8dc6b0eSVu Pham #endif
1284b8dc6b0eSVu Pham }
1285690b2a88SDmitry Kozlyuk 
1286690b2a88SDmitry Kozlyuk static int
1287690b2a88SDmitry Kozlyuk mlx5_range_compare_start(const void *lhs, const void *rhs)
1288690b2a88SDmitry Kozlyuk {
1289690b2a88SDmitry Kozlyuk 	const struct mlx5_range *r1 = lhs, *r2 = rhs;
1290690b2a88SDmitry Kozlyuk 
1291690b2a88SDmitry Kozlyuk 	if (r1->start > r2->start)
1292690b2a88SDmitry Kozlyuk 		return 1;
1293690b2a88SDmitry Kozlyuk 	else if (r1->start < r2->start)
1294690b2a88SDmitry Kozlyuk 		return -1;
1295690b2a88SDmitry Kozlyuk 	return 0;
1296690b2a88SDmitry Kozlyuk }
1297690b2a88SDmitry Kozlyuk 
1298690b2a88SDmitry Kozlyuk static void
1299690b2a88SDmitry Kozlyuk mlx5_range_from_mempool_chunk(struct rte_mempool *mp, void *opaque,
1300690b2a88SDmitry Kozlyuk 			      struct rte_mempool_memhdr *memhdr,
1301690b2a88SDmitry Kozlyuk 			      unsigned int idx)
1302690b2a88SDmitry Kozlyuk {
1303690b2a88SDmitry Kozlyuk 	struct mlx5_range *ranges = opaque, *range = &ranges[idx];
1304690b2a88SDmitry Kozlyuk 	uint64_t page_size = rte_mem_page_size();
1305690b2a88SDmitry Kozlyuk 
1306690b2a88SDmitry Kozlyuk 	RTE_SET_USED(mp);
1307690b2a88SDmitry Kozlyuk 	range->start = RTE_ALIGN_FLOOR((uintptr_t)memhdr->addr, page_size);
1308690b2a88SDmitry Kozlyuk 	range->end = RTE_ALIGN_CEIL(range->start + memhdr->len, page_size);
1309690b2a88SDmitry Kozlyuk }
1310690b2a88SDmitry Kozlyuk 
1311690b2a88SDmitry Kozlyuk /**
1312690b2a88SDmitry Kozlyuk  * Get VA-contiguous ranges of the mempool memory.
1313690b2a88SDmitry Kozlyuk  * Each range start and end is aligned to the system page size.
1314690b2a88SDmitry Kozlyuk  *
1315690b2a88SDmitry Kozlyuk  * @param[in] mp
1316690b2a88SDmitry Kozlyuk  *   Analyzed mempool.
1317690b2a88SDmitry Kozlyuk  * @param[out] out
1318690b2a88SDmitry Kozlyuk  *   Receives the ranges; the caller must release them with free().
1319690b2a88SDmitry Kozlyuk  * @param[out] out_n
1320690b2a88SDmitry Kozlyuk  *   Receives the number of @p out elements.
1321690b2a88SDmitry Kozlyuk  *
1322690b2a88SDmitry Kozlyuk  * @return
1323690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure.
1324690b2a88SDmitry Kozlyuk  */
1325690b2a88SDmitry Kozlyuk static int
1326690b2a88SDmitry Kozlyuk mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,
1327690b2a88SDmitry Kozlyuk 			unsigned int *out_n)
1328690b2a88SDmitry Kozlyuk {
1329690b2a88SDmitry Kozlyuk 	struct mlx5_range *chunks;
1330690b2a88SDmitry Kozlyuk 	unsigned int chunks_n = mp->nb_mem_chunks, contig_n, i;
1331690b2a88SDmitry Kozlyuk 
1332690b2a88SDmitry Kozlyuk 	/* Collect page-aligned memory ranges of the mempool. */
1333690b2a88SDmitry Kozlyuk 	chunks = calloc(chunks_n, sizeof(chunks[0]));
1334690b2a88SDmitry Kozlyuk 	if (chunks == NULL)
1335690b2a88SDmitry Kozlyuk 		return -1;
1336690b2a88SDmitry Kozlyuk 	rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, chunks);
1337690b2a88SDmitry Kozlyuk 	/* Merge adjacent chunks and place them at the beginning. */
1338690b2a88SDmitry Kozlyuk 	qsort(chunks, chunks_n, sizeof(chunks[0]), mlx5_range_compare_start);
1339690b2a88SDmitry Kozlyuk 	contig_n = 1;
1340690b2a88SDmitry Kozlyuk 	for (i = 1; i < chunks_n; i++)
1341690b2a88SDmitry Kozlyuk 		if (chunks[i - 1].end != chunks[i].start) {
1342690b2a88SDmitry Kozlyuk 			chunks[contig_n - 1].end = chunks[i - 1].end;
1343690b2a88SDmitry Kozlyuk 			chunks[contig_n] = chunks[i];
1344690b2a88SDmitry Kozlyuk 			contig_n++;
1345690b2a88SDmitry Kozlyuk 		}
1346690b2a88SDmitry Kozlyuk 	/* Extend the last contiguous chunk to the end of the mempool. */
1347690b2a88SDmitry Kozlyuk 	chunks[contig_n - 1].end = chunks[i - 1].end;
1348690b2a88SDmitry Kozlyuk 	*out = chunks;
1349690b2a88SDmitry Kozlyuk 	*out_n = contig_n;
1350690b2a88SDmitry Kozlyuk 	return 0;
1351690b2a88SDmitry Kozlyuk }
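
/*
 * Worked example of the merge above, assuming 4 KiB pages and abridged
 * addresses: sorted chunks [0x1000, 0x3000) [0x3000, 0x5000) [0x9000, 0xa000)
 * collapse to [0x1000, 0x5000) [0x9000, 0xa000) with contig_n == 2, because
 * only the first two chunks are virtually adjacent.
 */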
1352690b2a88SDmitry Kozlyuk 
1353690b2a88SDmitry Kozlyuk /**
1354690b2a88SDmitry Kozlyuk  * Analyze mempool memory to select memory ranges to register.
1355690b2a88SDmitry Kozlyuk  *
1356690b2a88SDmitry Kozlyuk  * @param[in] mp
1357690b2a88SDmitry Kozlyuk  *   Mempool to analyze.
1358690b2a88SDmitry Kozlyuk  * @param[out] out
1359690b2a88SDmitry Kozlyuk  *   Receives memory ranges to register, aligned to the system page size.
1360690b2a88SDmitry Kozlyuk  *   The caller must release them with free().
1361690b2a88SDmitry Kozlyuk  * @param[out] out_n
1362690b2a88SDmitry Kozlyuk  *   Receives the number of @p out items.
1363690b2a88SDmitry Kozlyuk  * @param[out] share_hugepage
1364690b2a88SDmitry Kozlyuk  *   Receives True if the entire pool resides within a single hugepage.
1365690b2a88SDmitry Kozlyuk  *
1366690b2a88SDmitry Kozlyuk  * @return
1367690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure.
1368690b2a88SDmitry Kozlyuk  */
1369690b2a88SDmitry Kozlyuk static int
1370690b2a88SDmitry Kozlyuk mlx5_mempool_reg_analyze(struct rte_mempool *mp, struct mlx5_range **out,
1371690b2a88SDmitry Kozlyuk 			 unsigned int *out_n, bool *share_hugepage)
1372690b2a88SDmitry Kozlyuk {
1373690b2a88SDmitry Kozlyuk 	struct mlx5_range *ranges = NULL;
1374690b2a88SDmitry Kozlyuk 	unsigned int i, ranges_n = 0;
1375690b2a88SDmitry Kozlyuk 	struct rte_memseg_list *msl;
1376690b2a88SDmitry Kozlyuk 
1377690b2a88SDmitry Kozlyuk 	if (mlx5_get_mempool_ranges(mp, &ranges, &ranges_n) < 0) {
1378690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot get address ranges for mempool %s",
1379690b2a88SDmitry Kozlyuk 			mp->name);
1380690b2a88SDmitry Kozlyuk 		return -1;
1381690b2a88SDmitry Kozlyuk 	}
1382690b2a88SDmitry Kozlyuk 	/* Check if the hugepage of the pool can be shared. */
1383690b2a88SDmitry Kozlyuk 	*share_hugepage = false;
1384690b2a88SDmitry Kozlyuk 	msl = rte_mem_virt2memseg_list((void *)ranges[0].start);
1385690b2a88SDmitry Kozlyuk 	if (msl != NULL) {
1386690b2a88SDmitry Kozlyuk 		uint64_t hugepage_sz = 0;
1387690b2a88SDmitry Kozlyuk 
1388690b2a88SDmitry Kozlyuk 		/* Check that all ranges are on pages of the same size. */
1389690b2a88SDmitry Kozlyuk 		for (i = 0; i < ranges_n; i++) {
1390690b2a88SDmitry Kozlyuk 			if (hugepage_sz != 0 && hugepage_sz != msl->page_sz)
1391690b2a88SDmitry Kozlyuk 				break;
1392690b2a88SDmitry Kozlyuk 			hugepage_sz = msl->page_sz;
1393690b2a88SDmitry Kozlyuk 		}
1394690b2a88SDmitry Kozlyuk 		if (i == ranges_n) {
1395690b2a88SDmitry Kozlyuk 			/*
1396690b2a88SDmitry Kozlyuk 			 * If the entire pool is within one hugepage,
1397690b2a88SDmitry Kozlyuk 			 * combine all ranges into one of the hugepage size.
1398690b2a88SDmitry Kozlyuk 			 */
1399690b2a88SDmitry Kozlyuk 			uintptr_t reg_start = ranges[0].start;
1400690b2a88SDmitry Kozlyuk 			uintptr_t reg_end = ranges[ranges_n - 1].end;
1401690b2a88SDmitry Kozlyuk 			uintptr_t hugepage_start =
1402690b2a88SDmitry Kozlyuk 				RTE_ALIGN_FLOOR(reg_start, hugepage_sz);
1403690b2a88SDmitry Kozlyuk 			uintptr_t hugepage_end = hugepage_start + hugepage_sz;
1404690b2a88SDmitry Kozlyuk 			if (reg_end < hugepage_end) {
1405690b2a88SDmitry Kozlyuk 				ranges[0].start = hugepage_start;
1406690b2a88SDmitry Kozlyuk 				ranges[0].end = hugepage_end;
1407690b2a88SDmitry Kozlyuk 				ranges_n = 1;
1408690b2a88SDmitry Kozlyuk 				*share_hugepage = true;
1409690b2a88SDmitry Kozlyuk 			}
1410690b2a88SDmitry Kozlyuk 		}
1411690b2a88SDmitry Kozlyuk 	}
1412690b2a88SDmitry Kozlyuk 	*out = ranges;
1413690b2a88SDmitry Kozlyuk 	*out_n = ranges_n;
1414690b2a88SDmitry Kozlyuk 	return 0;
1415690b2a88SDmitry Kozlyuk }
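
/*
 * For example, ranges [0x200000, 0x230000) and [0x240000, 0x250000) that
 * both fall inside the same 2 MiB hugepage [0x200000, 0x400000) collapse
 * into the single range [0x200000, 0x400000) with *share_hugepage == true,
 * so one MR can be shared by every mempool inside that hugepage.
 */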
1416690b2a88SDmitry Kozlyuk 
1417690b2a88SDmitry Kozlyuk /** Create a registration object for the mempool. */
1418690b2a88SDmitry Kozlyuk static struct mlx5_mempool_reg *
1419690b2a88SDmitry Kozlyuk mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n)
1420690b2a88SDmitry Kozlyuk {
1421690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr = NULL;
1422690b2a88SDmitry Kozlyuk 
1423690b2a88SDmitry Kozlyuk 	mpr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1424690b2a88SDmitry Kozlyuk 			  sizeof(*mpr) + mrs_n * sizeof(mpr->mrs[0]),
1425690b2a88SDmitry Kozlyuk 			  RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1426690b2a88SDmitry Kozlyuk 	if (mpr == NULL) {
1427690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot allocate mempool %s registration object",
1428690b2a88SDmitry Kozlyuk 			mp->name);
1429690b2a88SDmitry Kozlyuk 		return NULL;
1430690b2a88SDmitry Kozlyuk 	}
1431690b2a88SDmitry Kozlyuk 	mpr->mp = mp;
1432690b2a88SDmitry Kozlyuk 	mpr->mrs = (struct mlx5_mempool_mr *)(mpr + 1);
1433690b2a88SDmitry Kozlyuk 	mpr->mrs_n = mrs_n;
1434690b2a88SDmitry Kozlyuk 	return mpr;
1435690b2a88SDmitry Kozlyuk }
1436690b2a88SDmitry Kozlyuk 
1437690b2a88SDmitry Kozlyuk /**
1438690b2a88SDmitry Kozlyuk  * Destroy a mempool registration object.
1439690b2a88SDmitry Kozlyuk  *
1440690b2a88SDmitry Kozlyuk  * @param standalone
1441690b2a88SDmitry Kozlyuk  *   Whether @p mpr owns its MRs exclusively, i.e. they are not shared.
1442690b2a88SDmitry Kozlyuk  */
1443690b2a88SDmitry Kozlyuk static void
1444690b2a88SDmitry Kozlyuk mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,
1445690b2a88SDmitry Kozlyuk 			 struct mlx5_mempool_reg *mpr, bool standalone)
1446690b2a88SDmitry Kozlyuk {
1447690b2a88SDmitry Kozlyuk 	if (standalone) {
1448690b2a88SDmitry Kozlyuk 		unsigned int i;
1449690b2a88SDmitry Kozlyuk 
1450690b2a88SDmitry Kozlyuk 		for (i = 0; i < mpr->mrs_n; i++)
1451690b2a88SDmitry Kozlyuk 			share_cache->dereg_mr_cb(&mpr->mrs[i].pmd_mr);
1452690b2a88SDmitry Kozlyuk 	}
1453690b2a88SDmitry Kozlyuk 	mlx5_free(mpr);
1454690b2a88SDmitry Kozlyuk }
1455690b2a88SDmitry Kozlyuk 
1456690b2a88SDmitry Kozlyuk /** Find registration object of a mempool. */
1457690b2a88SDmitry Kozlyuk static struct mlx5_mempool_reg *
1458690b2a88SDmitry Kozlyuk mlx5_mempool_reg_lookup(struct mlx5_mr_share_cache *share_cache,
1459690b2a88SDmitry Kozlyuk 			struct rte_mempool *mp)
1460690b2a88SDmitry Kozlyuk {
1461690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
1462690b2a88SDmitry Kozlyuk 
1463690b2a88SDmitry Kozlyuk 	LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
1464690b2a88SDmitry Kozlyuk 		if (mpr->mp == mp)
1465690b2a88SDmitry Kozlyuk 			break;
1466690b2a88SDmitry Kozlyuk 	return mpr;
1467690b2a88SDmitry Kozlyuk }
1468690b2a88SDmitry Kozlyuk 
1469690b2a88SDmitry Kozlyuk /** Increment reference counters of MRs used in the registration. */
1470690b2a88SDmitry Kozlyuk static void
1471690b2a88SDmitry Kozlyuk mlx5_mempool_reg_attach(struct mlx5_mempool_reg *mpr)
1472690b2a88SDmitry Kozlyuk {
1473690b2a88SDmitry Kozlyuk 	unsigned int i;
1474690b2a88SDmitry Kozlyuk 
1475690b2a88SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++)
1476690b2a88SDmitry Kozlyuk 		__atomic_add_fetch(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
1477690b2a88SDmitry Kozlyuk }
1478690b2a88SDmitry Kozlyuk 
1479690b2a88SDmitry Kozlyuk /**
1480690b2a88SDmitry Kozlyuk  * Decrement reference counters of MRs used in the registration.
1481690b2a88SDmitry Kozlyuk  *
1482690b2a88SDmitry Kozlyuk  * @return True if no more references to @p mpr MRs exist, False otherwise.
1483690b2a88SDmitry Kozlyuk  */
1484690b2a88SDmitry Kozlyuk static bool
1485690b2a88SDmitry Kozlyuk mlx5_mempool_reg_detach(struct mlx5_mempool_reg *mpr)
1486690b2a88SDmitry Kozlyuk {
1487690b2a88SDmitry Kozlyuk 	unsigned int i;
1488690b2a88SDmitry Kozlyuk 	bool ret = false;
1489690b2a88SDmitry Kozlyuk 
1490690b2a88SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++)
1491690b2a88SDmitry Kozlyuk 		ret |= __atomic_sub_fetch(&mpr->mrs[i].refcnt, 1,
1492690b2a88SDmitry Kozlyuk 					  __ATOMIC_RELAXED) == 0;
1493690b2a88SDmitry Kozlyuk 	return ret;
1494690b2a88SDmitry Kozlyuk }
1495690b2a88SDmitry Kozlyuk 
1496690b2a88SDmitry Kozlyuk static int
1497690b2a88SDmitry Kozlyuk mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
1498690b2a88SDmitry Kozlyuk 				 void *pd, struct rte_mempool *mp)
1499690b2a88SDmitry Kozlyuk {
1500690b2a88SDmitry Kozlyuk 	struct mlx5_range *ranges = NULL;
1501690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr, *new_mpr;
1502690b2a88SDmitry Kozlyuk 	unsigned int i, ranges_n;
1503690b2a88SDmitry Kozlyuk 	bool share_hugepage;
1504690b2a88SDmitry Kozlyuk 	int ret = -1;
1505690b2a88SDmitry Kozlyuk 
1506690b2a88SDmitry Kozlyuk 	/* Early check to avoid unnecessary creation of MRs. */
1507690b2a88SDmitry Kozlyuk 	rte_rwlock_read_lock(&share_cache->rwlock);
1508690b2a88SDmitry Kozlyuk 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1509690b2a88SDmitry Kozlyuk 	rte_rwlock_read_unlock(&share_cache->rwlock);
1510690b2a88SDmitry Kozlyuk 	if (mpr != NULL) {
1511690b2a88SDmitry Kozlyuk 		DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
1512690b2a88SDmitry Kozlyuk 			mp->name, pd);
1513690b2a88SDmitry Kozlyuk 		rte_errno = EEXIST;
1514690b2a88SDmitry Kozlyuk 		goto exit;
1515690b2a88SDmitry Kozlyuk 	}
1516690b2a88SDmitry Kozlyuk 	if (mlx5_mempool_reg_analyze(mp, &ranges, &ranges_n,
1517690b2a88SDmitry Kozlyuk 				     &share_hugepage) < 0) {
1518690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot get mempool %s memory ranges", mp->name);
1519690b2a88SDmitry Kozlyuk 		rte_errno = ENOMEM;
1520690b2a88SDmitry Kozlyuk 		goto exit;
1521690b2a88SDmitry Kozlyuk 	}
1522690b2a88SDmitry Kozlyuk 	new_mpr = mlx5_mempool_reg_create(mp, ranges_n);
1523690b2a88SDmitry Kozlyuk 	if (new_mpr == NULL) {
1524690b2a88SDmitry Kozlyuk 		DRV_LOG(ERR,
1525690b2a88SDmitry Kozlyuk 			"Cannot create a registration object for mempool %s in PD %p",
1526690b2a88SDmitry Kozlyuk 			mp->name, pd);
1527690b2a88SDmitry Kozlyuk 		rte_errno = ENOMEM;
1528690b2a88SDmitry Kozlyuk 		goto exit;
1529690b2a88SDmitry Kozlyuk 	}
1530690b2a88SDmitry Kozlyuk 	/*
1531690b2a88SDmitry Kozlyuk 	 * If the entire mempool fits in a single hugepage, the MR for this
1532690b2a88SDmitry Kozlyuk 	 * hugepage can be shared across mempools that also fit in it.
1533690b2a88SDmitry Kozlyuk 	 */
1534690b2a88SDmitry Kozlyuk 	if (share_hugepage) {
1535690b2a88SDmitry Kozlyuk 		rte_rwlock_write_lock(&share_cache->rwlock);
1536690b2a88SDmitry Kozlyuk 		LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next) {
1537690b2a88SDmitry Kozlyuk 			if (mpr->mrs[0].pmd_mr.addr == (void *)ranges[0].start)
1538690b2a88SDmitry Kozlyuk 				break;
1539690b2a88SDmitry Kozlyuk 		}
1540690b2a88SDmitry Kozlyuk 		if (mpr != NULL) {
1541690b2a88SDmitry Kozlyuk 			new_mpr->mrs = mpr->mrs;
1542690b2a88SDmitry Kozlyuk 			mlx5_mempool_reg_attach(new_mpr);
1543690b2a88SDmitry Kozlyuk 			LIST_INSERT_HEAD(&share_cache->mempool_reg_list,
1544690b2a88SDmitry Kozlyuk 					 new_mpr, next);
1545690b2a88SDmitry Kozlyuk 		}
1546690b2a88SDmitry Kozlyuk 		rte_rwlock_write_unlock(&share_cache->rwlock);
1547690b2a88SDmitry Kozlyuk 		if (mpr != NULL) {
1548690b2a88SDmitry Kozlyuk 			DRV_LOG(DEBUG, "Shared MR %#x in PD %p for mempool %s with mempool %s",
1549690b2a88SDmitry Kozlyuk 				mpr->mrs[0].pmd_mr.lkey, pd, mp->name,
1550690b2a88SDmitry Kozlyuk 				mpr->mp->name);
1551690b2a88SDmitry Kozlyuk 			ret = 0;
1552690b2a88SDmitry Kozlyuk 			goto exit;
1553690b2a88SDmitry Kozlyuk 		}
1554690b2a88SDmitry Kozlyuk 	}
1555690b2a88SDmitry Kozlyuk 	for (i = 0; i < ranges_n; i++) {
1556690b2a88SDmitry Kozlyuk 		struct mlx5_mempool_mr *mr = &new_mpr->mrs[i];
1557690b2a88SDmitry Kozlyuk 		const struct mlx5_range *range = &ranges[i];
1558690b2a88SDmitry Kozlyuk 		size_t len = range->end - range->start;
1559690b2a88SDmitry Kozlyuk 
1560690b2a88SDmitry Kozlyuk 		if (share_cache->reg_mr_cb(pd, (void *)range->start, len,
1561690b2a88SDmitry Kozlyuk 		    &mr->pmd_mr) < 0) {
1562690b2a88SDmitry Kozlyuk 			DRV_LOG(ERR,
1563690b2a88SDmitry Kozlyuk 				"Failed to create an MR in PD %p for address range "
1564690b2a88SDmitry Kozlyuk 				"[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
1565690b2a88SDmitry Kozlyuk 				pd, range->start, range->end, len, mp->name);
1566690b2a88SDmitry Kozlyuk 			break;
1567690b2a88SDmitry Kozlyuk 		}
1568690b2a88SDmitry Kozlyuk 		DRV_LOG(DEBUG,
1569690b2a88SDmitry Kozlyuk 			"Created a new MR %#x in PD %p for address range "
1570690b2a88SDmitry Kozlyuk 			"[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
1571690b2a88SDmitry Kozlyuk 			mr->pmd_mr.lkey, pd, range->start, range->end, len,
1572690b2a88SDmitry Kozlyuk 			mp->name);
1573690b2a88SDmitry Kozlyuk 	}
1574690b2a88SDmitry Kozlyuk 	if (i != ranges_n) {
1575690b2a88SDmitry Kozlyuk 		mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
1576690b2a88SDmitry Kozlyuk 		rte_errno = EINVAL;
1577690b2a88SDmitry Kozlyuk 		goto exit;
1578690b2a88SDmitry Kozlyuk 	}
1579690b2a88SDmitry Kozlyuk 	/* Concurrent registration is not supposed to happen. */
1580690b2a88SDmitry Kozlyuk 	rte_rwlock_write_lock(&share_cache->rwlock);
1581690b2a88SDmitry Kozlyuk 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1582690b2a88SDmitry Kozlyuk 	if (mpr == NULL) {
1583690b2a88SDmitry Kozlyuk 		mlx5_mempool_reg_attach(new_mpr);
1584690b2a88SDmitry Kozlyuk 		LIST_INSERT_HEAD(&share_cache->mempool_reg_list,
1585690b2a88SDmitry Kozlyuk 				 new_mpr, next);
1586690b2a88SDmitry Kozlyuk 		ret = 0;
1587690b2a88SDmitry Kozlyuk 	}
1588690b2a88SDmitry Kozlyuk 	rte_rwlock_write_unlock(&share_cache->rwlock);
1589690b2a88SDmitry Kozlyuk 	if (mpr != NULL) {
1590690b2a88SDmitry Kozlyuk 		DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
1591690b2a88SDmitry Kozlyuk 			mp->name, pd);
1592690b2a88SDmitry Kozlyuk 		mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
1593690b2a88SDmitry Kozlyuk 		rte_errno = EEXIST;
1594690b2a88SDmitry Kozlyuk 		goto exit;
1595690b2a88SDmitry Kozlyuk 	}
1596690b2a88SDmitry Kozlyuk exit:
1597690b2a88SDmitry Kozlyuk 	free(ranges);
1598690b2a88SDmitry Kozlyuk 	return ret;
1599690b2a88SDmitry Kozlyuk }
1600690b2a88SDmitry Kozlyuk 
1601690b2a88SDmitry Kozlyuk static int
1602690b2a88SDmitry Kozlyuk mlx5_mr_mempool_register_secondary(struct mlx5_mr_share_cache *share_cache,
1603690b2a88SDmitry Kozlyuk 				   void *pd, struct rte_mempool *mp,
1604690b2a88SDmitry Kozlyuk 				   struct mlx5_mp_id *mp_id)
1605690b2a88SDmitry Kozlyuk {
1606690b2a88SDmitry Kozlyuk 	if (mp_id == NULL) {
1607690b2a88SDmitry Kozlyuk 		rte_errno = EINVAL;
1608690b2a88SDmitry Kozlyuk 		return -1;
1609690b2a88SDmitry Kozlyuk 	}
1610690b2a88SDmitry Kozlyuk 	return mlx5_mp_req_mempool_reg(mp_id, share_cache, pd, mp, true);
1611690b2a88SDmitry Kozlyuk }
1612690b2a88SDmitry Kozlyuk 
1613690b2a88SDmitry Kozlyuk /**
1614690b2a88SDmitry Kozlyuk  * Register the memory of a mempool in the protection domain.
1615690b2a88SDmitry Kozlyuk  *
1616690b2a88SDmitry Kozlyuk  * @param share_cache
1617690b2a88SDmitry Kozlyuk  *   Shared MR cache of the protection domain.
1618690b2a88SDmitry Kozlyuk  * @param pd
1619690b2a88SDmitry Kozlyuk  *   Protection domain object.
1620690b2a88SDmitry Kozlyuk  * @param mp
1621690b2a88SDmitry Kozlyuk  *   Mempool to register.
1622690b2a88SDmitry Kozlyuk  * @param mp_id
1623690b2a88SDmitry Kozlyuk  *   Multi-process identifier, may be NULL for the primary process.
1624690b2a88SDmitry Kozlyuk  *
1625690b2a88SDmitry Kozlyuk  * @return
1626690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure and rte_errno is set.
1627690b2a88SDmitry Kozlyuk  */
1628690b2a88SDmitry Kozlyuk int
1629690b2a88SDmitry Kozlyuk mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,
1630690b2a88SDmitry Kozlyuk 			 struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
1631690b2a88SDmitry Kozlyuk {
1632c47d7b90SAndrew Rybchenko 	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
1633690b2a88SDmitry Kozlyuk 		return 0;
1634690b2a88SDmitry Kozlyuk 	switch (rte_eal_process_type()) {
1635690b2a88SDmitry Kozlyuk 	case RTE_PROC_PRIMARY:
1636690b2a88SDmitry Kozlyuk 		return mlx5_mr_mempool_register_primary(share_cache, pd, mp);
1637690b2a88SDmitry Kozlyuk 	case RTE_PROC_SECONDARY:
1638690b2a88SDmitry Kozlyuk 		return mlx5_mr_mempool_register_secondary(share_cache, pd, mp,
1639690b2a88SDmitry Kozlyuk 							  mp_id);
1640690b2a88SDmitry Kozlyuk 	default:
1641690b2a88SDmitry Kozlyuk 		return -1;
1642690b2a88SDmitry Kozlyuk 	}
1643690b2a88SDmitry Kozlyuk }
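
/*
 * Control-path sketch (compiled out): registering an Rx mempool at queue
 * setup time. The field names cdev->mr_scache and cdev->pd follow their
 * use in mlx5_mr_mb2mr() below; the EEXIST tolerance is an assumption
 * about the caller's policy.
 */
#ifdef MLX5_MR_USAGE_SKETCH
static int
example_register_rx_mempool(struct mlx5_common_device *cdev,
			    struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
{
	int ret = mlx5_mr_mempool_register(&cdev->mr_scache, cdev->pd,
					   mp, mp_id);

	/* A mempool that is already registered is usable as-is. */
	if (ret < 0 && rte_errno == EEXIST)
		ret = 0;
	return ret;
}
#endif /* MLX5_MR_USAGE_SKETCH */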
1644690b2a88SDmitry Kozlyuk 
1645690b2a88SDmitry Kozlyuk static int
1646690b2a88SDmitry Kozlyuk mlx5_mr_mempool_unregister_primary(struct mlx5_mr_share_cache *share_cache,
1647690b2a88SDmitry Kozlyuk 				   struct rte_mempool *mp)
1648690b2a88SDmitry Kozlyuk {
1649690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
1650690b2a88SDmitry Kozlyuk 	bool standalone = false;
1651690b2a88SDmitry Kozlyuk 
1652690b2a88SDmitry Kozlyuk 	rte_rwlock_write_lock(&share_cache->rwlock);
1653690b2a88SDmitry Kozlyuk 	LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
1654690b2a88SDmitry Kozlyuk 		if (mpr->mp == mp) {
1655690b2a88SDmitry Kozlyuk 			LIST_REMOVE(mpr, next);
1656690b2a88SDmitry Kozlyuk 			standalone = mlx5_mempool_reg_detach(mpr);
1657690b2a88SDmitry Kozlyuk 			if (standalone)
1658690b2a88SDmitry Kozlyuk 				/*
1659690b2a88SDmitry Kozlyuk 				 * The unlock operation below provides a memory
1660690b2a88SDmitry Kozlyuk 				 * barrier due to its store-release semantics.
1661690b2a88SDmitry Kozlyuk 				 */
1662690b2a88SDmitry Kozlyuk 				++share_cache->dev_gen;
1663690b2a88SDmitry Kozlyuk 			break;
1664690b2a88SDmitry Kozlyuk 		}
1665690b2a88SDmitry Kozlyuk 	rte_rwlock_write_unlock(&share_cache->rwlock);
1666690b2a88SDmitry Kozlyuk 	if (mpr == NULL) {
1667690b2a88SDmitry Kozlyuk 		rte_errno = ENOENT;
1668690b2a88SDmitry Kozlyuk 		return -1;
1669690b2a88SDmitry Kozlyuk 	}
1670690b2a88SDmitry Kozlyuk 	mlx5_mempool_reg_destroy(share_cache, mpr, standalone);
1671690b2a88SDmitry Kozlyuk 	return 0;
1672690b2a88SDmitry Kozlyuk }
1673690b2a88SDmitry Kozlyuk 
1674690b2a88SDmitry Kozlyuk static int
1675690b2a88SDmitry Kozlyuk mlx5_mr_mempool_unregister_secondary(struct mlx5_mr_share_cache *share_cache,
1676690b2a88SDmitry Kozlyuk 				     struct rte_mempool *mp,
1677690b2a88SDmitry Kozlyuk 				     struct mlx5_mp_id *mp_id)
1678690b2a88SDmitry Kozlyuk {
1679690b2a88SDmitry Kozlyuk 	if (mp_id == NULL) {
1680690b2a88SDmitry Kozlyuk 		rte_errno = EINVAL;
1681690b2a88SDmitry Kozlyuk 		return -1;
1682690b2a88SDmitry Kozlyuk 	}
1683690b2a88SDmitry Kozlyuk 	return mlx5_mp_req_mempool_reg(mp_id, share_cache, NULL, mp, false);
1684690b2a88SDmitry Kozlyuk }
1685690b2a88SDmitry Kozlyuk 
1686690b2a88SDmitry Kozlyuk /**
1687690b2a88SDmitry Kozlyuk  * Unregister the memory of a mempool from the protection domain.
1688690b2a88SDmitry Kozlyuk  *
1689690b2a88SDmitry Kozlyuk  * @param share_cache
1690690b2a88SDmitry Kozlyuk  *   Shared MR cache of the protection domain.
1691690b2a88SDmitry Kozlyuk  * @param mp
1692690b2a88SDmitry Kozlyuk  *   Mempool to unregister.
1693690b2a88SDmitry Kozlyuk  * @param mp_id
1694690b2a88SDmitry Kozlyuk  *   Multi-process identifier, may be NULL for the primary process.
1695690b2a88SDmitry Kozlyuk  *
1696690b2a88SDmitry Kozlyuk  * @return
1697690b2a88SDmitry Kozlyuk  *   0 on success, (-1) on failure and rte_errno is set.
1698690b2a88SDmitry Kozlyuk  */
1699690b2a88SDmitry Kozlyuk int
1700690b2a88SDmitry Kozlyuk mlx5_mr_mempool_unregister(struct mlx5_mr_share_cache *share_cache,
1701690b2a88SDmitry Kozlyuk 			   struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
1702690b2a88SDmitry Kozlyuk {
1703c47d7b90SAndrew Rybchenko 	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
1704690b2a88SDmitry Kozlyuk 		return 0;
1705690b2a88SDmitry Kozlyuk 	switch (rte_eal_process_type()) {
1706690b2a88SDmitry Kozlyuk 	case RTE_PROC_PRIMARY:
1707690b2a88SDmitry Kozlyuk 		return mlx5_mr_mempool_unregister_primary(share_cache, mp);
1708690b2a88SDmitry Kozlyuk 	case RTE_PROC_SECONDARY:
1709690b2a88SDmitry Kozlyuk 		return mlx5_mr_mempool_unregister_secondary(share_cache, mp,
1710690b2a88SDmitry Kozlyuk 							    mp_id);
1711690b2a88SDmitry Kozlyuk 	default:
1712690b2a88SDmitry Kozlyuk 		return -1;
1713690b2a88SDmitry Kozlyuk 	}
1714690b2a88SDmitry Kozlyuk }
1715690b2a88SDmitry Kozlyuk 
1716690b2a88SDmitry Kozlyuk /**
1717690b2a88SDmitry Kozlyuk  * Look up an MR key by address in a registered mempool.
1718690b2a88SDmitry Kozlyuk  *
1719690b2a88SDmitry Kozlyuk  * @param mpr
1720690b2a88SDmitry Kozlyuk  *   Mempool registration object.
1721690b2a88SDmitry Kozlyuk  * @param addr
1722690b2a88SDmitry Kozlyuk  *   Address within the mempool.
1723690b2a88SDmitry Kozlyuk  * @param entry
1724690b2a88SDmitry Kozlyuk  *   Bottom-half cache entry to fill.
1725690b2a88SDmitry Kozlyuk  *
1726690b2a88SDmitry Kozlyuk  * @return
1727690b2a88SDmitry Kozlyuk  *   MR key or UINT32_MAX on failure, which can only happen
1728690b2a88SDmitry Kozlyuk  *   if the address is not from within the mempool.
1729690b2a88SDmitry Kozlyuk  */
1730690b2a88SDmitry Kozlyuk static uint32_t
1731690b2a88SDmitry Kozlyuk mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr,
1732690b2a88SDmitry Kozlyuk 			 struct mr_cache_entry *entry)
1733690b2a88SDmitry Kozlyuk {
1734690b2a88SDmitry Kozlyuk 	uint32_t lkey = UINT32_MAX;
1735690b2a88SDmitry Kozlyuk 	unsigned int i;
1736690b2a88SDmitry Kozlyuk 
1737690b2a88SDmitry Kozlyuk 	for (i = 0; i < mpr->mrs_n; i++) {
1738690b2a88SDmitry Kozlyuk 		const struct mlx5_pmd_mr *mr = &mpr->mrs[i].pmd_mr;
1739690b2a88SDmitry Kozlyuk 		uintptr_t mr_addr = (uintptr_t)mr->addr;
1740690b2a88SDmitry Kozlyuk 
1741690b2a88SDmitry Kozlyuk 		if (mr_addr <= addr) {
1742690b2a88SDmitry Kozlyuk 			lkey = rte_cpu_to_be_32(mr->lkey);
1743690b2a88SDmitry Kozlyuk 			entry->start = mr_addr;
1744690b2a88SDmitry Kozlyuk 			entry->end = mr_addr + mr->len;
1745690b2a88SDmitry Kozlyuk 			entry->lkey = lkey;
1746690b2a88SDmitry Kozlyuk 			break;
1747690b2a88SDmitry Kozlyuk 		}
1748690b2a88SDmitry Kozlyuk 	}
1749690b2a88SDmitry Kozlyuk 	return lkey;
1750690b2a88SDmitry Kozlyuk }
1751690b2a88SDmitry Kozlyuk 
1752690b2a88SDmitry Kozlyuk /**
1753690b2a88SDmitry Kozlyuk  * Update bottom-half cache from the list of mempool registrations.
1754690b2a88SDmitry Kozlyuk  *
1755690b2a88SDmitry Kozlyuk  * @param share_cache
1756690b2a88SDmitry Kozlyuk  *   Pointer to a global shared MR cache.
1757690b2a88SDmitry Kozlyuk  * @param mr_ctrl
1758690b2a88SDmitry Kozlyuk  *   Per-queue MR control handle.
1759690b2a88SDmitry Kozlyuk  * @param entry
1760690b2a88SDmitry Kozlyuk  *   Pointer to an entry in the bottom-half cache to update
1761690b2a88SDmitry Kozlyuk  *   with the MR lkey looked up.
1762690b2a88SDmitry Kozlyuk  * @param mp
1763690b2a88SDmitry Kozlyuk  *   Mempool containing the address.
1764690b2a88SDmitry Kozlyuk  * @param addr
1765690b2a88SDmitry Kozlyuk  *   Address to lookup.
1766690b2a88SDmitry Kozlyuk  * @return
1767690b2a88SDmitry Kozlyuk  *   MR lkey on success, UINT32_MAX on failure.
1768690b2a88SDmitry Kozlyuk  */
1769690b2a88SDmitry Kozlyuk static uint32_t
1770690b2a88SDmitry Kozlyuk mlx5_lookup_mempool_regs(struct mlx5_mr_share_cache *share_cache,
1771690b2a88SDmitry Kozlyuk 			 struct mlx5_mr_ctrl *mr_ctrl,
1772690b2a88SDmitry Kozlyuk 			 struct mr_cache_entry *entry,
1773690b2a88SDmitry Kozlyuk 			 struct rte_mempool *mp, uintptr_t addr)
1774690b2a88SDmitry Kozlyuk {
1775690b2a88SDmitry Kozlyuk 	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
1776690b2a88SDmitry Kozlyuk 	struct mlx5_mempool_reg *mpr;
1777690b2a88SDmitry Kozlyuk 	uint32_t lkey = UINT32_MAX;
1778690b2a88SDmitry Kozlyuk 
1779690b2a88SDmitry Kozlyuk 	/* If local cache table is full, try to double it. */
1780690b2a88SDmitry Kozlyuk 	if (unlikely(bt->len == bt->size))
1781690b2a88SDmitry Kozlyuk 		mr_btree_expand(bt, bt->size << 1);
1782690b2a88SDmitry Kozlyuk 	/* Look up in mempool registrations. */
1783690b2a88SDmitry Kozlyuk 	rte_rwlock_read_lock(&share_cache->rwlock);
1784690b2a88SDmitry Kozlyuk 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1785690b2a88SDmitry Kozlyuk 	if (mpr != NULL)
1786690b2a88SDmitry Kozlyuk 		lkey = mlx5_mempool_reg_addr2mr(mpr, addr, entry);
1787690b2a88SDmitry Kozlyuk 	rte_rwlock_read_unlock(&share_cache->rwlock);
1788690b2a88SDmitry Kozlyuk 	/*
1789690b2a88SDmitry Kozlyuk 	 * Update local cache. Even if it fails, return the found entry
1790690b2a88SDmitry Kozlyuk 	 * to update top-half cache. Next time, this entry will be found
1791690b2a88SDmitry Kozlyuk 	 * in the global cache.
1792690b2a88SDmitry Kozlyuk 	 */
1793690b2a88SDmitry Kozlyuk 	if (lkey != UINT32_MAX)
1794690b2a88SDmitry Kozlyuk 		mr_btree_insert(bt, entry);
1795690b2a88SDmitry Kozlyuk 	return lkey;
1796690b2a88SDmitry Kozlyuk }
1797690b2a88SDmitry Kozlyuk 
1798690b2a88SDmitry Kozlyuk /**
1799690b2a88SDmitry Kozlyuk  * Bottom-half lookup for the address from the mempool.
1800690b2a88SDmitry Kozlyuk  *
1801690b2a88SDmitry Kozlyuk  * @param share_cache
1802690b2a88SDmitry Kozlyuk  *   Pointer to a global shared MR cache.
1803690b2a88SDmitry Kozlyuk  * @param mr_ctrl
1804690b2a88SDmitry Kozlyuk  *   Per-queue MR control handle.
1805690b2a88SDmitry Kozlyuk  * @param mp
1806690b2a88SDmitry Kozlyuk  *   Mempool containing the address.
1807690b2a88SDmitry Kozlyuk  * @param addr
1808690b2a88SDmitry Kozlyuk  *   Address to lookup.
1809690b2a88SDmitry Kozlyuk  * @return
1810690b2a88SDmitry Kozlyuk  *   MR lkey on success, UINT32_MAX on failure.
1811690b2a88SDmitry Kozlyuk  */
1812690b2a88SDmitry Kozlyuk uint32_t
1813690b2a88SDmitry Kozlyuk mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
1814690b2a88SDmitry Kozlyuk 		      struct mlx5_mr_ctrl *mr_ctrl,
1815690b2a88SDmitry Kozlyuk 		      struct rte_mempool *mp, uintptr_t addr)
1816690b2a88SDmitry Kozlyuk {
1817690b2a88SDmitry Kozlyuk 	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
1818690b2a88SDmitry Kozlyuk 	uint32_t lkey;
1819690b2a88SDmitry Kozlyuk 	uint16_t bh_idx = 0;
1820690b2a88SDmitry Kozlyuk 
1821690b2a88SDmitry Kozlyuk 	/* Binary-search MR translation table. */
1822690b2a88SDmitry Kozlyuk 	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
1823690b2a88SDmitry Kozlyuk 	/* Update top-half cache. */
1824690b2a88SDmitry Kozlyuk 	if (likely(lkey != UINT32_MAX)) {
1825690b2a88SDmitry Kozlyuk 		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
1826690b2a88SDmitry Kozlyuk 	} else {
1827690b2a88SDmitry Kozlyuk 		lkey = mlx5_lookup_mempool_regs(share_cache, mr_ctrl, repl,
1828690b2a88SDmitry Kozlyuk 						mp, addr);
1829690b2a88SDmitry Kozlyuk 		/* Can only fail if the address is not from the mempool. */
1830690b2a88SDmitry Kozlyuk 		if (unlikely(lkey == UINT32_MAX))
1831690b2a88SDmitry Kozlyuk 			return UINT32_MAX;
1832690b2a88SDmitry Kozlyuk 	}
1833690b2a88SDmitry Kozlyuk 	/* Update the most recently used entry. */
1834690b2a88SDmitry Kozlyuk 	mr_ctrl->mru = mr_ctrl->head;
1835690b2a88SDmitry Kozlyuk 	/* Point to the next victim, the oldest. */
1836690b2a88SDmitry Kozlyuk 	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
1837690b2a88SDmitry Kozlyuk 	return lkey;
1838690b2a88SDmitry Kozlyuk }
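
/*
 * Sketch of a per-mbuf lookup built on the bottom half above, mirroring
 * the top-half/bottom-half split of mlx5_mr_mb2mr() below but resolving
 * through the mempool registrations. The wrapper name is illustrative.
 */
#ifdef MLX5_MR_USAGE_SKETCH
static inline uint32_t
example_mpool_lkey(struct mlx5_mr_share_cache *sc,
		   struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
{
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Fast path: linear search of the per-queue top-half array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Slow path: consult the mempool registrations. */
	return mlx5_mr_mempool2mr_bh(sc, mr_ctrl, mb->pool, addr);
}
#endif /* MLX5_MR_USAGE_SKETCH */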
1839fb690f71SMichael Baum 
1840fb690f71SMichael Baum /**
1841fb690f71SMichael Baum  * Query LKey from a packet buffer.
1842fb690f71SMichael Baum  *
1843fb690f71SMichael Baum  * @param cdev
1844fb690f71SMichael Baum  *   Pointer to the mlx5 device structure.
1845fb690f71SMichael Baum  * @param mp_id
1846fb690f71SMichael Baum  *   Multi-process identifier, may be NULL for the primary process.
1847fb690f71SMichael Baum  * @param mr_ctrl
1848fb690f71SMichael Baum  *   Pointer to per-queue MR control structure.
1849fb690f71SMichael Baum  * @param mbuf
1850fb690f71SMichael Baum  *   Pointer to mbuf.
1851fb690f71SMichael Baum  *
1852fb690f71SMichael Baum  * @return
1853fb690f71SMichael Baum  *   Searched LKey on success, UINT32_MAX on no match.
1854fb690f71SMichael Baum  */
1855fb690f71SMichael Baum uint32_t
1856fb690f71SMichael Baum mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
18579f1d636fSMichael Baum 	      struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf)
1858fb690f71SMichael Baum {
1859fb690f71SMichael Baum 	uint32_t lkey;
1860fb690f71SMichael Baum 	uintptr_t addr = (uintptr_t)mbuf->buf_addr;
1861fb690f71SMichael Baum 
1862fb690f71SMichael Baum 	/* Check the generation number for any change on existing MRs. */
1863fb690f71SMichael Baum 	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
1864fb690f71SMichael Baum 		mlx5_mr_flush_local_cache(mr_ctrl);
1865fb690f71SMichael Baum 	/* Linear search on MR cache array. */
1866fb690f71SMichael Baum 	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
1867fb690f71SMichael Baum 				   MLX5_MR_CACHE_N, (uintptr_t)mbuf->buf_addr);
1868fb690f71SMichael Baum 	if (likely(lkey != UINT32_MAX))
1869fb690f71SMichael Baum 		return lkey;
1870fb690f71SMichael Baum 	/* Take slower bottom-half on miss. */
18719f1d636fSMichael Baum 	return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, &cdev->mr_scache, mr_ctrl,
1872fb690f71SMichael Baum 				  addr, cdev->config.mr_ext_memseg_en);
1873fb690f71SMichael Baum }
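
/*
 * Sketch of a typical datapath use of mlx5_mr_mb2mr() when building a WQE
 * data segment (compiled out). The wrapper name and the failure handling
 * are assumptions for illustration.
 */
#ifdef MLX5_MR_USAGE_SKETCH
static inline int
example_fill_dseg(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
		  struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb,
		  uint32_t *lkey_out)
{
	uint32_t lkey = mlx5_mr_mb2mr(cdev, mp_id, mr_ctrl, mb);

	if (unlikely(lkey == UINT32_MAX))
		return -1; /* Buffer not registered; drop or fall back. */
	*lkey_out = lkey; /* Already byte-swapped, ready for the WQE. */
	return 0;
}
#endif /* MLX5_MR_USAGE_SKETCH */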
1874