/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5_glue.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
#include "mlx5_common_utils.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

/**
 * Expand the B-tree table to a given size. Cannot be called while holding
 * memory_hotplug_lock or share_cache.rwlock due to rte_realloc().
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is
	 * used inside if there's no room to expand. Because this is a quite
	 * rare case and a part of a very slow path, it is acceptable.
	 * Initially, cache_bh[] is given enough space in practice; once it
	 * is expanded, further expansion should never be needed again.
	 */
	mem = rte_realloc(bt->table, n * sizeof(struct mr_cache_entry), 0);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up the LKey for a given address in the B-tree lookup table, store the
 * last index, and return the searched LKey.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, returns the index where the
 *   search stopped so that it can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	MLX5_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
				    lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	MLX5_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}
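
/*
 * Illustrative sketch of the lookup contract above, kept out of the build
 * behind a hypothetical MLX5_MR_EXAMPLES guard: entries are sorted by start
 * address and each covers [start, end); the sentinel at index 0 gives the
 * binary search a lower bound, and on a miss *idx still points at the
 * insertion position. The example address is made up.
 */
#ifdef MLX5_MR_EXAMPLES
static void
mr_btree_lookup_example(struct mlx5_mr_btree *bt)
{
	uint16_t idx = 0;
	uint32_t lkey = mr_btree_lookup(bt, &idx, (uintptr_t)0x7f0000001000);

	if (lkey == UINT32_MAX)
		DRV_LOG(DEBUG, "miss, would insert after idx=%u", idx);
	else
		DRV_LOG(DEBUG, "hit at idx=%u, lkey=0x%x", idx, lkey);
}
#endif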

/**
 * Insert an entry into the B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	MLX5_ASSERT(bt != NULL);
	MLX5_ASSERT(bt->len <= bt->size);
	MLX5_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}

/**
 * Initialize B-tree and allocate memory for the lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	MLX5_ASSERT(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = rte_calloc_socket("B-tree table",
				      n, sizeof(struct mr_cache_entry),
				      0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DEBUG("failed to allocate memory for btree cache on socket %d",
		      socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mr_cache_entry) {
		.lkey = UINT32_MAX,
	};
	DEBUG("initialized B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	return 0;
}
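
/*
 * A minimal lifecycle sketch under the same hypothetical MLX5_MR_EXAMPLES
 * guard: initialize a table, insert one entry, and free it. The entry
 * values and the table size are made up for illustration.
 */
#ifdef MLX5_MR_EXAMPLES
static int
mr_btree_lifecycle_example(void)
{
	struct mlx5_mr_btree bt = { 0 };
	struct mr_cache_entry entry = {
		.start = 0x100000,
		.end = 0x200000,
		.lkey = rte_cpu_to_be_32(0x1234),
	};

	if (mlx5_mr_btree_init(&bt, 256, SOCKET_ID_ANY) < 0)
		return -rte_errno;
	mr_btree_insert(&bt, &entry);
	mlx5_mr_btree_free(&bt);
	return 0;
}
#endif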

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DEBUG("freeing B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	rte_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	int idx;
	struct mr_cache_entry *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mr_cache_entry *entry = &lkp_tbl[idx];

		DEBUG("B-tree(%p)[%u],"
		      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}

/**
 * Find a virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have a memseg list. */
	if (mr->msl == NULL) {
		MLX5_ASSERT(mr->ms_bmp_n == 1);
		MLX5_ASSERT(mr->ms_n == 1);
		MLX5_ASSERT(base_idx == 0);
		/*
		 * It can't be searched in the memseg list; take it directly
		 * from pmd_mr as there's only one chunk.
		 */
		entry->start = (uintptr_t)mr->pmd_mr.addr;
		entry->end = (uintptr_t)mr->pmd_mr.addr + mr->pmd_mr.len;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
	}
	return idx;
}
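
/*
 * Sketch of the iteration contract (same hypothetical MLX5_MR_EXAMPLES
 * guard): callers walk all chunks of an MR by feeding the returned index
 * back in until the bitmap is exhausted; an entry left with end == 0 means
 * no further chunk. This mirrors the loops in mlx5_mr_insert_cache() and
 * mlx5_mr_lookup_list() below.
 */
#ifdef MLX5_MR_EXAMPLES
static void
mr_walk_chunks_example(struct mlx5_mr *mr)
{
	unsigned int n;

	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry chunk;

		memset(&chunk, 0, sizeof(chunk));
		n = mr_find_next_chunk(mr, &chunk, n);
		if (!chunk.end)
			break;
		DRV_LOG(DEBUG, "chunk [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
			chunk.start, chunk.end);
	}
}
#endif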

/**
 * Insert an MR to the global B-tree cache. It may fail due to a lack of
 * memory. In that case, the entry will have to be searched again by
 * mlx5_mr_lookup_list() in mlx5_mr_create() on a cache miss.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr)
{
	unsigned int n;

	DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
		(void *)mr, (void *)share_cache);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be
			 * expanded here because of a potential deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up address in the global MR cache.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr)
{
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed since it failed to expand the
	 * B-tree table, it can't have all the existing MRs. Then, the address
	 * has to be searched by traversing the original MR list instead,
	 * which is a very slow path. Otherwise, the global cache is all
	 * inclusive.
	 */
	if (!unlikely(share_cache->cache.overflow)) {
		lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*share_cache->cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mlx5_mr_lookup_list(share_cache, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}
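
/*
 * Reader-side sketch (hypothetical MLX5_MR_EXAMPLES guard): callers outside
 * this file protect the shared cache with share_cache->rwlock around the
 * lookup, as mlx5_mr_create_secondary() does below.
 */
#ifdef MLX5_MR_EXAMPLES
static uint32_t
mr_lookup_locked_example(struct mlx5_mr_share_cache *share_cache,
			 uintptr_t addr)
{
	struct mr_cache_entry entry;
	uint32_t lkey;

	memset(&entry, 0, sizeof(entry));
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mlx5_mr_lookup_cache(share_cache, &entry, addr);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	return lkey; /* UINT32_MAX on miss. */
}
#endif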

/**
 * Free MR resources. The MR lock must not be held to avoid a deadlock:
 * rte_free() can raise a memory free event and the callback function will
 * spin on the lock.
 *
 * @param mr
 *   Pointer to MR to free.
 * @param dereg_mr_cb
 *   Callback to deregister the MR.
 */
static void
mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	dereg_mr_cb(&mr->pmd_mr);
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	rte_free(mr);
}

void
mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
	/* Flush cache to rebuild. */
	share_cache->cache.len = 1;
	share_cache->cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr)
		if (mlx5_mr_insert_cache(share_cache, mr) < 0)
			return;
}

/**
 * Release resources of detached MRs having no online entry.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
static void
mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * MRs can't be freed while holding the lock, because rte_free()
	 * could call the memory free callback function, which would be a
	 * deadlock situation.
	 */
	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = share_cache->mr_free_list;
	LIST_INIT(&share_cache->mr_free_list);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mr_free(mr, share_cache->dereg_mr_cb);
	}
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called from a secondary process; a request is then sent
 * to the primary process in order to create an MR for the address. As the
 * global MR list is in shared memory, the following LKey lookup should
 * succeed unless the request fails.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Pointer to multi-process identifier.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_secondary(void *pd __rte_unused,
			 struct mlx5_mp_id *mp_id,
			 struct mlx5_mr_share_cache *share_cache,
			 struct mr_cache_entry *entry, uintptr_t addr,
			 unsigned int mr_ext_memseg_en __rte_unused)
{
	int ret;

	DEBUG("port %u requesting MR creation for address (%p)",
	      mp_id->port_id, (void *)addr);
	ret = mlx5_mp_req_mr_create(mp_id, addr);
	if (ret) {
		DEBUG("Failed to request MR creation for address (%p)",
		      (void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	DEBUG("MR CREATED by primary process for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
	      (void *)addr, entry->start, entry->end, entry->lkey);
	return entry->lkey;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register the entire virtually contiguous memory chunk around the address.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en)
{
	struct mr_find_contig_memsegs_data data = {.addr = addr, };
	struct mr_find_contig_memsegs_data data_re;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	int ms_idx_shift = -1;
	uint32_t bmp_size;
	void *bmp_mem;
	uint32_t ms_n;
	uint32_t n;
	size_t len;

	DRV_LOG(DEBUG, "Creating an MR using address (%p)", (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called while holding
	 * either memory_hotplug_lock or share_cache->rwlock. MRs on the free
	 * list have been detached by the memory free event but couldn't be
	 * released inside the callback due to deadlock. As a result,
	 * releasing resources is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(share_cache);
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register the maximum
	 * range. In the best case, where mempools are not dynamically
	 * recreated and '--socket-mem' is specified as an EAL option, it is
	 * very likely to have only one MR(LKey) per socket and per hugepage
	 * size even though the system memory is highly fragmented. As the
	 * whole memory chunk will be pinned by the kernel, it can't be
	 * reused unless the entire chunk is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may drop performance if there
	 * are many MRs to look up on the datapath.
	 */
	if (!mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"Unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX5_ASSERT(data.msl);
	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX5_ASSERT(ms);
	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DEBUG("Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " page_sz=0x%" PRIx64 ", ms_n=%u",
	      (void *)addr, data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE) +
				bmp_size,
				RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DEBUG("Unable to allocate memory for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize memseg bitmap. To
	 * see if a memseg of ms_idx in the memseg-list is still valid, check:
	 *	rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DEBUG("Unable to initialize bitmap for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there are any memory
	 * related calls in a critical path, the resource allocation above
	 * can't be locked. If the memory has been changed at this point, try
	 * again with just a single page. If not, go on with the big chunk
	 * atomically from here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DEBUG("Unable to find virtually contiguous"
		      " chunk for address (%p)."
		      " rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with a single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mr_free(mr, share_cache->dereg_mr_cb);
		goto alloc_resources;
	}
	MLX5_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&share_cache->rwlock);
	/*
	 * Check that the address is really missing. If another thread
	 * already created one or it is not found due to overflow, abort
	 * and return.
	 */
	if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to a
		 * lack of memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&share_cache->cache, entry);
		DEBUG("Found MR for %p on final lookup, abort", (void *)addr);
		rte_rwlock_write_unlock(&share_cache->rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mr_free(mr, share_cache->dereg_mr_cb);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for the verbs MR. Set bits for
	 * registering memsegs but exclude already registered ones. The
	 * bitmap can be fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mr_cache_entry ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
		    UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create an MR for the memory chunk. Verbs: ibv_reg_mr() can
	 * be called while holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DEBUG("Failed to create an MR for address (%p)",
		      (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	MLX5_ASSERT((uintptr_t)mr->pmd_mr.addr == data.start);
	MLX5_ASSERT(mr->pmd_mr.len);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	DEBUG("MR CREATED (%p) for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      (void *)mr, (void *)addr, data.start, data.end,
	      rte_cpu_to_be_32(mr->pmd_mr.lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&share_cache->rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, as this can be called in a datapath, a warning
	 * message per error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be
	 * called inside.
	 */
	mr_free(mr, share_cache->dereg_mr_cb);
	return UINT32_MAX;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from the primary and secondary processes.
 *
 * @param pd
 *   Pointer to pd handle of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Pointer to multi-process identifier.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create(void *pd, struct mlx5_mp_id *mp_id,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr,
	       unsigned int mr_ext_memseg_en)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_mr_create_primary(pd, share_cache, entry,
					     addr, mr_ext_memseg_en);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
					       addr, mr_ext_memseg_en);
		break;
	default:
		break;
	}
	return ret;
}

/**
 * Look up an address in the global MR cache table. If not found, create a
 * new MR. Insert the found/created entry into the local bottom-half cache
 * table.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Pointer to multi-process identifier.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
		 struct mlx5_mr_share_cache *share_cache,
		 struct mlx5_mr_ctrl *mr_ctrl,
		 struct mr_cache_entry *entry, uintptr_t addr,
		 unsigned int mr_ext_memseg_en)
{
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint32_t lkey;
	uint16_t idx;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*share_cache->cache.table)[idx];
		rte_rwlock_read_unlock(&share_cache->rwlock);
		/*
		 * Update local cache. Even if it fails, return the found
		 * entry to update the top-half cache. Next time, this entry
		 * will be found in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&share_cache->rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
			      mr_ext_memseg_en);
	/*
	 * Update the local cache if a new global MR was successfully
	 * created. Even if it failed to create one, there's no action to
	 * take in this datapath code. As the returned LKey is invalid, this
	 * will eventually make HW fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom half of the LKey search on the datapath. First search in cache_bh[]
 * and if it misses, search in the global MR cache table and update the new
 * entry into the per-queue local caches.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Pointer to multi-process identifier.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in local lookup table, search in the global cache
		 * and local cache_bh[] will be updated inside if possible.
		 * Top-half cache entry will also be updated.
		 */
		lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
					repl, addr, mr_ext_memseg_en);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}
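
/*
 * Hedged sketch of a top-half caller (hypothetical MLX5_MR_EXAMPLES guard
 * and function name; the real inline helpers live in the PMD headers):
 * probe the small linear top-half cache first and fall back to this bottom
 * half on a miss.
 */
#ifdef MLX5_MR_EXAMPLES
static uint32_t
mr_addr2mr_example(void *pd, struct mlx5_mp_id *mp_id,
		   struct mlx5_mr_share_cache *share_cache,
		   struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
{
	unsigned int i;

	/* Linear probe over the MLX5_MR_CACHE_N top-half entries. */
	for (i = 0; i < MLX5_MR_CACHE_N; ++i) {
		struct mr_cache_entry *e = &mr_ctrl->cache[i];

		if (addr >= e->start && addr < e->end)
			return e->lkey;
	}
	/* Miss: binary search in cache_bh[], then the global cache. */
	return mlx5_mr_addr2mr_bh(pd, mp_id, share_cache, mr_ctrl, addr, 1);
}
#endif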

/**
 * Release all the created MRs and resources on the global MR cache of a
 * device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;

	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach from MR list and move to free list. */
	mr_next = LIST_FIRST(&share_cache->mr_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
	}
	LIST_INIT(&share_cache->mr_list);
	/* Free global cache. */
	mlx5_mr_btree_free(&share_cache->cache);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(share_cache);
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR local cache.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}
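
/*
 * Datapath sketch (hypothetical MLX5_MR_EXAMPLES guard): queues detect
 * global cache changes by comparing generation numbers and flush their
 * local caches when stale.
 */
#ifdef MLX5_MR_EXAMPLES
static void
mr_check_generation_example(struct mlx5_mr_ctrl *mr_ctrl)
{
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
}
#endif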

/**
 * Create a memory region for external memory, that is, memory which is not
 * part of the DPDK memory segments.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param addr
 *   Starting virtual address of memory.
 * @param len
 *   Length of memory segment being mapped.
 * @param socket_id
 *   Socket to allocate heap memory for the control structures.
 * @param reg_mr_cb
 *   Callback to register the MR.
 *
 * @return
 *   Pointer to MR structure on success, NULL otherwise.
 */
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
		   mlx5_reg_mr_t reg_mr_cb)
{
	struct mlx5_mr *mr = NULL;

	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (mr == NULL)
		return NULL;
	reg_mr_cb(pd, (void *)addr, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(WARNING,
			"Failed to create MR for address (%p)",
			(void *)addr);
		rte_free(mr);
		return NULL;
	}
	mr->msl = NULL; /* Mark it as external memory. */
	mr->ms_bmp = NULL;
	mr->ms_n = 1;
	mr->ms_bmp_n = 1;
	DRV_LOG(DEBUG,
		"MR CREATED (%p) for external memory %p:\n"
		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		(void *)mr, (void *)addr,
		addr, addr + len, rte_cpu_to_be_32(mr->pmd_mr.lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	return mr;
}
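
/*
 * Sketch of registering externally allocated memory (hypothetical
 * MLX5_MR_EXAMPLES guard): callers typically also link the new MR into the
 * shared cache under the write lock so datapath lookups can find it.
 */
#ifdef MLX5_MR_EXAMPLES
static struct mlx5_mr *
mr_register_extmem_example(void *pd,
			   struct mlx5_mr_share_cache *share_cache,
			   void *addr, size_t len, int socket_id)
{
	struct mlx5_mr *mr;

	mr = mlx5_create_mr_ext(pd, (uintptr_t)addr, len, socket_id,
				share_cache->reg_mr_cb);
	if (mr == NULL)
		return NULL;
	rte_rwlock_write_lock(&share_cache->rwlock);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	/* Make the MR visible in the global B-tree cache. */
	mlx5_mr_insert_cache(share_cache, mr);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	return mr;
}
#endif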

/**
 * Dump all the created MRs and the global cache entries.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	struct mlx5_mr *mr;
	int mr_n = 0;
	int chunk_n = 0;

	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		DEBUG("MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
		      mr_n++, rte_cpu_to_be_32(mr->pmd_mr.lkey),
		      mr->ms_n, mr->ms_bmp_n);
		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret = { 0, };

			n = mr_find_next_chunk(mr, &ret, n);
			if (!ret.end)
				break;
			DEBUG("  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
			      chunk_n++, ret.start, ret.end);
		}
	}
	DEBUG("Dumping global cache %p", (void *)share_cache);
	mlx5_mr_btree_dump(&share_cache->cache);
	rte_rwlock_read_unlock(&share_cache->rwlock);
#endif
}