/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <stddef.h>

#include <rte_eal_memconfig.h>
#include <rte_eal_paging.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5_glue.h"
#include "mlx5_common.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
#include "mlx5_common_os.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

/* Virtual memory range. */
struct mlx5_range {
	uintptr_t start;
	uintptr_t end;
};

/** Memory region for a mempool. */
struct mlx5_mempool_mr {
	struct mlx5_pmd_mr pmd_mr;
	RTE_ATOMIC(uint32_t) refcnt; /**< Number of mempools sharing this MR. */
};

/* Mempool registration. */
struct mlx5_mempool_reg {
	LIST_ENTRY(mlx5_mempool_reg) next;
	/** Registered mempool, used to designate registrations. */
	struct rte_mempool *mp;
	/** Memory regions for the address ranges of the mempool. */
	struct mlx5_mempool_mr *mrs;
	/** Number of memory regions. */
	unsigned int mrs_n;
	/** Whether the MRs were created for external pinned memory. */
	bool is_extmem;
};

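/**
 * Free callback for a Multi-Packet RQ buffer: the buffer is returned to its
 * mempool either when its reference count is already one (nothing else points
 * into it) or when the last outstanding reference is released, in which case
 * the count is reset to one so the buffer can be reused.
 *
 * @param addr
 *   Buffer address, unused.
 * @param opaque
 *   Pointer to the MPRQ buffer (struct mlx5_mprq_buf).
 */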
void
mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
{
	struct mlx5_mprq_buf *buf = opaque;

	if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) == 1) {
		rte_mempool_put(buf->mp, buf);
	} else if (unlikely(rte_atomic_fetch_sub_explicit(&buf->refcnt, 1,
			    rte_memory_order_relaxed) - 1 == 0)) {
		rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
		rte_mempool_put(buf->mp, buf);
	}
}

/**
 * Expand B-tree table to a given size. Must not be called while holding
 * memory_hotplug_lock or share_cache.rwlock because rte_realloc() is used.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, uint32_t n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * The downside of using rte_realloc() directly is that SOCKET_ID_ANY
	 * is used internally when there is no room to expand in place. Since
	 * this is a rare case on a very slow path, it is acceptable.
	 * Initially cache_bh[] is given enough space, so once it has been
	 * expanded, further expansion should never be needed.
	 */
	mem = mlx5_realloc(bt->table, MLX5_MEM_RTE | MLX5_MEM_ZERO,
			   n * sizeof(struct mr_cache_entry), 0, SOCKET_ID_ANY);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up LKey from given B-tree lookup table, store the last index and return
 * searched LKey.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, returns index where it stops
 *   searching so that index can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint32_t *idx, uintptr_t addr)
{
	struct mr_cache_entry *lkp_tbl;
	uint32_t n;
	uint32_t base = 0;

	MLX5_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
				    lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint32_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
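	/*
	 * Loop invariant: lkp_tbl[base].start <= addr, so "base" now indexes
	 * the greatest entry whose start address does not exceed the key; the
	 * range check below decides whether it actually covers the address.
	 */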
	MLX5_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}

/**
 * Insert an entry to B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
{
	struct mr_cache_entry *lkp_tbl;
	uint32_t idx = 0;
	size_t shift;

	MLX5_ASSERT(bt != NULL);
	MLX5_ASSERT(bt->len <= bt->size);
	MLX5_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exist at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* Caller must ensure that there is enough room for a new entry. */
	MLX5_ASSERT(bt->len < bt->size);
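	/*
	 * The new entry goes right after the lookup position; entries above it
	 * are shifted up by one slot so the table stays sorted by start
	 * address.
	 */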
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	MLX5_ASSERT(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				sizeof(struct mr_cache_entry) * n,
				0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DRV_LOG(DEBUG,
			"failed to allocate memory for btree cache on socket "
			"%d", socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mr_cache_entry) {
		.lkey = UINT32_MAX,
	};
	DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
		(void *)bt, (void *)bt->table);
	return 0;
}

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
		(void *)bt, (void *)bt->table);
	mlx5_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	uint32_t idx;
	struct mr_cache_entry *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mr_cache_entry *entry = &lkp_tbl[idx];

		DRV_LOG(DEBUG, "B-tree(%p)[%u],"
			" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}

/**
 * Initialize per-queue MR control descriptor.
 *
 * @param mr_ctrl
 *   Pointer to MR control structure.
 * @param dev_gen_ptr
 *   Pointer to generation number of global cache.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
		  int socket)
{
	if (mr_ctrl == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Save pointer of global generation number to check memory event. */
	mr_ctrl->dev_gen_ptr = dev_gen_ptr;
	/* Initialize B-tree and allocate memory for bottom-half cache table. */
	return mlx5_mr_btree_init(&mr_ctrl->cache_bh, MLX5_MR_BTREE_CACHE_N,
				  socket);
}
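
/*
 * Usage sketch (illustrative, not a fixed contract): a driver queue that owns
 * a per-queue "mr_ctrl" and has access to the shared device cache
 * "cdev->mr_scache" would typically do, at queue setup time:
 *
 *     if (mlx5_mr_ctrl_init(&q->mr_ctrl, &cdev->mr_scache.dev_gen,
 *                           socket) != 0)
 *         return -rte_errno;
 *
 * "q" is a hypothetical queue structure used only for illustration. On the
 * datapath the queue then resolves LKeys through mlx5_mr_addr2mr_bh() below
 * and flushes its local caches with mlx5_mr_flush_local_cache() once its
 * cur_gen falls behind *dev_gen_ptr.
 */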

/**
 * Find virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have memseg list. */
	if (mr->msl == NULL) {
		MLX5_ASSERT(mr->ms_bmp_n == 1);
		MLX5_ASSERT(mr->ms_n == 1);
		MLX5_ASSERT(base_idx == 0);
		/*
		 * Can't search it from memseg list but get it directly from
		 * pmd_mr as there's only one chunk.
		 */
		entry->start = (uintptr_t)mr->pmd_mr.addr;
		entry->end = (uintptr_t)mr->pmd_mr.addr + mr->pmd_mr.len;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
	}
	return idx;
}

/**
 * Insert an MR to the global B-tree cache. It may fail due to low-on-memory.
 * Then, this entry will have to be searched by mlx5_mr_lookup_list() in
 * mlx5_mr_create() on miss.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr)
{
	unsigned int n;

	DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
		(void *)mr, (void *)share_cache);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&share_cache->cache, &entry) < 0)
			return -1;
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up address on global MR cache.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr)
{
	uint32_t idx;
	uint32_t lkey;

	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
	if (lkey != UINT32_MAX)
		*entry = (*share_cache->cache.table)[idx];
	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}

/**
 * Free MR resources. The MR lock must not be held to avoid a deadlock:
 * rte_free() can raise a memory free event and the callback function will
 * spin on the lock.
 *
 * @param mr
 *   Pointer to MR to free.
 * @param dereg_mr_cb
 *   Callback used to deregister the underlying device memory region.
 */
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	dereg_mr_cb(&mr->pmd_mr);
	rte_bitmap_free(mr->ms_bmp);
	mlx5_free(mr);
}

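/**
 * Rebuild the global B-tree cache from the remaining MR list, typically after
 * one or more MRs have been detached (e.g. on a memory free event): flush all
 * entries except the sentinel and re-insert every chunk of every existing MR.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */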
void
mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
	/* Flush cache to rebuild. */
	share_cache->cache.len = 1;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr)
		if (mlx5_mr_insert_cache(share_cache, mr) < 0)
			return;
}

/**
 * Release resources of detached MRs that no longer have an online entry.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
static void
mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * An MR can't be freed while holding the lock because rte_free() could
	 * call the memory free callback function, which would be a deadlock.
	 */
	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = share_cache->mr_free_list;
	LIST_INIT(&share_cache->mr_free_list);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	}
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
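/* Returns 1 to stop the walk once the segment containing data->addr is found. */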
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}

/**
 * Get the number of virtually-contiguous chunks in the MR.
 * HW MR does not need to be already created to use this function.
 *
 * @param mr
 *   Pointer to the MR.
 *
 * @return
 *   Number of chunks.
 */
static uint32_t
mr_get_chunk_count(const struct mlx5_mr *mr)
{
	uint32_t i, count = 0;
	bool was_in_chunk = false;
	bool is_in_chunk;

	/* There is only one chunk in case of external memory. */
	if (mr->msl == NULL)
		return 1;
	for (i = 0; i < mr->ms_bmp_n; i++) {
		is_in_chunk = rte_bitmap_get(mr->ms_bmp, i);
		if (!was_in_chunk && is_in_chunk)
			count++;
		was_in_chunk = is_in_chunk;
	}
	return count;
}

/**
 * Thread-safely expand the global MR cache to at least @p size slots.
 *
 * @param share_cache
 *   Shared MR cache for locking.
 * @param size
 *   Desired cache size.
 * @param socket
 *   NUMA node.
 *
 * @return
 *   0 on success, negative on failure and rte_errno is set.
 */
int
mlx5_mr_expand_cache(struct mlx5_mr_share_cache *share_cache,
		     uint32_t size, int socket)
{
	struct mlx5_mr_btree cache = {0};
	struct mlx5_mr_btree *bt;
	struct mr_cache_entry *lkp_tbl;
	int ret;

	size = rte_align32pow2(size);
	ret = mlx5_mr_btree_init(&cache, size, socket);
	if (ret < 0)
		return ret;
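	/*
	 * The larger table is allocated without holding the cache lock. Its
	 * contents are copied and swapped in under the write lock below, and
	 * whichever table ends up smaller (or was already superseded by a
	 * bigger concurrent expansion) is freed afterwards.
	 */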
	rte_rwlock_write_lock(&share_cache->rwlock);
	bt = &share_cache->cache;
	lkp_tbl = *bt->table;
	if (cache.size > bt->size) {
		rte_memcpy(cache.table, lkp_tbl, bt->len * sizeof(lkp_tbl[0]));
		RTE_SWAP(*bt, cache);
		DRV_LOG(DEBUG, "Global MR cache expanded to %u slots", size);
	}
	rte_rwlock_write_unlock(&share_cache->rwlock);
	mlx5_mr_btree_free(&cache);
	return 0;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called on a secondary process, then a request is sent to
 * the primary process in order to create an MR for the address. As the global
 * MR list is on the shared memory, the following LKey lookup should succeed
 * unless the request fails.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_secondary(struct mlx5_common_device *cdev,
			 struct mlx5_mr_share_cache *share_cache,
			 struct mr_cache_entry *entry, uintptr_t addr)
{
	int ret;

	DRV_LOG(DEBUG, "Requesting MR creation for address (%p)", (void *)addr);
	ret = mlx5_mp_req_mr_create(cdev, addr);
	if (ret) {
		DRV_LOG(DEBUG, "Fail to request MR creation for address (%p)",
			(void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	DRV_LOG(DEBUG, "MR CREATED by primary process for %p:\n"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
		(void *)addr, entry->start, entry->end, entry->lkey);
	return entry->lkey;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register the entire virtually contiguous memory chunk around the address.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag controlling whether the MR is extended to the whole
 *   virtually contiguous chunk of memsegs.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en)
{
	struct mr_find_contig_memsegs_data data = {.addr = addr, };
	struct mr_find_contig_memsegs_data data_re;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr_btree *bt;
	struct mlx5_mr *mr = NULL;
	int ms_idx_shift = -1;
	uint32_t bmp_size;
	void *bmp_mem;
	uint32_t ms_n;
	uint32_t n;
	uint32_t chunks_n;
	size_t len;

	DRV_LOG(DEBUG, "Creating a MR using address (%p)", (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called while holding
	 * either memory_hotplug_lock or share_cache->rwlock. MRs on the free
	 * list have been detached by the memory free event but could not be
	 * released inside the callback due to deadlock. As a result, releasing
	 * resources is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(share_cache);
find_range:
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register the maximum
	 * range. In the best case where mempools are not dynamically recreated
	 * and '--socket-mem' is specified as an EAL option, it is very likely
	 * to have only one MR (LKey) per socket and per hugepage size even
	 * though the system memory is highly fragmented. As the whole memory
	 * chunk will be pinned by the kernel, it can't be reused unless the
	 * entire chunk is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may drop performance if there
	 * are many MRs to look up on the datapath.
	 */
	if (!mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"Unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX5_ASSERT(data.msl);
	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX5_ASSERT(ms);
	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DRV_LOG(DEBUG, "Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" page_sz=0x%" PRIx64 ", ms_n=%u",
		(void *)addr, data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE) +
			 bmp_size, RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DRV_LOG(DEBUG, "Unable to allocate memory for a new MR of"
			" address (%p).", (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize memseg bitmap. To
	 * see if a memseg of ms_idx in the memseg-list is still valid, check:
	 *   rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DRV_LOG(DEBUG, "Unable to initialize bitmap for a new MR of"
			" address (%p).", (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Should recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there are any memory
	 * related calls in a critical path, resource allocation above can't be
	 * locked. If the memory has been changed at this point, try again with
	 * just a single page. If not, go on with the big chunk atomically from
	 * here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DRV_LOG(DEBUG,
			"Unable to find virtually contiguous chunk for address "
			"(%p). rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with a single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		goto alloc_resources;
	}
	MLX5_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&share_cache->rwlock);
	/*
	 * Check the address is really missing. If another thread already
	 * created one or it is not found due to overflow, abort and return.
	 */
	if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to
		 * low-on-memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&share_cache->cache, entry);
		DRV_LOG(DEBUG, "Found MR for %p on final lookup, abort",
			(void *)addr);
		rte_rwlock_write_unlock(&share_cache->rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mr_cache_entry ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
		    UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
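	/*
	 * data.start/data.end now span from the first to just past the last
	 * memseg that still needs registration; already-registered segments
	 * inside this span simply keep their bits cleared in the bitmap.
	 */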
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * It is now known how many entries will be used in the global cache.
	 * If there is not enough, expand the cache.
	 * This cannot be done while holding the memory hotplug lock.
	 * While it is released, the memory layout may change,
	 * so the process must be repeated from the beginning.
	 */
	bt = &share_cache->cache;
	chunks_n = mr_get_chunk_count(mr);
	if (bt->len + chunks_n > bt->size) {
		struct mlx5_common_device *cdev;
		uint32_t size;

		size = bt->size + chunks_n;
		MLX5_ASSERT(size > bt->size);
		cdev = container_of(share_cache, struct mlx5_common_device,
				    mr_scache);
		rte_rwlock_write_unlock(&share_cache->rwlock);
		rte_mcfg_mem_read_unlock();
		if (mlx5_mr_expand_cache(share_cache, size,
					 cdev->dev->numa_node) < 0) {
			DRV_LOG(ERR, "Failed to expand global MR cache to %u slots",
				size);
			goto err_nolock;
		}
		goto find_range;
	}
	/*
	 * Finally create an MR for the memory chunk. Verbs: ibv_reg_mr() can
	 * be called while holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(DEBUG, "Fail to create an MR for address (%p)",
			(void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	MLX5_ASSERT((uintptr_t)mr->pmd_mr.addr == data.start);
	MLX5_ASSERT(mr->pmd_mr.len);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	DRV_LOG(DEBUG, "MR CREATED (%p) for %p:\n"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		(void *)mr, (void *)addr, data.start, data.end,
		rte_cpu_to_be_32(mr->pmd_mr.lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&share_cache->rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, as this can be called on the datapath, a warning
	 * message per error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
	 * inside.
	 */
	mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	return UINT32_MAX;
}
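
/*
 * Note on locking in mlx5_mr_create_primary(): the EAL memory hotplug read
 * lock is taken before the shared cache write lock and the two are released
 * in reverse order; any MR to be freed is freed only after both locks are
 * dropped, because freeing can re-enter the memory event callback, which
 * takes the cache lock again.
 */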

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from the primary and secondary processes.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_create(struct mlx5_common_device *cdev,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_mr_create_primary(cdev->pd, share_cache, entry, addr,
					     cdev->config.mr_ext_memseg_en);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mr_create_secondary(cdev, share_cache, entry, addr);
		break;
	default:
		break;
	}
	return ret;
}

/**
 * Look up address in the global MR cache table. If not found, create a new MR.
 * Insert the found/created entry to local bottom-half cache table.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_lookup_caches(struct mlx5_mr_ctrl *mr_ctrl,
		 struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr_share_cache *share_cache =
		container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
			     dev_gen);
	struct mlx5_common_device *cdev =
		container_of(share_cache, struct mlx5_common_device, mr_scache);
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint32_t lkey;
	uint32_t idx;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*share_cache->cache.table)[idx];
		rte_rwlock_read_unlock(&share_cache->rwlock);
		/*
		 * Update local cache. Even if it fails, return the found entry
		 * to update top-half cache. Next time, this entry will be found
		 * in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&share_cache->rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(cdev, share_cache, entry, addr);
	/*
	 * Update the local cache if successfully created a new global MR. Even
	 * if failed to create one, there's no action to take in this datapath
	 * code. As returning LKey is invalid, this will eventually make HW
	 * fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}


/**
 * Bottom-half of LKey search on datapath. First search in cache_bh[] and, on
 * a miss, search in the global MR cache table and update the new entry to
 * per-queue local caches.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_mr_addr2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
{
	uint32_t lkey;
	uint32_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in local lookup table, search in the global cache
		 * and local cache_bh[] will be updated inside if possible.
		 * Top-half cache entry will also be updated.
		 */
		lkey = mr_lookup_caches(mr_ctrl, repl, addr);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}
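
/*
 * Fast-path usage sketch (illustrative): callers first probe the linear
 * top-half array mr_ctrl->cache with the inline helper from mlx5_common_mr.h
 * and only fall back to this bottom-half routine on a miss, roughly:
 *
 *     lkey = <linear lookup in mr_ctrl->cache, starting at mr_ctrl->mru>;
 *     if (unlikely(lkey == UINT32_MAX))
 *         lkey = mlx5_mr_addr2mr_bh(mr_ctrl, addr);
 */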
1091b8dc6b0eSVu Pham
1092b8dc6b0eSVu Pham /**
1093fc59a1ecSMichael Baum * Release all the created MRs and resources on global MR cache of a device
1094b8dc6b0eSVu Pham * list.
1095b8dc6b0eSVu Pham *
1096b8dc6b0eSVu Pham * @param share_cache
1097b8dc6b0eSVu Pham * Pointer to a global shared MR cache.
1098b8dc6b0eSVu Pham */
1099b8dc6b0eSVu Pham void
mlx5_mr_release_cache(struct mlx5_mr_share_cache * share_cache)1100b8dc6b0eSVu Pham mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
1101b8dc6b0eSVu Pham {
1102b8dc6b0eSVu Pham struct mlx5_mr *mr_next;
1103b8dc6b0eSVu Pham
1104b8dc6b0eSVu Pham rte_rwlock_write_lock(&share_cache->rwlock);
1105b8dc6b0eSVu Pham /* Detach from MR list and move to free list. */
1106b8dc6b0eSVu Pham mr_next = LIST_FIRST(&share_cache->mr_list);
1107b8dc6b0eSVu Pham while (mr_next != NULL) {
1108b8dc6b0eSVu Pham struct mlx5_mr *mr = mr_next;
1109b8dc6b0eSVu Pham
1110b8dc6b0eSVu Pham mr_next = LIST_NEXT(mr, mr);
1111b8dc6b0eSVu Pham LIST_REMOVE(mr, mr);
1112b8dc6b0eSVu Pham LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
1113b8dc6b0eSVu Pham }
1114b8dc6b0eSVu Pham LIST_INIT(&share_cache->mr_list);
1115b8dc6b0eSVu Pham /* Free global cache. */
1116b8dc6b0eSVu Pham mlx5_mr_btree_free(&share_cache->cache);
1117b8dc6b0eSVu Pham rte_rwlock_write_unlock(&share_cache->rwlock);
1118b8dc6b0eSVu Pham /* Free all remaining MRs. */
1119b8dc6b0eSVu Pham mlx5_mr_garbage_collect(share_cache);
1120b8dc6b0eSVu Pham }
1121b8dc6b0eSVu Pham
1122b8dc6b0eSVu Pham /**
11235fbc75acSMichael Baum * Initialize global MR cache of a device.
11245fbc75acSMichael Baum *
11255fbc75acSMichael Baum * @param share_cache
11265fbc75acSMichael Baum * Pointer to a global shared MR cache.
11275fbc75acSMichael Baum * @param socket
11285fbc75acSMichael Baum * NUMA socket on which memory must be allocated.
11295fbc75acSMichael Baum *
11305fbc75acSMichael Baum * @return
11315fbc75acSMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set.
11325fbc75acSMichael Baum */
11335fbc75acSMichael Baum int
11345fbc75acSMichael Baum mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket)
11355fbc75acSMichael Baum {
11365fbc75acSMichael Baum /* Set the reg_mr and dereg_mr callback functions */
11375fbc75acSMichael Baum mlx5_os_set_reg_mr_cb(&share_cache->reg_mr_cb,
11385fbc75acSMichael Baum &share_cache->dereg_mr_cb);
11395fbc75acSMichael Baum rte_rwlock_init(&share_cache->rwlock);
1140fc59a1ecSMichael Baum rte_rwlock_init(&share_cache->mprwlock);
11415fbc75acSMichael Baum /* Initialize B-tree and allocate memory for global MR cache table. */
11425fbc75acSMichael Baum return mlx5_mr_btree_init(&share_cache->cache,
11435fbc75acSMichael Baum MLX5_MR_BTREE_CACHE_N * 2, socket);
11445fbc75acSMichael Baum }
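
/*
 * Illustrative sketch (not part of the driver build): typical pairing of
 * mlx5_mr_create_cache() at device probe with mlx5_mr_release_cache() at
 * close. The MLX5_MR_EXAMPLE_SNIPPETS guard and example_probe() are
 * hypothetical names used only for illustration.
 */
#ifdef MLX5_MR_EXAMPLE_SNIPPETS
static int
example_probe(struct mlx5_common_device *cdev, int socket)
{
	int ret;

	/* One shared MR cache per device, created once at probe time. */
	ret = mlx5_mr_create_cache(&cdev->mr_scache, socket);
	if (ret < 0)
		return ret; /* rte_errno is already set. */
	/* ... queues are created and MRs accumulate in the cache ... */
	/* At close, every MR is detached and finally garbage-collected. */
	mlx5_mr_release_cache(&cdev->mr_scache);
	return 0;
}
#endif /* MLX5_MR_EXAMPLE_SNIPPETS */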
11455fbc75acSMichael Baum
11465fbc75acSMichael Baum /**
1147b8dc6b0eSVu Pham * Flush all of the local cache entries.
1148b8dc6b0eSVu Pham *
1149b8dc6b0eSVu Pham * @param mr_ctrl
1150b8dc6b0eSVu Pham * Pointer to per-queue MR local cache.
1151b8dc6b0eSVu Pham */
1152b8dc6b0eSVu Pham void
1153b8dc6b0eSVu Pham mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
1154b8dc6b0eSVu Pham {
1155b8dc6b0eSVu Pham /* Reset the most-recently-used index. */
1156b8dc6b0eSVu Pham mr_ctrl->mru = 0;
1157b8dc6b0eSVu Pham /* Reset the linear search array. */
1158b8dc6b0eSVu Pham mr_ctrl->head = 0;
1159b8dc6b0eSVu Pham memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
1160b8dc6b0eSVu Pham /* Reset the B-tree table. */
1161b8dc6b0eSVu Pham mr_ctrl->cache_bh.len = 1;
1162b8dc6b0eSVu Pham /* Update the generation number. */
1163b8dc6b0eSVu Pham mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
1164b8dc6b0eSVu Pham DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
1165b8dc6b0eSVu Pham (void *)mr_ctrl, mr_ctrl->cur_gen);
1166b8dc6b0eSVu Pham }
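
/*
 * Illustrative sketch (not part of the driver build): how a datapath thread
 * is expected to consume the generation number updated above. The control
 * path increments the device generation after rebuilding the global cache;
 * a queue flushes its local caches once it observes a newer generation.
 * The MLX5_MR_EXAMPLE_SNIPPETS guard and the example_* name are hypothetical.
 */
#ifdef MLX5_MR_EXAMPLE_SNIPPETS
static inline void
example_sync_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	if (unlikely(mr_ctrl->cur_gen != *mr_ctrl->dev_gen_ptr))
		mlx5_mr_flush_local_cache(mr_ctrl);
}
#endif /* MLX5_MR_EXAMPLE_SNIPPETS */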
1167b8dc6b0eSVu Pham
1168b8dc6b0eSVu Pham /**
1169b8dc6b0eSVu Pham * Create a memory region for external memory, that is, memory which is not
1170b8dc6b0eSVu Pham * part of the DPDK memory segments.
1171b8dc6b0eSVu Pham *
1172b8dc6b0eSVu Pham * @param pd
1173c4685016SOphir Munk * Pointer to pd of a device (net, regex, vdpa,...).
1174b8dc6b0eSVu Pham * @param addr
1175b8dc6b0eSVu Pham * Starting virtual address of memory.
1176b8dc6b0eSVu Pham * @param len
1177b8dc6b0eSVu Pham * Length of memory segment being mapped.
1178b8dc6b0eSVu Pham * @param socket_id
1179b8dc6b0eSVu Pham * Socket to allocate heap memory for the control structures.
1180b8dc6b0eSVu Pham *
1181b8dc6b0eSVu Pham * @return
1182b8dc6b0eSVu Pham * Pointer to MR structure on success, NULL otherwise.
1183b8dc6b0eSVu Pham */
1184b8dc6b0eSVu Pham struct mlx5_mr *
1185d5ed8aa9SOphir Munk mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
1186d5ed8aa9SOphir Munk mlx5_reg_mr_t reg_mr_cb)
1187b8dc6b0eSVu Pham {
1188b8dc6b0eSVu Pham struct mlx5_mr *mr = NULL;
1189b8dc6b0eSVu Pham
1190fd970a54SSuanming Mou mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1191fd970a54SSuanming Mou RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE),
1192b8dc6b0eSVu Pham RTE_CACHE_LINE_SIZE, socket_id);
1193b8dc6b0eSVu Pham if (mr == NULL)
1194b8dc6b0eSVu Pham return NULL;
1195d5ed8aa9SOphir Munk reg_mr_cb(pd, (void *)addr, len, &mr->pmd_mr);
119658a17853SOphir Munk if (mr->pmd_mr.obj == NULL) {
1197b8dc6b0eSVu Pham DRV_LOG(WARNING,
119856d20677SOphir Munk "Fail to create MR for address (%p)",
1199b8dc6b0eSVu Pham (void *)addr);
1200fd970a54SSuanming Mou mlx5_free(mr);
1201b8dc6b0eSVu Pham return NULL;
1202b8dc6b0eSVu Pham }
1203b8dc6b0eSVu Pham mr->msl = NULL; /* Mark it as external memory. */
1204b8dc6b0eSVu Pham mr->ms_bmp = NULL;
1205b8dc6b0eSVu Pham mr->ms_n = 1;
1206b8dc6b0eSVu Pham mr->ms_bmp_n = 1;
1207b8dc6b0eSVu Pham DRV_LOG(DEBUG,
1208b8dc6b0eSVu Pham "MR CREATED (%p) for external memory %p:\n"
1209b8dc6b0eSVu Pham " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
1210b8dc6b0eSVu Pham " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
1211b8dc6b0eSVu Pham (void *)mr, (void *)addr,
121256d20677SOphir Munk addr, addr + len, rte_cpu_to_be_32(mr->pmd_mr.lkey),
1213b8dc6b0eSVu Pham mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
1214b8dc6b0eSVu Pham return mr;
1215b8dc6b0eSVu Pham }
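
/*
 * Illustrative sketch (not part of the driver build): registering a buffer
 * that was allocated outside the DPDK memory segments, as done by drivers
 * that manage such MRs themselves. The MLX5_MR_EXAMPLE_SNIPPETS guard and
 * the example_* name are hypothetical; the reg_mr_cb is typically taken
 * from the device's share cache.
 */
#ifdef MLX5_MR_EXAMPLE_SNIPPETS
static struct mlx5_mr *
example_register_ext_buf(void *pd, void *buf, size_t len, int socket,
			 mlx5_reg_mr_t reg_mr_cb)
{
	struct mlx5_mr *mr;

	mr = mlx5_create_mr_ext(pd, (uintptr_t)buf, len, socket, reg_mr_cb);
	if (mr == NULL)
		return NULL;
	/*
	 * WQEs consume the lkey in big-endian form,
	 * i.e. rte_cpu_to_be_32(mr->pmd_mr.lkey).
	 * The caller keeps the returned object and releases it when the
	 * buffer is no longer posted to the HW.
	 */
	return mr;
}
#endif /* MLX5_MR_EXAMPLE_SNIPPETS */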
1216b8dc6b0eSVu Pham
1217b8dc6b0eSVu Pham /**
12182f6c2adbSMichael Baum * Callback for memory free event. Iterate freed memsegs and check whether each
12192f6c2adbSMichael Baum * belongs to an existing MR. If found, clear its bit in the MR bitmap. As a
12202f6c2adbSMichael Baum * result, the MR becomes fragmented. If it becomes empty, the MR will be freed
12212f6c2adbSMichael Baum * later by mlx5_mr_garbage_collect(). Even if this callback is called from a
12222f6c2adbSMichael Baum * secondary process, the garbage collector will be called in the primary
12232f6c2adbSMichael Baum * process, as the secondary process can't call mlx5_mr_create().
12242f6c2adbSMichael Baum *
12252f6c2adbSMichael Baum * The global cache must be rebuilt if there's any change and this event has to
12262f6c2adbSMichael Baum * be propagated to dataplane threads to flush the local caches.
12272f6c2adbSMichael Baum *
12282f6c2adbSMichael Baum * @param share_cache
12292f6c2adbSMichael Baum * Pointer to a global shared MR cache.
12302f6c2adbSMichael Baum * @param ibdev_name
12312f6c2adbSMichael Baum * Name of ibv device.
12322f6c2adbSMichael Baum * @param addr
12332f6c2adbSMichael Baum * Address of freed memory.
12342f6c2adbSMichael Baum * @param len
12352f6c2adbSMichael Baum * Size of freed memory.
12362f6c2adbSMichael Baum */
12372f6c2adbSMichael Baum void
12382f6c2adbSMichael Baum mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
12392f6c2adbSMichael Baum const char *ibdev_name, const void *addr, size_t len)
12402f6c2adbSMichael Baum {
12412f6c2adbSMichael Baum const struct rte_memseg_list *msl;
12422f6c2adbSMichael Baum struct mlx5_mr *mr;
12432f6c2adbSMichael Baum int ms_n;
12442f6c2adbSMichael Baum int i;
12452f6c2adbSMichael Baum int rebuild = 0;
12462f6c2adbSMichael Baum
12472f6c2adbSMichael Baum DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
12482f6c2adbSMichael Baum ibdev_name, addr, len);
12492f6c2adbSMichael Baum msl = rte_mem_virt2memseg_list(addr);
12502f6c2adbSMichael Baum /* addr and len must be page-aligned. */
12512f6c2adbSMichael Baum MLX5_ASSERT((uintptr_t)addr ==
12522f6c2adbSMichael Baum RTE_ALIGN((uintptr_t)addr, msl->page_sz));
12532f6c2adbSMichael Baum MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
12542f6c2adbSMichael Baum ms_n = len / msl->page_sz;
12552f6c2adbSMichael Baum rte_rwlock_write_lock(&share_cache->rwlock);
12562f6c2adbSMichael Baum /* Clear bits of freed memsegs from MR. */
12572f6c2adbSMichael Baum for (i = 0; i < ms_n; ++i) {
12582f6c2adbSMichael Baum const struct rte_memseg *ms;
12592f6c2adbSMichael Baum struct mr_cache_entry entry;
12602f6c2adbSMichael Baum uintptr_t start;
12612f6c2adbSMichael Baum int ms_idx;
12622f6c2adbSMichael Baum uint32_t pos;
12632f6c2adbSMichael Baum
12642f6c2adbSMichael Baum /* Find MR having this memseg. */
12652f6c2adbSMichael Baum start = (uintptr_t)addr + i * msl->page_sz;
12662f6c2adbSMichael Baum mr = mlx5_mr_lookup_list(share_cache, &entry, start);
12672f6c2adbSMichael Baum if (mr == NULL)
12682f6c2adbSMichael Baum continue;
12692f6c2adbSMichael Baum MLX5_ASSERT(mr->msl); /* Can't be external memory. */
12702f6c2adbSMichael Baum ms = rte_mem_virt2memseg((void *)start, msl);
12712f6c2adbSMichael Baum MLX5_ASSERT(ms != NULL);
12722f6c2adbSMichael Baum MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
12732f6c2adbSMichael Baum ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
12742f6c2adbSMichael Baum pos = ms_idx - mr->ms_base_idx;
12752f6c2adbSMichael Baum MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
12762f6c2adbSMichael Baum MLX5_ASSERT(pos < mr->ms_bmp_n);
12772f6c2adbSMichael Baum DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
12782f6c2adbSMichael Baum ibdev_name, (void *)mr, pos, (void *)start);
12792f6c2adbSMichael Baum rte_bitmap_clear(mr->ms_bmp, pos);
12802f6c2adbSMichael Baum if (--mr->ms_n == 0) {
12812f6c2adbSMichael Baum LIST_REMOVE(mr, mr);
12822f6c2adbSMichael Baum LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
12832f6c2adbSMichael Baum DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
12842f6c2adbSMichael Baum ibdev_name, (void *)mr);
12852f6c2adbSMichael Baum }
12862f6c2adbSMichael Baum /*
12872f6c2adbSMichael Baum * The MR is fragmented or will be freed. The global cache must be
12882f6c2adbSMichael Baum * rebuilt.
12892f6c2adbSMichael Baum */
12902f6c2adbSMichael Baum rebuild = 1;
12912f6c2adbSMichael Baum }
12922f6c2adbSMichael Baum if (rebuild) {
12932f6c2adbSMichael Baum mlx5_mr_rebuild_cache(share_cache);
12942f6c2adbSMichael Baum /*
12952f6c2adbSMichael Baum * No explicit wmb is needed after updating dev_gen: the
12962f6c2adbSMichael Baum * store-release semantics of the unlock below provide the
12972f6c2adbSMichael Baum * implicit barrier at the software-visible level.
12982f6c2adbSMichael Baum */
12992f6c2adbSMichael Baum ++share_cache->dev_gen;
13002f6c2adbSMichael Baum DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
13012f6c2adbSMichael Baum share_cache->dev_gen);
13022f6c2adbSMichael Baum }
13032f6c2adbSMichael Baum rte_rwlock_write_unlock(&share_cache->rwlock);
13042f6c2adbSMichael Baum }
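
/*
 * Illustrative sketch (not part of the driver build): where the free
 * callback above is meant to be invoked from. It assumes the EAL memory
 * event callback is registered once per process; the guard macro, the
 * example_* names and the "example_ibdev" string are hypothetical.
 */
#ifdef MLX5_MR_EXAMPLE_SNIPPETS
static void
example_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg)
{
	struct mlx5_mr_share_cache *share_cache = arg;

	if (event_type == RTE_MEM_EVENT_FREE)
		mlx5_free_mr_by_addr(share_cache, "example_ibdev", addr, len);
}
/* Registered e.g. at driver init:
 * rte_mem_event_callback_register("example_mr", example_mem_event_cb, sc);
 */
#endif /* MLX5_MR_EXAMPLE_SNIPPETS */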
13052f6c2adbSMichael Baum
13062f6c2adbSMichael Baum /**
1307b8dc6b0eSVu Pham * Dump all the created MRs and the global cache entries.
1308b8dc6b0eSVu Pham *
1309fc59a1ecSMichael Baum * @param share_cache
1310fc59a1ecSMichael Baum * Pointer to a global shared MR cache.
1311b8dc6b0eSVu Pham */
1312b8dc6b0eSVu Pham void
1313b8dc6b0eSVu Pham mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
1314b8dc6b0eSVu Pham {
1315b8dc6b0eSVu Pham #ifdef RTE_LIBRTE_MLX5_DEBUG
1316b8dc6b0eSVu Pham struct mlx5_mr *mr;
1317b8dc6b0eSVu Pham int mr_n = 0;
1318b8dc6b0eSVu Pham int chunk_n = 0;
1319b8dc6b0eSVu Pham
1320b8dc6b0eSVu Pham rte_rwlock_read_lock(&share_cache->rwlock);
1321b8dc6b0eSVu Pham /* Iterate all the existing MRs. */
1322b8dc6b0eSVu Pham LIST_FOREACH(mr, &share_cache->mr_list, mr) {
1323b8dc6b0eSVu Pham unsigned int n;
1324b8dc6b0eSVu Pham
132587acdcc7SThomas Monjalon DRV_LOG(DEBUG, "MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
132656d20677SOphir Munk mr_n++, rte_cpu_to_be_32(mr->pmd_mr.lkey),
1327b8dc6b0eSVu Pham mr->ms_n, mr->ms_bmp_n);
1328b8dc6b0eSVu Pham if (mr->ms_n == 0)
1329b8dc6b0eSVu Pham continue;
1330b8dc6b0eSVu Pham for (n = 0; n < mr->ms_bmp_n; ) {
1331b8dc6b0eSVu Pham struct mr_cache_entry ret = { 0, };
1332b8dc6b0eSVu Pham
1333b8dc6b0eSVu Pham n = mr_find_next_chunk(mr, &ret, n);
1334b8dc6b0eSVu Pham if (!ret.end)
1335b8dc6b0eSVu Pham break;
133687acdcc7SThomas Monjalon DRV_LOG(DEBUG,
133787acdcc7SThomas Monjalon " chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
1338b8dc6b0eSVu Pham chunk_n++, ret.start, ret.end);
1339b8dc6b0eSVu Pham }
1340b8dc6b0eSVu Pham }
134187acdcc7SThomas Monjalon DRV_LOG(DEBUG, "Dumping global cache %p", (void *)share_cache);
1342b8dc6b0eSVu Pham mlx5_mr_btree_dump(&share_cache->cache);
1343b8dc6b0eSVu Pham rte_rwlock_read_unlock(&share_cache->rwlock);
1344b8dc6b0eSVu Pham #endif
1345b8dc6b0eSVu Pham }
1346690b2a88SDmitry Kozlyuk
1347690b2a88SDmitry Kozlyuk static int
1348690b2a88SDmitry Kozlyuk mlx5_range_compare_start(const void *lhs, const void *rhs)
1349690b2a88SDmitry Kozlyuk {
1350690b2a88SDmitry Kozlyuk const struct mlx5_range *r1 = lhs, *r2 = rhs;
1351690b2a88SDmitry Kozlyuk
1352690b2a88SDmitry Kozlyuk if (r1->start > r2->start)
1353690b2a88SDmitry Kozlyuk return 1;
1354690b2a88SDmitry Kozlyuk else if (r1->start < r2->start)
1355690b2a88SDmitry Kozlyuk return -1;
1356690b2a88SDmitry Kozlyuk return 0;
1357690b2a88SDmitry Kozlyuk }
1358690b2a88SDmitry Kozlyuk
1359690b2a88SDmitry Kozlyuk static void
1360690b2a88SDmitry Kozlyuk mlx5_range_from_mempool_chunk(struct rte_mempool *mp, void *opaque,
1361690b2a88SDmitry Kozlyuk struct rte_mempool_memhdr *memhdr,
1362690b2a88SDmitry Kozlyuk unsigned int idx)
1363690b2a88SDmitry Kozlyuk {
1364690b2a88SDmitry Kozlyuk struct mlx5_range *ranges = opaque, *range = &ranges[idx];
136581132518SDmitry Kozlyuk uintptr_t start = (uintptr_t)memhdr->addr;
1366690b2a88SDmitry Kozlyuk uint64_t page_size = rte_mem_page_size();
1367690b2a88SDmitry Kozlyuk
1368690b2a88SDmitry Kozlyuk RTE_SET_USED(mp);
136981132518SDmitry Kozlyuk range->start = RTE_ALIGN_FLOOR(start, page_size);
137081132518SDmitry Kozlyuk range->end = RTE_ALIGN_CEIL(start + memhdr->len, page_size);
1371690b2a88SDmitry Kozlyuk }
1372690b2a88SDmitry Kozlyuk
1373690b2a88SDmitry Kozlyuk /**
13747297d2cdSDmitry Kozlyuk * Collect page-aligned memory ranges of the mempool.
13757297d2cdSDmitry Kozlyuk */
13767297d2cdSDmitry Kozlyuk static int
13777297d2cdSDmitry Kozlyuk mlx5_mempool_get_chunks(struct rte_mempool *mp, struct mlx5_range **out,
13787297d2cdSDmitry Kozlyuk unsigned int *out_n)
13797297d2cdSDmitry Kozlyuk {
13807297d2cdSDmitry Kozlyuk unsigned int n;
13817297d2cdSDmitry Kozlyuk
1382e4c402afSDmitry Kozlyuk DRV_LOG(DEBUG, "Collecting chunks of regular mempool %s", mp->name);
13837297d2cdSDmitry Kozlyuk n = mp->nb_mem_chunks;
1384f805f70bSFerruh Yigit *out = calloc(n, sizeof(**out));
138508ac0358SDmitry Kozlyuk if (*out == NULL)
13867297d2cdSDmitry Kozlyuk return -1;
138708ac0358SDmitry Kozlyuk rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, *out);
13887297d2cdSDmitry Kozlyuk *out_n = n;
13897297d2cdSDmitry Kozlyuk return 0;
13907297d2cdSDmitry Kozlyuk }
13917297d2cdSDmitry Kozlyuk
13927297d2cdSDmitry Kozlyuk struct mlx5_mempool_get_extmem_data {
13937297d2cdSDmitry Kozlyuk struct mlx5_range *heap;
13947297d2cdSDmitry Kozlyuk unsigned int heap_size;
13957297d2cdSDmitry Kozlyuk int ret;
13967297d2cdSDmitry Kozlyuk };
13977297d2cdSDmitry Kozlyuk
13987297d2cdSDmitry Kozlyuk static void
13997297d2cdSDmitry Kozlyuk mlx5_mempool_get_extmem_cb(struct rte_mempool *mp, void *opaque,
14007297d2cdSDmitry Kozlyuk void *obj, unsigned int obj_idx)
14017297d2cdSDmitry Kozlyuk {
14027297d2cdSDmitry Kozlyuk struct mlx5_mempool_get_extmem_data *data = opaque;
14037297d2cdSDmitry Kozlyuk struct rte_mbuf *mbuf = obj;
14047297d2cdSDmitry Kozlyuk uintptr_t addr = (uintptr_t)mbuf->buf_addr;
14057297d2cdSDmitry Kozlyuk struct mlx5_range *seg, *heap;
14067297d2cdSDmitry Kozlyuk struct rte_memseg_list *msl;
14077297d2cdSDmitry Kozlyuk size_t page_size;
14087297d2cdSDmitry Kozlyuk uintptr_t page_start;
14097297d2cdSDmitry Kozlyuk unsigned int pos = 0, len = data->heap_size, delta;
14107297d2cdSDmitry Kozlyuk
14117297d2cdSDmitry Kozlyuk RTE_SET_USED(mp);
14127297d2cdSDmitry Kozlyuk RTE_SET_USED(obj_idx);
14137297d2cdSDmitry Kozlyuk if (data->ret < 0)
14147297d2cdSDmitry Kozlyuk return;
14157297d2cdSDmitry Kozlyuk /* Binary search for an already visited page. */
14167297d2cdSDmitry Kozlyuk while (len > 1) {
14177297d2cdSDmitry Kozlyuk delta = len / 2;
14187297d2cdSDmitry Kozlyuk if (addr < data->heap[pos + delta].start) {
14197297d2cdSDmitry Kozlyuk len = delta;
14207297d2cdSDmitry Kozlyuk } else {
14217297d2cdSDmitry Kozlyuk pos += delta;
14227297d2cdSDmitry Kozlyuk len -= delta;
14237297d2cdSDmitry Kozlyuk }
14247297d2cdSDmitry Kozlyuk }
14257297d2cdSDmitry Kozlyuk if (data->heap != NULL) {
14267297d2cdSDmitry Kozlyuk seg = &data->heap[pos];
14277297d2cdSDmitry Kozlyuk if (seg->start <= addr && addr < seg->end)
14287297d2cdSDmitry Kozlyuk return;
14297297d2cdSDmitry Kozlyuk }
14307297d2cdSDmitry Kozlyuk /* Determine the page boundaries and remember them. */
14317297d2cdSDmitry Kozlyuk heap = realloc(data->heap, sizeof(heap[0]) * (data->heap_size + 1));
14327297d2cdSDmitry Kozlyuk if (heap == NULL) {
14337297d2cdSDmitry Kozlyuk free(data->heap);
14347297d2cdSDmitry Kozlyuk data->heap = NULL;
14357297d2cdSDmitry Kozlyuk data->ret = -1;
14367297d2cdSDmitry Kozlyuk return;
14377297d2cdSDmitry Kozlyuk }
14387297d2cdSDmitry Kozlyuk data->heap = heap;
14397297d2cdSDmitry Kozlyuk data->heap_size++;
14407297d2cdSDmitry Kozlyuk seg = &heap[data->heap_size - 1];
14417297d2cdSDmitry Kozlyuk msl = rte_mem_virt2memseg_list((void *)addr);
14427297d2cdSDmitry Kozlyuk page_size = msl != NULL ? msl->page_sz : rte_mem_page_size();
14437297d2cdSDmitry Kozlyuk page_start = RTE_PTR_ALIGN_FLOOR(addr, page_size);
14447297d2cdSDmitry Kozlyuk seg->start = page_start;
14457297d2cdSDmitry Kozlyuk seg->end = page_start + page_size;
14467297d2cdSDmitry Kozlyuk /* Maintain the heap order. */
14477297d2cdSDmitry Kozlyuk qsort(data->heap, data->heap_size, sizeof(heap[0]),
14487297d2cdSDmitry Kozlyuk mlx5_range_compare_start);
14497297d2cdSDmitry Kozlyuk }
14507297d2cdSDmitry Kozlyuk
14517297d2cdSDmitry Kozlyuk /**
14527297d2cdSDmitry Kozlyuk * Best-effort recovery of the pages backing external memory
14537297d2cdSDmitry Kozlyuk * for a mempool created with RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF.
14547297d2cdSDmitry Kozlyuk * Pages are stored in a sorted heap for efficient search, since mbufs can be numerous.
14557297d2cdSDmitry Kozlyuk */
14567297d2cdSDmitry Kozlyuk static int
14577297d2cdSDmitry Kozlyuk mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range **out,
14587297d2cdSDmitry Kozlyuk unsigned int *out_n)
14597297d2cdSDmitry Kozlyuk {
14607297d2cdSDmitry Kozlyuk struct mlx5_mempool_get_extmem_data data;
14617297d2cdSDmitry Kozlyuk
1462e4c402afSDmitry Kozlyuk DRV_LOG(DEBUG, "Recovering external pinned pages of mempool %s",
1463e4c402afSDmitry Kozlyuk mp->name);
14647297d2cdSDmitry Kozlyuk memset(&data, 0, sizeof(data));
14657297d2cdSDmitry Kozlyuk rte_mempool_obj_iter(mp, mlx5_mempool_get_extmem_cb, &data);
14667297d2cdSDmitry Kozlyuk *out = data.heap;
14677297d2cdSDmitry Kozlyuk *out_n = data.heap_size;
146808ac0358SDmitry Kozlyuk return data.ret;
14697297d2cdSDmitry Kozlyuk }
14707297d2cdSDmitry Kozlyuk
14717297d2cdSDmitry Kozlyuk /**
1472690b2a88SDmitry Kozlyuk * Get VA-contiguous ranges of the mempool memory.
1473690b2a88SDmitry Kozlyuk * Each range start and end is aligned to the system page size.
1474690b2a88SDmitry Kozlyuk *
1475690b2a88SDmitry Kozlyuk * @param[in] mp
1476690b2a88SDmitry Kozlyuk * Analyzed mempool.
147708ac0358SDmitry Kozlyuk * @param[in] is_extmem
147808ac0358SDmitry Kozlyuk * Whether the pool contains only external pinned buffers.
1479690b2a88SDmitry Kozlyuk * @param[out] out
1480690b2a88SDmitry Kozlyuk * Receives the ranges, caller must release it with free().
148108ac0358SDmitry Kozlyuk * @param[out] out_n
1482690b2a88SDmitry Kozlyuk * Receives the number of @p out elements.
1483690b2a88SDmitry Kozlyuk *
1484690b2a88SDmitry Kozlyuk * @return
1485690b2a88SDmitry Kozlyuk * 0 on success, (-1) on failure.
1486690b2a88SDmitry Kozlyuk */
1487690b2a88SDmitry Kozlyuk static int
148808ac0358SDmitry Kozlyuk mlx5_get_mempool_ranges(struct rte_mempool *mp, bool is_extmem,
148908ac0358SDmitry Kozlyuk struct mlx5_range **out, unsigned int *out_n)
1490690b2a88SDmitry Kozlyuk {
1491690b2a88SDmitry Kozlyuk struct mlx5_range *chunks;
14927297d2cdSDmitry Kozlyuk unsigned int chunks_n, contig_n, i;
14937297d2cdSDmitry Kozlyuk int ret;
1494690b2a88SDmitry Kozlyuk
14957297d2cdSDmitry Kozlyuk /* Collect the pool underlying memory. */
149608ac0358SDmitry Kozlyuk ret = is_extmem ? mlx5_mempool_get_extmem(mp, &chunks, &chunks_n) :
14977297d2cdSDmitry Kozlyuk mlx5_mempool_get_chunks(mp, &chunks, &chunks_n);
14987297d2cdSDmitry Kozlyuk if (ret < 0)
14997297d2cdSDmitry Kozlyuk return ret;
1500690b2a88SDmitry Kozlyuk /* Merge adjacent chunks and place them at the beginning. */
1501690b2a88SDmitry Kozlyuk qsort(chunks, chunks_n, sizeof(chunks[0]), mlx5_range_compare_start);
1502690b2a88SDmitry Kozlyuk contig_n = 1;
1503690b2a88SDmitry Kozlyuk for (i = 1; i < chunks_n; i++)
1504690b2a88SDmitry Kozlyuk if (chunks[i - 1].end != chunks[i].start) {
1505690b2a88SDmitry Kozlyuk chunks[contig_n - 1].end = chunks[i - 1].end;
1506690b2a88SDmitry Kozlyuk chunks[contig_n] = chunks[i];
1507690b2a88SDmitry Kozlyuk contig_n++;
1508690b2a88SDmitry Kozlyuk }
1509690b2a88SDmitry Kozlyuk /* Extend the last contiguous chunk to the end of the mempool. */
1510690b2a88SDmitry Kozlyuk chunks[contig_n - 1].end = chunks[i - 1].end;
1511690b2a88SDmitry Kozlyuk *out = chunks;
1512690b2a88SDmitry Kozlyuk *out_n = contig_n;
1513690b2a88SDmitry Kozlyuk return 0;
1514690b2a88SDmitry Kozlyuk }
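
/*
 * Worked example for mlx5_get_mempool_ranges() above (addresses are
 * illustrative only): after sorting, chunk ranges [0x1000, 0x3000) and
 * [0x3000, 0x5000) are adjacent, so no new output range is started and the
 * pair collapses into [0x1000, 0x5000); a gap before the next chunk starts
 * a new contiguous output range.
 */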
1515690b2a88SDmitry Kozlyuk
1516690b2a88SDmitry Kozlyuk /**
1517690b2a88SDmitry Kozlyuk * Analyze mempool memory to select memory ranges to register.
1518690b2a88SDmitry Kozlyuk *
1519690b2a88SDmitry Kozlyuk * @param[in] mp
1520690b2a88SDmitry Kozlyuk * Mempool to analyze.
152108ac0358SDmitry Kozlyuk * @param[in] is_extmem
152208ac0358SDmitry Kozlyuk * Whether the pool contains only external pinned buffers.
1523690b2a88SDmitry Kozlyuk * @param[out] out
1524690b2a88SDmitry Kozlyuk * Receives memory ranges to register, aligned to the system page size.
1525690b2a88SDmitry Kozlyuk * The caller must release them with free().
1526690b2a88SDmitry Kozlyuk * @param[out] out_n
1527690b2a88SDmitry Kozlyuk * Receives the number of @p out items.
1528690b2a88SDmitry Kozlyuk * @param[out] share_hugepage
1529690b2a88SDmitry Kozlyuk * Receives True if the entire pool resides within a single hugepage.
1530690b2a88SDmitry Kozlyuk *
1531690b2a88SDmitry Kozlyuk * @return
1532690b2a88SDmitry Kozlyuk * 0 on success, (-1) on failure.
1533690b2a88SDmitry Kozlyuk */
1534690b2a88SDmitry Kozlyuk static int
153508ac0358SDmitry Kozlyuk mlx5_mempool_reg_analyze(struct rte_mempool *mp, bool is_extmem,
153608ac0358SDmitry Kozlyuk struct mlx5_range **out, unsigned int *out_n,
153708ac0358SDmitry Kozlyuk bool *share_hugepage)
1538690b2a88SDmitry Kozlyuk {
1539690b2a88SDmitry Kozlyuk struct mlx5_range *ranges = NULL;
1540690b2a88SDmitry Kozlyuk unsigned int i, ranges_n = 0;
1541690b2a88SDmitry Kozlyuk struct rte_memseg_list *msl;
1542690b2a88SDmitry Kozlyuk
154308ac0358SDmitry Kozlyuk if (mlx5_get_mempool_ranges(mp, is_extmem, &ranges, &ranges_n) < 0) {
1544690b2a88SDmitry Kozlyuk DRV_LOG(ERR, "Cannot get address ranges for mempool %s",
1545690b2a88SDmitry Kozlyuk mp->name);
1546690b2a88SDmitry Kozlyuk return -1;
1547690b2a88SDmitry Kozlyuk }
1548690b2a88SDmitry Kozlyuk /* Check if the hugepage of the pool can be shared. */
1549690b2a88SDmitry Kozlyuk *share_hugepage = false;
1550690b2a88SDmitry Kozlyuk msl = rte_mem_virt2memseg_list((void *)ranges[0].start);
1551690b2a88SDmitry Kozlyuk if (msl != NULL) {
1552690b2a88SDmitry Kozlyuk uint64_t hugepage_sz = 0;
1553690b2a88SDmitry Kozlyuk
1554690b2a88SDmitry Kozlyuk /* Check that all ranges are on pages of the same size. */
1555690b2a88SDmitry Kozlyuk for (i = 0; i < ranges_n; i++) {
1556690b2a88SDmitry Kozlyuk if (hugepage_sz != 0 && hugepage_sz != msl->page_sz)
1557690b2a88SDmitry Kozlyuk break;
1558690b2a88SDmitry Kozlyuk hugepage_sz = msl->page_sz;
1559690b2a88SDmitry Kozlyuk }
1560690b2a88SDmitry Kozlyuk if (i == ranges_n) {
1561690b2a88SDmitry Kozlyuk /*
1562690b2a88SDmitry Kozlyuk * If the entire pool is within one hugepage,
1563690b2a88SDmitry Kozlyuk * combine all ranges into one of the hugepage size.
1564690b2a88SDmitry Kozlyuk */
1565690b2a88SDmitry Kozlyuk uintptr_t reg_start = ranges[0].start;
1566690b2a88SDmitry Kozlyuk uintptr_t reg_end = ranges[ranges_n - 1].end;
1567690b2a88SDmitry Kozlyuk uintptr_t hugepage_start =
1568690b2a88SDmitry Kozlyuk RTE_ALIGN_FLOOR(reg_start, hugepage_sz);
1569690b2a88SDmitry Kozlyuk uintptr_t hugepage_end = hugepage_start + hugepage_sz;
1570690b2a88SDmitry Kozlyuk if (reg_end < hugepage_end) {
1571690b2a88SDmitry Kozlyuk ranges[0].start = hugepage_start;
1572690b2a88SDmitry Kozlyuk ranges[0].end = hugepage_end;
1573690b2a88SDmitry Kozlyuk ranges_n = 1;
1574690b2a88SDmitry Kozlyuk *share_hugepage = true;
1575690b2a88SDmitry Kozlyuk }
1576690b2a88SDmitry Kozlyuk }
1577690b2a88SDmitry Kozlyuk }
1578690b2a88SDmitry Kozlyuk *out = ranges;
1579690b2a88SDmitry Kozlyuk *out_n = ranges_n;
1580690b2a88SDmitry Kozlyuk return 0;
1581690b2a88SDmitry Kozlyuk }
1582690b2a88SDmitry Kozlyuk
1583690b2a88SDmitry Kozlyuk /** Create a registration object for the mempool. */
1584690b2a88SDmitry Kozlyuk static struct mlx5_mempool_reg *
158508ac0358SDmitry Kozlyuk mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n,
158608ac0358SDmitry Kozlyuk bool is_extmem)
1587690b2a88SDmitry Kozlyuk {
1588690b2a88SDmitry Kozlyuk struct mlx5_mempool_reg *mpr = NULL;
1589690b2a88SDmitry Kozlyuk
1590690b2a88SDmitry Kozlyuk mpr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
15918947eebcSBing Zhao sizeof(struct mlx5_mempool_reg),
1592690b2a88SDmitry Kozlyuk RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1593690b2a88SDmitry Kozlyuk if (mpr == NULL) {
1594690b2a88SDmitry Kozlyuk DRV_LOG(ERR, "Cannot allocate mempool %s registration object",
1595690b2a88SDmitry Kozlyuk mp->name);
1596690b2a88SDmitry Kozlyuk return NULL;
1597690b2a88SDmitry Kozlyuk }
15988947eebcSBing Zhao mpr->mrs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
15998947eebcSBing Zhao mrs_n * sizeof(struct mlx5_mempool_mr),
16008947eebcSBing Zhao RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16018947eebcSBing Zhao if (!mpr->mrs) {
16028947eebcSBing Zhao DRV_LOG(ERR, "Cannot allocate mempool %s registration MRs",
16038947eebcSBing Zhao mp->name);
16048947eebcSBing Zhao mlx5_free(mpr);
16058947eebcSBing Zhao return NULL;
16068947eebcSBing Zhao }
1607690b2a88SDmitry Kozlyuk mpr->mp = mp;
1608690b2a88SDmitry Kozlyuk mpr->mrs_n = mrs_n;
160908ac0358SDmitry Kozlyuk mpr->is_extmem = is_extmem;
1610690b2a88SDmitry Kozlyuk return mpr;
1611690b2a88SDmitry Kozlyuk }
1612690b2a88SDmitry Kozlyuk
1613690b2a88SDmitry Kozlyuk /**
1614690b2a88SDmitry Kozlyuk * Destroy a mempool registration object.
1615690b2a88SDmitry Kozlyuk *
1616690b2a88SDmitry Kozlyuk * @param standalone
16177be78d02SJosh Soref * Whether @p mpr owns its MRs exclusively, i.e. they are not shared.
1618690b2a88SDmitry Kozlyuk */
1619690b2a88SDmitry Kozlyuk static void
1620690b2a88SDmitry Kozlyuk mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,
1621690b2a88SDmitry Kozlyuk struct mlx5_mempool_reg *mpr, bool standalone)
1622690b2a88SDmitry Kozlyuk {
1623690b2a88SDmitry Kozlyuk if (standalone) {
1624690b2a88SDmitry Kozlyuk unsigned int i;
1625690b2a88SDmitry Kozlyuk
1626690b2a88SDmitry Kozlyuk for (i = 0; i < mpr->mrs_n; i++)
1627690b2a88SDmitry Kozlyuk share_cache->dereg_mr_cb(&mpr->mrs[i].pmd_mr);
16288947eebcSBing Zhao mlx5_free(mpr->mrs);
1629690b2a88SDmitry Kozlyuk }
1630690b2a88SDmitry Kozlyuk mlx5_free(mpr);
1631690b2a88SDmitry Kozlyuk }
1632690b2a88SDmitry Kozlyuk
1633690b2a88SDmitry Kozlyuk /** Find registration object of a mempool. */
1634690b2a88SDmitry Kozlyuk static struct mlx5_mempool_reg *
1635690b2a88SDmitry Kozlyuk mlx5_mempool_reg_lookup(struct mlx5_mr_share_cache *share_cache,
1636690b2a88SDmitry Kozlyuk struct rte_mempool *mp)
1637690b2a88SDmitry Kozlyuk {
1638690b2a88SDmitry Kozlyuk struct mlx5_mempool_reg *mpr;
1639690b2a88SDmitry Kozlyuk
1640690b2a88SDmitry Kozlyuk LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
1641690b2a88SDmitry Kozlyuk if (mpr->mp == mp)
1642690b2a88SDmitry Kozlyuk break;
1643690b2a88SDmitry Kozlyuk return mpr;
1644690b2a88SDmitry Kozlyuk }
1645690b2a88SDmitry Kozlyuk
1646690b2a88SDmitry Kozlyuk /** Increment reference counters of MRs used in the registration. */
1647690b2a88SDmitry Kozlyuk static void
1648690b2a88SDmitry Kozlyuk mlx5_mempool_reg_attach(struct mlx5_mempool_reg *mpr)
1649690b2a88SDmitry Kozlyuk {
1650690b2a88SDmitry Kozlyuk unsigned int i;
1651690b2a88SDmitry Kozlyuk
1652690b2a88SDmitry Kozlyuk for (i = 0; i < mpr->mrs_n; i++)
1653*e12a0166STyler Retzlaff rte_atomic_fetch_add_explicit(&mpr->mrs[i].refcnt, 1, rte_memory_order_relaxed);
1654690b2a88SDmitry Kozlyuk }
1655690b2a88SDmitry Kozlyuk
1656690b2a88SDmitry Kozlyuk /**
1657690b2a88SDmitry Kozlyuk * Decrement reference counters of MRs used in the registration.
1658690b2a88SDmitry Kozlyuk *
1659690b2a88SDmitry Kozlyuk * @return True if no more references to @p mpr MRs exist, False otherwise.
1660690b2a88SDmitry Kozlyuk */
1661690b2a88SDmitry Kozlyuk static bool
1662690b2a88SDmitry Kozlyuk mlx5_mempool_reg_detach(struct mlx5_mempool_reg *mpr)
1663690b2a88SDmitry Kozlyuk {
1664690b2a88SDmitry Kozlyuk unsigned int i;
1665690b2a88SDmitry Kozlyuk bool ret = false;
1666690b2a88SDmitry Kozlyuk
1667690b2a88SDmitry Kozlyuk for (i = 0; i < mpr->mrs_n; i++)
1668*e12a0166STyler Retzlaff ret |= rte_atomic_fetch_sub_explicit(&mpr->mrs[i].refcnt, 1,
1669*e12a0166STyler Retzlaff rte_memory_order_relaxed) - 1 == 0;
1670690b2a88SDmitry Kozlyuk return ret;
1671690b2a88SDmitry Kozlyuk }
1672690b2a88SDmitry Kozlyuk
1673690b2a88SDmitry Kozlyuk static int
1674690b2a88SDmitry Kozlyuk mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
167508ac0358SDmitry Kozlyuk void *pd, struct rte_mempool *mp,
167608ac0358SDmitry Kozlyuk bool is_extmem)
1677690b2a88SDmitry Kozlyuk {
1678690b2a88SDmitry Kozlyuk struct mlx5_range *ranges = NULL;
167908ac0358SDmitry Kozlyuk struct mlx5_mempool_reg *mpr, *old_mpr, *new_mpr;
1680690b2a88SDmitry Kozlyuk unsigned int i, ranges_n;
168108ac0358SDmitry Kozlyuk bool share_hugepage, standalone = false;
1682690b2a88SDmitry Kozlyuk int ret = -1;
1683690b2a88SDmitry Kozlyuk
1684690b2a88SDmitry Kozlyuk /* Early check to avoid unnecessary creation of MRs. */
1685690b2a88SDmitry Kozlyuk rte_rwlock_read_lock(&share_cache->rwlock);
168608ac0358SDmitry Kozlyuk old_mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1687690b2a88SDmitry Kozlyuk rte_rwlock_read_unlock(&share_cache->rwlock);
168808ac0358SDmitry Kozlyuk if (old_mpr != NULL && (!is_extmem || old_mpr->is_extmem)) {
1689690b2a88SDmitry Kozlyuk DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
1690690b2a88SDmitry Kozlyuk mp->name, pd);
1691690b2a88SDmitry Kozlyuk rte_errno = EEXIST;
1692690b2a88SDmitry Kozlyuk goto exit;
1693690b2a88SDmitry Kozlyuk }
169408ac0358SDmitry Kozlyuk if (mlx5_mempool_reg_analyze(mp, is_extmem, &ranges, &ranges_n,
1695690b2a88SDmitry Kozlyuk &share_hugepage) < 0) {
1696690b2a88SDmitry Kozlyuk DRV_LOG(ERR, "Cannot get mempool %s memory ranges", mp->name);
1697690b2a88SDmitry Kozlyuk rte_errno = ENOMEM;
1698690b2a88SDmitry Kozlyuk goto exit;
1699690b2a88SDmitry Kozlyuk }
170008ac0358SDmitry Kozlyuk new_mpr = mlx5_mempool_reg_create(mp, ranges_n, is_extmem);
1701690b2a88SDmitry Kozlyuk if (new_mpr == NULL) {
1702690b2a88SDmitry Kozlyuk DRV_LOG(ERR,
1703690b2a88SDmitry Kozlyuk "Cannot create a registration object for mempool %s in PD %p",
1704690b2a88SDmitry Kozlyuk mp->name, pd);
1705690b2a88SDmitry Kozlyuk rte_errno = ENOMEM;
1706690b2a88SDmitry Kozlyuk goto exit;
1707690b2a88SDmitry Kozlyuk }
1708690b2a88SDmitry Kozlyuk /*
1709690b2a88SDmitry Kozlyuk * If the entire mempool fits in a single hugepage, the MR for this
1710690b2a88SDmitry Kozlyuk * hugepage can be shared across mempools that also fit in it.
1711690b2a88SDmitry Kozlyuk */
1712690b2a88SDmitry Kozlyuk if (share_hugepage) {
1713690b2a88SDmitry Kozlyuk rte_rwlock_write_lock(&share_cache->rwlock);
1714690b2a88SDmitry Kozlyuk LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next) {
1715690b2a88SDmitry Kozlyuk if (mpr->mrs[0].pmd_mr.addr == (void *)ranges[0].start)
1716690b2a88SDmitry Kozlyuk break;
1717690b2a88SDmitry Kozlyuk }
1718690b2a88SDmitry Kozlyuk if (mpr != NULL) {
1719690b2a88SDmitry Kozlyuk new_mpr->mrs = mpr->mrs;
1720690b2a88SDmitry Kozlyuk mlx5_mempool_reg_attach(new_mpr);
1721690b2a88SDmitry Kozlyuk LIST_INSERT_HEAD(&share_cache->mempool_reg_list,
1722690b2a88SDmitry Kozlyuk new_mpr, next);
1723690b2a88SDmitry Kozlyuk }
1724690b2a88SDmitry Kozlyuk rte_rwlock_write_unlock(&share_cache->rwlock);
1725690b2a88SDmitry Kozlyuk if (mpr != NULL) {
1726690b2a88SDmitry Kozlyuk DRV_LOG(DEBUG, "Shared MR %#x in PD %p for mempool %s with mempool %s",
1727690b2a88SDmitry Kozlyuk mpr->mrs[0].pmd_mr.lkey, pd, mp->name,
1728690b2a88SDmitry Kozlyuk mpr->mp->name);
1729690b2a88SDmitry Kozlyuk ret = 0;
1730690b2a88SDmitry Kozlyuk goto exit;
1731690b2a88SDmitry Kozlyuk }
1732690b2a88SDmitry Kozlyuk }
1733690b2a88SDmitry Kozlyuk for (i = 0; i < ranges_n; i++) {
1734690b2a88SDmitry Kozlyuk struct mlx5_mempool_mr *mr = &new_mpr->mrs[i];
1735690b2a88SDmitry Kozlyuk const struct mlx5_range *range = &ranges[i];
1736690b2a88SDmitry Kozlyuk size_t len = range->end - range->start;
1737690b2a88SDmitry Kozlyuk
1738690b2a88SDmitry Kozlyuk if (share_cache->reg_mr_cb(pd, (void *)range->start, len,
1739690b2a88SDmitry Kozlyuk &mr->pmd_mr) < 0) {
1740690b2a88SDmitry Kozlyuk DRV_LOG(ERR,
1741690b2a88SDmitry Kozlyuk "Failed to create an MR in PD %p for address range "
1742690b2a88SDmitry Kozlyuk "[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
1743690b2a88SDmitry Kozlyuk pd, range->start, range->end, len, mp->name);
1744690b2a88SDmitry Kozlyuk break;
1745690b2a88SDmitry Kozlyuk }
1746690b2a88SDmitry Kozlyuk DRV_LOG(DEBUG,
1747690b2a88SDmitry Kozlyuk "Created a new MR %#x in PD %p for address range "
1748690b2a88SDmitry Kozlyuk "[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
1749690b2a88SDmitry Kozlyuk mr->pmd_mr.lkey, pd, range->start, range->end, len,
1750690b2a88SDmitry Kozlyuk mp->name);
1751690b2a88SDmitry Kozlyuk }
1752690b2a88SDmitry Kozlyuk if (i != ranges_n) {
1753690b2a88SDmitry Kozlyuk mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
1754690b2a88SDmitry Kozlyuk rte_errno = EINVAL;
1755690b2a88SDmitry Kozlyuk goto exit;
1756690b2a88SDmitry Kozlyuk }
1757690b2a88SDmitry Kozlyuk /* Concurrent registration is not supposed to happen. */
1758690b2a88SDmitry Kozlyuk rte_rwlock_write_lock(&share_cache->rwlock);
1759690b2a88SDmitry Kozlyuk mpr = mlx5_mempool_reg_lookup(share_cache, mp);
176008ac0358SDmitry Kozlyuk if (mpr == old_mpr && old_mpr != NULL) {
176108ac0358SDmitry Kozlyuk LIST_REMOVE(old_mpr, next);
176208ac0358SDmitry Kozlyuk standalone = mlx5_mempool_reg_detach(mpr);
176308ac0358SDmitry Kozlyuk /* No need to flush the cache: old MRs cannot be in use. */
176408ac0358SDmitry Kozlyuk mpr = NULL;
176508ac0358SDmitry Kozlyuk }
1766690b2a88SDmitry Kozlyuk if (mpr == NULL) {
1767690b2a88SDmitry Kozlyuk mlx5_mempool_reg_attach(new_mpr);
1768fc59a1ecSMichael Baum LIST_INSERT_HEAD(&share_cache->mempool_reg_list, new_mpr, next);
1769690b2a88SDmitry Kozlyuk ret = 0;
1770690b2a88SDmitry Kozlyuk }
1771690b2a88SDmitry Kozlyuk rte_rwlock_write_unlock(&share_cache->rwlock);
1772690b2a88SDmitry Kozlyuk if (mpr != NULL) {
1773690b2a88SDmitry Kozlyuk DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
1774690b2a88SDmitry Kozlyuk mp->name, pd);
1775690b2a88SDmitry Kozlyuk mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
1776690b2a88SDmitry Kozlyuk rte_errno = EEXIST;
1777690b2a88SDmitry Kozlyuk goto exit;
177808ac0358SDmitry Kozlyuk } else if (old_mpr != NULL) {
177908ac0358SDmitry Kozlyuk DRV_LOG(DEBUG, "Mempool %s registration for PD %p updated for external memory",
178008ac0358SDmitry Kozlyuk mp->name, pd);
178108ac0358SDmitry Kozlyuk mlx5_mempool_reg_destroy(share_cache, old_mpr, standalone);
1782690b2a88SDmitry Kozlyuk }
1783690b2a88SDmitry Kozlyuk exit:
1784690b2a88SDmitry Kozlyuk free(ranges);
1785690b2a88SDmitry Kozlyuk return ret;
1786690b2a88SDmitry Kozlyuk }
1787690b2a88SDmitry Kozlyuk
1788690b2a88SDmitry Kozlyuk static int
178920489176SMichael Baum mlx5_mr_mempool_register_secondary(struct mlx5_common_device *cdev,
179008ac0358SDmitry Kozlyuk struct rte_mempool *mp, bool is_extmem)
1791690b2a88SDmitry Kozlyuk {
179208ac0358SDmitry Kozlyuk return mlx5_mp_req_mempool_reg(cdev, mp, true, is_extmem);
1793690b2a88SDmitry Kozlyuk }
1794690b2a88SDmitry Kozlyuk
1795690b2a88SDmitry Kozlyuk /**
1796690b2a88SDmitry Kozlyuk * Register the memory of a mempool in the protection domain.
1797690b2a88SDmitry Kozlyuk *
179820489176SMichael Baum * @param cdev
179920489176SMichael Baum * Pointer to the mlx5 common device.
1800690b2a88SDmitry Kozlyuk * @param mp
1801690b2a88SDmitry Kozlyuk * Mempool to register.
1802690b2a88SDmitry Kozlyuk *
1803690b2a88SDmitry Kozlyuk * @return
1804690b2a88SDmitry Kozlyuk * 0 on success, (-1) on failure and rte_errno is set.
1805690b2a88SDmitry Kozlyuk */
1806690b2a88SDmitry Kozlyuk int
180720489176SMichael Baum mlx5_mr_mempool_register(struct mlx5_common_device *cdev,
180808ac0358SDmitry Kozlyuk struct rte_mempool *mp, bool is_extmem)
1809690b2a88SDmitry Kozlyuk {
1810c47d7b90SAndrew Rybchenko if (mp->flags & RTE_MEMPOOL_F_NON_IO)
1811690b2a88SDmitry Kozlyuk return 0;
1812690b2a88SDmitry Kozlyuk switch (rte_eal_process_type()) {
1813690b2a88SDmitry Kozlyuk case RTE_PROC_PRIMARY:
181420489176SMichael Baum return mlx5_mr_mempool_register_primary(&cdev->mr_scache,
181508ac0358SDmitry Kozlyuk cdev->pd, mp,
181608ac0358SDmitry Kozlyuk is_extmem);
1817690b2a88SDmitry Kozlyuk case RTE_PROC_SECONDARY:
181808ac0358SDmitry Kozlyuk return mlx5_mr_mempool_register_secondary(cdev, mp, is_extmem);
1819690b2a88SDmitry Kozlyuk default:
1820690b2a88SDmitry Kozlyuk return -1;
1821690b2a88SDmitry Kozlyuk }
1822690b2a88SDmitry Kozlyuk }
1823690b2a88SDmitry Kozlyuk
1824690b2a88SDmitry Kozlyuk static int
1825690b2a88SDmitry Kozlyuk mlx5_mr_mempool_unregister_primary(struct mlx5_mr_share_cache *share_cache,
1826690b2a88SDmitry Kozlyuk struct rte_mempool *mp)
1827690b2a88SDmitry Kozlyuk {
1828690b2a88SDmitry Kozlyuk struct mlx5_mempool_reg *mpr;
1829690b2a88SDmitry Kozlyuk bool standalone = false;
1830690b2a88SDmitry Kozlyuk
1831690b2a88SDmitry Kozlyuk rte_rwlock_write_lock(&share_cache->rwlock);
1832690b2a88SDmitry Kozlyuk LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
1833690b2a88SDmitry Kozlyuk if (mpr->mp == mp) {
1834690b2a88SDmitry Kozlyuk LIST_REMOVE(mpr, next);
1835690b2a88SDmitry Kozlyuk standalone = mlx5_mempool_reg_detach(mpr);
1836690b2a88SDmitry Kozlyuk if (standalone)
1837690b2a88SDmitry Kozlyuk /*
1838690b2a88SDmitry Kozlyuk * The unlock operation below provides a memory
1839690b2a88SDmitry Kozlyuk * barrier due to its store-release semantics.
1840690b2a88SDmitry Kozlyuk */
1841690b2a88SDmitry Kozlyuk ++share_cache->dev_gen;
1842690b2a88SDmitry Kozlyuk break;
1843690b2a88SDmitry Kozlyuk }
1844690b2a88SDmitry Kozlyuk rte_rwlock_write_unlock(&share_cache->rwlock);
1845690b2a88SDmitry Kozlyuk if (mpr == NULL) {
1846690b2a88SDmitry Kozlyuk rte_errno = ENOENT;
1847690b2a88SDmitry Kozlyuk return -1;
1848690b2a88SDmitry Kozlyuk }
1849690b2a88SDmitry Kozlyuk mlx5_mempool_reg_destroy(share_cache, mpr, standalone);
1850690b2a88SDmitry Kozlyuk return 0;
1851690b2a88SDmitry Kozlyuk }
1852690b2a88SDmitry Kozlyuk
1853690b2a88SDmitry Kozlyuk static int
185420489176SMichael Baum mlx5_mr_mempool_unregister_secondary(struct mlx5_common_device *cdev,
185520489176SMichael Baum struct rte_mempool *mp)
1856690b2a88SDmitry Kozlyuk {
185708ac0358SDmitry Kozlyuk return mlx5_mp_req_mempool_reg(cdev, mp, false, false /* is_extmem */);
1858690b2a88SDmitry Kozlyuk }
1859690b2a88SDmitry Kozlyuk
1860690b2a88SDmitry Kozlyuk /**
1861690b2a88SDmitry Kozlyuk * Unregister the memory of a mempool from the protection domain.
1862690b2a88SDmitry Kozlyuk *
186320489176SMichael Baum * @param cdev
186420489176SMichael Baum * Pointer to the mlx5 common device.
1865690b2a88SDmitry Kozlyuk * @param mp
1866690b2a88SDmitry Kozlyuk * Mempool to unregister.
1867690b2a88SDmitry Kozlyuk *
1868690b2a88SDmitry Kozlyuk * @return
1869690b2a88SDmitry Kozlyuk * 0 on success, (-1) on failure and rte_errno is set.
1870690b2a88SDmitry Kozlyuk */
1871690b2a88SDmitry Kozlyuk int
187220489176SMichael Baum mlx5_mr_mempool_unregister(struct mlx5_common_device *cdev,
187320489176SMichael Baum struct rte_mempool *mp)
1874690b2a88SDmitry Kozlyuk {
1875c47d7b90SAndrew Rybchenko if (mp->flags & RTE_MEMPOOL_F_NON_IO)
1876690b2a88SDmitry Kozlyuk return 0;
1877690b2a88SDmitry Kozlyuk switch (rte_eal_process_type()) {
1878690b2a88SDmitry Kozlyuk case RTE_PROC_PRIMARY:
187920489176SMichael Baum return mlx5_mr_mempool_unregister_primary(&cdev->mr_scache, mp);
1880690b2a88SDmitry Kozlyuk case RTE_PROC_SECONDARY:
188120489176SMichael Baum return mlx5_mr_mempool_unregister_secondary(cdev, mp);
1882690b2a88SDmitry Kozlyuk default:
1883690b2a88SDmitry Kozlyuk return -1;
1884690b2a88SDmitry Kozlyuk }
1885690b2a88SDmitry Kozlyuk }
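
/*
 * Illustrative sketch (not part of the driver build): how the register and
 * unregister entry points above are typically paired around the lifetime of
 * an Rx queue. EEXIST on registration and ENOENT on unregistration are
 * benign when another queue already did the work. The
 * MLX5_MR_EXAMPLE_SNIPPETS guard and the example_* name are hypothetical.
 */
#ifdef MLX5_MR_EXAMPLE_SNIPPETS
static int
example_rxq_mempool_setup(struct mlx5_common_device *cdev,
			  struct rte_mempool *mp)
{
	/* Regular (not external-pinned) Rx mempool: is_extmem = false. */
	if (mlx5_mr_mempool_register(cdev, mp, false) < 0 &&
	    rte_errno != EEXIST)
		return -rte_errno;
	/* ... the queue receives traffic ... */
	if (mlx5_mr_mempool_unregister(cdev, mp) < 0 && rte_errno != ENOENT)
		return -rte_errno;
	return 0;
}
#endif /* MLX5_MR_EXAMPLE_SNIPPETS */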
1886690b2a88SDmitry Kozlyuk
1887690b2a88SDmitry Kozlyuk /**
1888690b2a88SDmitry Kozlyuk * Look up an MR key by address in a registered mempool.
1889690b2a88SDmitry Kozlyuk *
1890690b2a88SDmitry Kozlyuk * @param mpr
1891690b2a88SDmitry Kozlyuk * Mempool registration object.
1892690b2a88SDmitry Kozlyuk * @param addr
1893690b2a88SDmitry Kozlyuk * Address within the mempool.
1894690b2a88SDmitry Kozlyuk * @param entry
1895690b2a88SDmitry Kozlyuk * Bottom-half cache entry to fill.
1896690b2a88SDmitry Kozlyuk *
1897690b2a88SDmitry Kozlyuk * @return
1898690b2a88SDmitry Kozlyuk * MR key or UINT32_MAX on failure, which can only happen
1899690b2a88SDmitry Kozlyuk * if the address is not from within the mempool.
1900690b2a88SDmitry Kozlyuk */
1901690b2a88SDmitry Kozlyuk static uint32_t
1902690b2a88SDmitry Kozlyuk mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr,
1903690b2a88SDmitry Kozlyuk struct mr_cache_entry *entry)
1904690b2a88SDmitry Kozlyuk {
1905690b2a88SDmitry Kozlyuk uint32_t lkey = UINT32_MAX;
1906690b2a88SDmitry Kozlyuk unsigned int i;
1907690b2a88SDmitry Kozlyuk
1908690b2a88SDmitry Kozlyuk for (i = 0; i < mpr->mrs_n; i++) {
1909690b2a88SDmitry Kozlyuk const struct mlx5_pmd_mr *mr = &mpr->mrs[i].pmd_mr;
19102eb92b0fSDmitry Kozlyuk uintptr_t mr_start = (uintptr_t)mr->addr;
19112eb92b0fSDmitry Kozlyuk uintptr_t mr_end = mr_start + mr->len;
1912690b2a88SDmitry Kozlyuk
19132eb92b0fSDmitry Kozlyuk if (mr_start <= addr && addr < mr_end) {
1914690b2a88SDmitry Kozlyuk lkey = rte_cpu_to_be_32(mr->lkey);
19152eb92b0fSDmitry Kozlyuk entry->start = mr_start;
19162eb92b0fSDmitry Kozlyuk entry->end = mr_end;
1917690b2a88SDmitry Kozlyuk entry->lkey = lkey;
1918690b2a88SDmitry Kozlyuk break;
1919690b2a88SDmitry Kozlyuk }
1920690b2a88SDmitry Kozlyuk }
1921690b2a88SDmitry Kozlyuk return lkey;
1922690b2a88SDmitry Kozlyuk }
1923690b2a88SDmitry Kozlyuk
1924690b2a88SDmitry Kozlyuk /**
1925690b2a88SDmitry Kozlyuk * Update bottom-half cache from the list of mempool registrations.
1926690b2a88SDmitry Kozlyuk *
1927690b2a88SDmitry Kozlyuk * @param mr_ctrl
1928690b2a88SDmitry Kozlyuk * Per-queue MR control handle.
1929690b2a88SDmitry Kozlyuk * @param entry
1930690b2a88SDmitry Kozlyuk * Pointer to an entry in the bottom-half cache to update
1931690b2a88SDmitry Kozlyuk * with the MR lkey looked up.
1932690b2a88SDmitry Kozlyuk * @param mp
1933690b2a88SDmitry Kozlyuk * Mempool containing the address.
1934690b2a88SDmitry Kozlyuk * @param addr
1935690b2a88SDmitry Kozlyuk * Address to lookup.
1936690b2a88SDmitry Kozlyuk * @return
1937690b2a88SDmitry Kozlyuk * MR lkey on success, UINT32_MAX on failure.
1938690b2a88SDmitry Kozlyuk */
1939690b2a88SDmitry Kozlyuk static uint32_t
194071304b5cSMichael Baum mlx5_lookup_mempool_regs(struct mlx5_mr_ctrl *mr_ctrl,
1941690b2a88SDmitry Kozlyuk struct mr_cache_entry *entry,
1942690b2a88SDmitry Kozlyuk struct rte_mempool *mp, uintptr_t addr)
1943690b2a88SDmitry Kozlyuk {
194471304b5cSMichael Baum struct mlx5_mr_share_cache *share_cache =
194571304b5cSMichael Baum container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
194671304b5cSMichael Baum dev_gen);
1947690b2a88SDmitry Kozlyuk struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
1948690b2a88SDmitry Kozlyuk struct mlx5_mempool_reg *mpr;
1949690b2a88SDmitry Kozlyuk uint32_t lkey = UINT32_MAX;
1950690b2a88SDmitry Kozlyuk
1951690b2a88SDmitry Kozlyuk /* If local cache table is full, try to double it. */
1952690b2a88SDmitry Kozlyuk if (unlikely(bt->len == bt->size))
1953690b2a88SDmitry Kozlyuk mr_btree_expand(bt, bt->size << 1);
1954690b2a88SDmitry Kozlyuk /* Look up in mempool registrations. */
1955690b2a88SDmitry Kozlyuk rte_rwlock_read_lock(&share_cache->rwlock);
1956690b2a88SDmitry Kozlyuk mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1957690b2a88SDmitry Kozlyuk if (mpr != NULL)
1958690b2a88SDmitry Kozlyuk lkey = mlx5_mempool_reg_addr2mr(mpr, addr, entry);
1959690b2a88SDmitry Kozlyuk rte_rwlock_read_unlock(&share_cache->rwlock);
1960690b2a88SDmitry Kozlyuk /*
1961690b2a88SDmitry Kozlyuk * Update local cache. Even if it fails, return the found entry
1962690b2a88SDmitry Kozlyuk * to update top-half cache. Next time, this entry will be found
1963690b2a88SDmitry Kozlyuk * in the global cache.
1964690b2a88SDmitry Kozlyuk */
1965690b2a88SDmitry Kozlyuk if (lkey != UINT32_MAX)
1966690b2a88SDmitry Kozlyuk mr_btree_insert(bt, entry);
1967690b2a88SDmitry Kozlyuk return lkey;
1968690b2a88SDmitry Kozlyuk }
1969690b2a88SDmitry Kozlyuk
1970690b2a88SDmitry Kozlyuk /**
197108ac0358SDmitry Kozlyuk * Populate cache with LKeys of all MRs used by the mempool.
197208ac0358SDmitry Kozlyuk * It is intended to be used when Rx mempools are registered in advance.
197308ac0358SDmitry Kozlyuk *
197408ac0358SDmitry Kozlyuk * @param mr_ctrl
197508ac0358SDmitry Kozlyuk * Per-queue MR control handle.
197608ac0358SDmitry Kozlyuk * @param mp
197708ac0358SDmitry Kozlyuk * Registered memory pool.
197808ac0358SDmitry Kozlyuk *
197908ac0358SDmitry Kozlyuk * @return
198008ac0358SDmitry Kozlyuk * 0 on success, (-1) on failure and rte_errno is set.
198108ac0358SDmitry Kozlyuk */
198208ac0358SDmitry Kozlyuk int
198308ac0358SDmitry Kozlyuk mlx5_mr_mempool_populate_cache(struct mlx5_mr_ctrl *mr_ctrl,
198408ac0358SDmitry Kozlyuk struct rte_mempool *mp)
198508ac0358SDmitry Kozlyuk {
198608ac0358SDmitry Kozlyuk struct mlx5_mr_share_cache *share_cache =
198708ac0358SDmitry Kozlyuk container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
198808ac0358SDmitry Kozlyuk dev_gen);
198908ac0358SDmitry Kozlyuk struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
199008ac0358SDmitry Kozlyuk struct mlx5_mempool_reg *mpr;
199108ac0358SDmitry Kozlyuk unsigned int i;
199208ac0358SDmitry Kozlyuk
199308ac0358SDmitry Kozlyuk /*
199408ac0358SDmitry Kozlyuk * Registration is valid after the lock is released,
199508ac0358SDmitry Kozlyuk * because the function is called after the mempool is registered.
199608ac0358SDmitry Kozlyuk */
199708ac0358SDmitry Kozlyuk rte_rwlock_read_lock(&share_cache->rwlock);
199808ac0358SDmitry Kozlyuk mpr = mlx5_mempool_reg_lookup(share_cache, mp);
199908ac0358SDmitry Kozlyuk rte_rwlock_read_unlock(&share_cache->rwlock);
200008ac0358SDmitry Kozlyuk if (mpr == NULL) {
200108ac0358SDmitry Kozlyuk DRV_LOG(ERR, "Mempool %s is not registered", mp->name);
200208ac0358SDmitry Kozlyuk rte_errno = ENOENT;
200308ac0358SDmitry Kozlyuk return -1;
200408ac0358SDmitry Kozlyuk }
200508ac0358SDmitry Kozlyuk for (i = 0; i < mpr->mrs_n; i++) {
200608ac0358SDmitry Kozlyuk struct mlx5_mempool_mr *mr = &mpr->mrs[i];
200708ac0358SDmitry Kozlyuk struct mr_cache_entry entry;
200808ac0358SDmitry Kozlyuk uint32_t lkey;
2009e96d3d02SDmitry Kozlyuk uint32_t idx;
201008ac0358SDmitry Kozlyuk
201108ac0358SDmitry Kozlyuk lkey = mr_btree_lookup(bt, &idx, (uintptr_t)mr->pmd_mr.addr);
201208ac0358SDmitry Kozlyuk if (lkey != UINT32_MAX)
201308ac0358SDmitry Kozlyuk continue;
201408ac0358SDmitry Kozlyuk if (bt->len == bt->size)
201508ac0358SDmitry Kozlyuk mr_btree_expand(bt, bt->size << 1);
201608ac0358SDmitry Kozlyuk entry.start = (uintptr_t)mr->pmd_mr.addr;
201708ac0358SDmitry Kozlyuk entry.end = entry.start + mr->pmd_mr.len;
201808ac0358SDmitry Kozlyuk entry.lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
201908ac0358SDmitry Kozlyuk if (mr_btree_insert(bt, &entry) < 0) {
202008ac0358SDmitry Kozlyuk DRV_LOG(ERR, "Cannot insert cache entry for mempool %s MR %08x",
202108ac0358SDmitry Kozlyuk mp->name, entry.lkey);
202208ac0358SDmitry Kozlyuk rte_errno = EINVAL;
202308ac0358SDmitry Kozlyuk return -1;
202408ac0358SDmitry Kozlyuk }
202508ac0358SDmitry Kozlyuk }
202608ac0358SDmitry Kozlyuk return 0;
202708ac0358SDmitry Kozlyuk }
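
/*
 * Illustrative sketch (not part of the driver build): priming a queue's
 * bottom-half cache right after its Rx mempool is registered, so that the
 * first packets do not take the miss path. The MLX5_MR_EXAMPLE_SNIPPETS
 * guard and the example_* name are hypothetical.
 */
#ifdef MLX5_MR_EXAMPLE_SNIPPETS
static int
example_prime_rxq_cache(struct mlx5_common_device *cdev,
			struct mlx5_mr_ctrl *mr_ctrl, struct rte_mempool *mp)
{
	if (mlx5_mr_mempool_register(cdev, mp, false) < 0 &&
	    rte_errno != EEXIST)
		return -rte_errno;
	return mlx5_mr_mempool_populate_cache(mr_ctrl, mp);
}
#endif /* MLX5_MR_EXAMPLE_SNIPPETS */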
202808ac0358SDmitry Kozlyuk
202908ac0358SDmitry Kozlyuk /**
2030690b2a88SDmitry Kozlyuk * Bottom-half lookup for the address from the mempool.
2031690b2a88SDmitry Kozlyuk *
2032690b2a88SDmitry Kozlyuk * @param mr_ctrl
2033690b2a88SDmitry Kozlyuk * Per-queue MR control handle.
2034690b2a88SDmitry Kozlyuk * @param mp
2035690b2a88SDmitry Kozlyuk * Mempool containing the address.
2036690b2a88SDmitry Kozlyuk * @param addr
2037690b2a88SDmitry Kozlyuk * Address to lookup.
2038690b2a88SDmitry Kozlyuk * @return
2039690b2a88SDmitry Kozlyuk * MR lkey on success, UINT32_MAX on failure.
2040690b2a88SDmitry Kozlyuk */
2041690b2a88SDmitry Kozlyuk uint32_t
204271304b5cSMichael Baum mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
2043690b2a88SDmitry Kozlyuk struct rte_mempool *mp, uintptr_t addr)
2044690b2a88SDmitry Kozlyuk {
2045690b2a88SDmitry Kozlyuk struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
2046690b2a88SDmitry Kozlyuk uint32_t lkey;
2047e96d3d02SDmitry Kozlyuk uint32_t bh_idx = 0;
2048690b2a88SDmitry Kozlyuk
2049690b2a88SDmitry Kozlyuk /* Binary-search MR translation table. */
2050690b2a88SDmitry Kozlyuk lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
2051690b2a88SDmitry Kozlyuk /* Update top-half cache. */
2052690b2a88SDmitry Kozlyuk if (likely(lkey != UINT32_MAX)) {
2053690b2a88SDmitry Kozlyuk *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
2054690b2a88SDmitry Kozlyuk } else {
205571304b5cSMichael Baum lkey = mlx5_lookup_mempool_regs(mr_ctrl, repl, mp, addr);
2056690b2a88SDmitry Kozlyuk /* Can only fail if the address is not from the mempool. */
2057690b2a88SDmitry Kozlyuk if (unlikely(lkey == UINT32_MAX))
2058690b2a88SDmitry Kozlyuk return UINT32_MAX;
2059690b2a88SDmitry Kozlyuk }
2060690b2a88SDmitry Kozlyuk /* Update the most recently used entry. */
2061690b2a88SDmitry Kozlyuk mr_ctrl->mru = mr_ctrl->head;
2062690b2a88SDmitry Kozlyuk /* Point to the next victim, the oldest. */
2063690b2a88SDmitry Kozlyuk mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
2064690b2a88SDmitry Kozlyuk return lkey;
2065690b2a88SDmitry Kozlyuk }
2066fb690f71SMichael Baum
20676a4e4385SMichael Baum uint32_t
206820489176SMichael Baum mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
2069fc59a1ecSMichael Baum {
207008ac0358SDmitry Kozlyuk struct rte_mempool *mp;
207108ac0358SDmitry Kozlyuk struct mlx5_mprq_buf *buf;
2072fc59a1ecSMichael Baum uint32_t lkey;
2073fc59a1ecSMichael Baum uintptr_t addr = (uintptr_t)mb->buf_addr;
207471304b5cSMichael Baum struct mlx5_mr_share_cache *share_cache =
207571304b5cSMichael Baum container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
207671304b5cSMichael Baum dev_gen);
207771304b5cSMichael Baum struct mlx5_common_device *cdev =
207871304b5cSMichael Baum container_of(share_cache, struct mlx5_common_device, mr_scache);
207963625c5dSDmitry Kozlyuk bool external, mprq, pinned = false;
2080fc59a1ecSMichael Baum
2081fc59a1ecSMichael Baum /* Recover MPRQ mempool. */
208263625c5dSDmitry Kozlyuk external = RTE_MBUF_HAS_EXTBUF(mb);
208363625c5dSDmitry Kozlyuk if (external && mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
208463625c5dSDmitry Kozlyuk mprq = true;
2085fc59a1ecSMichael Baum buf = mb->shinfo->fcb_opaque;
2086fc59a1ecSMichael Baum mp = buf->mp;
208708ac0358SDmitry Kozlyuk } else {
208863625c5dSDmitry Kozlyuk mprq = false;
208908ac0358SDmitry Kozlyuk mp = mlx5_mb2mp(mb);
209063625c5dSDmitry Kozlyuk pinned = rte_pktmbuf_priv_flags(mp) &
209163625c5dSDmitry Kozlyuk RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
2092fc59a1ecSMichael Baum }
209363625c5dSDmitry Kozlyuk if (!external || mprq || pinned) {
209471304b5cSMichael Baum lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
2095fc59a1ecSMichael Baum if (lkey != UINT32_MAX)
2096fc59a1ecSMichael Baum return lkey;
209763625c5dSDmitry Kozlyuk /* MPRQ is always registered. */
209863625c5dSDmitry Kozlyuk MLX5_ASSERT(!mprq);
209963625c5dSDmitry Kozlyuk }
210008ac0358SDmitry Kozlyuk /* Register pinned external memory if the mempool is not used for Rx. */
210163625c5dSDmitry Kozlyuk if (cdev->config.mr_mempool_reg_en && pinned) {
210208ac0358SDmitry Kozlyuk if (mlx5_mr_mempool_register(cdev, mp, true) < 0)
210308ac0358SDmitry Kozlyuk return UINT32_MAX;
210408ac0358SDmitry Kozlyuk lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
210508ac0358SDmitry Kozlyuk MLX5_ASSERT(lkey != UINT32_MAX);
210608ac0358SDmitry Kozlyuk return lkey;
2107fc59a1ecSMichael Baum }
210808ac0358SDmitry Kozlyuk /* Fallback to generic mechanism in corner cases. */
210920489176SMichael Baum return mlx5_mr_addr2mr_bh(mr_ctrl, addr);
2110fc59a1ecSMichael Baum }
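
/*
 * Illustrative sketch (not part of the driver build): per-mbuf lkey
 * resolution on the Tx path, scanning the small per-queue cache before
 * falling back to the bottom half above. The real inline helper lives in
 * the common MR header; the MLX5_MR_EXAMPLE_SNIPPETS guard and the
 * example_* name are hypothetical.
 */
#ifdef MLX5_MR_EXAMPLE_SNIPPETS
static inline uint32_t
example_tx_mb2mr(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
{
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	unsigned int i;

	/* A hit in the per-queue cache covers the vast majority of mbufs. */
	for (i = 0; i < MLX5_MR_CACHE_N; ++i) {
		struct mr_cache_entry *e =
			&mr_ctrl->cache[(mr_ctrl->mru + i) % MLX5_MR_CACHE_N];

		if (addr >= e->start && addr < e->end)
			return e->lkey;
	}
	/* Miss: the slow path above also refreshes the top-half cache. */
	return mlx5_mr_mb2mr_bh(mr_ctrl, mb);
}
#endif /* MLX5_MR_EXAMPLE_SNIPPETS */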
2111