/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 6WIND S.A.
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_COMMON_MR_H_
#define RTE_PMD_MLX5_COMMON_MR_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_compat.h>
#include <rte_rwlock.h>
#include <rte_bitmap.h>
#include <rte_mbuf.h>
#include <rte_memory.h>

#include "mlx5_glue.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_defs.h"

/* mlx5 PMD MR struct. */
struct mlx5_pmd_mr {
	uint32_t	     lkey;
	void		     *addr;
	size_t		     len;
	void		     *obj;  /* Verbs MR object or DevX umem object. */
	struct mlx5_devx_obj *mkey; /* DevX mkey object. */
};

/**
 * MR registration/deregistration operation typedefs.
 */
typedef int (*mlx5_reg_mr_t)(void *pd, void *addr, size_t length,
			     struct mlx5_pmd_mr *pmd_mr);
typedef void (*mlx5_dereg_mr_t)(struct mlx5_pmd_mr *pmd_mr);
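
/*
 * Illustrative wiring of these callbacks (a sketch, not part of the API):
 * an OS layer typically selects the primitives via mlx5_os_set_reg_mr_cb()
 * declared below; the Verbs helpers at the bottom of this header match the
 * typedefs directly. The pd/addr/length variables are assumed in scope.
 *
 *	mlx5_reg_mr_t reg_mr_cb = mlx5_common_verbs_reg_mr;
 *	mlx5_dereg_mr_t dereg_mr_cb = mlx5_common_verbs_dereg_mr;
 *	struct mlx5_pmd_mr pmd_mr;
 *
 *	if (reg_mr_cb(pd, addr, length, &pmd_mr) < 0)
 *		return -1; // registration failed
 *	...
 *	dereg_mr_cb(&pmd_mr);
 */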

/* Memory Region object. */
struct mlx5_mr {
	LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
	struct mlx5_pmd_mr pmd_mr; /* PMD memory region. */
	const struct rte_memseg_list *msl;
	int ms_base_idx; /* Start index of msl->memseg_arr[]. */
	int ms_n; /* Number of memsegs in use. */
	uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
	struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonging to the MR. */
};

/* Cache entry for Memory Region. */
struct __rte_packed_begin mr_cache_entry {
	uintptr_t start; /* Start address of MR. */
	uintptr_t end; /* End address of MR. */
	uint32_t lkey; /* rte_cpu_to_be_32(lkey). */
} __rte_packed_end;

/* MR cache table for binary search. */
struct __rte_packed_begin mlx5_mr_btree {
	uint32_t len; /* Number of entries in use. */
	uint32_t size; /* Table capacity (total number of entries). */
	struct mr_cache_entry (*table)[];
} __rte_packed_end;
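
/*
 * Illustrative lookup sketch (assuming, as the cache maintains, that the
 * table is sorted by start address with non-overlapping ranges; the
 * authoritative version lives in mlx5_common_mr.c):
 *
 *	static uint32_t
 *	bt_lookup_sketch(struct mlx5_mr_btree *bt, uintptr_t addr)
 *	{
 *		struct mr_cache_entry *t = *bt->table;
 *		uint32_t lo = 0;
 *		uint32_t hi = bt->len;
 *
 *		while (lo < hi) {
 *			uint32_t mid = lo + (hi - lo) / 2;
 *
 *			if (addr < t[mid].start)
 *				hi = mid;
 *			else if (addr >= t[mid].end)
 *				lo = mid + 1;
 *			else
 *				return t[mid].lkey; // addr in [start, end)
 *		}
 *		return UINT32_MAX; // no MR covers addr
 *	}
 */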

struct mlx5_common_device;

/* Per-queue MR control descriptor. */
struct __rte_packed_begin mlx5_mr_ctrl {
	uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
	uint32_t cur_gen; /* Generation number saved to flush caches. */
	uint16_t mru; /* Index of last hit entry in top-half cache. */
	uint16_t head; /* Index of the oldest entry in top-half cache. */
	struct mr_cache_entry cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
	struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
} __rte_packed_end;

LIST_HEAD(mlx5_mr_list, mlx5_mr);
LIST_HEAD(mlx5_mempool_reg_list, mlx5_mempool_reg);

/* Global per-device MR cache. */
struct __rte_packed_begin mlx5_mr_share_cache {
	uint32_t dev_gen; /* Generation number to flush local caches. */
	rte_rwlock_t rwlock; /* MR cache lock. */
	rte_rwlock_t mprwlock; /* Mempool registration lock. */
	struct mlx5_mr_btree cache; /* Global MR cache table. */
	struct mlx5_mr_list mr_list; /* Registered MR list. */
	struct mlx5_mr_list mr_free_list; /* Freed MR list. */
	struct mlx5_mempool_reg_list mempool_reg_list; /* Mempool database. */
	mlx5_reg_mr_t reg_mr_cb; /* Callback to the reg_mr function. */
	mlx5_dereg_mr_t dereg_mr_cb; /* Callback to the dereg_mr function. */
} __rte_packed_end;
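
/*
 * Illustrative invalidation protocol (a sketch of the existing design;
 * locking and barrier details differ in mlx5_common_mr.c): the control
 * path bumps dev_gen after changing the global cache, and each queue
 * compares it against its cur_gen on lookup (see mlx5_mr_mb2mr() below):
 *
 *	// control path, after removing or rebuilding MRs:
 *	rte_rwlock_write_lock(&share_cache->rwlock);
 *	share_cache->dev_gen++;
 *	rte_rwlock_write_unlock(&share_cache->rwlock);
 *
 *	// datapath, per lookup:
 *	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
 *		mlx5_mr_flush_local_cache(mr_ctrl);
 */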

/* Multi-Packet RQ buffer header. */
struct __rte_cache_aligned mlx5_mprq_buf {
	struct rte_mempool *mp;
	RTE_ATOMIC(uint16_t) refcnt; /* Atomically accessed refcnt. */
	struct rte_mbuf_ext_shared_info shinfos[];
	/*
	 * Shared information per stride.
	 * More memory will be allocated for the first stride head-room and for
	 * the strides data.
	 */
};

__rte_internal
void mlx5_mprq_buf_free_cb(void *addr, void *opaque);

/**
 * Get the memory pool (MP) of an mbuf. If the mbuf is indirect, return the
 * pool of the direct mbuf it was cloned from, since that pool owns the data
 * buffer.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static inline struct rte_mempool *
mlx5_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_CLONED(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}
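
/*
 * Illustrative use (a sketch): mempool-based LKey resolution must key on the
 * direct pool, because a cloned mbuf's buf_addr belongs to the pool of the
 * direct mbuf it is attached to:
 *
 *	struct rte_mempool *mp = mlx5_mb2mp(mb);
 *
 *	lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, (uintptr_t)mb->buf_addr);
 */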

/**
 * Look up an LKey in the given lookup table by linear search. First check the
 * last-hit entry; on a miss, search the entire array. If found, update the
 * last-hit index and return the LKey.
 *
 * @param lkp_tbl
 *   Pointer to lookup table.
 * @param[in,out] cached_idx
 *   Pointer to last-hit index.
 * @param n
 *   Size of lookup table.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_mr_lookup_lkey(struct mr_cache_entry *lkp_tbl, uint16_t *cached_idx,
		    uint16_t n, uintptr_t addr)
{
	uint16_t idx;

	if (likely(addr >= lkp_tbl[*cached_idx].start &&
		   addr < lkp_tbl[*cached_idx].end))
		return lkp_tbl[*cached_idx].lkey;
	for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
		if (addr >= lkp_tbl[idx].start &&
		    addr < lkp_tbl[idx].end) {
			/* Found. */
			*cached_idx = idx;
			return lkp_tbl[idx].lkey;
		}
	}
	return UINT32_MAX;
}
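
/*
 * Illustrative standalone use (a sketch; mlx5_mr_mb2mr() below is the real
 * datapath caller): the caller owns the last-hit index, so consecutive
 * lookups of nearby addresses resolve on the first branch:
 *
 *	uint16_t mru = 0;
 *	uint32_t lkey;
 *
 *	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mru, MLX5_MR_CACHE_N,
 *				   (uintptr_t)buf_addr);
 *	if (lkey == UINT32_MAX)
 *		return mlx5_mr_addr2mr_bh(mr_ctrl, (uintptr_t)buf_addr);
 */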

__rte_internal
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);

/**
 * Bottom-half of the LKey search. If supported, look up the address among
 * the registered mempools. Otherwise, search the caches of the older
 * per-address mechanism.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mbuf
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
__rte_internal
uint32_t mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf);

/**
 * Query LKey from a packet buffer.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mbuf
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_mr_mb2mr(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf)
{
	uint32_t lkey;

	/* Check the generation number to see if any MR has changed. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, (uintptr_t)mbuf->buf_addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take the slower bottom-half on a miss. */
	return mlx5_mr_mb2mr_bh(mr_ctrl, mbuf);
}
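
/*
 * Illustrative datapath use (a sketch; txq, wqe and wqe_set_data() are
 * hypothetical names, not this driver's code): a burst routine resolves the
 * LKey per mbuf before posting the buffer address to hardware. Note the
 * returned value is already in big-endian byte order (see mr_cache_entry):
 *
 *	for (i = 0; i < nb_pkts; ++i) {
 *		struct rte_mbuf *mb = pkts[i];
 *		uint32_t lkey = mlx5_mr_mb2mr(&txq->mr_ctrl, mb);
 *
 *		if (unlikely(lkey == UINT32_MAX))
 *			break; // address not registered; stop the burst
 *		wqe_set_data(wqe, rte_pktmbuf_mtod(mb, uintptr_t),
 *			     rte_pktmbuf_data_len(mb), lkey);
 *	}
 */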

/* mlx5_common_mr.c */

__rte_internal
int mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
		      int socket);
__rte_internal
void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
__rte_internal
uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
			       struct rte_mempool *mp, uintptr_t addr);
int mlx5_mr_expand_cache(struct mlx5_mr_share_cache *share_cache,
			 uint32_t new_size, int socket);
void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
int mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket);
void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
void mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
			  const char *ibdev_name, const void *addr, size_t len);
int mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
			 struct mlx5_mr *mr);
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr);
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
		   mlx5_reg_mr_t reg_mr_cb);
void mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb);
__rte_internal
uint32_t
mlx5_mr_create(struct mlx5_common_device *cdev,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr);

__rte_internal
uint32_t
mlx5_mr_addr2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr);

/* mlx5_common_verbs.c */

__rte_internal
int
mlx5_common_verbs_reg_mr(void *pd, void *addr, size_t length,
			 struct mlx5_pmd_mr *pmd_mr);
__rte_internal
void
mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);

__rte_internal
void
mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb);

__rte_internal
int
mlx5_mr_mempool_register(struct mlx5_common_device *cdev,
			 struct rte_mempool *mp, bool is_extmem);
__rte_internal
int
mlx5_mr_mempool_unregister(struct mlx5_common_device *cdev,
			   struct rte_mempool *mp);

__rte_internal
int
mlx5_mr_mempool_populate_cache(struct mlx5_mr_ctrl *mr_ctrl,
			       struct rte_mempool *mp);
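
/*
 * Illustrative control-path use (a sketch; the queue structure and the
 * EEXIST handling are assumptions, not code from this driver): a driver
 * registers the Rx mempool once at queue setup so that datapath lookups
 * resolve through the mempool database, and may pre-populate the
 * per-queue cache:
 *
 *	if (mlx5_mr_mempool_register(cdev, mp, false) < 0 &&
 *	    rte_errno != EEXIST)
 *		return -rte_errno; // registration failed
 *	mlx5_mr_mempool_populate_cache(&rxq->mr_ctrl, mp);
 *	...
 *	mlx5_mr_mempool_unregister(cdev, mp);
 */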

#endif /* RTE_PMD_MLX5_COMMON_MR_H_ */