/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 6WIND S.A.
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_COMMON_MR_H_
#define RTE_PMD_MLX5_COMMON_MR_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>


#include <rte_rwlock.h>
#include <rte_bitmap.h>
#include <rte_memory.h>

#include "mlx5_glue.h"
#include "mlx5_common_mp.h"

/* Size of per-queue MR cache array for linear search. */
#define MLX5_MR_CACHE_N 8
/* Initial size of the per-queue bottom-half (binary search) MR cache table. */
#define MLX5_MR_BTREE_CACHE_N 256

/* mlx5 PMD MR struct. */
struct mlx5_pmd_mr {
	uint32_t lkey;		/* Local key of the MR. */
	void *addr;		/* Start address of the registered memory. */
	size_t len;		/* Length in bytes of the registered memory. */
	void *obj;  /* verbs mr object or devx umem object. */
	struct mlx5_devx_obj *mkey; /* devx mkey object. */
};

/**
 * mr operations typedef
 *
 * Callbacks used by the common MR code to register/deregister memory
 * through either verbs or devx (chosen by the device at init time).
 */
typedef int (*mlx5_reg_mr_t)(void *pd, void *addr, size_t length,
			     struct mlx5_pmd_mr *pmd_mr);
typedef void (*mlx5_dereg_mr_t)(struct mlx5_pmd_mr *pmd_mr);

/* Memory Region object. */
struct mlx5_mr {
	LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
	struct mlx5_pmd_mr pmd_mr; /* PMD memory region. */
	const struct rte_memseg_list *msl; /* Memseg list the MR belongs to. */
	int ms_base_idx; /* Start index of msl->memseg_arr[]. */
	int ms_n; /* Number of memsegs in use. */
	uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
	struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonged to MR. */
};

/* Cache entry for Memory Region. */
struct mr_cache_entry {
	uintptr_t start; /* Start address of MR. */
	uintptr_t end; /* End address of MR (exclusive). */
	uint32_t lkey; /* rte_cpu_to_be_32(lkey). */
} __rte_packed;

/* MR Cache table for Binary search. */
struct mlx5_mr_btree {
	uint16_t len; /* Number of entries. */
	uint16_t size; /* Total number of entries. */
	int overflow; /* Mark failure of table expansion. */
	struct mr_cache_entry (*table)[]; /* Table of cache entries. */
} __rte_packed;

/* Per-queue MR control descriptor.
 * Holds the small linear-search ("top-half") cache and the per-queue
 * B-tree ("bottom-half") cache, both flushed when the generation number
 * pointed to by dev_gen_ptr moves past cur_gen.
*/ 68 struct mlx5_mr_ctrl { 69 uint32_t *dev_gen_ptr; /* Generation number of device to poll. */ 70 uint32_t cur_gen; /* Generation number saved to flush caches. */ 71 uint16_t mru; /* Index of last hit entry in top-half cache. */ 72 uint16_t head; /* Index of the oldest entry in top-half cache. */ 73 struct mr_cache_entry cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */ 74 struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */ 75 } __rte_packed; 76 77 LIST_HEAD(mlx5_mr_list, mlx5_mr); 78 79 /* Global per-device MR cache. */ 80 struct mlx5_mr_share_cache { 81 uint32_t dev_gen; /* Generation number to flush local caches. */ 82 rte_rwlock_t rwlock; /* MR cache Lock. */ 83 struct mlx5_mr_btree cache; /* Global MR cache table. */ 84 struct mlx5_mr_list mr_list; /* Registered MR list. */ 85 struct mlx5_mr_list mr_free_list; /* Freed MR list. */ 86 mlx5_reg_mr_t reg_mr_cb; /* Callback to reg_mr func */ 87 mlx5_dereg_mr_t dereg_mr_cb; /* Callback to dereg_mr func */ 88 } __rte_packed; 89 90 /** 91 * Look up LKey from given lookup table by linear search. Firstly look up the 92 * last-hit entry. If miss, the entire array is searched. If found, update the 93 * last-hit index and return LKey. 94 * 95 * @param lkp_tbl 96 * Pointer to lookup table. 97 * @param[in,out] cached_idx 98 * Pointer to last-hit index. 99 * @param n 100 * Size of lookup table. 101 * @param addr 102 * Search key. 103 * 104 * @return 105 * Searched LKey on success, UINT32_MAX on no match. 106 */ 107 static __rte_always_inline uint32_t 108 mlx5_mr_lookup_lkey(struct mr_cache_entry *lkp_tbl, uint16_t *cached_idx, 109 uint16_t n, uintptr_t addr) 110 { 111 uint16_t idx; 112 113 if (likely(addr >= lkp_tbl[*cached_idx].start && 114 addr < lkp_tbl[*cached_idx].end)) 115 return lkp_tbl[*cached_idx].lkey; 116 for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) { 117 if (addr >= lkp_tbl[idx].start && 118 addr < lkp_tbl[idx].end) { 119 /* Found. 
*/ 120 *cached_idx = idx; 121 return lkp_tbl[idx].lkey; 122 } 123 } 124 return UINT32_MAX; 125 } 126 127 __rte_internal 128 int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket); 129 __rte_internal 130 void mlx5_mr_btree_free(struct mlx5_mr_btree *bt); 131 __rte_internal 132 void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused); 133 __rte_internal 134 uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id, 135 struct mlx5_mr_share_cache *share_cache, 136 struct mlx5_mr_ctrl *mr_ctrl, 137 uintptr_t addr, unsigned int mr_ext_memseg_en); 138 __rte_internal 139 void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache); 140 __rte_internal 141 void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused); 142 __rte_internal 143 void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache); 144 __rte_internal 145 void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl); 146 __rte_internal 147 int 148 mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache, 149 struct mlx5_mr *mr); 150 __rte_internal 151 uint32_t 152 mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache, 153 struct mr_cache_entry *entry, uintptr_t addr); 154 __rte_internal 155 struct mlx5_mr * 156 mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache, 157 struct mr_cache_entry *entry, uintptr_t addr); 158 __rte_internal 159 struct mlx5_mr * 160 mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id, 161 mlx5_reg_mr_t reg_mr_cb); 162 __rte_internal 163 uint32_t 164 mlx5_mr_create_primary(void *pd, 165 struct mlx5_mr_share_cache *share_cache, 166 struct mr_cache_entry *entry, uintptr_t addr, 167 unsigned int mr_ext_memseg_en); 168 __rte_internal 169 int 170 mlx5_common_verbs_reg_mr(void *pd, void *addr, size_t length, 171 struct mlx5_pmd_mr *pmd_mr); 172 __rte_internal 173 void 174 mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr); 175 176 __rte_internal 177 void 178 mlx5_mr_free(struct mlx5_mr *mr, 
mlx5_dereg_mr_t dereg_mr_cb); 179 #endif /* RTE_PMD_MLX5_COMMON_MR_H_ */ 180