/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 6WIND S.A.
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_COMMON_MR_H_
#define RTE_PMD_MLX5_COMMON_MR_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_rwlock.h>
#include <rte_bitmap.h>
#include <rte_memory.h>

#include "mlx5_common_mp.h"

/* Size of per-queue MR cache array for linear search. */
#define MLX5_MR_CACHE_N 8
/* Size of per-queue MR cache table for binary search. */
#define MLX5_MR_BTREE_CACHE_N 256

/* mlx5 PMD MR struct. */
struct mlx5_pmd_mr {
	uint32_t lkey;
	void *addr;
	size_t len;
	void *obj; /* Verbs MR object or DevX umem object. */
};

/**
 * MR operation typedefs: callbacks used to register and deregister
 * a memory region.
 */
typedef int (*mlx5_reg_mr_t)(void *pd, void *addr, size_t length,
			     struct mlx5_pmd_mr *pmd_mr);
typedef void (*mlx5_dereg_mr_t)(struct mlx5_pmd_mr *pmd_mr);

/* Memory Region object. */
struct mlx5_mr {
	LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
	struct mlx5_pmd_mr pmd_mr; /* PMD memory region. */
	const struct rte_memseg_list *msl; /* Memseg list the MR spans. */
	int ms_base_idx; /* Start index of msl->memseg_arr[]. */
	int ms_n; /* Number of memsegs in use. */
	uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
	struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonging to the MR. */
};

/* Cache entry for Memory Region. */
struct mr_cache_entry {
	uintptr_t start; /* Start address of MR. */
	uintptr_t end; /* End address of MR. */
	uint32_t lkey; /* rte_cpu_to_be_32(lkey). */
} __rte_packed;

/* MR cache table for binary search. */
struct mlx5_mr_btree {
	uint16_t len; /* Number of entries. */
	uint16_t size; /* Total number of entries. */
	int overflow; /* Mark failure of table expansion. */
	struct mr_cache_entry (*table)[];
} __rte_packed;

/* Per-queue MR control descriptor. */
struct mlx5_mr_ctrl {
	uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
	uint32_t cur_gen; /* Generation number saved to flush caches. */
	uint16_t mru; /* Index of last hit entry in top-half cache. */
	uint16_t head; /* Index of the oldest entry in top-half cache. */
	struct mr_cache_entry cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
	struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
} __rte_packed;

LIST_HEAD(mlx5_mr_list, mlx5_mr);

/* Global per-device MR cache. */
struct mlx5_mr_share_cache {
	uint32_t dev_gen; /* Generation number to flush local caches. */
	rte_rwlock_t rwlock; /* MR cache lock. */
	struct mlx5_mr_btree cache; /* Global MR cache table. */
	struct mlx5_mr_list mr_list; /* Registered MR list. */
	struct mlx5_mr_list mr_free_list; /* Freed MR list. */
	mlx5_reg_mr_t reg_mr_cb; /* Callback to the reg_mr function. */
	mlx5_dereg_mr_t dereg_mr_cb; /* Callback to the dereg_mr function. */
} __rte_packed;

/**
 * Look up the LKey in the given lookup table by linear search. The last-hit
 * entry is checked first. On a miss, the entire array is searched. On a hit,
 * the last-hit index is updated and the LKey is returned.
 *
 * @param lkp_tbl
 *   Pointer to the lookup table.
 * @param[in,out] cached_idx
 *   Pointer to the last-hit index.
 * @param n
 *   Size of the lookup table.
 * @param addr
 *   Search key.
 *
 * @return
 *   Matched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_mr_lookup_lkey(struct mr_cache_entry *lkp_tbl, uint16_t *cached_idx,
		    uint16_t n, uintptr_t addr)
{
	uint16_t idx;

	if (likely(addr >= lkp_tbl[*cached_idx].start &&
		   addr < lkp_tbl[*cached_idx].end))
		return lkp_tbl[*cached_idx].lkey;
	for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
		if (addr >= lkp_tbl[idx].start &&
		    addr < lkp_tbl[idx].end) {
			/* Found. */
			*cached_idx = idx;
			return lkp_tbl[idx].lkey;
		}
	}
	return UINT32_MAX;
}
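/*
 * Illustrative sketch only (not part of this header's API): how a per-queue
 * datapath routine could resolve a buffer address to an LKey with the
 * top-half cache. The function name is hypothetical; on a miss, the real
 * datapath falls back to the bottom-half via mlx5_mr_addr2mr_bh()
 * (declared below) instead of returning the miss directly.
 */
static __rte_always_inline uint32_t
example_mr_addr2lkey(struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
{
	uint32_t lkey;

	/* Linear search on the per-queue (top-half) MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Miss: a real caller would take the slower bottom-half path here. */
	return UINT32_MAX;
}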
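/*
 * Illustrative sketch only: how the generation numbers in struct
 * mlx5_mr_ctrl are intended to be used. When the device generation
 * (*dev_gen_ptr) moves past the locally saved cur_gen, the per-queue
 * cache is stale and must be flushed; the real PMD does this with
 * mlx5_mr_flush_local_cache() (declared below). The body here is a
 * simplified assumption, not the actual implementation.
 */
static inline void
example_mr_ctrl_sync_gen(struct mlx5_mr_ctrl *mr_ctrl)
{
	uint16_t i;

	if (likely(mr_ctrl->cur_gen == *mr_ctrl->dev_gen_ptr))
		return; /* Local cache is still coherent. */
	/* Invalidate top-half entries; a zero start marks an unused slot. */
	for (i = 0; i < MLX5_MR_CACHE_N; ++i) {
		mr_ctrl->cache[i].start = 0;
		mr_ctrl->cache[i].end = 0;
		mr_ctrl->cache[i].lkey = 0;
	}
	mr_ctrl->mru = 0;
	mr_ctrl->head = 0;
	/* Record the generation this queue cache is now synchronized to. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
}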
__rte_internal
int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
__rte_internal
void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
__rte_internal
void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
__rte_internal
uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en);
__rte_internal
void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
__rte_internal
void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
__rte_internal
void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
__rte_internal
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
__rte_internal
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr);
__rte_internal
uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr);
__rte_internal
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr);
__rte_internal
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
		   mlx5_reg_mr_t reg_mr_cb);
__rte_internal
uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en);
__rte_internal
int
mlx5_common_verbs_reg_mr(void *pd, void *addr, size_t length,
			 struct mlx5_pmd_mr *pmd_mr);
__rte_internal
void
mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);

#endif /* RTE_PMD_MLX5_COMMON_MR_H_ */