/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 6WIND S.A.
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_COMMON_MR_H_
#define RTE_PMD_MLX5_COMMON_MR_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* Verbs headers. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_rwlock.h>
#include <rte_bitmap.h>
#include <rte_memory.h>

#include "mlx5_common_mp.h"

/* Size of per-queue MR cache array for linear search (top-half). */
#define MLX5_MR_CACHE_N 8
/* Initial size of per-queue MR cache table for binary search (bottom-half). */
#define MLX5_MR_BTREE_CACHE_N 256

/* Memory Region object. */
struct mlx5_mr {
	LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
	struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
	const struct rte_memseg_list *msl; /* Memseg list the MR belongs to. */
	int ms_base_idx; /* Start index within msl->memseg_arr[]. */
	int ms_n; /* Number of memsegs in use. */
	uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
	struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonging to the MR. */
};

/* Cache entry for Memory Region. */
struct mr_cache_entry {
	uintptr_t start; /* Start address of MR. */
	uintptr_t end; /* End address of MR. */
	uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
} __rte_packed;

/* MR cache table for binary search. */
struct mlx5_mr_btree {
	uint16_t len; /* Number of entries in use. */
	uint16_t size; /* Capacity (total number of allocated entries). */
	int overflow; /* Mark failure of table expansion. */
	struct mr_cache_entry (*table)[]; /* Pointer to the table of entries. */
} __rte_packed;

/* Per-queue MR control descriptor. */
struct mlx5_mr_ctrl {
	uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
	uint32_t cur_gen; /* Generation number saved to flush caches. */
	uint16_t mru; /* Index of last hit entry in top-half cache. */
	uint16_t head; /* Index of the oldest entry in top-half cache. */
	struct mr_cache_entry cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
	struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
} __rte_packed;

LIST_HEAD(mlx5_mr_list, mlx5_mr);

/* Global per-device MR cache. */
struct mlx5_mr_share_cache {
	uint32_t dev_gen; /* Generation number to flush local caches. */
	rte_rwlock_t rwlock; /* MR cache lock. */
	struct mlx5_mr_btree cache; /* Global MR cache table. */
	struct mlx5_mr_list mr_list; /* Registered MR list. */
	struct mlx5_mr_list mr_free_list; /* Freed MR list. */
} __rte_packed;

/**
 * Look up the LKey in the given lookup table by linear search. The last-hit
 * entry is checked first; on a miss, the entire array is searched. On a hit,
 * the last-hit index is updated and the LKey is returned.
 *
 * @param lkp_tbl
 *   Pointer to the lookup table.
 * @param[in,out] cached_idx
 *   Pointer to the last-hit index.
 * @param n
 *   Size of the lookup table.
 * @param addr
 *   Search key.
 *
 * @return
 *   Matched LKey on success, UINT32_MAX if no match.
 */
static __rte_always_inline uint32_t
mlx5_mr_lookup_lkey(struct mr_cache_entry *lkp_tbl, uint16_t *cached_idx,
		    uint16_t n, uintptr_t addr)
{
	uint16_t idx;

	/* Fast path: check the last-hit (MRU) entry first. */
	if (likely(addr >= lkp_tbl[*cached_idx].start &&
		   addr < lkp_tbl[*cached_idx].end))
		return lkp_tbl[*cached_idx].lkey;
	/* Linear scan; the table is terminated by a zeroed entry. */
	for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
		if (addr >= lkp_tbl[idx].start &&
		    addr < lkp_tbl[idx].end) {
			/* Found. */
			*cached_idx = idx;
			return lkp_tbl[idx].lkey;
		}
	}
	return UINT32_MAX;
}
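/**
 * Usage sketch (illustrative only, not part of this header's API): how a
 * per-queue datapath might resolve an LKey with the structures above. The
 * local caches are flushed when the device generation number has moved on,
 * the linear top-half cache is tried first via mlx5_mr_lookup_lkey(), and a
 * miss falls back to the bottom-half slow path mlx5_mr_addr2mr_bh() declared
 * below. The function name q_addr2mr() and the hard-coded mr_ext_memseg_en
 * argument are assumptions for illustration, not the exact PMD code.
 *
 * @code
 * static uint32_t
 * q_addr2mr(struct ibv_pd *pd, struct mlx5_mp_id *mp_id,
 *           struct mlx5_mr_share_cache *share_cache,
 *           struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
 * {
 *         uint32_t lkey;
 *
 *         // Flush local caches if MRs were freed device-wide meanwhile.
 *         if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
 *                 mlx5_mr_flush_local_cache(mr_ctrl);
 *         // Top-half: linear search on the small per-queue array.
 *         lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
 *                                    MLX5_MR_CACHE_N, addr);
 *         if (likely(lkey != UINT32_MAX))
 *                 return lkey;
 *         // Bottom-half: per-queue B-tree, then the global cache, then a
 *         // new registration; refills the top-half cache on success.
 *         // Passing 1 assumes MR extension by memseg is enabled.
 *         return mlx5_mr_addr2mr_bh(pd, mp_id, share_cache, mr_ctrl,
 *                                   addr, 1);
 * }
 * @endcode
 */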
__rte_experimental
int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
__rte_experimental
void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
__rte_experimental
void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
__rte_experimental
uint32_t mlx5_mr_addr2mr_bh(struct ibv_pd *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en);
__rte_experimental
void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
__rte_experimental
void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
__rte_experimental
void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
__rte_experimental
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
__rte_experimental
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr);
__rte_experimental
uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr);
__rte_experimental
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr);
__rte_experimental
struct mlx5_mr *
mlx5_create_mr_ext(struct ibv_pd *pd, uintptr_t addr, size_t len,
		   int socket_id);
__rte_experimental
uint32_t
mlx5_mr_create_primary(struct ibv_pd *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en);

#endif /* RTE_PMD_MLX5_COMMON_MR_H_ */