/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright 2017 NXP
 *
 */

/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <dpaa_mempool.h>
#include <dpaax_iova_table.h>

/* List of all the memseg information locally maintained in dpaa driver. This
 * is to optimize the PA_to_VA searches until a better mechanism (algo) is
 * available.
 */
struct dpaa_memseg_list rte_dpaa_memsegs
	= TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);

struct dpaa_bp_info *rte_dpaa_bpid_info;

static int
dpaa_mbuf_create_pool(struct rte_mempool *mp)
{
	struct bman_pool *bp;
	struct bm_buffer bufs[8];
	struct dpaa_bp_info *bp_info;
	uint8_t bpid;
	int num_bufs = 0, ret = 0;
	struct bman_pool_params params = {
		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID
	};

	MEMPOOL_INIT_FUNC_TRACE();

	bp = bman_new_pool(&params);
	if (!bp) {
		DPAA_MEMPOOL_ERR("bman_new_pool() failed");
		return -ENODEV;
	}
	bpid = bman_get_params(bp)->bpid;

	/* Drain the pool of anything already in it. */
	do {
		/* Acquire is all-or-nothing, so we drain in 8s,
		 * then in 1s for the remainder.
		 */
		if (ret != 1)
			ret = bman_acquire(bp, bufs, 8, 0);
		if (ret < 8)
			ret = bman_acquire(bp, bufs, 1, 0);
		if (ret > 0)
			num_bufs += ret;
	} while (ret > 0);
	if (num_bufs)
		DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
				  num_bufs, bpid);

	if (rte_dpaa_bpid_info == NULL) {
		rte_dpaa_bpid_info = (struct dpaa_bp_info *)rte_zmalloc(NULL,
				sizeof(struct dpaa_bp_info) * DPAA_MAX_BPOOLS,
				RTE_CACHE_LINE_SIZE);
		if (rte_dpaa_bpid_info == NULL)
			return -ENOMEM;
	}

	rte_dpaa_bpid_info[bpid].mp = mp;
	rte_dpaa_bpid_info[bpid].bpid = bpid;
	rte_dpaa_bpid_info[bpid].size = mp->elt_size;
	rte_dpaa_bpid_info[bpid].bp = bp;
	rte_dpaa_bpid_info[bpid].meta_data_size =
		sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
	rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
	rte_dpaa_bpid_info[bpid].ptov_off = 0;
	rte_dpaa_bpid_info[bpid].flags = 0;

	bp_info = rte_malloc(NULL,
			     sizeof(struct dpaa_bp_info),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_info) {
		DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
		bman_free_pool(bp);
		return -ENOMEM;
	}

	rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
		   sizeof(struct dpaa_bp_info));
	mp->pool_data = (void *)bp_info;

	DPAA_MEMPOOL_INFO("BMAN pool created for bpid =%d", bpid);
	return 0;
}

static void
dpaa_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

	MEMPOOL_INIT_FUNC_TRACE();

	if (bp_info) {
		bman_free_pool(bp_info->bp);
		DPAA_MEMPOOL_INFO("BMAN pool freed for bpid =%d",
				  bp_info->bpid);
		rte_free(mp->pool_data);
		mp->pool_data = NULL;
	}
}

static void
dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
{
	struct bm_buffer buf;
	int ret;

	DPAA_MEMPOOL_DPDEBUG("Free 0x%" PRIx64 " to bpid: %d",
			     addr, bp_info->bpid);

	bm_buffer_set64(&buf, addr);
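	/* bman_release() can fail transiently (typically when the portal's
	 * release command ring is full); back off briefly and retry until
	 * BMAN accepts the buffer.
	 */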
retry:
	ret = bman_release(bp_info->bp, &buf, 1, 0);
	if (ret) {
		DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
		cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
		goto retry;
	}
}

static int
dpaa_mbuf_free_bulk(struct rte_mempool *pool,
		    void *const *obj_table,
		    unsigned int n)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
	int ret;
	unsigned int i = 0;

	DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
			     n, bp_info->bpid);

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return 0;
		}
	}

	while (i < n) {
		uint64_t phy = rte_mempool_virt2iova(obj_table[i]);

		if (unlikely(!bp_info->ptov_off)) {
			/* buffers are from single mem segment */
			if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
				bp_info->ptov_off = (size_t)obj_table[i] - phy;
				rte_dpaa_bpid_info[bp_info->bpid].ptov_off
						= bp_info->ptov_off;
			}
		}

		dpaa_buf_free(bp_info,
			      (uint64_t)phy + bp_info->meta_data_size);
		i = i + 1;
	}

	DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid =%d",
			     n, bp_info->bpid);

	return 0;
}

static int
dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
		     void **obj_table,
		     unsigned int count)
{
	struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
	struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
	struct dpaa_bp_info *bp_info;
	void *bufaddr;
	int i, ret;
	unsigned int n = 0;

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);

	DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
			     count, bp_info->bpid);

	if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
		DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
				 count);
		return -1;
	}

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return -1;
		}
	}

	while (n < count) {
		/* Acquire is all-or-nothing, so we acquire in bursts of
		 * DPAA_MBUF_MAX_ACQ_REL, then the remainder.
		 */
		if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
			ret = bman_acquire(bp_info->bp, bufs,
					   DPAA_MBUF_MAX_ACQ_REL, 0);
		} else {
			ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
		}
		/* If the pool holds fewer buffers than requested,
		 * bman_acquire() does not return a positive count.
		 */
		if (ret <= 0) {
			DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
					     ret);
			/* The API expects the exact number of requested
			 * buffers, so release everything acquired so far.
			 */
			dpaa_mbuf_free_bulk(pool, obj_table, n);
			return -ENOBUFS;
		}
		/* assigning mbuf from the acquired objects */
		for (i = 0; (i < ret) && bufs[i].addr; i++) {
			/* TODO-errata - observed that bufs may be null,
			 * i.e. first buffer is valid, remaining 6 buffers
			 * may be null.
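			 * Only the leading valid entries are consumed here;
			 * the loop stops at the first NULL address and the
			 * outer while loop re-issues the acquire for the
			 * remaining buffers.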
			 */
			bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
			m[n] = (struct rte_mbuf *)((char *)bufaddr
				- bp_info->meta_data_size);
			DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
					     (void *)bufaddr, (void *)m[n]);
			n++;
		}
	}

	DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid=%d",
			     n, bp_info->bpid);
	return 0;
}

static unsigned int
dpaa_mbuf_get_count(const struct rte_mempool *mp)
{
	struct dpaa_bp_info *bp_info;

	MEMPOOL_INIT_FUNC_TRACE();

	if (!mp || !mp->pool_data) {
		DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
		return 0;
	}

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

	return bman_query_free_buffers(bp_info->bp);
}

static int
dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
	      void *vaddr, rte_iova_t paddr, size_t len,
	      rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	struct dpaa_bp_info *bp_info;
	unsigned int total_elt_sz;

	MEMPOOL_INIT_FUNC_TRACE();

	if (!mp || !mp->pool_data) {
		DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
		return 0;
	}

	/* Update the PA-VA table */
	dpaax_iova_table_update(paddr, vaddr, len);

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

	DPAA_MEMPOOL_DEBUG("Req size %" PRIx64 " vs Available %u\n",
			   (uint64_t)len, total_elt_sz * mp->size);

	/* Check whether this memzone alone has enough space for all the
	 * elements of the pool.
	 */
	if (len >= total_elt_sz * mp->size)
		bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;
	struct dpaa_memseg *ms;

	/* For each memory chunk pinned to the mempool, an entry is added to
	 * a linked list of memsegs, which is searched when a PA to VA
	 * conversion is required.
	 */
	ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
	if (!ms) {
		DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
		DPAA_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
		/* If the element is not added, searching for it will simply
		 * fail and the logic will fall back to the traditional DPDK
		 * memseg traversal code. This is therefore not a blocking
		 * error; only a warning is logged.
		 */
		return 0;
	}

	ms->vaddr = vaddr;
	ms->iova = paddr;
	ms->len = len;
	/* Head insertions are generally faster than tail insertions as the
	 * buffers pinned are picked from the rear end.
	 */
	TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);

	return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
					       obj_cb, obj_cb_arg);
}

static const struct rte_mempool_ops dpaa_mpool_ops = {
	.name = DPAA_MEMPOOL_OPS_NAME,
	.alloc = dpaa_mbuf_create_pool,
	.free = dpaa_mbuf_free_pool,
	.enqueue = dpaa_mbuf_free_bulk,
	.dequeue = dpaa_mbuf_alloc_bulk,
	.get_count = dpaa_mbuf_get_count,
	.populate = dpaa_populate,
};

MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
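
/*
 * Minimal usage sketch (illustrative only, not part of the driver): on DPAA
 * platforms the bus typically registers these ops as the platform default,
 * so rte_pktmbuf_pool_create() picks them up automatically. A pool can also
 * be bound to them explicitly; the pool name and sizes below are arbitrary
 * example values.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("pkt_pool", 8192,
 *			RTE_MBUF_DEFAULT_BUF_SIZE + sizeof(struct rte_mbuf),
 *			256, sizeof(struct rte_pktmbuf_pool_private),
 *			rte_socket_id(), 0);
 *	if (mp != NULL) {
 *		rte_mempool_set_ops_byname(mp, DPAA_MEMPOOL_OPS_NAME, NULL);
 *		rte_pktmbuf_pool_init(mp, NULL);
 *		rte_mempool_populate_default(mp);
 *		rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
 *	}
 */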