/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2019 NXP
 *
 */

#include <unistd.h>
#include <stdio.h>
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include "rte_dpaa2_mempool.h"

#include "fslmc_vfio.h"
#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_hw_mempool.h"
#include "dpaa2_hw_mempool_logs.h"

#include <dpaax_iova_table.h>

struct dpaa2_bp_info *rte_dpaa2_bpid_info;
static struct dpaa2_bp_list *h_bp_list;

static int
rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
	struct dpaa2_bp_list *bp_list;
	struct dpaa2_dpbp_dev *avail_dpbp;
	struct dpaa2_bp_info *bp_info;
	struct dpbp_attr dpbp_attr;
	uint32_t bpid;
	int ret;

	avail_dpbp = dpaa2_alloc_dpbp_dev();

	if (rte_dpaa2_bpid_info == NULL) {
		rte_dpaa2_bpid_info = (struct dpaa2_bp_info *)rte_malloc(NULL,
				sizeof(struct dpaa2_bp_info) * MAX_BPID,
				RTE_CACHE_LINE_SIZE);
		if (rte_dpaa2_bpid_info == NULL)
			return -ENOMEM;
		memset(rte_dpaa2_bpid_info, 0,
		       sizeof(struct dpaa2_bp_info) * MAX_BPID);
	}

	if (!avail_dpbp) {
		DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
		return -ENOENT;
	}

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_MEMPOOL_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			goto err1;
		}
	}

	ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
	if (ret != 0) {
		DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
				  ret);
		goto err1;
	}

	ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
				  avail_dpbp->token, &dpbp_attr);
	if (ret != 0) {
		DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
				  ret);
		goto err2;
	}

	bp_info = rte_malloc(NULL,
			     sizeof(struct dpaa2_bp_info),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_info) {
		DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
		ret = -ENOMEM;
		goto err2;
	}

	/* Allocate the bp_list which will be added into global_bp_list */
	bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_list) {
		DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
		ret = -ENOMEM;
		goto err3;
	}

	/* Set parameters of buffer pool list */
	bp_list->buf_pool.num_bufs = mp->size;
	bp_list->buf_pool.size = mp->elt_size
			- sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
	bp_list->buf_pool.bpid = dpbp_attr.bpid;
	bp_list->buf_pool.h_bpool_mem = NULL;
	bp_list->buf_pool.dpbp_node = avail_dpbp;
	/* Identification for our offloaded pool_data structure */
	bp_list->dpaa2_ops_index = mp->ops_index;
	bp_list->next = h_bp_list;
	bp_list->mp = mp;

	bpid = dpbp_attr.bpid;

	rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
				+ rte_pktmbuf_priv_size(mp);
	rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
	rte_dpaa2_bpid_info[bpid].bpid = bpid;

	rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
		   sizeof(struct dpaa2_bp_info));
	mp->pool_data = (void *)bp_info;

	DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);

	h_bp_list = bp_list;
	return 0;
err3:
	rte_free(bp_info);
err2:
	dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
err1:
	dpaa2_free_dpbp_dev(avail_dpbp);

	return ret;
}
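/*
 * Undo rte_hw_mbuf_create_pool(): disable the DPBP, unlink this pool's
 * bp_list from the global list, and free the resources taken at create
 * time.
 */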
static void
rte_hw_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa2_bp_info *bpinfo;
	struct dpaa2_bp_list *bp;
	struct dpaa2_dpbp_dev *dpbp_node;

	if (!mp->pool_data) {
		DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
		return;
	}

	bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
	bp = bpinfo->bp_list;
	dpbp_node = bp->buf_pool.dpbp_node;

	dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);

	if (h_bp_list == bp) {
		h_bp_list = h_bp_list->next;
		/* free the unlinked head node as well */
		rte_free(bp);
	} else { /* if it is not the first node */
		struct dpaa2_bp_list *prev = h_bp_list, *temp;
		temp = h_bp_list->next;
		while (temp) {
			if (temp == bp) {
				prev->next = temp->next;
				rte_free(bp);
				break;
			}
			prev = temp;
			temp = temp->next;
		}
	}

	rte_free(mp->pool_data);
	dpaa2_free_dpbp_dev(dpbp_node);
}

static void
rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
			void * const *obj_table,
			uint32_t bpid,
			uint32_t meta_data_size,
			int count)
{
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp;
	int ret;
	int i, n, retry_count;
	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			DPAA2_MEMPOOL_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	/* Create a release descriptor required for releasing
	 * buffers into QBMAN
	 */
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, bpid);

	n = count % DPAA2_MBUF_MAX_ACQ_REL;
	if (unlikely(!n))
		goto aligned;

	/* convert mbufs to buffers for the remainder */
	for (i = 0; i < n; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
				+ meta_data_size;
#else
		bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
#endif
	}

	/* feed them to bman */
	retry_count = 0;
	while ((ret = qbman_swp_release(swp, &releasedesc, bufs, n)) ==
			-EBUSY) {
		retry_count++;
		if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
			DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
			return;
		}
	}

aligned:
	/* if there are more buffers to free */
	while (n < count) {
		/* convert mbufs to buffers */
		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			bufs[i] = (uint64_t)
				rte_mempool_virt2iova(obj_table[n + i])
					+ meta_data_size;
#else
			bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
#endif
		}

		retry_count = 0;
		while ((ret = qbman_swp_release(swp, &releasedesc, bufs,
					DPAA2_MBUF_MAX_ACQ_REL)) == -EBUSY) {
			retry_count++;
			if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
				DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
				return;
			}
		}
		n += DPAA2_MBUF_MAX_ACQ_REL;
	}
}
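/*
 * Buffers handed to QBMAN point at the data area, which sits
 * meta_data_size bytes (struct rte_mbuf plus the pool private area)
 * past the mbuf header; rte_dpaa2_mbuf_from_buf_addr() below performs
 * the reverse translation.
 */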
uint16_t
rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(mp);
	if (!(bp_info->bp_list)) {
		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
		return -ENOMEM;
	}

	return bp_info->bpid;
}

struct rte_mbuf *
rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(mp);
	if (!(bp_info->bp_list)) {
		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
		return NULL;
	}

	return (struct rte_mbuf *)((uint8_t *)buf_addr -
			bp_info->meta_data_size);
}

int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
			  void **obj_table, unsigned int count)
{
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	static int alloc;
#endif
	struct qbman_swp *swp;
	uint16_t bpid;
	size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
	int i, ret;
	unsigned int n = 0;
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(pool);

	if (!(bp_info->bp_list)) {
		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
		return -ENOENT;
	}

	bpid = bp_info->bpid;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			DPAA2_MEMPOOL_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return ret;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (n < count) {
		/* Acquire is all-or-nothing, so we drain in chunks of
		 * DPAA2_MBUF_MAX_ACQ_REL, then the remainder.
		 */
		if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} else {
			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
						count - n);
		}
		/* If fewer buffers than requested are available in the
		 * pool, qbman_swp_acquire() returns 0.
		 */
		if (ret <= 0) {
			DPAA2_MEMPOOL_DP_DEBUG(
				"Buffer acquire failed with err code: %d", ret);
			/* The API expects the exact number of requested
			 * buffers, so release everything acquired so far.
			 */
			rte_dpaa2_mbuf_release(pool, obj_table, bpid,
					       bp_info->meta_data_size, n);
			return -ENOBUFS;
		}
		/* assigning mbuf from the acquired objects */
		for (i = 0; (i < ret) && bufs[i]; i++) {
			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
			obj_table[n] = (struct rte_mbuf *)
				(bufs[i] - bp_info->meta_data_size);
			DPAA2_MEMPOOL_DP_DEBUG(
				"Acquired %p address %p from BMAN\n",
				(void *)bufs[i], (void *)obj_table[n]);
			n++;
		}
	}

#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	alloc += n;
	DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
			       alloc, count, n);
#endif
	return 0;
}

static int
rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
		      void * const *obj_table, unsigned int n)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(pool);
	if (!(bp_info->bp_list)) {
		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
		return -ENOENT;
	}
	rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
			       bp_info->meta_data_size, n);

	return 0;
}
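/*
 * Report the number of buffers currently free in the hardware pool, as
 * seen by the DPBP object; mbufs cached by the mempool layer are counted
 * separately by the mempool library.
 */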
static unsigned int
rte_hw_mbuf_get_count(const struct rte_mempool *mp)
{
	int ret;
	unsigned int num_of_bufs = 0;
	struct dpaa2_bp_info *bp_info;
	struct dpaa2_dpbp_dev *dpbp_node;
	struct fsl_mc_io mc_io;

	if (!mp || !mp->pool_data) {
		DPAA2_MEMPOOL_ERR("Invalid mempool provided");
		return 0;
	}

	bp_info = (struct dpaa2_bp_info *)mp->pool_data;
	dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;

	/* When a secondary process reads the stats, the MCP portal cached
	 * in the private data may hold the primary process address, so
	 * fetch the MCP portal address mapped into this process instead.
	 */
	mc_io.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
	ret = dpbp_get_num_free_bufs(&mc_io, CMD_PRI_LOW,
				     dpbp_node->token, &num_of_bufs);
	if (ret) {
		DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
				  ret);
		return 0;
	}

	DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);

	return num_of_bufs;
}

static int
dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
	       void *vaddr, rte_iova_t paddr, size_t len,
	       rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	struct rte_memseg_list *msl;
	/* A memseg list exists only when the memory is not external, so a
	 * DMA map is required only for user-provided (external) memory.
	 */
	msl = rte_mem_virt2memseg_list(vaddr);

	if (!msl) {
		DPAA2_MEMPOOL_DEBUG("Memsegment is External.\n");
		rte_fslmc_vfio_mem_dmamap((size_t)vaddr,
				(size_t)paddr, (size_t)len);
	}
	/* Insert entry into the PA->VA Table */
	dpaax_iova_table_update(paddr, vaddr, len);

	return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
					      len, obj_cb, obj_cb_arg);
}

static const struct rte_mempool_ops dpaa2_mpool_ops = {
	.name = DPAA2_MEMPOOL_OPS_NAME,
	.alloc = rte_hw_mbuf_create_pool,
	.free = rte_hw_mbuf_free_pool,
	.enqueue = rte_hw_mbuf_free_bulk,
	.dequeue = rte_dpaa2_mbuf_alloc_bulk,
	.get_count = rte_hw_mbuf_get_count,
	.populate = dpaa2_populate,
};

MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);

RTE_LOG_REGISTER(dpaa2_logtype_mempool, mempool.dpaa2, NOTICE);
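
/*
 * Illustrative usage (not part of this driver): a minimal sketch of how
 * an application binds a packet pool to these ops by name; pool name and
 * sizing values below are example choices, not requirements.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create_by_ops("pkt_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE,
 *			DPAA2_MEMPOOL_OPS_NAME, rte_socket_id());
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "Cannot create dpaa2 mbuf pool\n");
 */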