/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

static roc_npa_lf_init_cb_t lf_init_cb;

int
roc_npa_lf_init_cb_register(roc_npa_lf_init_cb_t cb)
{
	if (lf_init_cb != NULL)
		return -EEXIST;

	lf_init_cb = cb;
	return 0;
}

uint16_t
roc_npa_pf_func_get(void)
{
	return idev_npa_pffunc_get();
}

void
roc_npa_pool_op_range_set(uint64_t aura_handle, uint64_t start_iova,
			  uint64_t end_iova)
{
	const uint64_t start = roc_npa_aura_handle_to_base(aura_handle) +
			       NPA_LF_POOL_OP_PTR_START0;
	const uint64_t end = roc_npa_aura_handle_to_base(aura_handle) +
			     NPA_LF_POOL_OP_PTR_END0;
	uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
	struct npa_lf *lf = idev_npa_obj_get();
	struct npa_aura_lim *lim;

	PLT_ASSERT(lf);
	lim = lf->aura_lim;

	/* Change the range bookkeeping in software as well as in hardware */
	lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
	lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);

	roc_store_pair(lim[reg].ptr_start, reg, start);
	roc_store_pair(lim[reg].ptr_end, reg, end);
}

void
roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
			  uint64_t end_iova)
{
	uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
	struct npa_lf *lf = idev_npa_obj_get();
	struct npa_aura_lim *lim;

	PLT_ASSERT(lf);
	lim = lf->aura_lim;

	/* Change only the bookkeeping in software */
	lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
	lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
}

void
roc_npa_aura_op_range_get(uint64_t aura_handle, uint64_t *start_iova,
			  uint64_t *end_iova)
{
	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	struct npa_aura_lim *lim;
	struct npa_lf *lf;

	lf = idev_npa_obj_get();
	PLT_ASSERT(lf);

	lim = lf->aura_lim;
	*start_iova = lim[aura_id].ptr_start;
	*end_iova = lim[aura_id].ptr_end;
}

static int
npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura,
		   struct npa_pool_s *pool)
{
	struct npa_cn20k_aq_enq_req *aura_init_req_cn20k, *pool_init_req_cn20k;
	struct npa_aq_enq_req *aura_init_req, *pool_init_req;
	struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
	struct mbox_dev *mdev = &m_box->dev[0];
	int rc = -ENOSPC, off;
	struct mbox *mbox;

	mbox = mbox_get(m_box);
	if (roc_model_is_cn20k()) {
		aura_init_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
		aura_init_req = (struct npa_aq_enq_req *)aura_init_req_cn20k;
	} else {
		aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
	}
	if (aura_init_req == NULL)
		goto exit;
	aura_init_req->aura_id = aura_id;
	aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_init_req->op = NPA_AQ_INSTOP_INIT;
	mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));

	if (roc_model_is_cn20k()) {
		pool_init_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
		pool_init_req = (struct npa_aq_enq_req *)pool_init_req_cn20k;
	} else {
		pool_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
	}
	if (pool_init_req == NULL)
		goto exit;
	pool_init_req->aura_id = aura_id;
	pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_init_req->op = NPA_AQ_INSTOP_INIT;
	mbox_memcpy(&pool_init_req->pool, pool, sizeof(*pool));

	rc = mbox_process(mbox);
	if (rc < 0)
		goto exit;

	off = mbox->rx_start +
	      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	aura_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
	off = mbox->rx_start + aura_init_rsp->hdr.next_msgoff;
	pool_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	if (aura_init_rsp->hdr.rc == 0 && pool_init_rsp->hdr.rc == 0)
		rc = 0;
	else
		rc = NPA_ERR_AURA_POOL_INIT;
exit:
	mbox_put(mbox);
	return rc;
}

static int
npa_aura_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura)
{
	struct npa_cn20k_aq_enq_req *aura_init_req_cn20k;
	struct npa_aq_enq_req *aura_init_req;
	struct npa_aq_enq_rsp *aura_init_rsp;
	struct mbox *mbox;
	int rc = -ENOSPC;

	mbox = mbox_get(m_box);
	if (roc_model_is_cn20k()) {
		aura_init_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
		aura_init_req = (struct npa_aq_enq_req *)aura_init_req_cn20k;
	} else {
		aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
	}
	if (aura_init_req == NULL)
		goto exit;
	aura_init_req->aura_id = aura_id;
	aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_init_req->op = NPA_AQ_INSTOP_INIT;
	mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));

	rc = mbox_process_msg(mbox, (void **)&aura_init_rsp);
	if (rc < 0)
		goto exit;

	if (aura_init_rsp->hdr.rc == 0)
		rc = 0;
	else
		rc = NPA_ERR_AURA_POOL_INIT;
exit:
	mbox_put(mbox);
	return rc;
}

static int
npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
{
	struct npa_cn20k_aq_enq_req *aura_req_cn20k, *pool_req_cn20k;
	struct npa_aq_enq_req *aura_req, *pool_req;
	struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
	struct mbox_dev *mdev = &m_box->dev[0];
	struct ndc_sync_op *ndc_req;
	int rc = -ENOSPC, off;
	struct mbox *mbox;
	uint64_t ptr;

	/* Procedure for disabling an aura/pool */
	plt_delay_us(10);

	/* Clear all the pointers from the aura */
	do {
		ptr = roc_npa_aura_op_alloc(aura_handle, 0);
	} while (ptr);

	mbox = mbox_get(m_box);
	if (roc_model_is_cn20k()) {
		pool_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
		pool_req = (struct npa_aq_enq_req *)pool_req_cn20k;
	} else {
		pool_req = mbox_alloc_msg_npa_aq_enq(mbox);
	}
	if (pool_req == NULL)
		goto exit;
	pool_req->aura_id = aura_id;
	pool_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_req->op = NPA_AQ_INSTOP_WRITE;
	pool_req->pool.ena = 0;
	pool_req->pool_mask.ena = ~pool_req->pool_mask.ena;

	if (roc_model_is_cn20k()) {
		aura_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
		aura_req = (struct npa_aq_enq_req *)aura_req_cn20k;
	} else {
		aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
	}
	if (aura_req == NULL)
		goto exit;
	aura_req->aura_id = aura_id;
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;
	aura_req->aura.ena = 0;
	aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;
	if (roc_model_is_cn20k()) {
		__io struct npa_cn20k_aura_s *aura_cn20k, *aura_mask_cn20k;

		/* The bit positions/width of bp_ena has changed in cn20k */
		aura_cn20k = (__io struct npa_cn20k_aura_s *)&aura_req->aura;
		aura_cn20k->bp_ena = 0;
		aura_mask_cn20k = (__io struct npa_cn20k_aura_s *)&aura_req->aura_mask;
		aura_mask_cn20k->bp_ena = ~aura_mask_cn20k->bp_ena;
	} else {
		aura_req->aura.bp_ena = 0;
		aura_req->aura_mask.bp_ena = ~aura_req->aura_mask.bp_ena;
	}

	rc = mbox_process(mbox);
	if (rc < 0)
		goto exit;

	off = mbox->rx_start +
	      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	pool_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	off = mbox->rx_start + pool_rsp->hdr.next_msgoff;
	aura_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	if (aura_rsp->hdr.rc != 0 || pool_rsp->hdr.rc != 0) {
		rc = NPA_ERR_AURA_POOL_FINI;
		goto exit;
	}

	if (roc_model_is_cn20k()) {
		/* In cn20k, NPA does not use NDC */
		rc = 0;
		goto exit;
	}

	/* Sync NDC-NPA for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL) {
		rc = -ENOSPC;
		goto exit;
	}
	ndc_req->npa_lf_sync = 1;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("Error on NDC-NPA LF sync, rc %d", rc);
		rc = NPA_ERR_AURA_POOL_FINI;
		goto exit;
	}
	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

static int
npa_aura_fini(struct mbox *m_box, uint32_t aura_id)
{
	struct npa_cn20k_aq_enq_req *aura_req_cn20k;
	struct npa_aq_enq_req *aura_req;
	struct npa_aq_enq_rsp *aura_rsp;
	struct ndc_sync_op *ndc_req;
	struct mbox *mbox;
	int rc = -ENOSPC;

	/* Procedure for disabling an aura/pool */
	plt_delay_us(10);

	mbox = mbox_get(m_box);
	if (roc_model_is_cn20k()) {
		aura_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
		aura_req = (struct npa_aq_enq_req *)aura_req_cn20k;
	} else {
		aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
	}
	if (aura_req == NULL)
		goto exit;
	aura_req->aura_id = aura_id;
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;
	aura_req->aura.ena = 0;
	aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;

	rc = mbox_process_msg(mbox, (void **)&aura_rsp);
	if (rc < 0)
		goto exit;

	if (aura_rsp->hdr.rc != 0) {
		rc = NPA_ERR_AURA_POOL_FINI;
		goto exit;
	}

	if (roc_model_is_cn20k()) {
		/* In cn20k, NPA does not use NDC */
		rc = 0;
		goto exit;
	}

	/* Sync NDC-NPA for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL) {
		rc = -ENOSPC;
		goto exit;
	}
	ndc_req->npa_lf_sync = 1;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("Error on NDC-NPA LF sync, rc %d", rc);
		rc = NPA_ERR_AURA_POOL_FINI;
		goto exit;
	}
	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

int
roc_npa_pool_op_pc_reset(uint64_t aura_handle)
{
	struct npa_lf *lf = idev_npa_obj_get();
	struct npa_aq_enq_req *pool_req;
	struct npa_aq_enq_rsp *pool_rsp;
	struct ndc_sync_op *ndc_req;
	struct mbox_dev *mdev;
	int rc = -ENOSPC, off;
	struct mbox *mbox;

	if (lf == NULL)
		return NPA_ERR_PARAM;

	mbox = mbox_get(lf->mbox);
	mdev = &mbox->dev[0];
	plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);

	pool_req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (pool_req == NULL)
		goto exit;
	pool_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	pool_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_req->op = NPA_AQ_INSTOP_WRITE;
	pool_req->pool.op_pc = 0;
	pool_req->pool_mask.op_pc = ~pool_req->pool_mask.op_pc;

	rc = mbox_process(mbox);
	if (rc < 0)
		goto exit;

	off = mbox->rx_start +
	      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	pool_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	if (pool_rsp->hdr.rc != 0) {
		rc = NPA_ERR_AURA_POOL_FINI;
		goto exit;
	}

	/* Sync NDC-NPA for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL) {
		rc = -ENOSPC;
		goto exit;
	}
	ndc_req->npa_lf_sync = 1;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("Error on NDC-NPA LF sync, rc %d", rc);
		rc = NPA_ERR_AURA_POOL_FINI;
		goto exit;
	}
	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

int
roc_npa_aura_drop_set(uint64_t aura_handle, uint64_t limit, bool ena)
{
	struct npa_cn20k_aq_enq_req *aura_req_cn20k;
	struct npa_aq_enq_req *aura_req;
	struct npa_lf *lf;
	struct mbox *mbox;
	int rc;

	lf = idev_npa_obj_get();
	if (lf == NULL)
		return NPA_ERR_DEVICE_NOT_BOUNDED;
	mbox = mbox_get(lf->mbox);
	if (roc_model_is_cn20k()) {
		aura_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
		aura_req = (struct npa_aq_enq_req *)aura_req_cn20k;
	} else {
		aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
	}
	if (aura_req == NULL) {
		rc = -ENOMEM;
		goto exit;
	}
	aura_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;

	aura_req->aura.aura_drop_ena = ena;
	aura_req->aura.aura_drop = limit;
	aura_req->aura_mask.aura_drop_ena =
		~(aura_req->aura_mask.aura_drop_ena);
	aura_req->aura_mask.aura_drop = ~(aura_req->aura_mask.aura_drop);
	rc = mbox_process(mbox);

exit:
	mbox_put(mbox);
	return rc;
}

static inline char *
npa_stack_memzone_name(struct npa_lf *lf, int pool_id, char *name)
{
	snprintf(name, PLT_MEMZONE_NAMESIZE, "roc_npa_stack_%x_%d", lf->pf_func,
		 pool_id);
	return name;
}

static inline const struct plt_memzone *
npa_stack_dma_alloc(struct npa_lf *lf, char *name, int pool_id, size_t size)
{
	const char *mz_name = npa_stack_memzone_name(lf, pool_id, name);
	size = PLT_ALIGN_CEIL(size, ROC_ALIGN);

	return plt_memzone_reserve_aligned(mz_name, size, 0, ROC_ALIGN);
}

static inline int
npa_stack_dma_free(struct npa_lf *lf, char *name, int pool_id)
{
	const struct plt_memzone *mz;

	mz = plt_memzone_lookup(npa_stack_memzone_name(lf, pool_id, name));
	if (mz == NULL)
		return NPA_ERR_PARAM;

	return plt_memzone_free(mz);
}

static inline int
bitmap_ctzll(uint64_t slab)
{
	if (slab == 0)
		return 0;

	return plt_ctz64(slab);
}

static int
find_free_aura(struct npa_lf *lf, uint32_t flags)
{
	struct plt_bitmap *bmp = lf->npa_bmp;
	uint64_t aura0_state = 0;
	uint64_t slab;
	uint32_t pos;
	int idx = -1;
	int rc;

	if (flags & ROC_NPA_ZERO_AURA_F) {
		/* Only look for zero aura */
		if (plt_bitmap_get(bmp, 0))
			return 0;
		plt_err("Zero aura already in use");
		return -1;
	}

	if (lf->zero_aura_rsvd) {
		/* Save and clear zero aura bit if needed */
		aura0_state = plt_bitmap_get(bmp, 0);
		if (aura0_state)
			plt_bitmap_clear(bmp, 0);
	}

	pos = 0;
	slab = 0;
	/* Scan from the beginning */
	plt_bitmap_scan_init(bmp);
	/* Scan bitmap to get the free pool */
	rc = plt_bitmap_scan(bmp, &pos, &slab);
	/* Empty bitmap */
	if (rc == 0) {
		plt_err("Auras exhausted");
		goto empty;
	}

	idx = pos + bitmap_ctzll(slab);
empty:
	if (lf->zero_aura_rsvd && aura0_state)
		plt_bitmap_set(bmp, 0);

	return idx;
}

static int
npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
			 const uint32_t block_count, struct npa_aura_s *aura,
			 struct npa_pool_s *pool, uint64_t *aura_handle,
			 uint32_t flags)
{
	int rc, aura_id, pool_id, stack_size, alloc_size;
	char name[PLT_MEMZONE_NAMESIZE];
	const struct plt_memzone *mz;

	/* Sanity check */
	if (!lf || !block_size || !block_count || !pool || !aura ||
	    !aura_handle)
		return NPA_ERR_PARAM;

	/* Block size should be cache line aligned and in range of 128B-128KB */
	if (block_size % ROC_ALIGN || block_size < 128 ||
	    block_size > ROC_NPA_MAX_BLOCK_SZ)
		return NPA_ERR_INVALID_BLOCK_SZ;

	/* Get aura_id from resource bitmap */
	roc_npa_dev_lock();
	aura_id = find_free_aura(lf, flags);
	if (aura_id < 0) {
		roc_npa_dev_unlock();
		return NPA_ERR_AURA_ID_ALLOC;
	}

	/* Mark pool as reserved */
	plt_bitmap_clear(lf->npa_bmp, aura_id);
	roc_npa_dev_unlock();

	/* Configuration based on each aura has separate pool(aura-pool pair) */
	pool_id = aura_id;
	rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools ||
	      aura_id >= (int)BIT_ULL(6 + lf->aura_sz)) ?
			   NPA_ERR_AURA_ID_ALLOC :
			   0;
	if (rc)
		goto exit;

	/* Allocate stack memory */
	stack_size = (block_count + lf->stack_pg_ptrs - 1) / lf->stack_pg_ptrs;
	alloc_size = stack_size * lf->stack_pg_bytes;

	mz = npa_stack_dma_alloc(lf, name, pool_id, alloc_size);
	if (mz == NULL) {
		rc = NPA_ERR_ALLOC;
		goto aura_res_put;
	}

	/* Update aura fields */
	aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
	aura->ena = 1;
	aura->shift = plt_log2_u32(block_count);
	aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
	aura->limit = block_count;
	aura->pool_caching = 1;
	aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
	aura->avg_con = 0;
	/* Many to one reduction */
	aura->err_qint_idx = aura_id % lf->qints;

	/* Update pool fields */
	pool->stack_base = mz->iova;
	pool->ena = 1;
	/* In opaque mode buffer size must be 0 */
	if (!pool->nat_align)
		pool->buf_size = 0;
	else
		pool->buf_size = block_size / ROC_ALIGN;
	pool->stack_max_pages = stack_size;
	pool->shift = plt_log2_u32(block_count);
	pool->shift = pool->shift < 8 ? 0 : pool->shift - 8;
	pool->ptr_start = 0;
	pool->ptr_end = ~0;
	pool->stack_caching = 1;
	pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);
	pool->avg_con = 0;

	/* Many to one reduction */
	pool->err_qint_idx = pool_id % lf->qints;

	/* Issue AURA_INIT and POOL_INIT op */
	rc = npa_aura_pool_init(lf->mbox, aura_id, aura, pool);
	if (rc)
		goto stack_mem_free;

	lf->aura_attr[aura_id].shift = aura->shift;
	lf->aura_attr[aura_id].limit = aura->limit;
	*aura_handle = roc_npa_aura_handle_gen(aura_id, lf->base);
	/* Update aura count */
	roc_npa_aura_op_cnt_set(*aura_handle, 0, block_count);
	/* Read it back to make sure aura count is updated */
	roc_npa_aura_op_cnt_get(*aura_handle);

	return 0;

stack_mem_free:
	plt_memzone_free(mz);
aura_res_put:
	roc_npa_dev_lock();
	plt_bitmap_set(lf->npa_bmp, aura_id);
	roc_npa_dev_unlock();
exit:
	return rc;
}

int
roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
		    uint32_t block_count, struct npa_aura_s *aura,
		    struct npa_pool_s *pool, uint32_t flags)
{
	struct npa_aura_s defaura;
	struct npa_pool_s defpool;
	struct idev_cfg *idev;
	struct npa_lf *lf;
	int rc;

	lf = idev_npa_obj_get();
	if (lf == NULL) {
		rc = NPA_ERR_DEVICE_NOT_BOUNDED;
		goto error;
	}

	idev = idev_get_cfg();
	if (idev == NULL) {
		rc = NPA_ERR_ALLOC;
		goto error;
	}

	if (flags & ROC_NPA_ZERO_AURA_F && !lf->zero_aura_rsvd) {
		rc = NPA_ERR_ALLOC;
		goto error;
	}

	if (aura == NULL) {
		memset(&defaura, 0, sizeof(struct npa_aura_s));
		aura = &defaura;
	}
	if (pool == NULL) {
		memset(&defpool, 0, sizeof(struct npa_pool_s));
		defpool.nat_align = 1;
		defpool.buf_offset = 1;
		pool = &defpool;
	}

	rc = npa_aura_pool_pair_alloc(lf, block_size, block_count, aura, pool,
				      aura_handle, flags);
	if (rc) {
		plt_err("Failed to alloc pool or aura rc=%d", rc);
		goto error;
	}

	plt_npa_dbg("lf=%p block_sz=%d block_count=%d aura_handle=0x%" PRIx64,
		    lf, block_size, block_count, *aura_handle);

	/* Just hold the reference of the object */
	__atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);
error:
	return rc;
}

static int
npa_aura_alloc(struct npa_lf *lf, const uint32_t block_count, int pool_id,
	       struct npa_aura_s *aura, uint64_t *aura_handle, uint32_t flags)
{
	int rc, aura_id;

	/* Sanity check */
	if (!lf || !aura || !aura_handle)
		return NPA_ERR_PARAM;

	roc_npa_dev_lock();
	/* Get aura_id from resource bitmap */
	aura_id = find_free_aura(lf, flags);
	if (aura_id < 0) {
		roc_npa_dev_unlock();
		return NPA_ERR_AURA_ID_ALLOC;
	}

	/* Mark aura as reserved */
	plt_bitmap_clear(lf->npa_bmp, aura_id);

	roc_npa_dev_unlock();
	rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools ||
	      aura_id >= (int)BIT_ULL(6 + lf->aura_sz)) ?
			   NPA_ERR_AURA_ID_ALLOC :
			   0;
	if (rc)
		goto exit;

	/* Update aura fields */
	aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
	aura->ena = 1;
	aura->shift = plt_log2_u32(block_count);
	aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
	aura->limit = block_count;
	aura->pool_caching = 1;
	aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
	aura->avg_con = 0;
	/* Many to one reduction */
	aura->err_qint_idx = aura_id % lf->qints;

	/* Issue AURA_INIT op */
	rc = npa_aura_init(lf->mbox, aura_id, aura);
	if (rc)
		return rc;

	lf->aura_attr[aura_id].shift = aura->shift;
	lf->aura_attr[aura_id].limit = aura->limit;
	*aura_handle = roc_npa_aura_handle_gen(aura_id, lf->base);

	return 0;

exit:
	return rc;
}

int
roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
		    struct npa_aura_s *aura, int pool_id, uint32_t flags)
{
	struct npa_aura_s defaura;
	struct idev_cfg *idev;
	struct npa_lf *lf;
	int rc;

	lf = idev_npa_obj_get();
	if (lf == NULL) {
		rc = NPA_ERR_DEVICE_NOT_BOUNDED;
		goto error;
	}

	idev = idev_get_cfg();
	if (idev == NULL) {
		rc = NPA_ERR_ALLOC;
		goto error;
	}

	if (flags & ROC_NPA_ZERO_AURA_F && !lf->zero_aura_rsvd) {
		rc = NPA_ERR_ALLOC;
		goto error;
	}

	if (aura == NULL) {
		memset(&defaura, 0, sizeof(struct npa_aura_s));
		aura = &defaura;
	}

	rc = npa_aura_alloc(lf, block_count, pool_id, aura, aura_handle, flags);
	if (rc) {
		plt_err("Failed to alloc aura rc=%d", rc);
		goto error;
	}

	plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, *aura_handle);

	/* Just hold the reference of the object */
	__atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);
error:
	return rc;
}

int
roc_npa_aura_limit_modify(uint64_t aura_handle, uint16_t aura_limit)
{
	struct npa_cn20k_aq_enq_req *aura_req_cn20k;
	struct npa_aq_enq_req *aura_req;
	struct npa_lf *lf;
	struct mbox *mbox;
	int rc;

	lf = idev_npa_obj_get();
	if (lf == NULL)
		return NPA_ERR_DEVICE_NOT_BOUNDED;

	mbox = mbox_get(lf->mbox);
	if (roc_model_is_cn20k()) {
		aura_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
		aura_req = (struct npa_aq_enq_req *)aura_req_cn20k;
	} else {
		aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
	}
	if (aura_req == NULL) {
		rc = -ENOMEM;
		goto exit;
	}
	aura_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;

	aura_req->aura.limit = aura_limit;
	aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
	rc = mbox_process(mbox);
	if (rc)
		goto exit;
	lf->aura_attr[aura_req->aura_id].limit = aura_req->aura.limit;
exit:
	mbox_put(mbox);
	return rc;
}

static int
npa_aura_pool_pair_free(struct npa_lf *lf, uint64_t aura_handle)
{
	char name[PLT_MEMZONE_NAMESIZE];
	int aura_id, pool_id, rc;

	if (!lf || !aura_handle)
		return NPA_ERR_PARAM;

	aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	pool_id = aura_id;
	rc = npa_aura_pool_fini(lf->mbox, aura_id, aura_handle);
	rc |= npa_stack_dma_free(lf, name, pool_id);
	memset(&lf->aura_attr[aura_id], 0, sizeof(struct npa_aura_attr));

	roc_npa_dev_lock();
	plt_bitmap_set(lf->npa_bmp, aura_id);
	roc_npa_dev_unlock();

	return rc;
}

int
roc_npa_pool_destroy(uint64_t aura_handle)
{
	struct npa_lf *lf = idev_npa_obj_get();
	int rc = 0;

	plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);
	rc = npa_aura_pool_pair_free(lf, aura_handle);
	if (rc)
		plt_err("Failed to destroy pool or aura rc=%d", rc);

	/* Release the reference of npa */
	rc |= npa_lf_fini();
	return rc;
}

static int
npa_aura_free(struct npa_lf *lf, uint64_t aura_handle)
{
	int aura_id, rc;

	if (!lf || !aura_handle)
		return NPA_ERR_PARAM;

	aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	rc = npa_aura_fini(lf->mbox, aura_id);

	if (rc)
		return rc;

	memset(&lf->aura_attr[aura_id], 0, sizeof(struct npa_aura_attr));

	roc_npa_dev_lock();
	plt_bitmap_set(lf->npa_bmp, aura_id);
	roc_npa_dev_unlock();

	return rc;
}

int
roc_npa_aura_destroy(uint64_t aura_handle)
{
	struct npa_lf *lf = idev_npa_obj_get();
	int rc = 0;

	plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);
	rc = npa_aura_free(lf, aura_handle);
	if (rc)
		plt_err("Failed to destroy aura rc=%d", rc);

	/* Release the reference of npa */
	rc |= npa_lf_fini();
	return rc;
}

int
roc_npa_pool_range_update_check(uint64_t aura_handle)
{
	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	struct npa_cn20k_aq_enq_req *req_cn20k;
	__io struct npa_pool_s *pool;
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;
	struct npa_aura_lim *lim;
	struct mbox *mbox;
	struct npa_lf *lf;
	int rc;

	lf = idev_npa_obj_get();
	if (lf == NULL)
		return NPA_ERR_PARAM;

	lim = lf->aura_lim;

	mbox = mbox_get(lf->mbox);
	if (roc_model_is_cn20k()) {
		req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
		req = (struct npa_aq_enq_req *)req_cn20k;
	} else {
		req = mbox_alloc_msg_npa_aq_enq(mbox);
	}
	if (req == NULL) {
		rc = -ENOSPC;
		goto exit;
	}

	req->aura_id = aura_id;
	req->ctype = NPA_AQ_CTYPE_POOL;
	req->op = NPA_AQ_INSTOP_READ;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		plt_err("Failed to get pool(0x%" PRIx64 ") context", aura_id);
		goto exit;
	}

	pool = &rsp->pool;
	if (lim[aura_id].ptr_start != pool->ptr_start ||
	    lim[aura_id].ptr_end != pool->ptr_end) {
		plt_err("Range update failed on pool(0x%" PRIx64 ")", aura_id);
		rc = NPA_ERR_PARAM;
		goto exit;
	}

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

uint64_t
roc_npa_zero_aura_handle(void)
{
	struct idev_cfg *idev;
	struct npa_lf *lf;

	lf = idev_npa_obj_get();
	if (lf == NULL)
		return NPA_ERR_DEVICE_NOT_BOUNDED;

	idev = idev_get_cfg();
	if (idev == NULL)
		return NPA_ERR_ALLOC;

	/* Return aura handle only if reserved */
	if (lf->zero_aura_rsvd)
		return roc_npa_aura_handle_gen(0, lf->base);
	return 0;
}

int
roc_npa_aura_bp_configure(uint64_t aura_handle, uint16_t bpid, uint8_t bp_intf, uint8_t bp_thresh,
			  bool enable)
{
	/* TODO: Add support for CN20K */
	uint32_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	struct npa_lf *lf = idev_npa_obj_get();
	struct npa_aq_enq_req *req;
	struct mbox *mbox;
	int rc = 0;

	plt_npa_dbg("Setting BPID %u BP_INTF 0x%x BP_THRESH %u enable %u on aura %" PRIx64,
		    bpid, bp_intf, bp_thresh, enable, aura_handle);

	if (lf == NULL)
		return NPA_ERR_PARAM;

	mbox = mbox_get(lf->mbox);
	req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (req == NULL) {
		rc = -ENOMEM;
		goto fail;
	}

	req->aura_id = aura_id;
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_WRITE;

	if (enable) {
		if (bp_intf & 0x1) {
			req->aura.nix0_bpid = bpid;
			req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
		} else {
			req->aura.nix1_bpid = bpid;
			req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
		}
		req->aura.bp = bp_thresh;
		req->aura_mask.bp = ~(req->aura_mask.bp);
	} else {
		req->aura.bp = 0;
		req->aura_mask.bp = ~(req->aura_mask.bp);
	}

	req->aura.bp_ena = bp_intf;
	req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);

	rc = mbox_process(mbox);
	if (rc)
		goto fail;

	lf->aura_attr[aura_id].nix0_bpid = req->aura.nix0_bpid;
	lf->aura_attr[aura_id].nix1_bpid = req->aura.nix1_bpid;
	lf->aura_attr[aura_id].bp_ena = req->aura.bp_ena;
	lf->aura_attr[aura_id].bp = req->aura.bp;
fail:
	mbox_put(mbox);
	return rc;
}

static inline int
npa_attach(struct mbox *m_box)
{
	struct mbox *mbox = mbox_get(m_box);
	struct rsrc_attach_req *req;
	int rc;

	req = mbox_alloc_msg_attach_resources(mbox);
	if (req == NULL) {
		rc = -ENOSPC;
		goto exit;
	}
	req->modify = true;
	req->npalf = true;

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

static inline int
npa_detach(struct mbox *m_box)
{
	struct mbox *mbox = mbox_get(m_box);
	struct rsrc_detach_req *req;
	int rc;

	req = mbox_alloc_msg_detach_resources(mbox);
	if (req == NULL) {
		rc = -ENOSPC;
		goto exit;
	}
	req->partial = true;
	req->npalf = true;

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

static inline int
npa_get_msix_offset(struct mbox *m_box, uint16_t *npa_msixoff)
{
	struct mbox *mbox = mbox_get(m_box);
	struct msix_offset_rsp *msix_rsp;
	int rc;

	/* Initialize msixoff */
	*npa_msixoff = 0;
	/* Get NPA MSIX vector offsets */
	mbox_alloc_msg_msix_offset(mbox);
	rc = mbox_process_msg(mbox, (void *)&msix_rsp);
	if (rc == 0)
		*npa_msixoff = msix_rsp->npa_msixoff;

	mbox_put(mbox);
	return rc;
}

static inline int
npa_lf_alloc(struct npa_lf *lf)
{
	struct mbox *mbox = mbox_get(lf->mbox);
	struct npa_lf_alloc_req *req;
	struct npa_lf_alloc_rsp *rsp;
	int rc;

	req = mbox_alloc_msg_npa_lf_alloc(mbox);
	if (req == NULL) {
		rc = -ENOSPC;
		goto exit;
	}
	req->aura_sz = lf->aura_sz;
	req->nr_pools = lf->nr_pools;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		rc = NPA_ERR_ALLOC;
		goto exit;
	}

	lf->stack_pg_ptrs = rsp->stack_pg_ptrs;
	lf->stack_pg_bytes = rsp->stack_pg_bytes;
	lf->qints = rsp->qints;

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

static int
npa_lf_free(struct mbox *mail_box)
{
	struct mbox *mbox = mbox_get(mail_box);
	int rc;

	mbox_alloc_msg_npa_lf_free(mbox);
	rc = mbox_process(mbox);
	mbox_put(mbox);
	return rc;
}

static inline uint32_t
aura_size_to_u32(uint8_t val)
{
	if (val == NPA_AURA_SZ_0)
		return 128;
	if (val >= NPA_AURA_SZ_MAX)
		return BIT_ULL(20);

	return 1 << (val + 6);
}

static inline void
pool_count_aura_sz_get(uint32_t *nr_pools, uint8_t *aura_sz)
{
	uint32_t val;

	val = roc_idev_npa_maxpools_get();
	if (val < aura_size_to_u32(NPA_AURA_SZ_128))
		val = 128;
	if (val > aura_size_to_u32(NPA_AURA_SZ_1M))
		val = BIT_ULL(20);

	roc_idev_npa_maxpools_set(val);
	*nr_pools = val;
	*aura_sz = plt_log2_u32(val) - 6;
}

static int
npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
{
	uint32_t i, bmp_sz, nr_pools;
	uint8_t aura_sz;
	int rc;

	/* Sanity checks */
	if (!lf || !base || !mbox)
		return NPA_ERR_PARAM;

	if (base & ROC_AURA_ID_MASK)
		return NPA_ERR_BASE_INVALID;

	pool_count_aura_sz_get(&nr_pools, &aura_sz);
	if (aura_sz == NPA_AURA_SZ_0 || aura_sz >= NPA_AURA_SZ_MAX)
		return NPA_ERR_PARAM;

	memset(lf, 0x0, sizeof(*lf));
	lf->base = base;
	lf->aura_sz = aura_sz;
	lf->nr_pools = nr_pools;
	lf->mbox = mbox;

	rc = npa_lf_alloc(lf);
	if (rc)
		goto exit;

	bmp_sz = plt_bitmap_get_memory_footprint(nr_pools);

	/* Allocate memory for bitmap */
	lf->npa_bmp_mem = plt_zmalloc(bmp_sz, ROC_ALIGN);
	if (lf->npa_bmp_mem == NULL) {
		rc = NPA_ERR_ALLOC;
		goto lf_free;
	}

	/* Initialize pool resource bitmap array */
	lf->npa_bmp = plt_bitmap_init(nr_pools, lf->npa_bmp_mem, bmp_sz);
	if (lf->npa_bmp == NULL) {
		rc = NPA_ERR_PARAM;
		goto bmap_mem_free;
	}

	/* Mark all pools available */
	for (i = 0; i < nr_pools; i++)
		plt_bitmap_set(lf->npa_bmp, i);

	/* Reserve zero aura for all models other than CN9K */
	if (!roc_model_is_cn9k())
		lf->zero_aura_rsvd = true;

	/* Allocate memory for qint context */
	lf->npa_qint_mem = plt_zmalloc(sizeof(struct npa_qint) * nr_pools, 0);
	if (lf->npa_qint_mem == NULL) {
		rc = NPA_ERR_ALLOC;
		goto bmap_free;
	}

	/* Allocate memory for npa_aura_lim */
	lf->aura_lim = plt_zmalloc(sizeof(struct npa_aura_lim) * nr_pools, 0);
	if (lf->aura_lim == NULL) {
		rc = NPA_ERR_ALLOC;
		goto qint_free;
	}

	/* Allocate per-aura attribute */
	lf->aura_attr = plt_zmalloc(sizeof(struct npa_aura_attr) * nr_pools, 0);
	if (lf->aura_attr == NULL) {
		rc = NPA_ERR_PARAM;
		goto lim_free;
	}

	/* Init aura start & end limits */
	for (i = 0; i < nr_pools; i++) {
		lf->aura_lim[i].ptr_start = UINT64_MAX;
		lf->aura_lim[i].ptr_end = 0x0ull;
	}

	return 0;

lim_free:
	plt_free(lf->aura_lim);
qint_free:
	plt_free(lf->npa_qint_mem);
bmap_free:
	plt_bitmap_free(lf->npa_bmp);
bmap_mem_free:
	plt_free(lf->npa_bmp_mem);
lf_free:
	npa_lf_free(lf->mbox);
exit:
	return rc;
}

static int
npa_dev_fini(struct npa_lf *lf)
{
	if (!lf)
		return NPA_ERR_PARAM;

	plt_free(lf->aura_lim);
	plt_free(lf->npa_qint_mem);
	plt_bitmap_free(lf->npa_bmp);
	plt_free(lf->npa_bmp_mem);
	plt_free(lf->aura_attr);

	return npa_lf_free(lf->mbox);
}

int
npa_lf_init(struct dev *dev, struct plt_pci_device *pci_dev)
{
	uint16_t npa_msixoff = 0;
	struct idev_cfg *idev;
	struct npa_lf *lf;
	int rc;

	idev = idev_get_cfg();
	if (idev == NULL)
		return NPA_ERR_ALLOC;

	/* Not the first PCI device */
	if (__atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST) != 0)
		return 0;

	if (lf_init_cb) {
		rc = (*lf_init_cb)(pci_dev);
		if (rc)
			goto fail;
	}

	rc = npa_attach(dev->mbox);
	if (rc)
		goto fail;

	rc = npa_get_msix_offset(dev->mbox, &npa_msixoff);
	if (rc)
		goto npa_detach;

	lf = &dev->npa;
	rc = npa_dev_init(lf, dev->bar2 + (RVU_BLOCK_ADDR_NPA << 20),
			  dev->mbox);
	if (rc)
		goto npa_detach;

	lf->pf_func = dev->pf_func;
	lf->npa_msixoff = npa_msixoff;
	lf->intr_handle = pci_dev->intr_handle;
	lf->pci_dev = pci_dev;

	idev->npa_pf_func = dev->pf_func;
	idev->npa = lf;
	plt_wmb();

	rc = npa_register_irqs(lf);
	if (rc)
		goto npa_fini;

	plt_npa_dbg("npa=%p max_pools=%d pf_func=0x%x msix=0x%x", lf,
		    roc_idev_npa_maxpools_get(), lf->pf_func, npa_msixoff);

	return 0;

npa_fini:
	npa_dev_fini(idev->npa);
npa_detach:
	npa_detach(dev->mbox);
fail:
	__atomic_fetch_sub(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);
	return rc;
}

int
npa_lf_fini(void)
{
	struct idev_cfg *idev;
	int rc = 0;

	idev = idev_get_cfg();
	if (idev == NULL)
		return NPA_ERR_ALLOC;

	/* Not the last PCI device */
	if (__atomic_fetch_sub(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST) - 1 != 0)
		return 0;

	npa_unregister_irqs(idev->npa);
	rc |= npa_dev_fini(idev->npa);
	rc |= npa_detach(idev->npa->mbox);
	idev_set_defaults(idev);

	return rc;
}

int
roc_npa_dev_init(struct roc_npa *roc_npa)
{
	struct plt_pci_device *pci_dev;
	struct npa *npa;
	struct dev *dev;
	int rc;

	if (roc_npa == NULL || roc_npa->pci_dev == NULL)
		return NPA_ERR_PARAM;

	PLT_STATIC_ASSERT(sizeof(struct npa) <= ROC_NPA_MEM_SZ);
	npa = roc_npa_to_npa_priv(roc_npa);
	memset(npa, 0, sizeof(*npa));
	pci_dev = roc_npa->pci_dev;
	dev = &npa->dev;

	/* Initialize device */
	rc = dev_init(dev, pci_dev);
	if (rc) {
		plt_err("Failed to init roc device");
		goto fail;
	}

	npa->pci_dev = pci_dev;
	dev->drv_inited = true;
fail:
	return rc;
}

int
roc_npa_dev_fini(struct roc_npa *roc_npa)
{
	struct npa *npa = roc_npa_to_npa_priv(roc_npa);

	if (npa == NULL)
		return NPA_ERR_PARAM;

	npa->dev.drv_inited = false;
	return dev_fini(&npa->dev, npa->pci_dev);
}

void
roc_npa_dev_lock(void)
{
	struct idev_cfg *idev = idev_get_cfg();

	if (idev != NULL)
		plt_spinlock_lock(&idev->npa_dev_lock);
}

void
roc_npa_dev_unlock(void)
{
	struct idev_cfg *idev = idev_get_cfg();

	if (idev != NULL)
		plt_spinlock_unlock(&idev->npa_dev_lock);
}
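
/*
 * Illustrative usage sketch (kept as a comment, not compiled as part of this
 * file): how a caller might create and tear down an aura/pool pair with the
 * APIs above. Passing NULL for the aura and pool contexts selects the
 * defaults set up in roc_npa_pool_create() (natural alignment enabled); the
 * block size of 4096 and count of 1024 are arbitrary example values.
 *
 *	uint64_t aura_handle = 0;
 *	int rc;
 *
 *	rc = roc_npa_pool_create(&aura_handle, 4096, 1024, NULL, NULL, 0);
 *	if (rc)
 *		return rc;
 *
 *	... program buffer ranges and populate the pool before use ...
 *
 *	rc = roc_npa_pool_destroy(aura_handle);
 */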