/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#define SSO_XAQ_CACHE_CNT (0x3)
#define SSO_XAQ_RSVD_CNT  (0x4)
#define SSO_XAQ_SLACK	  (16)

/* Private functions. */
int
sso_lf_alloc(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf,
	     void **rsp)
{
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc = -ENOSPC;

	switch (lf_type) {
	case SSO_LF_TYPE_HWS: {
		struct ssow_lf_alloc_req *req;

		req = mbox_alloc_msg_ssow_lf_alloc(mbox);
		if (req == NULL)
			goto exit;
		req->hws = nb_lf;
	} break;
	case SSO_LF_TYPE_HWGRP: {
		struct sso_lf_alloc_req *req;

		req = mbox_alloc_msg_sso_lf_alloc(mbox);
		if (req == NULL)
			goto exit;
		req->hwgrps = nb_lf;
	} break;
	default:
		break;
	}

	rc = mbox_process_msg(mbox, rsp);
	if (rc) {
		rc = -EIO;
		goto exit;
	}

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

int
sso_lf_free(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf)
{
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc = -ENOSPC;

	switch (lf_type) {
	case SSO_LF_TYPE_HWS: {
		struct ssow_lf_free_req *req;

		req = mbox_alloc_msg_ssow_lf_free(mbox);
		if (req == NULL)
			goto exit;
		req->hws = nb_lf;
	} break;
	case SSO_LF_TYPE_HWGRP: {
		struct sso_lf_free_req *req;

		req = mbox_alloc_msg_sso_lf_free(mbox);
		if (req == NULL)
			goto exit;
		req->hwgrps = nb_lf;
	} break;
	default:
		break;
	}

	rc = mbox_process(mbox);
	if (rc) {
		rc = -EIO;
		goto exit;
	}

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

static int
sso_rsrc_attach(struct roc_sso *roc_sso, enum sso_lf_type lf_type,
		uint16_t nb_lf)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
	struct mbox *mbox = mbox_get(dev->mbox);
	struct rsrc_attach_req *req;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_attach_resources(mbox);
	if (req == NULL)
		goto exit;
	switch (lf_type) {
	case SSO_LF_TYPE_HWS:
		req->ssow = nb_lf;
		break;
	case SSO_LF_TYPE_HWGRP:
		req->sso = nb_lf;
		break;
	default:
		rc = SSO_ERR_PARAM;
		goto exit;
	}

	req->modify = true;
	if (mbox_process(mbox)) {
		rc = -EIO;
		goto exit;
	}

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

static int
sso_rsrc_detach(struct roc_sso *roc_sso, enum sso_lf_type lf_type)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
	struct rsrc_detach_req *req;
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc = -ENOSPC;

	req = mbox_alloc_msg_detach_resources(mbox);
	if (req == NULL)
		goto exit;
	switch (lf_type) {
	case SSO_LF_TYPE_HWS:
		req->ssow = true;
		break;
	case SSO_LF_TYPE_HWGRP:
		req->sso = true;
		break;
	default:
		rc = SSO_ERR_PARAM;
		goto exit;
	}

	req->partial = true;
	if (mbox_process(mbox)) {
		rc = -EIO;
		goto exit;
	}

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

static int
sso_rsrc_get(struct roc_sso *roc_sso)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
	struct free_rsrcs_rsp *rsrc_cnt;
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc;

	mbox_alloc_msg_free_rsrc_cnt(mbox);
	rc = mbox_process_msg(mbox, (void **)&rsrc_cnt);
	if (rc) {
		plt_err("Failed to get free resource count");
		rc = -EIO;
		goto exit;
	}

	roc_sso->max_hwgrp = rsrc_cnt->sso;
	roc_sso->max_hws = rsrc_cnt->ssow;

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

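/* Program HWS -> HWGRP links by writing SSOW_LF_GWS_GRPMSK_CHG. Up to four
 * HWGRPs are packed per 64-bit write, one per 16-bit lane: the HWGRP id sits
 * in the low bits, 'set' is shifted to bit 12 and the enable flag to bit 14,
 * while unused lanes keep the 0x8000 filler value. Illustrative example:
 * linking (enable = 1) HWGRPs {1, 2} with set = 0 packs lanes 0x4001 and
 * 0x4002 and leaves lanes 2-3 at 0x8000, i.e. reg = 0x8000800040024001.
 */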
void
sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp, uint16_t hwgrp[],
		    uint16_t n, uint8_t set, uint16_t enable)
{
	uint64_t reg;
	int i, j, k;

	i = 0;
	while (n) {
		uint64_t mask[4] = {
			0x8000,
			0x8000,
			0x8000,
			0x8000,
		};

		k = n % 4;
		k = k ? k : 4;
		for (j = 0; j < k; j++) {
			mask[j] = hwgrp[i + j] | (uint32_t)set << 12 | enable << 14;
			if (bmp) {
				enable ? plt_bitmap_set(bmp, hwgrp[i + j]) :
					 plt_bitmap_clear(bmp, hwgrp[i + j]);
			}
			plt_sso_dbg("HWS %d Linked to HWGRP %d", hws,
				    hwgrp[i + j]);
		}

		n -= j;
		i += j;
		reg = mask[0] | mask[1] << 16 | mask[2] << 32 | mask[3] << 48;
		plt_write64(reg, base + SSOW_LF_GWS_GRPMSK_CHG);
	}
}

static int
sso_msix_fill(struct roc_sso *roc_sso, uint16_t nb_hws, uint16_t nb_hwgrp)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct msix_offset_rsp *rsp;
	struct dev *dev = &sso->dev;
	int i, rc;

	mbox_alloc_msg_msix_offset(mbox_get(dev->mbox));
	rc = mbox_process_msg(dev->mbox, (void **)&rsp);
	if (rc) {
		rc = -EIO;
		goto exit;
	}

	for (i = 0; i < nb_hws; i++)
		sso->hws_msix_offset[i] = rsp->ssow_msixoff[i];
	for (i = 0; i < nb_hwgrp; i++)
		sso->hwgrp_msix_offset[i] = rsp->sso_msixoff[i];

	rc = 0;
exit:
	mbox_put(dev->mbox);
	return rc;
}

/* Public Functions. */
uintptr_t
roc_sso_hws_base_get(struct roc_sso *roc_sso, uint8_t hws)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;

	return dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12);
}

uintptr_t
roc_sso_hwgrp_base_get(struct roc_sso *roc_sso, uint16_t hwgrp)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;

	return dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | hwgrp << 12);
}

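/* Worked example with illustrative values: if SSOW_LF_GWS_NW_TIM reads 7,
 * the effective minimum timeout is (7 + 1 + 2) us = 10000 ns. Requesting
 * ns = 100000 then leaves new_ns = 90000, which a 1 GHz timestamp counter
 * turns into a return value of 90000 ticks.
 */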
uint64_t
roc_sso_ns_to_gw(struct roc_sso *roc_sso, uint64_t ns)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
	uint64_t current_us, current_ns, new_ns;
	uintptr_t base;

	base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
	current_us = plt_read64(base + SSOW_LF_GWS_NW_TIM);
	/* From HRM, table 14-19:
	 * The SSOW_LF_GWS_NW_TIM[NW_TIM] period is specified in n-1 notation.
	 */
	current_us += 1;

	/* From HRM, table 14-1:
	 * SSOW_LF_GWS_NW_TIM[NW_TIM] specifies the minimum timeout. The SSO
	 * hardware times out a GET_WORK request within 2 usec of the minimum
	 * timeout specified by SSOW_LF_GWS_NW_TIM[NW_TIM].
	 */
	current_us += 2;
	current_ns = current_us * 1E3;
	new_ns = (ns - PLT_MIN(ns, current_ns));
	new_ns = !new_ns ? 1 : new_ns;
	return (new_ns * plt_tsc_hz()) / 1E9;
}

int
roc_sso_hws_link(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], uint16_t nb_hwgrp,
		 uint8_t set)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
	struct sso *sso;
	uintptr_t base;

	sso = roc_sso_to_sso_priv(roc_sso);
	base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12);
	sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, set, 1);

	return nb_hwgrp;
}

int
roc_sso_hws_unlink(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], uint16_t nb_hwgrp,
		   uint8_t set)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
	struct sso *sso;
	uintptr_t base;

	sso = roc_sso_to_sso_priv(roc_sso);
	base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12);
	sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, set, 0);

	return nb_hwgrp;
}

int
roc_sso_hws_stats_get(struct roc_sso *roc_sso, uint8_t hws,
		      struct roc_sso_hws_stats *stats)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_hws_stats *req_rsp;
	struct dev *dev = &sso->dev;
	struct mbox *mbox;
	int rc;

	mbox = mbox_get(dev->mbox);
	req_rsp = (struct sso_hws_stats *)mbox_alloc_msg_sso_hws_get_stats(
		mbox);
	if (req_rsp == NULL) {
		rc = mbox_process(mbox);
		if (rc) {
			rc = -EIO;
			goto fail;
		}
		req_rsp = (struct sso_hws_stats *)
			mbox_alloc_msg_sso_hws_get_stats(mbox);
		if (req_rsp == NULL) {
			rc = -ENOSPC;
			goto fail;
		}
	}
	req_rsp->hws = hws;
	rc = mbox_process_msg(mbox, (void **)&req_rsp);
	if (rc) {
		rc = -EIO;
		goto fail;
	}

	stats->arbitration = req_rsp->arbitration;
fail:
	mbox_put(mbox);
	return rc;
}

void
roc_sso_hws_gwc_invalidate(struct roc_sso *roc_sso, uint8_t *hws,
			   uint8_t nb_hws)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct ssow_lf_inv_req *req;
	struct dev *dev = &sso->dev;
	struct mbox *mbox;
	int i;

	if (!nb_hws)
		return;

	mbox = mbox_get(dev->mbox);
	req = mbox_alloc_msg_sso_ws_cache_inv(mbox);
	if (req == NULL) {
		mbox_process(mbox);
		req = mbox_alloc_msg_sso_ws_cache_inv(mbox);
		if (req == NULL) {
			mbox_put(mbox);
			return;
		}
	}
	req->hdr.ver = SSOW_INVAL_SELECTIVE_VER;
	req->nb_hws = nb_hws;
	for (i = 0; i < nb_hws; i++)
		req->hws[i] = hws[i];
	mbox_process(mbox);
	mbox_put(mbox);
}

int
roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp,
			struct roc_sso_hwgrp_stats *stats)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_grp_stats *req_rsp;
	struct dev *dev = &sso->dev;
	struct mbox *mbox;
	int rc;

	mbox = mbox_get(dev->mbox);
	req_rsp = (struct sso_grp_stats *)mbox_alloc_msg_sso_grp_get_stats(
		mbox);
	if (req_rsp == NULL) {
		rc = mbox_process(mbox);
		if (rc) {
			rc = -EIO;
			goto fail;
		}
		req_rsp = (struct sso_grp_stats *)
			mbox_alloc_msg_sso_grp_get_stats(mbox);
		if (req_rsp == NULL) {
			rc = -ENOSPC;
			goto fail;
		}
	}
	req_rsp->grp = hwgrp;
	rc = mbox_process_msg(mbox, (void **)&req_rsp);
	if (rc) {
		rc = -EIO;
		goto fail;
	}

	stats->aw_status = req_rsp->aw_status;
	stats->dq_pc = req_rsp->dq_pc;
	stats->ds_pc = req_rsp->ds_pc;
	stats->ext_pc = req_rsp->ext_pc;
	stats->page_cnt = req_rsp->page_cnt;
	stats->ts_pc = req_rsp->ts_pc;
	stats->wa_pc = req_rsp->wa_pc;
	stats->ws_pc = req_rsp->ws_pc;

fail:
	mbox_put(mbox);
	return rc;
}

int
roc_sso_hwgrp_hws_link_status(struct roc_sso *roc_sso, uint8_t hws,
			      uint16_t hwgrp)
{
	struct sso *sso;

	sso = roc_sso_to_sso_priv(roc_sso);
	return plt_bitmap_get(sso->link_map[hws], hwgrp);
}

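/* The admission queue thresholds are programmed as a percentage of their
 * maximum mask values, with a percentage of 0 treated as 100. Illustrative
 * example: iaq_prcnt = 25 programs
 * iaq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK * 25) / 100.
 */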
int
roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos, uint16_t nb_qos)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct dev *dev = &sso->dev;
	struct sso_grp_qos_cfg *req;
	struct mbox *mbox;
	int i, rc;

	if (!nb_qos)
		return 0;

	mbox = mbox_get(dev->mbox);
	for (i = 0; i < nb_qos; i++) {
		uint8_t iaq_prcnt = qos[i].iaq_prcnt;
		uint8_t taq_prcnt = qos[i].taq_prcnt;

		req = mbox_alloc_msg_sso_grp_qos_config(mbox);
		if (req == NULL) {
			rc = mbox_process(mbox);
			if (rc) {
				rc = -EIO;
				goto fail;
			}

			req = mbox_alloc_msg_sso_grp_qos_config(mbox);
			if (req == NULL) {
				rc = -ENOSPC;
				goto fail;
			}
		}
		req->grp = qos[i].hwgrp;
		req->iaq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
				(iaq_prcnt ? iaq_prcnt : 100)) /
			       100;
		req->taq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
				(taq_prcnt ? taq_prcnt : 100)) /
			       100;
	}

	rc = mbox_process(mbox);
	if (rc)
		rc = -EIO;
fail:
	mbox_put(mbox);
	return rc;
}

int
sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
			uint32_t nb_xae, uint32_t xae_waes,
			uint32_t xaq_buf_size, uint16_t nb_hwgrp)
{
	struct npa_pool_s pool;
	struct npa_aura_s aura;
	plt_iova_t iova;
	uint32_t i;
	int rc;

	if (xaq->mem != NULL) {
		rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
		if (rc < 0) {
			plt_err("Failed to release XAQ %d", rc);
			return rc;
		}
		roc_npa_pool_destroy(xaq->aura_handle);
		plt_free(xaq->fc);
		plt_free(xaq->mem);
		memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
	}

	xaq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
	if (xaq->fc == NULL) {
		plt_err("Failed to allocate XAQ FC");
		rc = -ENOMEM;
		goto fail;
	}

	xaq->nb_xae = nb_xae;

	/* SSO will reserve up to 0x4 XAQ buffers per group when GetWork engine
	 * is inactive and it might prefetch an additional 0x3 buffers due to
	 * pipelining.
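	 *
	 * Illustrative sizing, assuming nb_hwgrp = 2, nb_xae = 1000 and
	 * xae_waes = 13 (work entries per XAQ buffer as reported in the LF
	 * alloc response; the value is only an example):
	 *   nb_xaq  = 3 * 2 + 4 * 2                    = 14  (cache + rsvd)
	 *   nb_xaq += PLT_MAX(1 + (1000 - 1) / 13, 14) = 91
	 *   nb_xaq += SSO_XAQ_SLACK                    = 107
	 *   xaq_lmt = 107 - 2 * 3 - 16                 = 85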
	 */
	xaq->nb_xaq = (SSO_XAQ_CACHE_CNT * nb_hwgrp);
	xaq->nb_xaq += (SSO_XAQ_RSVD_CNT * nb_hwgrp);
	xaq->nb_xaq += PLT_MAX(1 + ((xaq->nb_xae - 1) / xae_waes), xaq->nb_xaq);
	xaq->nb_xaq += SSO_XAQ_SLACK;

	xaq->mem = plt_zmalloc(xaq_buf_size * xaq->nb_xaq, xaq_buf_size);
	if (xaq->mem == NULL) {
		plt_err("Failed to allocate XAQ mem");
		rc = -ENOMEM;
		goto free_fc;
	}

	memset(&pool, 0, sizeof(struct npa_pool_s));
	pool.nat_align = 1;

	memset(&aura, 0, sizeof(aura));
	aura.fc_ena = 1;
	aura.fc_addr = (uint64_t)xaq->fc;
	aura.fc_hyst_bits = 0; /* Store count on all updates */
	rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
				 &aura, &pool, 0);
	if (rc) {
		plt_err("Failed to create XAQ pool");
		goto npa_fail;
	}

	iova = (uint64_t)xaq->mem;
	for (i = 0; i < xaq->nb_xaq; i++) {
		roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
		iova += xaq_buf_size;
	}
	roc_npa_pool_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);

	if (roc_npa_aura_op_available_wait(xaq->aura_handle, xaq->nb_xaq, 0) !=
	    xaq->nb_xaq) {
		plt_err("Failed to free all pointers to the pool");
		rc = -ENOMEM;
		goto npa_fill_fail;
	}

	/* When SW does addwork (enqueue) check if there is space in XAQ by
	 * comparing fc_addr above against the xaq_lmt calculated below.
	 * There should be a minimum headroom of 7 XAQs per HWGRP for SSO
	 * to request XAQ to cache them even before enqueue is called.
	 */
	xaq->xaq_lmt = xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT) - SSO_XAQ_SLACK;

	return 0;
npa_fill_fail:
	roc_npa_pool_destroy(xaq->aura_handle);
npa_fail:
	plt_free(xaq->mem);
free_fc:
	plt_free(xaq->fc);
fail:
	memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
	return rc;
}

int
roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct dev *dev = &sso->dev;
	int rc;

	rc = sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
				     roc_sso->xae_waes, roc_sso->xaq_buf_size,
				     roc_sso->nb_hwgrp);
	return rc;
}

int
sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
			uint16_t nb_hwgrp)
{
	int rc;

	if (xaq->mem != NULL) {
		if (nb_hwgrp) {
			rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
			if (rc < 0) {
				plt_err("Failed to release XAQ %d", rc);
				return rc;
			}
		}
		roc_npa_pool_destroy(xaq->aura_handle);
		plt_free(xaq->fc);
		plt_free(xaq->mem);
	}
	memset(xaq, 0, sizeof(struct roc_sso_xaq_data));

	return 0;
}

int
roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, uint16_t nb_hwgrp)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct dev *dev = &sso->dev;
	int rc;

	rc = sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
	return rc;
}

int
sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps)
{
	struct sso_hw_setconfig *req;
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc = -ENOSPC;

	req = mbox_alloc_msg_sso_hw_setconfig(mbox);
	if (req == NULL)
		goto exit;
	req->npa_pf_func = idev_npa_pffunc_get();
	req->npa_aura_id = npa_aura_id;
	req->hwgrps = hwgrps;

	if (mbox_process(dev->mbox)) {
		rc = -EIO;
		goto exit;
	}

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

int
roc_sso_hwgrp_alloc_xaq(struct roc_sso *roc_sso, uint32_t npa_aura_id,
			uint16_t hwgrps)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct dev *dev = &sso->dev;
	int rc;

	rc = sso_hwgrp_alloc_xaq(dev, npa_aura_id, hwgrps);
	return rc;
}

int
sso_hwgrp_release_xaq(struct dev *dev, uint16_t hwgrps)
{
	struct sso_hw_xaq_release *req;
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc;

	req = mbox_alloc_msg_sso_hw_release_xaq_aura(mbox);
	if (req == NULL) {
		rc = -EINVAL;
		goto exit;
	}
	req->hwgrps = hwgrps;

	if (mbox_process(mbox)) {
		rc = -EIO;
		goto exit;
	}

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

int
roc_sso_hwgrp_release_xaq(struct roc_sso *roc_sso, uint16_t hwgrps)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct dev *dev = &sso->dev;
	int rc;

	rc = sso_hwgrp_release_xaq(dev, hwgrps);
	return rc;
}

int
roc_sso_hwgrp_set_priority(struct roc_sso *roc_sso, uint16_t hwgrp,
			   uint8_t weight, uint8_t affinity, uint8_t priority)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct dev *dev = &sso->dev;
	struct sso_grp_priority *req;
	struct mbox *mbox;
	int rc = -ENOSPC;

	mbox = mbox_get(dev->mbox);
	req = mbox_alloc_msg_sso_grp_set_priority(mbox);
	if (req == NULL)
		goto fail;
	req->grp = hwgrp;
	req->weight = weight;
	req->affinity = affinity;
	req->priority = priority;

	rc = mbox_process(mbox);
	if (rc) {
		rc = -EIO;
		goto fail;
	}
	mbox_put(mbox);
	plt_sso_dbg("HWGRP %d weight %d affinity %d priority %d", hwgrp, weight,
		    affinity, priority);

	return 0;
fail:
	mbox_put(mbox);
	return rc;
}

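/* The MSI-X space for this PF/VF is sized as the sum of the AF-PF mailbox
 * vectors, the NPA LF vectors (only when the NPA LF sits on this PCI device)
 * and the SSO/TIM error vectors requested by the caller. After the interrupt
 * handle is resized, the mailbox, VF FLR (PF only) and NPA IRQ handlers are
 * registered again.
 */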
static int
sso_update_msix_vec_count(struct roc_sso *roc_sso, uint16_t sso_vec_cnt)
{
	struct plt_pci_device *pci_dev = roc_sso->pci_dev;
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	uint16_t mbox_vec_cnt, npa_vec_cnt;
	struct dev *dev = &sso->dev;
	struct idev_cfg *idev;
	int rc;

	idev = idev_get_cfg();
	if (idev == NULL)
		return -ENODEV;

	mbox_vec_cnt = RVU_PF_INT_VEC_AFPF_MBOX + 1;

	/* Allocating vectors for the first time */
	if (plt_intr_max_intr_get(pci_dev->intr_handle) == 0) {
		npa_vec_cnt = idev->npa_refcnt ? 0 : NPA_LF_INT_VEC_POISON + 1;
		return dev_irq_reconfigure(pci_dev->intr_handle, mbox_vec_cnt + npa_vec_cnt);
	}

	npa_vec_cnt = (dev->npa.pci_dev == pci_dev) ? NPA_LF_INT_VEC_POISON + 1 : 0;

	/* Re-configure to include SSO vectors */
	rc = dev_irq_reconfigure(pci_dev->intr_handle, mbox_vec_cnt + npa_vec_cnt + sso_vec_cnt);
	if (rc)
		return rc;

	rc = dev_mbox_register_irq(pci_dev, dev);
	if (rc)
		return rc;

	if (!dev_is_vf(dev)) {
		rc = dev_vf_flr_register_irqs(pci_dev, dev);
		if (rc)
			return rc;
	}

	if (npa_vec_cnt)
		rc = npa_register_irqs(&dev->npa);

	return rc;
}

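/* Per-HWGRP stash configuration: 'stash_offset' selects the first cache line
 * to stash and 'stash_count' the number of lines, which the mailbox request
 * carries in minus-one notation (num_linesm1).
 */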
int
roc_sso_hwgrp_stash_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_stash *stash,
			   uint16_t nb_stash)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_grp_stash_cfg *req;
	struct dev *dev = &sso->dev;
	struct mbox *mbox;
	int i, rc;

	if (!nb_stash)
		return 0;

	mbox = mbox_get(dev->mbox);
	for (i = 0; i < nb_stash; i++) {
		req = mbox_alloc_msg_sso_grp_stash_config(mbox);
		if (req == NULL) {
			rc = mbox_process(mbox);
			if (rc) {
				rc = -EIO;
				goto fail;
			}

			req = mbox_alloc_msg_sso_grp_stash_config(mbox);
			if (req == NULL) {
				rc = -ENOSPC;
				goto fail;
			}
		}
		req->ena = true;
		req->grp = stash[i].hwgrp;
		req->offset = stash[i].stash_offset;
		req->num_linesm1 = stash[i].stash_count - 1;
	}

	rc = mbox_process(mbox);
	if (rc)
		rc = -EIO;
fail:
	mbox_put(mbox);
	return rc;
}

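/* Typical bring-up sequence from an event device driver (illustrative only;
 * error handling omitted, nb_events and hwgrp_list are placeholder names):
 *
 *	roc_sso_dev_init(roc_sso);
 *	roc_sso_rsrc_init(roc_sso, nb_hws, nb_hwgrp, 0);
 *	roc_sso_hwgrp_init_xaq_aura(roc_sso, nb_events);
 *	roc_sso_hwgrp_alloc_xaq(roc_sso,
 *				roc_npa_aura_handle_to_aura(roc_sso->xaq.aura_handle),
 *				nb_hwgrp);
 *	roc_sso_hws_link(roc_sso, 0, hwgrp_list, nb_hwgrp, 0);
 */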
int
roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp, uint16_t nb_tim_lfs)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_lf_alloc_rsp *rsp_hwgrp;
	uint16_t sso_vec_cnt, free_tim_lfs;
	int rc;

	if (!nb_hwgrp || roc_sso->max_hwgrp < nb_hwgrp)
		return -ENOENT;
	if (!nb_hws || roc_sso->max_hws < nb_hws)
		return -ENOENT;

	rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWS, nb_hws);
	if (rc < 0) {
		plt_err("Unable to attach SSO HWS LFs");
		goto fail;
	}

	rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWGRP, nb_hwgrp);
	if (rc < 0) {
		plt_err("Unable to attach SSO HWGRP LFs");
		goto hwgrp_atch_fail;
	}

	rc = sso_lf_alloc(&sso->dev, SSO_LF_TYPE_HWS, nb_hws, NULL);
	if (rc < 0) {
		plt_err("Unable to alloc SSO HWS LFs");
		goto hws_alloc_fail;
	}

	rc = sso_lf_alloc(&sso->dev, SSO_LF_TYPE_HWGRP, nb_hwgrp,
			  (void **)&rsp_hwgrp);
	if (rc < 0) {
		plt_err("Unable to alloc SSO HWGRP LFs");
		goto hwgrp_alloc_fail;
	}

	roc_sso->xaq_buf_size = rsp_hwgrp->xaq_buf_size;
	roc_sso->xae_waes = rsp_hwgrp->xaq_wq_entries;
	roc_sso->iue = rsp_hwgrp->in_unit_entries;

	rc = sso_msix_fill(roc_sso, nb_hws, nb_hwgrp);
	if (rc < 0) {
		plt_err("Unable to get MSIX offsets for SSO LFs");
		goto sso_msix_fail;
	}

	/* 1 error interrupt per SSO HWS/HWGRP */
	sso_vec_cnt = nb_hws + nb_hwgrp;

	if (sso->dev.roc_tim) {
		nb_tim_lfs = ((struct roc_tim *)sso->dev.roc_tim)->nb_lfs;
	} else {
		rc = tim_free_lf_count_get(&sso->dev, &free_tim_lfs);
		if (rc < 0) {
			plt_err("Failed to get TIM resource count");
			goto sso_msix_fail;
		}

		nb_tim_lfs = nb_tim_lfs ? PLT_MIN(nb_tim_lfs, free_tim_lfs) : free_tim_lfs;
	}

	/* 2 error interrupts per TIM LF */
	sso_vec_cnt += 2 * nb_tim_lfs;

	rc = sso_update_msix_vec_count(roc_sso, sso_vec_cnt);
	if (rc < 0) {
		plt_err("Failed to update SSO MSIX vector count");
		goto sso_msix_fail;
	}

	rc = sso_register_irqs_priv(roc_sso, sso->pci_dev->intr_handle, nb_hws,
				    nb_hwgrp);
	if (rc < 0) {
		plt_err("Failed to register SSO LF IRQs");
		goto sso_msix_fail;
	}

	roc_sso->nb_hwgrp = nb_hwgrp;
	roc_sso->nb_hws = nb_hws;

	return 0;
sso_msix_fail:
	sso_lf_free(&sso->dev, SSO_LF_TYPE_HWGRP, nb_hwgrp);
hwgrp_alloc_fail:
	sso_lf_free(&sso->dev, SSO_LF_TYPE_HWS, nb_hws);
hws_alloc_fail:
	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWGRP);
hwgrp_atch_fail:
	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWS);
fail:
	return rc;
}

void
roc_sso_rsrc_fini(struct roc_sso *roc_sso)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);

	if (!roc_sso->nb_hws && !roc_sso->nb_hwgrp)
		return;

	sso_unregister_irqs_priv(roc_sso, sso->pci_dev->intr_handle,
				 roc_sso->nb_hws, roc_sso->nb_hwgrp);
	sso_lf_free(&sso->dev, SSO_LF_TYPE_HWS, roc_sso->nb_hws);
	sso_lf_free(&sso->dev, SSO_LF_TYPE_HWGRP, roc_sso->nb_hwgrp);

	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWS);
	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWGRP);

	roc_sso->nb_hwgrp = 0;
	roc_sso->nb_hws = 0;
}

int
roc_sso_dev_init(struct roc_sso *roc_sso)
{
	struct plt_pci_device *pci_dev;
	uint32_t link_map_sz;
	struct sso *sso;
	void *link_mem;
	int i, rc;

	if (roc_sso == NULL || roc_sso->pci_dev == NULL)
		return SSO_ERR_PARAM;

	PLT_STATIC_ASSERT(sizeof(struct sso) <= ROC_SSO_MEM_SZ);
	sso = roc_sso_to_sso_priv(roc_sso);
	memset(sso, 0, sizeof(*sso));
	pci_dev = roc_sso->pci_dev;

	rc = sso_update_msix_vec_count(roc_sso, 0);
	if (rc < 0) {
		plt_err("Failed to set SSO MSIX vector count");
		return rc;
	}

	rc = dev_init(&sso->dev, pci_dev);
	if (rc < 0) {
		plt_err("Failed to init roc device");
		goto fail;
	}

	rc = sso_rsrc_get(roc_sso);
	if (rc < 0) {
		plt_err("Failed to get SSO resources");
		goto rsrc_fail;
	}
	rc = -ENOMEM;

	if (roc_sso->max_hws) {
		sso->link_map = plt_zmalloc(
			sizeof(struct plt_bitmap *) * roc_sso->max_hws, 0);
		if (sso->link_map == NULL) {
			plt_err("Failed to allocate memory for link_map array");
			goto rsrc_fail;
		}

		link_map_sz =
			plt_bitmap_get_memory_footprint(roc_sso->max_hwgrp);
		sso->link_map_mem =
			plt_zmalloc(link_map_sz * roc_sso->max_hws, 0);
		if (sso->link_map_mem == NULL) {
			plt_err("Failed to get link_map memory");
			goto rsrc_fail;
		}

		link_mem = sso->link_map_mem;

		for (i = 0; i < roc_sso->max_hws; i++) {
			sso->link_map[i] = plt_bitmap_init(
				roc_sso->max_hwgrp, link_mem, link_map_sz);
			if (sso->link_map[i] == NULL) {
				plt_err("Failed to allocate link map");
				goto link_mem_free;
			}
			link_mem = PLT_PTR_ADD(link_mem, link_map_sz);
		}
	}
	idev_sso_pffunc_set(sso->dev.pf_func);
	idev_sso_set(roc_sso);
	sso->pci_dev = pci_dev;
	sso->dev.drv_inited = true;
	roc_sso->lmt_base = sso->dev.lmt_base;

	return 0;
link_mem_free:
	plt_free(sso->link_map_mem);
rsrc_fail:
	rc |= dev_fini(&sso->dev, pci_dev);
fail:
	return rc;
}

int
roc_sso_dev_fini(struct roc_sso *roc_sso)
{
	struct sso *sso;

	sso = roc_sso_to_sso_priv(roc_sso);
	sso->dev.drv_inited = false;

	return dev_fini(&sso->dev, sso->pci_dev);
}