/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#define SSO_XAQ_CACHE_CNT (0x3)
#define SSO_XAQ_RSVD_CNT  (0x4)
#define SSO_XAQ_SLACK	  (16)

/* Private functions. */
int
sso_lf_alloc(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf,
	     void **rsp)
{
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc = -ENOSPC;

	if (!nb_lf) {
		mbox_put(mbox);
		return 0;
	}

	switch (lf_type) {
	case SSO_LF_TYPE_HWS: {
		struct ssow_lf_alloc_req *req;

		req = mbox_alloc_msg_ssow_lf_alloc(mbox);
		if (req == NULL)
			goto exit;
		req->hws = nb_lf;
	} break;
	case SSO_LF_TYPE_HWGRP: {
		struct sso_lf_alloc_req *req;

		req = mbox_alloc_msg_sso_lf_alloc(mbox);
		if (req == NULL)
			goto exit;
		req->hwgrps = nb_lf;
	} break;
	default:
		break;
	}

	rc = mbox_process_msg(mbox, rsp);
	if (rc) {
		rc = -EIO;
		goto exit;
	}

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

int
sso_lf_free(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf)
{
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc = -ENOSPC;

	if (!nb_lf) {
		mbox_put(mbox);
		return 0;
	}

	switch (lf_type) {
	case SSO_LF_TYPE_HWS: {
		struct ssow_lf_free_req *req;

		req = mbox_alloc_msg_ssow_lf_free(mbox);
		if (req == NULL)
			goto exit;
		req->hws = nb_lf;
	} break;
	case SSO_LF_TYPE_HWGRP: {
		struct sso_lf_free_req *req;

		req = mbox_alloc_msg_sso_lf_free(mbox);
		if (req == NULL)
			goto exit;
		req->hwgrps = nb_lf;
	} break;
	default:
		break;
	}

	rc = mbox_process(mbox);
	if (rc) {
		rc = -EIO;
		goto exit;
	}

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

static int
sso_rsrc_attach(struct roc_sso *roc_sso, enum sso_lf_type lf_type,
		uint16_t nb_lf)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
	struct mbox *mbox = mbox_get(dev->mbox);
	struct rsrc_attach_req *req;
	int rc = -ENOSPC;

	if (!nb_lf) {
		mbox_put(mbox);
		return 0;
	}

	req = mbox_alloc_msg_attach_resources(mbox);
	if (req == NULL)
		goto exit;
	switch (lf_type) {
	case SSO_LF_TYPE_HWS:
		req->ssow = nb_lf;
		break;
	case SSO_LF_TYPE_HWGRP:
		req->sso = nb_lf;
		break;
	default:
		rc = SSO_ERR_PARAM;
		goto exit;
	}

	req->modify = true;
	if (mbox_process(mbox)) {
		rc = -EIO;
		goto exit;
	}

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

static int
sso_rsrc_detach(struct roc_sso *roc_sso, enum sso_lf_type lf_type)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
	struct rsrc_detach_req *req;
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc = -ENOSPC;

	req = mbox_alloc_msg_detach_resources(mbox);
	if (req == NULL)
		goto exit;
	switch (lf_type) {
	case SSO_LF_TYPE_HWS:
		req->ssow = true;
		break;
	case SSO_LF_TYPE_HWGRP:
		req->sso = true;
		break;
	default:
		rc = SSO_ERR_PARAM;
		goto exit;
	}

	req->partial = true;
	if (mbox_process(mbox)) {
		rc = -EIO;
		goto exit;
	}

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

static int
sso_rsrc_get(struct roc_sso *roc_sso)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
	struct free_rsrcs_rsp *rsrc_cnt;
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc;

	mbox_alloc_msg_free_rsrc_cnt(mbox);
	rc = mbox_process_msg(mbox, (void **)&rsrc_cnt);
	if (rc) {
		plt_err("Failed to get free resource count");
		rc = -EIO;
		goto exit;
	}

	roc_sso->max_hwgrp = PLT_MIN(rsrc_cnt->sso, roc_sso->feat.hwgrps_per_pf);
	roc_sso->max_hws = rsrc_cnt->ssow;

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

static int
sso_hw_info_get(struct roc_sso *roc_sso)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
	struct mbox *mbox = mbox_get(dev->mbox);
	struct sso_hw_info *rsp;
	int rc;

	mbox_alloc_msg_sso_get_hw_info(mbox);
	rc = mbox_process_msg(mbox, (void **)&rsp);
	if (rc && rc != MBOX_MSG_INVALID) {
		plt_err("Failed to get SSO HW info");
		rc = -EIO;
		goto exit;
	}

	if (rc == MBOX_MSG_INVALID) {
		roc_sso->feat.hwgrps_per_pf = ROC_SSO_MAX_HWGRP_PER_PF;
	} else {
		mbox_memcpy(&roc_sso->feat, &rsp->feat, sizeof(roc_sso->feat));

		if (!roc_sso->feat.hwgrps_per_pf)
			roc_sso->feat.hwgrps_per_pf = ROC_SSO_MAX_HWGRP_PER_PF;
	}

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

void
sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp, uint16_t hwgrp[],
		    uint16_t n, uint8_t set, uint16_t enable)
{
	uint64_t reg;
	int i, j, k;

	i = 0;
	while (n) {
		uint64_t mask[4] = {
			0x8000,
			0x8000,
			0x8000,
			0x8000,
		};

		k = n % 4;
		k = k ? k : 4;
		for (j = 0; j < k; j++) {
			mask[j] = hwgrp[i + j] | (uint32_t)set << 12 | enable << 14;
			if (bmp) {
				enable ? plt_bitmap_set(bmp, hwgrp[i + j]) :
					 plt_bitmap_clear(bmp, hwgrp[i + j]);
			}
			plt_sso_dbg("HWS %d Linked to HWGRP %d", hws,
				    hwgrp[i + j]);
		}

		n -= j;
		i += j;
		reg = mask[0] | mask[1] << 16 | mask[2] << 32 | mask[3] << 48;
		plt_write64(reg, base + SSOW_LF_GWS_GRPMSK_CHG);
	}
}
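
/* Note on sso_hws_link_modify() above: SSOW_LF_GWS_GRPMSK_CHG is written with
 * up to four 16-bit entries packed into one 64-bit value. As built by the
 * code, each populated entry carries the HWGRP index in its low bits, the
 * mask-set index shifted to bit 12 and the link/unlink flag at bit 14; slots
 * not populated in a given write are left at the 0x8000 default. Illustrative
 * sketch (values hypothetical) linking HWS 0 to HWGRPs 1 and 2 through set 0:
 *
 *	uint16_t grps[] = {1, 2};
 *
 *	sso_hws_link_modify(0, roc_sso_hws_base_get(roc_sso, 0), NULL,
 *			    grps, 2, 0, 1);
 *
 * This results in a single GRPMSK_CHG write containing two populated entries
 * and two 0x8000 fillers.
 */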

static int
sso_hws_link_modify_af(struct dev *dev, uint8_t hws, struct plt_bitmap *bmp, uint16_t hwgrp[],
		       uint16_t n, uint8_t set, uint16_t enable)
{
	struct mbox *mbox = mbox_get(dev->mbox);
	struct ssow_chng_mship *req;
	int rc, i;

	req = mbox_alloc_msg_ssow_chng_mship(mbox);
	if (req == NULL) {
		rc = mbox_process(mbox);
		if (rc) {
			mbox_put(mbox);
			return -EIO;
		}
		req = mbox_alloc_msg_ssow_chng_mship(mbox);
		if (req == NULL) {
			mbox_put(mbox);
			return -ENOSPC;
		}
	}
	req->enable = enable;
	req->set = set;
	req->hws = hws;
	req->nb_hwgrps = n;
	for (i = 0; i < n; i++)
		req->hwgrps[i] = hwgrp[i];
	rc = mbox_process(mbox);
	mbox_put(mbox);
	if (rc == MBOX_MSG_INVALID)
		return rc;
	if (rc)
		return -EIO;

	for (i = 0; i < n; i++)
		enable ? plt_bitmap_set(bmp, hwgrp[i]) :
			 plt_bitmap_clear(bmp, hwgrp[i]);

	return 0;
}
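
/* The two helpers above perform the same HWS<->HWGRP membership update via
 * different paths: sso_hws_link_modify() pokes SSOW_LF_GWS_GRPMSK_CHG
 * directly, while sso_hws_link_modify_af() asks the AF to do it through the
 * ssow_chng_mship mailbox message. roc_sso_hws_link() and
 * roc_sso_hws_unlink() below prefer the mailbox path on cn10k when the caller
 * allows it and fall back to direct register access when the AF reports
 * MBOX_MSG_INVALID (i.e. the message is not supported).
 */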

static int
sso_msix_fill(struct roc_sso *roc_sso, uint16_t nb_hws, uint16_t nb_hwgrp)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct msix_offset_rsp *rsp;
	struct dev *dev = &sso->dev;
	int i, rc;

	mbox_alloc_msg_msix_offset(mbox_get(dev->mbox));
	rc = mbox_process_msg(dev->mbox, (void **)&rsp);
	if (rc) {
		rc = -EIO;
		goto exit;
	}

	for (i = 0; i < nb_hws; i++)
		sso->hws_msix_offset[i] = rsp->ssow_msixoff[i];
	for (i = 0; i < nb_hwgrp; i++)
		sso->hwgrp_msix_offset[i] = rsp->sso_msixoff[i];

	rc = 0;
exit:
	mbox_put(dev->mbox);
	return rc;
}

/* Public Functions. */
uintptr_t
roc_sso_hws_base_get(struct roc_sso *roc_sso, uint8_t hws)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;

	return dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12);
}

uintptr_t
roc_sso_hwgrp_base_get(struct roc_sso *roc_sso, uint16_t hwgrp)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;

	return dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | hwgrp << 12);
}

uint16_t
roc_sso_pf_func_get(void)
{
	return idev_sso_pffunc_get();
}

uint64_t
roc_sso_ns_to_gw(uint64_t base, uint64_t ns)
{
	uint64_t current_us;

	current_us = plt_read64(base + SSOW_LF_GWS_NW_TIM);
	/* From HRM, table 14-19:
	 * The SSOW_LF_GWS_NW_TIM[NW_TIM] period is specified in n-1 notation.
	 */
	current_us += 1;

	/* From HRM, table 14-1:
	 * SSOW_LF_GWS_NW_TIM[NW_TIM] specifies the minimum timeout. The SSO
	 * hardware times out a GET_WORK request within 1 usec of the minimum
	 * timeout specified by SSOW_LF_GWS_NW_TIM[NW_TIM].
	 */
	current_us += 1;
	return PLT_MAX(1UL, (uint64_t)PLT_DIV_CEIL(ns, (current_us * 1E3)));
}
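
/* Worked example for roc_sso_ns_to_gw() (register value hypothetical): if
 * SSOW_LF_GWS_NW_TIM reads 8, the effective period becomes (8 + 1 + 1) = 10,
 * i.e. 10 us. For ns = 25000 the function returns
 * PLT_MAX(1, ceil(25000 / 10000)) = 3.
 */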

int
roc_sso_hws_link(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], uint16_t nb_hwgrp,
		 uint8_t set, bool use_mbox)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct dev *dev = &sso->dev;
	uintptr_t base;
	int rc;

	if (!nb_hwgrp)
		return 0;

	if (use_mbox && roc_model_is_cn10k()) {
		rc = sso_hws_link_modify_af(dev, hws, sso->link_map[hws], hwgrp, nb_hwgrp, set, 1);
		if (rc == MBOX_MSG_INVALID)
			goto lf_access;
		if (rc < 0)
			return 0;
		goto done;
	}
lf_access:
	base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12);
	sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, set, 1);
done:
	return nb_hwgrp;
}

int
roc_sso_hws_unlink(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[],
		   uint16_t nb_hwgrp, uint8_t set, bool use_mbox)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct dev *dev = &sso->dev;
	uintptr_t base;
	int rc;

	if (!nb_hwgrp)
		return 0;

	if (use_mbox && roc_model_is_cn10k()) {
		rc = sso_hws_link_modify_af(dev, hws, sso->link_map[hws], hwgrp, nb_hwgrp, set, 0);
		if (rc == MBOX_MSG_INVALID)
			goto lf_access;
		if (rc < 0)
			return 0;
		goto done;
	}
lf_access:
	base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12);
	sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, set, 0);
done:
	return nb_hwgrp;
}

int
roc_sso_hws_stats_get(struct roc_sso *roc_sso, uint8_t hws,
		      struct roc_sso_hws_stats *stats)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_hws_stats *req_rsp;
	struct dev *dev = &sso->dev;
	struct mbox *mbox;
	int rc;

	mbox = mbox_get(dev->mbox);
	req_rsp = (struct sso_hws_stats *)mbox_alloc_msg_sso_hws_get_stats(
		mbox);
	if (req_rsp == NULL) {
		rc = mbox_process(mbox);
		if (rc) {
			rc = -EIO;
			goto fail;
		}
		req_rsp = (struct sso_hws_stats *)
			mbox_alloc_msg_sso_hws_get_stats(mbox);
		if (req_rsp == NULL) {
			rc = -ENOSPC;
			goto fail;
		}
	}
	req_rsp->hws = hws;
	rc = mbox_process_msg(mbox, (void **)&req_rsp);
	if (rc) {
		rc = -EIO;
		goto fail;
	}

	stats->arbitration = req_rsp->arbitration;
fail:
	mbox_put(mbox);
	return rc;
}

void
roc_sso_hws_gwc_invalidate(struct roc_sso *roc_sso, uint8_t *hws,
			   uint8_t nb_hws)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct ssow_lf_inv_req *req;
	struct dev *dev = &sso->dev;
	struct mbox *mbox;
	int i;

	if (!nb_hws)
		return;

	mbox = mbox_get(dev->mbox);
	req = mbox_alloc_msg_sso_ws_cache_inv(mbox);
	if (req == NULL) {
		mbox_process(mbox);
		req = mbox_alloc_msg_sso_ws_cache_inv(mbox);
		if (req == NULL) {
			mbox_put(mbox);
			return;
		}
	}
	req->hdr.ver = SSOW_INVAL_SELECTIVE_VER;
	req->nb_hws = nb_hws;
	for (i = 0; i < nb_hws; i++)
		req->hws[i] = hws[i];
	mbox_process(mbox);
	mbox_put(mbox);
}
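
/* Illustrative roc_sso_hws_gwc_invalidate() usage (indices hypothetical):
 * request a selective workslot cache invalidation for a set of HWS LFs after
 * reconfiguring them.
 *
 *	uint8_t hws[] = {0, 1};
 *
 *	roc_sso_hws_gwc_invalidate(roc_sso, hws, PLT_DIM(hws));
 */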

static void
sso_agq_op_wait(struct roc_sso *roc_sso, uint16_t hwgrp)
{
	uint64_t reg;

	reg = plt_read64(roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_INSTOP);
	while (reg & BIT_ULL(2)) {
		plt_delay_us(100);
		reg = plt_read64(roc_sso_hwgrp_base_get(roc_sso, hwgrp) +
				 SSO_LF_GGRP_AGGR_CTX_INSTOP);
	}
}

int
roc_sso_hwgrp_agq_alloc(struct roc_sso *roc_sso, uint16_t hwgrp, struct roc_sso_agq_data *data)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_aggr_setconfig *req;
	struct sso_agq_ctx *ctx;
	uint32_t cnt, off;
	struct mbox *mbox;
	uintptr_t ptr;
	uint64_t reg;
	int rc;

	if (sso->agg_mem[hwgrp] == 0) {
		mbox = mbox_get(sso->dev.mbox);
		req = mbox_alloc_msg_sso_aggr_setconfig(mbox);
		if (req == NULL) {
			mbox_process(mbox);
			req = mbox_alloc_msg_sso_aggr_setconfig(mbox);
			if (req == NULL) {
				plt_err("Failed to allocate AGQ config mbox.");
				mbox_put(mbox);
				return -EIO;
			}
		}

		req->hwgrp = hwgrp;
		req->npa_pf_func = idev_npa_pffunc_get();
		rc = mbox_process(mbox);
		if (rc < 0) {
			plt_err("Failed to set HWGRP AGQ config rc=%d", rc);
			mbox_put(mbox);
			return rc;
		}

		mbox_put(mbox);

		sso->agg_mem[hwgrp] =
			(uintptr_t)plt_zmalloc(SSO_AGGR_MIN_CTX * sizeof(struct sso_agq_ctx),
					       roc_model_optimal_align_sz());
		if (sso->agg_mem[hwgrp] == 0)
			return -ENOMEM;
		sso->agg_cnt[hwgrp] = SSO_AGGR_MIN_CTX;
		sso->agg_used[hwgrp] = 0;
		plt_wmb();
		plt_write64(sso->agg_mem[hwgrp],
			    roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_BASE);
		reg = (plt_log2_u32(SSO_AGGR_MIN_CTX) - 6) << 16;
		reg |= (SSO_AGGR_DEF_TMO << 4) | 1;
		plt_write64(reg, roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CFG);
	}

	if (sso->agg_cnt[hwgrp] >= SSO_AGGR_MAX_CTX)
		return -ENOSPC;

	if (sso->agg_cnt[hwgrp] == sso->agg_used[hwgrp]) {
		ptr = sso->agg_mem[hwgrp];
		cnt = sso->agg_cnt[hwgrp] << 1;
		sso->agg_mem[hwgrp] = (uintptr_t)plt_zmalloc(cnt * sizeof(struct sso_agq_ctx),
							     roc_model_optimal_align_sz());
		if (sso->agg_mem[hwgrp] == 0) {
			sso->agg_mem[hwgrp] = ptr;
			return -ENOMEM;
		}

		memcpy((void *)sso->agg_mem[hwgrp], (void *)ptr,
		       sso->agg_cnt[hwgrp] * sizeof(struct sso_agq_ctx));
		plt_wmb();
		sso_agq_op_wait(roc_sso, hwgrp);
		/* Base address has changed, evict old entries. */
		plt_write64(sso->agg_mem[hwgrp],
			    roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_BASE);
		reg = plt_read64(roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CFG);
		reg &= ~GENMASK_ULL(19, 16);
		reg |= (uint64_t)(plt_log2_u32(cnt) - 6) << 16;
		plt_write64(reg, roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CFG);
		reg = SSO_LF_AGGR_INSTOP_GLOBAL_EVICT << 4;
		plt_write64(reg,
			    roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_INSTOP);
		sso_agq_op_wait(roc_sso, hwgrp);
		plt_free((void *)ptr);

		sso->agg_cnt[hwgrp] = cnt;
		off = sso->agg_used[hwgrp];
	} else {
		ctx = (struct sso_agq_ctx *)sso->agg_mem[hwgrp];
		for (cnt = 0; cnt < sso->agg_cnt[hwgrp]; cnt++) {
			if (!ctx[cnt].ena)
				break;
		}
		if (cnt == sso->agg_cnt[hwgrp])
			return -EINVAL;
		off = cnt;
	}

	ctx = (struct sso_agq_ctx *)sso->agg_mem[hwgrp];
	ctx += off;
	ctx->ena = 1;
	ctx->tt = data->tt;
	ctx->tag = data->tag;
	ctx->swqe_tag = data->stag;
	ctx->cnt_ena = data->cnt_ena;
	ctx->xqe_type = data->xqe_type;
	ctx->vtimewait = data->vwqe_wait_tmo;
	ctx->vwqe_aura = data->vwqe_aura;
	ctx->max_vsize_exp = data->vwqe_max_sz_exp - 2;

	plt_wmb();
	sso->agg_used[hwgrp]++;

	return 0;
}
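
/* roc_sso_hwgrp_agq_alloc() grows the per-HWGRP context table on demand: when
 * every slot is in use it doubles the table, waits for pending INSTOP
 * operations, repoints SSO_LF_GGRP_AGGR_CTX_BASE at the new memory, issues a
 * global evict of the stale cached entries and only then frees the old table.
 * Illustrative usage (field values and the aura_handle variable are
 * hypothetical; see struct roc_sso_agq_data for the full field set):
 *
 *	struct roc_sso_agq_data data = {
 *		.tag = 0xC0FFEE,
 *		.xqe_type = 0,
 *		.vwqe_aura = aura_handle,
 *		.vwqe_max_sz_exp = 10,
 *	};
 *
 *	rc = roc_sso_hwgrp_agq_alloc(roc_sso, hwgrp, &data);
 *	agq = roc_sso_hwgrp_agq_from_tag(roc_sso, hwgrp, 0xC0FFEE, 0);
 */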

void
roc_sso_hwgrp_agq_free(struct roc_sso *roc_sso, uint16_t hwgrp, uint32_t agq_id)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_agq_ctx *ctx;
	uint64_t reg;

	ctx = (struct sso_agq_ctx *)sso->agg_mem[hwgrp];
	ctx += agq_id;

	if (!ctx->ena)
		return;

	reg = SSO_LF_AGGR_INSTOP_FLUSH << 4;
	reg |= (uint64_t)(agq_id << 8);

	plt_write64(reg, roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_INSTOP);
	sso_agq_op_wait(roc_sso, hwgrp);

	memset(ctx, 0, sizeof(struct sso_agq_ctx));
	plt_wmb();
	sso->agg_used[hwgrp]--;

	/* Flush the context from CTX Cache */
	reg = SSO_LF_AGGR_INSTOP_EVICT << 4;
	reg |= (uint64_t)(agq_id << 8);

	plt_write64(reg, roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_INSTOP);
	sso_agq_op_wait(roc_sso, hwgrp);
}

void
roc_sso_hwgrp_agq_release(struct roc_sso *roc_sso, uint16_t hwgrp)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_aggr_setconfig *req;
	struct sso_agq_ctx *ctx;
	struct mbox *mbox;
	uint32_t cnt;
	int rc;

	if (!roc_sso->feat.eva_present)
		return;

	plt_write64(0, roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CFG);
	ctx = (struct sso_agq_ctx *)sso->agg_mem[hwgrp];
	for (cnt = 0; cnt < sso->agg_cnt[hwgrp]; cnt++) {
		if (!ctx[cnt].ena)
			continue;
		roc_sso_hwgrp_agq_free(roc_sso, hwgrp, cnt);
	}

	plt_write64(0, roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_BASE);
	plt_free((void *)sso->agg_mem[hwgrp]);
	sso->agg_mem[hwgrp] = 0;
	sso->agg_cnt[hwgrp] = 0;
	sso->agg_used[hwgrp] = 0;

	mbox = mbox_get(sso->dev.mbox);
	req = mbox_alloc_msg_sso_aggr_setconfig(mbox);
	if (req == NULL) {
		mbox_process(mbox);
		req = mbox_alloc_msg_sso_aggr_setconfig(mbox);
		if (req == NULL) {
			plt_err("Failed to allocate AGQ config mbox.");
			mbox_put(mbox);
			return;
		}
	}

	req->hwgrp = hwgrp;
	req->npa_pf_func = 0;
	rc = mbox_process(mbox);
	if (rc < 0)
		plt_err("Failed to set HWGRP AGQ config rc=%d", rc);
	mbox_put(mbox);
}

uint32_t
roc_sso_hwgrp_agq_from_tag(struct roc_sso *roc_sso, uint16_t hwgrp, uint32_t tag_mask,
			   uint8_t xqe_type)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_agq_ctx *ctx;
	uint32_t i;

	plt_rmb();
	ctx = (struct sso_agq_ctx *)sso->agg_mem[hwgrp];
	for (i = 0; i < sso->agg_used[hwgrp]; i++) {
		if (!ctx[i].ena)
			continue;
		if (ctx[i].tag == tag_mask && ctx[i].xqe_type == xqe_type)
			return i;
	}

	return UINT32_MAX;
}

int
roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint16_t hwgrp, struct roc_sso_hwgrp_stats *stats)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_grp_stats *req_rsp;
	struct dev *dev = &sso->dev;
	struct mbox *mbox;
	int rc;

	mbox = mbox_get(dev->mbox);
	req_rsp = (struct sso_grp_stats *)mbox_alloc_msg_sso_grp_get_stats(
		mbox);
	if (req_rsp == NULL) {
		rc = mbox_process(mbox);
		if (rc) {
			rc = -EIO;
			goto fail;
		}
		req_rsp = (struct sso_grp_stats *)
			mbox_alloc_msg_sso_grp_get_stats(mbox);
		if (req_rsp == NULL) {
			rc = -ENOSPC;
			goto fail;
		}
	}
	req_rsp->grp = hwgrp;
	rc = mbox_process_msg(mbox, (void **)&req_rsp);
	if (rc) {
		rc = -EIO;
		goto fail;
	}

	stats->aw_status = req_rsp->aw_status;
	stats->dq_pc = req_rsp->dq_pc;
	stats->ds_pc = req_rsp->ds_pc;
	stats->ext_pc = req_rsp->ext_pc;
	stats->page_cnt = req_rsp->page_cnt;
	stats->ts_pc = req_rsp->ts_pc;
	stats->wa_pc = req_rsp->wa_pc;
	stats->ws_pc = req_rsp->ws_pc;

fail:
	mbox_put(mbox);
	return rc;
}

int
roc_sso_hwgrp_hws_link_status(struct roc_sso *roc_sso, uint8_t hws,
			      uint16_t hwgrp)
{
	struct sso *sso;

	sso = roc_sso_to_sso_priv(roc_sso);
	return plt_bitmap_get(sso->link_map[hws], hwgrp);
}

int
roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos, uint16_t nb_qos)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct dev *dev = &sso->dev;
	struct sso_grp_qos_cfg *req;
	struct mbox *mbox;
	int i, rc;

	if (!nb_qos)
		return 0;

	mbox = mbox_get(dev->mbox);
	for (i = 0; i < nb_qos; i++) {
		uint8_t iaq_prcnt = qos[i].iaq_prcnt;
		uint8_t taq_prcnt = qos[i].taq_prcnt;

		req = mbox_alloc_msg_sso_grp_qos_config(mbox);
		if (req == NULL) {
			rc = mbox_process(mbox);
			if (rc) {
				rc = -EIO;
				goto fail;
			}

			req = mbox_alloc_msg_sso_grp_qos_config(mbox);
			if (req == NULL) {
				rc = -ENOSPC;
				goto fail;
			}
		}
		req->grp = qos[i].hwgrp;
		req->iaq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
				(iaq_prcnt ? iaq_prcnt : 100)) /
			       100;
		req->taq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
				(taq_prcnt ? taq_prcnt : 100)) /
			       100;
	}

	rc = mbox_process(mbox);
	if (rc)
		rc = -EIO;
fail:
	mbox_put(mbox);
	return rc;
}
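
/* Illustrative roc_sso_hwgrp_qos_config() usage (percentages hypothetical):
 * a zero percentage is treated as 100%, and each entry is scaled against
 * SSO_HWGRP_IAQ_MAX_THR_MASK / SSO_HWGRP_TAQ_MAX_THR_MASK before being sent
 * to the AF.
 *
 *	struct roc_sso_hwgrp_qos qos = {
 *		.hwgrp = 0,
 *		.iaq_prcnt = 50,
 *		.taq_prcnt = 75,
 *	};
 *
 *	rc = roc_sso_hwgrp_qos_config(roc_sso, &qos, 1);
 */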

int
sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
			uint32_t nb_xae, uint32_t xae_waes,
			uint32_t xaq_buf_size, uint16_t nb_hwgrp)
{
	struct npa_pool_s pool;
	struct npa_aura_s aura;
	plt_iova_t iova;
	uint32_t i;
	int rc;

	if (xaq->mem != NULL) {
		rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
		if (rc < 0) {
			plt_err("Failed to release XAQ %d", rc);
			return rc;
		}
		roc_npa_pool_destroy(xaq->aura_handle);
		plt_free(xaq->fc);
		plt_free(xaq->mem);
		memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
	}

	xaq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
	if (xaq->fc == NULL) {
		plt_err("Failed to allocate XAQ FC");
		rc = -ENOMEM;
		goto fail;
	}

	xaq->nb_xae = nb_xae;

	/** SSO will reserve up to 0x4 XAQ buffers per group when GetWork engine
	 * is inactive and it might prefetch an additional 0x3 buffers due to
	 * pipelining.
	 */
	xaq->nb_xaq = (SSO_XAQ_CACHE_CNT * nb_hwgrp);
	xaq->nb_xaq += (SSO_XAQ_RSVD_CNT * nb_hwgrp);
	xaq->nb_xaq += PLT_MAX(1 + ((xaq->nb_xae - 1) / xae_waes), xaq->nb_xaq);
	xaq->nb_xaq += SSO_XAQ_SLACK;

	xaq->mem = plt_zmalloc(xaq_buf_size * xaq->nb_xaq, xaq_buf_size);
	if (xaq->mem == NULL) {
		plt_err("Failed to allocate XAQ mem");
		rc = -ENOMEM;
		goto free_fc;
	}

	memset(&pool, 0, sizeof(struct npa_pool_s));
	pool.nat_align = 1;

	memset(&aura, 0, sizeof(aura));
	aura.fc_ena = 1;
	aura.fc_addr = (uint64_t)xaq->fc;
	aura.fc_hyst_bits = 0; /* Store count on all updates */
	rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
				 &aura, &pool, 0);
	if (rc) {
		plt_err("Failed to create XAQ pool");
		goto npa_fail;
	}

	iova = (uint64_t)xaq->mem;
	for (i = 0; i < xaq->nb_xaq; i++) {
		roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
		iova += xaq_buf_size;
	}
	roc_npa_pool_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);

	if (roc_npa_aura_op_available_wait(xaq->aura_handle, xaq->nb_xaq, 0) !=
	    xaq->nb_xaq) {
		plt_err("Failed to free all pointers to the pool");
		rc = -ENOMEM;
		goto npa_fill_fail;
	}

	/* When SW does addwork (enqueue) check if there is space in XAQ by
	 * comparing fc_addr above against the xaq_lmt calculated below.
	 * There should be a minimum headroom of 7 XAQs per HWGRP for SSO
	 * to request XAQ to cache them even before enqueue is called.
	 */
	xaq->xaq_lmt = xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT) - SSO_XAQ_SLACK;

	return 0;
npa_fill_fail:
	roc_npa_pool_destroy(xaq->aura_handle);
npa_fail:
	plt_free(xaq->mem);
free_fc:
	plt_free(xaq->fc);
fail:
	memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
	return rc;
}
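
/* Worked example of the XAQ accounting above (all numbers hypothetical):
 * with nb_hwgrp = 2, nb_xae = 1024 and xae_waes = 16:
 *
 *	nb_xaq  = SSO_XAQ_CACHE_CNT * 2 + SSO_XAQ_RSVD_CNT * 2  = 14
 *	nb_xaq += PLT_MAX(1 + (1024 - 1) / 16, 14)              = 14 + 64 = 78
 *	nb_xaq += SSO_XAQ_SLACK                                 = 94
 *	xaq_lmt = 94 - 2 * SSO_XAQ_CACHE_CNT - SSO_XAQ_SLACK    = 72
 *
 * i.e. the add-work limit leaves the cached and slack buffers untouched so
 * the SSO can keep prefetching XAQs even while software is enqueueing.
 */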
910 */ 911 xaq->xaq_lmt = xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT) - SSO_XAQ_SLACK; 912 913 return 0; 914 npa_fill_fail: 915 roc_npa_pool_destroy(xaq->aura_handle); 916 npa_fail: 917 plt_free(xaq->mem); 918 free_fc: 919 plt_free(xaq->fc); 920 fail: 921 memset(xaq, 0, sizeof(struct roc_sso_xaq_data)); 922 return rc; 923 } 924 925 int 926 roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae) 927 { 928 struct sso *sso = roc_sso_to_sso_priv(roc_sso); 929 struct dev *dev = &sso->dev; 930 int rc; 931 932 rc = sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae, roc_sso->feat.xaq_wq_entries, 933 roc_sso->feat.xaq_buf_size, roc_sso->nb_hwgrp); 934 return rc; 935 } 936 937 int 938 sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq, 939 uint16_t nb_hwgrp) 940 { 941 int rc; 942 943 if (xaq->mem != NULL) { 944 if (nb_hwgrp) { 945 rc = sso_hwgrp_release_xaq(dev, nb_hwgrp); 946 if (rc < 0) { 947 plt_err("Failed to release XAQ %d", rc); 948 return rc; 949 } 950 } 951 roc_npa_pool_destroy(xaq->aura_handle); 952 plt_free(xaq->fc); 953 plt_free(xaq->mem); 954 } 955 memset(xaq, 0, sizeof(struct roc_sso_xaq_data)); 956 957 return 0; 958 } 959 960 int 961 roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, uint16_t nb_hwgrp) 962 { 963 struct sso *sso = roc_sso_to_sso_priv(roc_sso); 964 struct dev *dev = &sso->dev; 965 int rc; 966 967 rc = sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp); 968 return rc; 969 } 970 971 int 972 sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps) 973 { 974 struct sso_hw_setconfig *req; 975 struct mbox *mbox = mbox_get(dev->mbox); 976 int rc = -ENOSPC; 977 978 req = mbox_alloc_msg_sso_hw_setconfig(mbox); 979 if (req == NULL) 980 goto exit; 981 req->npa_pf_func = idev_npa_pffunc_get(); 982 req->npa_aura_id = npa_aura_id; 983 req->hwgrps = hwgrps; 984 985 if (mbox_process(dev->mbox)) { 986 rc = -EIO; 987 goto exit; 988 } 989 990 rc = 0; 991 exit: 992 mbox_put(mbox); 993 return rc; 994 } 995 996 int 997 roc_sso_hwgrp_alloc_xaq(struct roc_sso *roc_sso, uint32_t npa_aura_id, 998 uint16_t hwgrps) 999 { 1000 struct sso *sso = roc_sso_to_sso_priv(roc_sso); 1001 struct dev *dev = &sso->dev; 1002 int rc; 1003 1004 rc = sso_hwgrp_alloc_xaq(dev, npa_aura_id, hwgrps); 1005 return rc; 1006 } 1007 1008 int 1009 sso_hwgrp_release_xaq(struct dev *dev, uint16_t hwgrps) 1010 { 1011 struct sso_hw_xaq_release *req; 1012 struct mbox *mbox = mbox_get(dev->mbox); 1013 int rc; 1014 1015 req = mbox_alloc_msg_sso_hw_release_xaq_aura(mbox); 1016 if (req == NULL) { 1017 rc = -EINVAL; 1018 goto exit; 1019 } 1020 req->hwgrps = hwgrps; 1021 1022 if (mbox_process(mbox)) { 1023 rc = -EIO; 1024 goto exit; 1025 } 1026 1027 rc = 0; 1028 exit: 1029 mbox_put(mbox); 1030 return rc; 1031 } 1032 1033 int 1034 roc_sso_hwgrp_release_xaq(struct roc_sso *roc_sso, uint16_t hwgrps) 1035 { 1036 struct sso *sso = roc_sso_to_sso_priv(roc_sso); 1037 struct dev *dev = &sso->dev; 1038 int rc; 1039 1040 if (!hwgrps) 1041 return 0; 1042 1043 rc = sso_hwgrp_release_xaq(dev, hwgrps); 1044 return rc; 1045 } 1046 1047 int 1048 roc_sso_hwgrp_set_priority(struct roc_sso *roc_sso, uint16_t hwgrp, 1049 uint8_t weight, uint8_t affinity, uint8_t priority) 1050 { 1051 struct sso *sso = roc_sso_to_sso_priv(roc_sso); 1052 struct dev *dev = &sso->dev; 1053 struct sso_grp_priority *req; 1054 struct mbox *mbox; 1055 int rc = -ENOSPC; 1056 1057 mbox = mbox_get(dev->mbox); 1058 req = mbox_alloc_msg_sso_grp_set_priority(mbox); 1059 if (req == NULL) 1060 goto fail; 1061 req->grp = hwgrp; 

int
roc_sso_hwgrp_set_priority(struct roc_sso *roc_sso, uint16_t hwgrp,
			   uint8_t weight, uint8_t affinity, uint8_t priority)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct dev *dev = &sso->dev;
	struct sso_grp_priority *req;
	struct mbox *mbox;
	int rc = -ENOSPC;

	mbox = mbox_get(dev->mbox);
	req = mbox_alloc_msg_sso_grp_set_priority(mbox);
	if (req == NULL)
		goto fail;
	req->grp = hwgrp;
	req->weight = weight;
	req->affinity = affinity;
	req->priority = priority;

	rc = mbox_process(mbox);
	if (rc) {
		rc = -EIO;
		goto fail;
	}
	mbox_put(mbox);
	plt_sso_dbg("HWGRP %d weight %d affinity %d priority %d", hwgrp, weight,
		    affinity, priority);

	return 0;
fail:
	mbox_put(mbox);
	return rc;
}

static int
sso_update_msix_vec_count(struct roc_sso *roc_sso, uint16_t sso_vec_cnt)
{
	struct plt_pci_device *pci_dev = roc_sso->pci_dev;
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	uint16_t mbox_vec_cnt, npa_vec_cnt;
	struct dev *dev = &sso->dev;
	struct idev_cfg *idev;
	int rc;

	idev = idev_get_cfg();
	if (idev == NULL)
		return -ENODEV;

	if (roc_model_is_cn20k())
		mbox_vec_cnt = RVU_MBOX_PF_INT_VEC_AFPF_MBOX + 1;
	else
		mbox_vec_cnt = RVU_PF_INT_VEC_AFPF_MBOX + 1;

	/* Allocating vectors for the first time */
	if (plt_intr_max_intr_get(pci_dev->intr_handle) == 0) {
		npa_vec_cnt = idev->npa_refcnt ? 0 : NPA_LF_INT_VEC_POISON + 1;
		return dev_irq_reconfigure(pci_dev->intr_handle, mbox_vec_cnt + npa_vec_cnt);
	}

	/* Before re-configuring unregister irqs */
	npa_vec_cnt = (dev->npa.pci_dev == pci_dev) ? NPA_LF_INT_VEC_POISON + 1 : 0;
	if (npa_vec_cnt)
		npa_unregister_irqs(&dev->npa);

	dev_mbox_unregister_irq(pci_dev, dev);
	if (!dev_is_vf(dev))
		dev_vf_flr_unregister_irqs(pci_dev, dev);

	/* Re-configure to include SSO vectors */
	rc = dev_irq_reconfigure(pci_dev->intr_handle, mbox_vec_cnt + npa_vec_cnt + sso_vec_cnt);
	if (rc)
		return rc;

	rc = dev_mbox_register_irq(pci_dev, dev);
	if (rc)
		return rc;

	if (!dev_is_vf(dev)) {
		rc = dev_vf_flr_register_irqs(pci_dev, dev);
		if (rc)
			return rc;
	}

	if (npa_vec_cnt)
		rc = npa_register_irqs(&dev->npa);

	return rc;
}

int
roc_sso_hwgrp_stash_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_stash *stash,
			   uint16_t nb_stash)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_grp_stash_cfg *req;
	struct dev *dev = &sso->dev;
	struct mbox *mbox;
	int i, rc;

	if (!nb_stash)
		return 0;

	mbox = mbox_get(dev->mbox);
	for (i = 0; i < nb_stash; i++) {
		req = mbox_alloc_msg_sso_grp_stash_config(mbox);
		if (req == NULL) {
			rc = mbox_process(mbox);
			if (rc) {
				rc = -EIO;
				goto fail;
			}

			req = mbox_alloc_msg_sso_grp_stash_config(mbox);
			if (req == NULL) {
				rc = -ENOSPC;
				goto fail;
			}
		}
		req->ena = true;
		req->grp = stash[i].hwgrp;
		req->offset = stash[i].stash_offset;
		req->num_linesm1 = stash[i].stash_count - 1;
	}

	rc = mbox_process(mbox);
	if (rc)
		rc = -EIO;
fail:
	mbox_put(mbox);
	return rc;
}
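
/* Illustrative roc_sso_hwgrp_stash_config() usage (values hypothetical):
 * request stashing of two cache lines starting at offset 0 for HWGRP 0.
 *
 *	struct roc_sso_hwgrp_stash stash = {
 *		.hwgrp = 0,
 *		.stash_offset = 0,
 *		.stash_count = 2,
 *	};
 *
 *	rc = roc_sso_hwgrp_stash_config(roc_sso, &stash, 1);
 */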

int
roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp, uint16_t nb_tim_lfs)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_lf_alloc_rsp *rsp_hwgrp;
	uint16_t sso_vec_cnt, free_tim_lfs;
	int rc;

	if (!nb_hwgrp || roc_sso->max_hwgrp < nb_hwgrp)
		return -ENOENT;
	if (!nb_hws || roc_sso->max_hws < nb_hws)
		return -ENOENT;

	rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWS, nb_hws);
	if (rc < 0) {
		plt_err("Unable to attach SSO HWS LFs");
		goto fail;
	}

	rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWGRP, nb_hwgrp);
	if (rc < 0) {
		plt_err("Unable to attach SSO HWGRP LFs");
		goto hwgrp_atch_fail;
	}

	rc = sso_lf_alloc(&sso->dev, SSO_LF_TYPE_HWS, nb_hws, NULL);
	if (rc < 0) {
		plt_err("Unable to alloc SSO HWS LFs");
		goto hws_alloc_fail;
	}

	rc = sso_lf_alloc(&sso->dev, SSO_LF_TYPE_HWGRP, nb_hwgrp,
			  (void **)&rsp_hwgrp);
	if (rc < 0) {
		plt_err("Unable to alloc SSO HWGRP LFs");
		goto hwgrp_alloc_fail;
	}

	if (!roc_sso->feat.xaq_buf_size || !roc_sso->feat.xaq_wq_entries || !roc_sso->feat.iue) {
		roc_sso->feat.xaq_buf_size = rsp_hwgrp->xaq_buf_size;
		roc_sso->feat.xaq_wq_entries = rsp_hwgrp->xaq_wq_entries;
		roc_sso->feat.iue = rsp_hwgrp->in_unit_entries;
	}

	rc = sso_msix_fill(roc_sso, nb_hws, nb_hwgrp);
	if (rc < 0) {
		plt_err("Unable to get MSIX offsets for SSO LFs");
		goto sso_msix_fail;
	}

	/* 1 error interrupt per SSO HWS/HWGRP */
	sso_vec_cnt = nb_hws + nb_hwgrp;

	if (sso->dev.roc_tim) {
		nb_tim_lfs = ((struct roc_tim *)sso->dev.roc_tim)->nb_lfs;
	} else {
		rc = tim_free_lf_count_get(&sso->dev, &free_tim_lfs);
		if (rc < 0) {
			plt_err("Failed to get TIM resource count");
			goto sso_msix_fail;
		}

		nb_tim_lfs = PLT_MIN(nb_tim_lfs, free_tim_lfs);
	}

	/* 2 error interrupts per TIM LF (3 on cn20k) */
	if (roc_model_is_cn20k())
		sso_vec_cnt += 3 * nb_tim_lfs;
	else
		sso_vec_cnt += 2 * nb_tim_lfs;

	rc = sso_update_msix_vec_count(roc_sso, sso_vec_cnt);
	if (rc < 0) {
		plt_err("Failed to update SSO MSIX vector count");
		goto sso_msix_fail;
	}

	rc = sso_register_irqs_priv(roc_sso, sso->pci_dev->intr_handle, nb_hws,
				    nb_hwgrp);
	if (rc < 0) {
		plt_err("Failed to register SSO LF IRQs");
		goto sso_msix_fail;
	}

	roc_sso->nb_hwgrp = nb_hwgrp;
	roc_sso->nb_hws = nb_hws;

	return 0;
sso_msix_fail:
	sso_lf_free(&sso->dev, SSO_LF_TYPE_HWGRP, nb_hwgrp);
hwgrp_alloc_fail:
	sso_lf_free(&sso->dev, SSO_LF_TYPE_HWS, nb_hws);
hws_alloc_fail:
	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWGRP);
hwgrp_atch_fail:
	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWS);
fail:
	return rc;
}

void
roc_sso_rsrc_fini(struct roc_sso *roc_sso)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	uint32_t cnt;

	if (!roc_sso->nb_hws && !roc_sso->nb_hwgrp)
		return;

	for (cnt = 0; cnt < roc_sso->nb_hwgrp; cnt++)
		roc_sso_hwgrp_agq_release(roc_sso, cnt);

	sso_unregister_irqs_priv(roc_sso, sso->pci_dev->intr_handle,
				 roc_sso->nb_hws, roc_sso->nb_hwgrp);
	sso_lf_free(&sso->dev, SSO_LF_TYPE_HWS, roc_sso->nb_hws);
	sso_lf_free(&sso->dev, SSO_LF_TYPE_HWGRP, roc_sso->nb_hwgrp);

	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWS);
	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWGRP);

	roc_sso->nb_hwgrp = 0;
	roc_sso->nb_hws = 0;
}

int
roc_sso_dev_init(struct roc_sso *roc_sso)
{
	struct plt_pci_device *pci_dev;
	uint32_t link_map_sz;
	struct sso *sso;
	void *link_mem;
	int i, rc;

	if (roc_sso == NULL || roc_sso->pci_dev == NULL)
		return SSO_ERR_PARAM;

	PLT_STATIC_ASSERT(sizeof(struct sso) <= ROC_SSO_MEM_SZ);
	sso = roc_sso_to_sso_priv(roc_sso);
	memset(sso, 0, sizeof(*sso));
	pci_dev = roc_sso->pci_dev;

	rc = sso_update_msix_vec_count(roc_sso, 0);
	if (rc < 0) {
		plt_err("Failed to set SSO MSIX vector count");
		return rc;
	}

	rc = dev_init(&sso->dev, pci_dev);
	if (rc < 0) {
		plt_err("Failed to init roc device");
		goto fail;
	}

	rc = sso_hw_info_get(roc_sso);
	if (rc < 0) {
		plt_err("Failed to get SSO HW info");
		goto fail;
	}

	rc = sso_rsrc_get(roc_sso);
	if (rc < 0) {
		plt_err("Failed to get SSO resources");
		goto rsrc_fail;
	}
	rc = -ENOMEM;

	if (roc_sso->max_hws) {
		sso->link_map = plt_zmalloc(
			sizeof(struct plt_bitmap *) * roc_sso->max_hws, 0);
		if (sso->link_map == NULL) {
			plt_err("Failed to allocate memory for link_map array");
			goto rsrc_fail;
		}

		link_map_sz =
			plt_bitmap_get_memory_footprint(roc_sso->max_hwgrp);
		sso->link_map_mem =
			plt_zmalloc(link_map_sz * roc_sso->max_hws, 0);
		if (sso->link_map_mem == NULL) {
			plt_err("Failed to get link_map memory");
			goto rsrc_fail;
		}

		link_mem = sso->link_map_mem;

		for (i = 0; i < roc_sso->max_hws; i++) {
			sso->link_map[i] = plt_bitmap_init(
				roc_sso->max_hwgrp, link_mem, link_map_sz);
			if (sso->link_map[i] == NULL) {
				plt_err("Failed to allocate link map");
				goto link_mem_free;
			}
			link_mem = PLT_PTR_ADD(link_mem, link_map_sz);
		}
	}
	idev_sso_pffunc_set(sso->dev.pf_func);
	idev_sso_set(roc_sso);
	sso->pci_dev = pci_dev;
	sso->dev.drv_inited = true;
	roc_sso->lmt_base = sso->dev.lmt_base;

	return 0;
link_mem_free:
	plt_free(sso->link_map_mem);
rsrc_fail:
	rc |= dev_fini(&sso->dev, pci_dev);
fail:
	return rc;
}

int
roc_sso_dev_fini(struct roc_sso *roc_sso)
{
	struct sso *sso;

	sso = roc_sso_to_sso_priv(roc_sso);
	sso->dev.drv_inited = false;

	return dev_fini(&sso->dev, sso->pci_dev);
}
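
/* End-to-end bring-up sketch (illustrative only; it mirrors the call order
 * suggested by this file rather than prescribing one, and nb_hws, nb_hwgrp,
 * nb_tim_lfs, nb_xae, hws, hwgrp_list, nb_link and set are hypothetical
 * caller-side values):
 *
 *	rc = roc_sso_dev_init(roc_sso);
 *	rc = roc_sso_rsrc_init(roc_sso, nb_hws, nb_hwgrp, nb_tim_lfs);
 *	rc = roc_sso_hwgrp_init_xaq_aura(roc_sso, nb_xae);
 *	rc = roc_sso_hwgrp_alloc_xaq(roc_sso,
 *			roc_npa_aura_handle_to_aura(roc_sso->xaq.aura_handle),
 *			nb_hwgrp);
 *	rc = roc_sso_hws_link(roc_sso, hws, hwgrp_list, nb_link, set, true);
 *	...
 *	rc = roc_sso_hwgrp_free_xaq_aura(roc_sso, nb_hwgrp);
 *	roc_sso_rsrc_fini(roc_sso);
 *	rc = roc_sso_dev_fini(roc_sso);
 */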