1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(C) 2021 Marvell. 3 */ 4 5 #include "roc_api.h" 6 #include "roc_priv.h" 7 8 static int 9 nix_fc_rxchan_bpid_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg) 10 { 11 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 12 13 if (nix->chan_cnt != 0) 14 fc_cfg->rxchan_cfg.enable = true; 15 else 16 fc_cfg->rxchan_cfg.enable = false; 17 18 fc_cfg->type = ROC_NIX_FC_RXCHAN_CFG; 19 20 return 0; 21 } 22 23 static int 24 nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable) 25 { 26 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 27 struct dev *dev = &nix->dev; 28 struct mbox *mbox = mbox_get(dev->mbox); 29 struct nix_bp_cfg_req *req; 30 struct nix_bp_cfg_rsp *rsp; 31 int rc = -ENOSPC, i; 32 33 if (enable) { 34 req = mbox_alloc_msg_nix_bp_enable(mbox); 35 if (req == NULL) 36 goto exit; 37 38 req->chan_base = 0; 39 if (roc_nix_is_lbk(roc_nix) || roc_nix_is_sdp(roc_nix)) 40 req->chan_cnt = NIX_LBK_MAX_CHAN; 41 else 42 req->chan_cnt = NIX_CGX_MAX_CHAN; 43 44 req->bpid_per_chan = true; 45 46 rc = mbox_process_msg(mbox, (void *)&rsp); 47 if (rc || (req->chan_cnt != rsp->chan_cnt)) { 48 rc = -EIO; 49 goto exit; 50 } 51 52 nix->chan_cnt = rsp->chan_cnt; 53 for (i = 0; i < rsp->chan_cnt; i++) 54 nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF; 55 } else { 56 req = mbox_alloc_msg_nix_bp_disable(mbox); 57 if (req == NULL) 58 goto exit; 59 req->chan_base = 0; 60 req->chan_cnt = nix->chan_cnt; 61 62 rc = mbox_process(mbox); 63 if (rc) 64 goto exit; 65 66 memset(nix->bpid, 0, sizeof(uint16_t) * NIX_MAX_CHAN); 67 nix->chan_cnt = 0; 68 } 69 70 if (roc_model_is_cn9k()) 71 goto exit; 72 73 /* Enable backpressure on CPT if inline inb is enabled */ 74 if (enable && roc_nix_inl_inb_is_enabled(roc_nix) && 75 !roc_errata_cpt_hang_on_x2p_bp()) { 76 req = mbox_alloc_msg_nix_cpt_bp_enable(mbox); 77 if (req == NULL) 78 goto exit; 79 req->chan_base = 0; 80 if (roc_nix_is_lbk(roc_nix) || roc_nix_is_sdp(roc_nix)) 81 req->chan_cnt = NIX_LBK_MAX_CHAN; 82 else 83 req->chan_cnt = NIX_CGX_MAX_CHAN; 84 req->bpid_per_chan = 0; 85 86 rc = mbox_process_msg(mbox, (void *)&rsp); 87 if (rc) 88 goto exit; 89 nix->cpt_lbpid = rsp->chan_bpid[0] & 0x1FF; 90 } 91 92 /* CPT to NIX BP on all channels */ 93 if (!roc_feature_nix_has_rxchan_multi_bpid() || !nix->cpt_nixbpid || 94 !roc_nix_inl_inb_is_enabled(roc_nix)) 95 goto exit; 96 97 mbox_put(mbox); 98 for (i = 0; i < nix->rx_chan_cnt; i++) { 99 rc = roc_nix_chan_bpid_set(roc_nix, i, nix->cpt_nixbpid, enable, false); 100 if (rc) 101 break; 102 } 103 return rc; 104 exit: 105 mbox_put(mbox); 106 return rc; 107 } 108 109 static int 110 nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg) 111 { 112 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 113 struct dev *dev = &nix->dev; 114 struct mbox *mbox = mbox_get(dev->mbox); 115 struct nix_aq_enq_rsp *rsp; 116 int rc; 117 118 if (roc_model_is_cn9k()) { 119 struct nix_aq_enq_req *aq; 120 121 aq = mbox_alloc_msg_nix_aq_enq(mbox); 122 if (!aq) { 123 rc = -ENOSPC; 124 goto exit; 125 } 126 127 aq->qidx = fc_cfg->cq_cfg.rq; 128 aq->ctype = NIX_AQ_CTYPE_CQ; 129 aq->op = NIX_AQ_INSTOP_READ; 130 } else if (roc_model_is_cn10k()) { 131 struct nix_cn10k_aq_enq_req *aq; 132 133 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox); 134 if (!aq) { 135 rc = -ENOSPC; 136 goto exit; 137 } 138 139 aq->qidx = fc_cfg->cq_cfg.rq; 140 aq->ctype = NIX_AQ_CTYPE_CQ; 141 aq->op = NIX_AQ_INSTOP_READ; 142 } else { 143 struct nix_cn20k_aq_enq_req *aq; 144 145 aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox); 146 if (!aq) { 147 rc = -ENOSPC; 148 goto exit; 149 } 150 151 aq->qidx = fc_cfg->cq_cfg.rq; 152 aq->ctype = NIX_AQ_CTYPE_CQ; 153 aq->op = NIX_AQ_INSTOP_READ; 154 } 155 156 rc = mbox_process_msg(mbox, (void *)&rsp); 157 if (rc) 158 goto exit; 159 160 fc_cfg->cq_cfg.cq_drop = rsp->cq.bp; 161 fc_cfg->cq_cfg.enable = rsp->cq.bp_ena; 162 fc_cfg->type = ROC_NIX_FC_CQ_CFG; 163 164 exit: 165 mbox_put(mbox); 166 return rc; 167 } 168 169 static int 170 nix_fc_rq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg) 171 { 172 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 173 struct npa_cn20k_aq_enq_req *npa_req_cn20k; 174 struct npa_cn20k_aq_enq_rsp *npa_rsp_cn20k; 175 struct dev *dev = &nix->dev; 176 struct mbox *mbox = mbox_get(dev->mbox); 177 struct nix_aq_enq_rsp *rsp; 178 struct npa_aq_enq_req *npa_req; 179 struct npa_aq_enq_rsp *npa_rsp; 180 int rc; 181 182 if (roc_model_is_cn9k()) { 183 struct nix_aq_enq_req *aq; 184 185 aq = mbox_alloc_msg_nix_aq_enq(mbox); 186 if (!aq) { 187 rc = -ENOSPC; 188 goto exit; 189 } 190 191 aq->qidx = fc_cfg->rq_cfg.rq; 192 aq->ctype = NIX_AQ_CTYPE_RQ; 193 aq->op = NIX_AQ_INSTOP_READ; 194 } else if (roc_model_is_cn10k()) { 195 struct nix_cn10k_aq_enq_req *aq; 196 197 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox); 198 if (!aq) { 199 rc = -ENOSPC; 200 goto exit; 201 } 202 203 aq->qidx = fc_cfg->rq_cfg.rq; 204 aq->ctype = NIX_AQ_CTYPE_RQ; 205 aq->op = NIX_AQ_INSTOP_READ; 206 } else { 207 struct nix_cn20k_aq_enq_req *aq; 208 209 aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox); 210 if (!aq) { 211 rc = -ENOSPC; 212 goto exit; 213 } 214 215 aq->qidx = fc_cfg->rq_cfg.rq; 216 aq->ctype = NIX_AQ_CTYPE_RQ; 217 aq->op = NIX_AQ_INSTOP_READ; 218 } 219 220 rc = mbox_process_msg(mbox, (void *)&rsp); 221 if (rc) 222 goto exit; 223 224 if (roc_model_is_cn20k()) { 225 npa_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox); 226 if (!npa_req_cn20k) { 227 rc = -ENOSPC; 228 goto exit; 229 } 230 231 npa_req_cn20k->aura_id = rsp->rq.lpb_aura; 232 npa_req_cn20k->ctype = NPA_AQ_CTYPE_AURA; 233 npa_req_cn20k->op = NPA_AQ_INSTOP_READ; 234 235 rc = mbox_process_msg(mbox, (void *)&npa_rsp_cn20k); 236 if (rc) 237 goto exit; 238 239 fc_cfg->cq_cfg.cq_drop = npa_rsp_cn20k->aura.bp; 240 fc_cfg->cq_cfg.enable = npa_rsp_cn20k->aura.bp_ena; 241 fc_cfg->type = ROC_NIX_FC_RQ_CFG; 242 } else { 243 npa_req = mbox_alloc_msg_npa_aq_enq(mbox); 244 if (!npa_req) { 245 rc = -ENOSPC; 246 goto exit; 247 } 248 249 npa_req->aura_id = rsp->rq.lpb_aura; 250 npa_req->ctype = NPA_AQ_CTYPE_AURA; 251 npa_req->op = NPA_AQ_INSTOP_READ; 252 253 rc = mbox_process_msg(mbox, (void *)&npa_rsp); 254 if (rc) 255 goto exit; 256 257 fc_cfg->cq_cfg.cq_drop = npa_rsp->aura.bp; 258 fc_cfg->cq_cfg.enable = npa_rsp->aura.bp_ena; 259 fc_cfg->type = ROC_NIX_FC_RQ_CFG; 260 } 261 262 exit: 263 mbox_put(mbox); 264 return rc; 265 } 266 267 static int 268 nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg) 269 { 270 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 271 struct dev *dev = &nix->dev; 272 struct mbox *mbox = mbox_get(dev->mbox); 273 int rc; 274 275 if (roc_model_is_cn9k()) { 276 struct nix_aq_enq_req *aq; 277 278 aq = mbox_alloc_msg_nix_aq_enq(mbox); 279 if (!aq) { 280 rc = -ENOSPC; 281 goto exit; 282 } 283 284 aq->qidx = fc_cfg->cq_cfg.rq; 285 aq->ctype = NIX_AQ_CTYPE_CQ; 286 aq->op = NIX_AQ_INSTOP_WRITE; 287 288 if (fc_cfg->cq_cfg.enable) { 289 aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc]; 290 aq->cq_mask.bpid = ~(aq->cq_mask.bpid); 291 aq->cq.bp = fc_cfg->cq_cfg.cq_drop; 292 aq->cq_mask.bp = ~(aq->cq_mask.bp); 293 } 294 295 aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable); 296 aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena); 297 } else if (roc_model_is_cn10k()) { 298 struct nix_cn10k_aq_enq_req *aq; 299 300 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox); 301 if (!aq) { 302 rc = -ENOSPC; 303 goto exit; 304 } 305 306 aq->qidx = fc_cfg->cq_cfg.rq; 307 aq->ctype = NIX_AQ_CTYPE_CQ; 308 aq->op = NIX_AQ_INSTOP_WRITE; 309 310 if (fc_cfg->cq_cfg.enable) { 311 aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc]; 312 aq->cq_mask.bpid = ~(aq->cq_mask.bpid); 313 aq->cq.bp = fc_cfg->cq_cfg.cq_drop; 314 aq->cq_mask.bp = ~(aq->cq_mask.bp); 315 } 316 317 aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable); 318 aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena); 319 } else { 320 struct nix_cn20k_aq_enq_req *aq; 321 322 aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox); 323 if (!aq) { 324 rc = -ENOSPC; 325 goto exit; 326 } 327 328 aq->qidx = fc_cfg->cq_cfg.rq; 329 aq->ctype = NIX_AQ_CTYPE_CQ; 330 aq->op = NIX_AQ_INSTOP_WRITE; 331 332 if (fc_cfg->cq_cfg.enable) { 333 aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc]; 334 aq->cq_mask.bpid = ~(aq->cq_mask.bpid); 335 aq->cq.bp = fc_cfg->cq_cfg.cq_drop; 336 aq->cq_mask.bp = ~(aq->cq_mask.bp); 337 } 338 339 aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable); 340 aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena); 341 } 342 343 rc = mbox_process(mbox); 344 exit: 345 mbox_put(mbox); 346 return rc; 347 } 348 349 static int 350 nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg) 351 { 352 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 353 uint64_t pool_drop_pct, spb_pool_drop_pct; 354 struct roc_nix_fc_cfg tmp; 355 struct roc_nix_rq *rq; 356 int rc; 357 358 rq = nix->rqs[fc_cfg->rq_cfg.rq]; 359 360 if (rq->sso_ena) { 361 pool_drop_pct = fc_cfg->rq_cfg.pool_drop_pct; 362 /* Use default value for zero pct */ 363 if (fc_cfg->rq_cfg.enable && !pool_drop_pct) 364 pool_drop_pct = ROC_NIX_AURA_THRESH; 365 366 roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool, fc_cfg->rq_cfg.enable, 367 roc_nix->force_rx_aura_bp, fc_cfg->rq_cfg.tc, pool_drop_pct); 368 369 if (rq->spb_ena) { 370 spb_pool_drop_pct = fc_cfg->rq_cfg.spb_pool_drop_pct; 371 /* Use default value for zero pct */ 372 if (!spb_pool_drop_pct) 373 spb_pool_drop_pct = ROC_NIX_AURA_THRESH; 374 375 roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.spb_pool, 376 fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp, 377 fc_cfg->rq_cfg.tc, spb_pool_drop_pct); 378 } 379 380 if (roc_nix->local_meta_aura_ena && roc_nix->meta_aura_handle) 381 roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle, 382 fc_cfg->rq_cfg.enable, roc_nix->force_rx_aura_bp, 383 fc_cfg->rq_cfg.tc, pool_drop_pct); 384 } 385 386 /* Copy RQ config to CQ config as they are occupying same area */ 387 memset(&tmp, 0, sizeof(tmp)); 388 tmp.type = ROC_NIX_FC_CQ_CFG; 389 tmp.cq_cfg.rq = fc_cfg->rq_cfg.rq; 390 tmp.cq_cfg.tc = fc_cfg->rq_cfg.tc; 391 tmp.cq_cfg.cq_drop = fc_cfg->rq_cfg.cq_drop; 392 tmp.cq_cfg.enable = fc_cfg->rq_cfg.enable; 393 394 rc = nix_fc_cq_config_set(roc_nix, &tmp); 395 if (rc) 396 return rc; 397 398 rq->tc = fc_cfg->rq_cfg.enable ? fc_cfg->rq_cfg.tc : ROC_NIX_PFC_CLASS_INVALID; 399 plt_nix_dbg("RQ %u: TC %u %s", fc_cfg->rq_cfg.rq, fc_cfg->rq_cfg.tc, 400 fc_cfg->rq_cfg.enable ? "enabled" : "disabled"); 401 return 0; 402 } 403 404 int 405 roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg) 406 { 407 if (!roc_nix_is_pf(roc_nix) && !roc_nix_is_lbk(roc_nix) && 408 !roc_nix_is_sdp(roc_nix)) 409 return 0; 410 411 if (fc_cfg->type == ROC_NIX_FC_CQ_CFG) 412 return nix_fc_cq_config_get(roc_nix, fc_cfg); 413 else if (fc_cfg->type == ROC_NIX_FC_RQ_CFG) 414 return nix_fc_rq_config_get(roc_nix, fc_cfg); 415 else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG) 416 return nix_fc_rxchan_bpid_get(roc_nix, fc_cfg); 417 else if (fc_cfg->type == ROC_NIX_FC_TM_CFG) 418 return nix_tm_bp_config_get(roc_nix, &fc_cfg->tm_cfg.enable); 419 420 return -EINVAL; 421 } 422 423 int 424 roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg) 425 { 426 if (fc_cfg->type == ROC_NIX_FC_CQ_CFG) 427 return nix_fc_cq_config_set(roc_nix, fc_cfg); 428 else if (fc_cfg->type == ROC_NIX_FC_RQ_CFG) 429 return nix_fc_rq_config_set(roc_nix, fc_cfg); 430 else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG) 431 return nix_fc_rxchan_bpid_set(roc_nix, 432 fc_cfg->rxchan_cfg.enable); 433 else if (fc_cfg->type == ROC_NIX_FC_TM_CFG) 434 return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq, 435 fc_cfg->tm_cfg.tc, 436 fc_cfg->tm_cfg.enable); 437 438 return -EINVAL; 439 } 440 441 enum roc_nix_fc_mode 442 roc_nix_fc_mode_get(struct roc_nix *roc_nix) 443 { 444 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 445 enum roc_nix_fc_mode mode; 446 447 if (nix->tx_pause && nix->rx_pause) 448 mode = ROC_NIX_FC_FULL; 449 else if (nix->rx_pause) 450 mode = ROC_NIX_FC_RX; 451 else if (nix->tx_pause) 452 mode = ROC_NIX_FC_TX; 453 else 454 mode = ROC_NIX_FC_NONE; 455 return mode; 456 } 457 458 int 459 roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode) 460 { 461 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 462 struct dev *dev = &nix->dev; 463 struct mbox *mbox = mbox_get(dev->mbox); 464 struct cgx_pause_frm_cfg *req; 465 uint8_t tx_pause, rx_pause; 466 int rc = -ENOSPC; 467 468 rx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_RX); 469 tx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_TX); 470 471 /* Nothing much to do for LBK links */ 472 if (roc_nix_is_lbk(roc_nix)) { 473 nix->rx_pause = rx_pause; 474 nix->tx_pause = tx_pause; 475 rc = 0; 476 goto exit; 477 } 478 479 /* Set new config */ 480 req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox); 481 if (req == NULL) 482 goto exit; 483 req->set = 1; 484 req->rx_pause = rx_pause; 485 req->tx_pause = tx_pause; 486 487 rc = mbox_process(mbox); 488 if (rc) 489 goto exit; 490 491 nix->rx_pause = rx_pause; 492 nix->tx_pause = tx_pause; 493 exit: 494 mbox_put(mbox); 495 return rc; 496 } 497 498 static int 499 nix_rx_chan_multi_bpid_cfg(struct roc_nix *roc_nix, uint8_t chan, uint16_t bpid, uint16_t *bpid_new) 500 { 501 struct roc_nix *roc_nix_tmp, *roc_nix_pre = NULL; 502 struct roc_nix_list *nix_list; 503 uint8_t chan_pre; 504 505 if (!roc_feature_nix_has_rxchan_multi_bpid()) 506 return -ENOTSUP; 507 508 nix_list = roc_idev_nix_list_get(); 509 if (nix_list == NULL) 510 return -EINVAL; 511 512 /* Find associated NIX RX channel if Aura BPID is of that of a NIX. */ 513 TAILQ_FOREACH(roc_nix_tmp, nix_list, next) { 514 struct nix *nix = roc_nix_to_nix_priv(roc_nix_tmp); 515 int i; 516 517 for (i = 0; i < NIX_MAX_CHAN; i++) { 518 if (nix->bpid[i] == bpid) 519 break; 520 } 521 522 if (i < NIX_MAX_CHAN) { 523 roc_nix_pre = roc_nix_tmp; 524 chan_pre = i; 525 break; 526 } 527 } 528 529 /* Alloc and configure a new BPID if Aura BPID is that of a NIX. */ 530 if (roc_nix_pre) { 531 if (roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_SSO, 1, bpid_new) <= 0) 532 return -ENOSPC; 533 534 if (roc_nix_chan_bpid_set(roc_nix_pre, chan_pre, *bpid_new, 1, false) < 0) 535 return -ENOSPC; 536 537 if (roc_nix_chan_bpid_set(roc_nix, chan, *bpid_new, 1, false) < 0) 538 return -ENOSPC; 539 540 return 0; 541 } else { 542 return roc_nix_chan_bpid_set(roc_nix, chan, bpid, 1, false); 543 } 544 545 return 0; 546 } 547 548 #define NIX_BPID_INVALID 0xFFFF 549 550 void 551 roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena, uint8_t force, 552 uint8_t tc, uint64_t drop_percent) 553 { 554 uint32_t aura_id = roc_npa_aura_handle_to_aura(pool_id); 555 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 556 struct npa_lf *lf = idev_npa_obj_get(); 557 struct npa_aura_attr *aura_attr; 558 uint8_t bp_thresh, bp_intf; 559 uint16_t bpid; 560 int i; 561 562 if (roc_nix_is_sdp(roc_nix)) 563 return; 564 565 if (!lf) 566 return; 567 568 aura_attr = &lf->aura_attr[aura_id]; 569 570 bp_intf = 1 << nix->is_nix1; 571 bp_thresh = NIX_RQ_AURA_BP_THRESH(drop_percent, aura_attr->limit, aura_attr->shift); 572 573 bpid = (aura_attr->bp_ena & 0x1) ? aura_attr->nix0_bpid : aura_attr->nix1_bpid; 574 /* BP is already enabled. */ 575 if (aura_attr->bp_ena && ena) { 576 if (bpid != nix->bpid[tc]) { 577 uint16_t bpid_new = NIX_BPID_INVALID; 578 579 if (force && !nix_rx_chan_multi_bpid_cfg(roc_nix, tc, bpid, &bpid_new)) { 580 plt_info("Setting up shared BPID on shared aura 0x%" PRIx64, 581 pool_id); 582 583 /* Configure Aura with new BPID if it is allocated. */ 584 if (roc_npa_aura_bp_configure(pool_id, bpid_new, bp_intf, bp_thresh, 585 true)) 586 plt_err("Enabling backpressue failed on aura 0x%" PRIx64, 587 pool_id); 588 } else { 589 aura_attr->ref_count++; 590 plt_info("Ignoring port=%u tc=%u config on shared aura 0x%" PRIx64, 591 roc_nix->port_id, tc, pool_id); 592 } 593 } else { 594 aura_attr->ref_count++; 595 } 596 597 return; 598 } 599 600 if (ena) { 601 if (roc_npa_aura_bp_configure(pool_id, nix->bpid[tc], bp_intf, bp_thresh, true)) 602 plt_err("Enabling backpressue failed on aura 0x%" PRIx64, pool_id); 603 else 604 aura_attr->ref_count++; 605 } else { 606 bool found = !!force; 607 608 /* Don't disable if existing BPID is not within this port's list */ 609 for (i = 0; i < nix->chan_cnt; i++) 610 if (bpid == nix->bpid[i]) 611 found = true; 612 if (!found) 613 return; 614 else if ((aura_attr->ref_count > 0) && --(aura_attr->ref_count)) 615 return; 616 617 if (roc_npa_aura_bp_configure(pool_id, 0, 0, 0, false)) 618 plt_err("Disabling backpressue failed on aura 0x%" PRIx64, pool_id); 619 } 620 621 return; 622 } 623 624 int 625 roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg) 626 { 627 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 628 struct dev *dev = &nix->dev; 629 struct mbox *mbox = mbox_get(dev->mbox); 630 uint8_t tx_pause, rx_pause; 631 struct cgx_pfc_cfg *req; 632 struct cgx_pfc_rsp *rsp; 633 int rc = -ENOSPC; 634 635 if (roc_nix_is_lbk(roc_nix)) { 636 rc = NIX_ERR_OP_NOTSUP; 637 goto exit; 638 } 639 640 rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) || 641 (pfc_cfg->mode == ROC_NIX_FC_RX); 642 tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) || 643 (pfc_cfg->mode == ROC_NIX_FC_TX); 644 645 req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox); 646 if (req == NULL) 647 goto exit; 648 649 req->pfc_en = pfc_cfg->tc; 650 req->rx_pause = rx_pause; 651 req->tx_pause = tx_pause; 652 653 rc = mbox_process_msg(mbox, (void *)&rsp); 654 if (rc) 655 goto exit; 656 657 nix->pfc_rx_pause = rsp->rx_pause; 658 nix->pfc_tx_pause = rsp->tx_pause; 659 if (rsp->tx_pause) 660 nix->cev |= BIT(pfc_cfg->tc); 661 else 662 nix->cev &= ~BIT(pfc_cfg->tc); 663 664 exit: 665 mbox_put(mbox); 666 return rc; 667 } 668 669 int 670 roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg) 671 { 672 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 673 674 if (roc_nix_is_lbk(roc_nix)) 675 return NIX_ERR_OP_NOTSUP; 676 677 pfc_cfg->tc = nix->cev; 678 679 if (nix->pfc_rx_pause && nix->pfc_tx_pause) 680 pfc_cfg->mode = ROC_NIX_FC_FULL; 681 else if (nix->pfc_rx_pause) 682 pfc_cfg->mode = ROC_NIX_FC_RX; 683 else if (nix->pfc_tx_pause) 684 pfc_cfg->mode = ROC_NIX_FC_TX; 685 else 686 pfc_cfg->mode = ROC_NIX_FC_NONE; 687 688 return 0; 689 } 690 691 uint16_t 692 roc_nix_chan_count_get(struct roc_nix *roc_nix) 693 { 694 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 695 696 return nix->chan_cnt; 697 } 698 699 /* Allocate BPID for requested type 700 * Returns number of BPIDs allocated 701 * 0 if no BPIDs available 702 * -ve value on error 703 */ 704 int 705 roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type, uint8_t bp_cnt, uint16_t *bpids) 706 { 707 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 708 struct mbox *mbox = mbox_get(nix->dev.mbox); 709 struct nix_alloc_bpid_req *req; 710 struct nix_bpids *rsp; 711 int rc = -EINVAL; 712 713 /* Use this api for unreserved interface types */ 714 if ((type < ROC_NIX_INTF_TYPE_RSVD) || (bp_cnt > ROC_NIX_MAX_BPID_CNT) || !bpids) 715 goto exit; 716 717 rc = -ENOSPC; 718 req = mbox_alloc_msg_nix_alloc_bpids(mbox); 719 if (req == NULL) 720 goto exit; 721 req->type = type; 722 req->bpid_cnt = bp_cnt; 723 724 rc = mbox_process_msg(mbox, (void *)&rsp); 725 if (rc) 726 goto exit; 727 728 for (rc = 0; rc < rsp->bpid_cnt; rc++) 729 bpids[rc] = rsp->bpids[rc]; 730 exit: 731 mbox_put(mbox); 732 return rc; 733 } 734 735 int 736 roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt, uint16_t *bpids) 737 { 738 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 739 struct mbox *mbox = mbox_get(nix->dev.mbox); 740 struct nix_bpids *req; 741 int rc = -EINVAL; 742 743 /* Use this api for unreserved interface types */ 744 if ((bp_cnt > ROC_NIX_MAX_BPID_CNT) || !bpids) 745 goto exit; 746 747 rc = -ENOSPC; 748 req = mbox_alloc_msg_nix_free_bpids(mbox); 749 if (req == NULL) 750 goto exit; 751 for (rc = 0; rc < bp_cnt; rc++) 752 req->bpids[rc] = bpids[rc]; 753 req->bpid_cnt = rc; 754 755 rc = mbox_process(mbox); 756 exit: 757 mbox_put(mbox); 758 return rc; 759 } 760 761 int 762 roc_nix_rx_chan_cfg_get(struct roc_nix *roc_nix, uint16_t chan, bool is_cpt, uint64_t *cfg) 763 { 764 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 765 struct mbox *mbox = mbox_get(nix->dev.mbox); 766 struct nix_rx_chan_cfg *req; 767 struct nix_rx_chan_cfg *rsp; 768 int rc = -EINVAL; 769 770 req = mbox_alloc_msg_nix_rx_chan_cfg(mbox); 771 if (req == NULL) 772 goto exit; 773 if (is_cpt) 774 req->type = ROC_NIX_INTF_TYPE_CPT; 775 req->chan = chan; 776 req->read = 1; 777 778 rc = mbox_process_msg(mbox, (void *)&rsp); 779 if (rc) 780 goto exit; 781 *cfg = rsp->val; 782 exit: 783 mbox_put(mbox); 784 return rc; 785 } 786 787 int 788 roc_nix_rx_chan_cfg_set(struct roc_nix *roc_nix, uint16_t chan, bool is_cpt, uint64_t val) 789 { 790 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 791 struct mbox *mbox = mbox_get(nix->dev.mbox); 792 struct nix_rx_chan_cfg *req; 793 int rc = -EINVAL; 794 795 req = mbox_alloc_msg_nix_rx_chan_cfg(mbox); 796 if (req == NULL) 797 goto exit; 798 if (is_cpt) 799 req->type = ROC_NIX_INTF_TYPE_CPT; 800 req->chan = chan; 801 req->val = val; 802 req->read = 0; 803 804 rc = mbox_process(mbox); 805 exit: 806 mbox_put(mbox); 807 return rc; 808 } 809 810 #define NIX_BPID1_ENA 15 811 #define NIX_BPID2_ENA 14 812 #define NIX_BPID3_ENA 13 813 814 #define NIX_BPID1_OFF 20 815 #define NIX_BPID2_OFF 32 816 #define NIX_BPID3_OFF 44 817 818 int 819 roc_nix_chan_bpid_set(struct roc_nix *roc_nix, uint16_t chan, uint64_t bpid, int ena, bool cpt_chan) 820 { 821 uint64_t cfg; 822 int rc; 823 824 if (!roc_feature_nix_has_rxchan_multi_bpid()) 825 return -ENOTSUP; 826 827 rc = roc_nix_rx_chan_cfg_get(roc_nix, chan, cpt_chan, &cfg); 828 if (rc) 829 return rc; 830 831 if (ena) { 832 if ((((cfg >> NIX_BPID1_OFF) & GENMASK_ULL(8, 0)) == bpid) || 833 (((cfg >> NIX_BPID2_OFF) & GENMASK_ULL(8, 0)) == bpid) || 834 (((cfg >> NIX_BPID3_OFF) & GENMASK_ULL(8, 0)) == bpid)) 835 return 0; 836 837 if (!(cfg & BIT_ULL(NIX_BPID1_ENA))) { 838 cfg &= ~GENMASK_ULL(NIX_BPID1_OFF + 8, NIX_BPID1_OFF); 839 cfg |= (((uint64_t)bpid << NIX_BPID1_OFF) | BIT_ULL(NIX_BPID1_ENA)); 840 } else if (!(cfg & BIT_ULL(NIX_BPID2_ENA))) { 841 cfg &= ~GENMASK_ULL(NIX_BPID2_OFF + 8, NIX_BPID2_OFF); 842 cfg |= (((uint64_t)bpid << NIX_BPID2_OFF) | BIT_ULL(NIX_BPID2_ENA)); 843 } else if (!(cfg & BIT_ULL(NIX_BPID3_ENA))) { 844 cfg &= ~GENMASK_ULL(NIX_BPID3_OFF + 8, NIX_BPID3_OFF); 845 cfg |= (((uint64_t)bpid << NIX_BPID3_OFF) | BIT_ULL(NIX_BPID3_ENA)); 846 } else { 847 plt_nix_dbg("Exceed maximum BPIDs"); 848 return -ENOSPC; 849 } 850 } else { 851 if (((cfg >> NIX_BPID1_OFF) & GENMASK_ULL(8, 0)) == bpid) { 852 cfg &= ~(GENMASK_ULL(NIX_BPID1_OFF + 8, NIX_BPID1_OFF) | 853 BIT_ULL(NIX_BPID1_ENA)); 854 } else if (((cfg >> NIX_BPID2_OFF) & GENMASK_ULL(8, 0)) == bpid) { 855 cfg &= ~(GENMASK_ULL(NIX_BPID2_OFF + 8, NIX_BPID2_OFF) | 856 BIT_ULL(NIX_BPID2_ENA)); 857 } else if (((cfg >> NIX_BPID3_OFF) & GENMASK_ULL(8, 0)) == bpid) { 858 cfg &= ~(GENMASK_ULL(NIX_BPID3_OFF + 8, NIX_BPID3_OFF) | 859 BIT_ULL(NIX_BPID3_ENA)); 860 } else { 861 plt_nix_dbg("BPID not found"); 862 return -EINVAL; 863 } 864 } 865 return roc_nix_rx_chan_cfg_set(roc_nix, chan, cpt_chan, cfg); 866 } 867