/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

static inline uint64_t
nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
{
        uint64_t regval;

        if (roc_model_is_cn9k()) {
                regval = (shaper->burst_exponent << 37);
                regval |= (shaper->burst_mantissa << 29);
                regval |= (shaper->div_exp << 13);
                regval |= (shaper->exponent << 9);
                regval |= (shaper->mantissa << 1);
                return regval;
        }

        regval = (shaper->burst_exponent << 44);
        regval |= (shaper->burst_mantissa << 29);
        regval |= (shaper->div_exp << 13);
        regval |= (shaper->exponent << 9);
        regval |= (shaper->mantissa << 1);
        return regval;
}
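
/*
 * For reference, the shaper register field layout implied by the shifts
 * above, assuming the NIX_TM_MAX_* field widths used later in this file
 * (8-bit rate mantissa, 4-bit exponents and div_exp, 8-bit burst mantissa
 * on cn9k and 15-bit otherwise); bit 0 is the enable bit that callers OR
 * in on top of this value:
 *
 *   [8:1]   rate mantissa
 *   [12:9]  rate exponent
 *   [16:13] rate divider exponent
 *   [36:29] burst mantissa (cn9k) / [43:29] (non-cn9k)
 *   [40:37] burst exponent (cn9k) / [47:44] (non-cn9k)
 */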

uint16_t
nix_tm_lvl2nix_tl1_root(uint32_t lvl)
{
        switch (lvl) {
        case ROC_TM_LVL_ROOT:
                return NIX_TXSCH_LVL_TL1;
        case ROC_TM_LVL_SCH1:
                return NIX_TXSCH_LVL_TL2;
        case ROC_TM_LVL_SCH2:
                return NIX_TXSCH_LVL_TL3;
        case ROC_TM_LVL_SCH3:
                return NIX_TXSCH_LVL_TL4;
        case ROC_TM_LVL_SCH4:
                return NIX_TXSCH_LVL_SMQ;
        default:
                return NIX_TXSCH_LVL_CNT;
        }
}

uint16_t
nix_tm_lvl2nix_tl2_root(uint32_t lvl)
{
        switch (lvl) {
        case ROC_TM_LVL_ROOT:
                return NIX_TXSCH_LVL_TL2;
        case ROC_TM_LVL_SCH1:
                return NIX_TXSCH_LVL_TL3;
        case ROC_TM_LVL_SCH2:
                return NIX_TXSCH_LVL_TL4;
        case ROC_TM_LVL_SCH3:
                return NIX_TXSCH_LVL_SMQ;
        default:
                return NIX_TXSCH_LVL_CNT;
        }
}

uint16_t
nix_tm_lvl2nix(struct nix *nix, uint32_t lvl)
{
        if (nix_tm_have_tl1_access(nix))
                return nix_tm_lvl2nix_tl1_root(lvl);
        else
                return nix_tm_lvl2nix_tl2_root(lvl);
}

uint8_t
nix_tm_lbk_relchan_get(struct nix *nix)
{
        return nix->tx_chan_base & 0xff;
}

static int
nix_tm_find_prio_anchor(struct nix *nix, uint32_t node_id,
                        enum roc_nix_tm_tree tree)
{
        struct nix_tm_node *child_node;
        struct nix_tm_node_list *list;

        list = nix_tm_node_list(nix, tree);

        TAILQ_FOREACH(child_node, list, node) {
                if (!child_node->parent)
                        continue;
                if (!(child_node->parent->id == node_id))
                        continue;
                if (child_node->priority == child_node->parent->rr_prio)
                        continue;
                return child_node->hw_id - child_node->priority;
        }
        return 0;
}

struct nix_tm_shaper_profile *
nix_tm_shaper_profile_search(struct nix *nix, uint32_t id)
{
        struct nix_tm_shaper_profile *profile;

        TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
                if (profile->id == id)
                        return profile;
        }
        return NULL;
}

struct nix_tm_node *
nix_tm_node_search(struct nix *nix, uint32_t node_id, enum roc_nix_tm_tree tree)
{
        struct nix_tm_node_list *list;
        struct nix_tm_node *node;

        list = nix_tm_node_list(nix, tree);
        TAILQ_FOREACH(node, list, node) {
                if (node->id == node_id)
                        return node;
        }
        return NULL;
}

static uint64_t
nix_tm_shaper_rate_conv_floor(uint64_t value, uint64_t *exponent_p,
                              uint64_t *mantissa_p, uint64_t *div_exp_p)
{
        uint64_t div_exp, exponent, mantissa;

        /* Boundary checks */
        if (value < NIX_TM_MIN_SHAPER_RATE || value > NIX_TM_MAX_SHAPER_RATE)
                return 0;

        if (value <= NIX_TM_SHAPER_RATE(0, 0, 0)) {
                /* Calculate rate div_exp and mantissa using
                 * the following formula:
                 *
                 * value = (2E6 * (256 + mantissa)
                 *          / ((1 << div_exp) * 256))
                 */
                div_exp = 0;
                exponent = 0;
                mantissa = NIX_TM_MAX_RATE_MANTISSA;

                while (value <= (NIX_TM_SHAPER_RATE_CONST / (1 << div_exp)))
                        div_exp += 1;

                while (value <= ((NIX_TM_SHAPER_RATE_CONST * (256 + mantissa)) /
                                 ((1 << div_exp) * 256)))
                        mantissa -= 1;
        } else {
                /* Calculate rate exponent and mantissa using
                 * the following formula:
                 *
                 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
                 */
                div_exp = 0;
                exponent = NIX_TM_MAX_RATE_EXPONENT;
                mantissa = NIX_TM_MAX_RATE_MANTISSA;

                while (value <= (NIX_TM_SHAPER_RATE_CONST * (1 << exponent)))
                        exponent -= 1;

                while (value <= ((NIX_TM_SHAPER_RATE_CONST *
                                  ((256 + mantissa) << exponent)) /
                                 256))
                        mantissa -= 1;
        }

        if (div_exp > NIX_TM_MAX_RATE_DIV_EXP ||
            exponent > NIX_TM_MAX_RATE_EXPONENT ||
            mantissa > NIX_TM_MAX_RATE_MANTISSA)
                return 0;

        if (div_exp_p)
                *div_exp_p = div_exp;
        if (exponent_p)
                *exponent_p = exponent;
        if (mantissa_p)
                *mantissa_p = mantissa;

        /* Calculate real rate value */
        return NIX_TM_SHAPER_RATE(exponent, mantissa, div_exp);
}

static uint64_t
nix_tm_shaper_rate_conv_exact(uint64_t value, uint64_t *exponent_p,
                              uint64_t *mantissa_p, uint64_t *div_exp_p)
{
        uint64_t div_exp, exponent, mantissa;

        /* Boundary checks */
        if (value < NIX_TM_MIN_SHAPER_RATE || value > NIX_TM_MAX_SHAPER_RATE)
                return 0;

        if (value <= NIX_TM_SHAPER_RATE(0, 0, 0)) {
                /* Calculate rate div_exp and mantissa using
                 * the following formula:
                 *
                 * value = (2E6 * (256 + mantissa)
                 *          / ((1 << div_exp) * 256))
                 */
                div_exp = 0;
                exponent = 0;
                mantissa = NIX_TM_MAX_RATE_MANTISSA;

                while (value < (NIX_TM_SHAPER_RATE_CONST / (1 << div_exp)))
                        div_exp += 1;

                while (value < ((NIX_TM_SHAPER_RATE_CONST * (256 + mantissa)) /
                                ((1 << div_exp) * 256)))
                        mantissa -= 1;
        } else {
                /* Calculate rate exponent and mantissa using
                 * the following formula:
                 *
                 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
                 */
                div_exp = 0;
                exponent = NIX_TM_MAX_RATE_EXPONENT;
                mantissa = NIX_TM_MAX_RATE_MANTISSA;

                while (value < (NIX_TM_SHAPER_RATE_CONST * (1 << exponent)))
                        exponent -= 1;

                while (value < ((NIX_TM_SHAPER_RATE_CONST *
                                 ((256 + mantissa) << exponent)) /
                                256))
                        mantissa -= 1;
        }

        if (div_exp > NIX_TM_MAX_RATE_DIV_EXP ||
            exponent > NIX_TM_MAX_RATE_EXPONENT ||
            mantissa > NIX_TM_MAX_RATE_MANTISSA)
                return 0;

        if (div_exp_p)
                *div_exp_p = div_exp;
        if (exponent_p)
                *exponent_p = exponent;
        if (mantissa_p)
                *mantissa_p = mantissa;

        /* Calculate real rate value */
        return NIX_TM_SHAPER_RATE(exponent, mantissa, div_exp);
}

/* With zero accuracy we tune the parameters exactly as defined by HW;
 * non-zero accuracy keeps the parameters close to lower values and makes
 * sure the long-term shaper rate does not exceed the requested rate.
 */
uint64_t
nix_tm_shaper_rate_conv(uint64_t value, uint64_t *exponent_p,
                        uint64_t *mantissa_p, uint64_t *div_exp_p,
                        int8_t accuracy)
{
        if (!accuracy)
                return nix_tm_shaper_rate_conv_exact(value, exponent_p,
                                                     mantissa_p, div_exp_p);

        return nix_tm_shaper_rate_conv_floor(value, exponent_p, mantissa_p,
                                             div_exp_p);
}
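
/*
 * Worked example (sketch, assuming NIX_TM_SHAPER_RATE() matches the 2E6
 * formula in the comments above): for value == 2 * NIX_TM_SHAPER_RATE_CONST,
 * the exact variant settles on (exponent 1, mantissa 0, div_exp 0), which
 * reproduces the request precisely, while the floor variant's "<=" loops
 * stop at (exponent 0, mantissa 255, div_exp 0), roughly 511/512 of the
 * request, which is exactly the stay-below bias that non-zero accuracy
 * asks for.
 */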

uint64_t
nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
                         uint64_t *mantissa_p)
{
        uint64_t min_burst, max_burst;
        uint64_t exponent, mantissa;
        uint32_t max_mantissa;

        min_burst = NIX_TM_MIN_SHAPER_BURST;
        max_burst = roc_nix_tm_max_shaper_burst_get();

        if (value < min_burst || value > max_burst)
                return 0;

        max_mantissa = (roc_model_is_cn9k() ? NIX_CN9K_TM_MAX_BURST_MANTISSA :
                                              NIX_TM_MAX_BURST_MANTISSA);
        /* Calculate burst exponent and mantissa using
         * the following formula:
         *
         * value = ((256 + mantissa) << (exponent + 1)) / 256
         */
        exponent = NIX_TM_MAX_BURST_EXPONENT;
        mantissa = max_mantissa;

        while (value < (1ull << (exponent + 1)))
                exponent -= 1;

        while (value < ((256 + mantissa) << (exponent + 1)) / 256)
                mantissa -= 1;

        if (exponent > NIX_TM_MAX_BURST_EXPONENT || mantissa > max_mantissa)
                return 0;

        if (exponent_p)
                *exponent_p = exponent;
        if (mantissa_p)
                *mantissa_p = mantissa;

        return NIX_TM_SHAPER_BURST(exponent, mantissa);
}
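
/*
 * Worked example (sketch): for value = 4096, the first loop settles at
 * exponent = 11, since 1 << 12 == 4096 is no longer greater than value,
 * and the second loop drives mantissa down to 0, where
 * ((256 + 0) << 12) / 256 == 4096 stops the decrement. The pair
 * NIX_TM_SHAPER_BURST(11, 0) therefore encodes a 4096B burst exactly.
 */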
static void
nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,
                       struct nix_tm_shaper_data *cir,
                       struct nix_tm_shaper_data *pir)
{
        memset(cir, 0, sizeof(*cir));
        memset(pir, 0, sizeof(*pir));

        if (!profile)
                return;

        /* Calculate CIR exponent and mantissa */
        if (profile->commit.rate)
                cir->rate = nix_tm_shaper_rate_conv(
                        profile->commit.rate, &cir->exponent, &cir->mantissa,
                        &cir->div_exp, profile->accuracy);

        /* Calculate PIR exponent and mantissa */
        if (profile->peak.rate)
                pir->rate = nix_tm_shaper_rate_conv(
                        profile->peak.rate, &pir->exponent, &pir->mantissa,
                        &pir->div_exp, profile->accuracy);

        /* Calculate CIR burst exponent and mantissa */
        if (profile->commit.size)
                cir->burst = nix_tm_shaper_burst_conv(profile->commit.size,
                                                      &cir->burst_exponent,
                                                      &cir->burst_mantissa);

        /* Calculate PIR burst exponent and mantissa */
        if (profile->peak.size)
                pir->burst = nix_tm_shaper_burst_conv(profile->peak.size,
                                                      &pir->burst_exponent,
                                                      &pir->burst_mantissa);
}

uint32_t
nix_tm_check_rr(struct nix *nix, uint32_t parent_id, enum roc_nix_tm_tree tree,
                uint32_t *rr_prio, uint32_t *max_prio)
{
        uint32_t node_cnt[NIX_TM_TLX_SP_PRIO_MAX];
        struct nix_tm_node_list *list;
        struct nix_tm_node *node;
        uint32_t rr_num = 0, i;
        uint32_t children = 0;
        uint32_t priority;

        memset(node_cnt, 0, sizeof(node_cnt));
        *rr_prio = 0xF;
        *max_prio = UINT32_MAX;

        list = nix_tm_node_list(nix, tree);
        TAILQ_FOREACH(node, list, node) {
                if (!node->parent)
                        continue;

                if (!(node->parent->id == parent_id))
                        continue;

                priority = node->priority;
                node_cnt[priority]++;
                children++;
        }

        for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++) {
                if (!node_cnt[i])
                        break;

                if (node_cnt[i] > rr_num) {
                        *rr_prio = i;
                        rr_num = node_cnt[i];
                }
        }

        /* An RR group with a single child is considered as SP */
        if (rr_num == 1) {
                *rr_prio = 0xF;
                rr_num = 0;
        }

        /* Max prio is returned only when there is a non-zero prio
         * or the parent has a single child.
         */
        if (i > 1 || (children == 1))
                *max_prio = i - 1;
        return rr_num;
}
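
/*
 * Example (sketch): a parent with children at priorities {0, 1, 1, 2}
 * gives node_cnt = {1, 2, 1, 0, ...}, so the scan picks *rr_prio = 1
 * with rr_num = 2 and stops at i = 3 (first empty priority), reporting
 * *max_prio = 2. With children at {0, 1} instead, each priority holds a
 * single node, so rr_num collapses to 0 and every child is treated as SP.
 */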
static uint16_t
nix_tm_max_prio(struct nix *nix, uint16_t hw_lvl)
{
        if (hw_lvl >= NIX_TXSCH_LVL_CNT)
                return 0;

        /* MDQ does not support SP */
        if (hw_lvl == NIX_TXSCH_LVL_MDQ)
                return 0;

        /* PF's TL1 with VFs enabled does not support SP */
        if (hw_lvl == NIX_TXSCH_LVL_TL1 && (!nix_tm_have_tl1_access(nix) ||
                                            (nix->tm_flags & NIX_TM_TL1_NO_SP)))
                return 0;

        return NIX_TM_TLX_SP_PRIO_MAX - 1;
}

int
nix_tm_validate_prio(struct nix *nix, uint32_t lvl, uint32_t parent_id,
                     uint32_t priority, enum roc_nix_tm_tree tree)
{
        uint8_t priorities[NIX_TM_TLX_SP_PRIO_MAX];
        struct nix_tm_node_list *list;
        struct nix_tm_node *node;
        uint32_t rr_num = 0;
        int i;

        list = nix_tm_node_list(nix, tree);
        /* Validate priority against max */
        if (priority > nix_tm_max_prio(nix, nix_tm_lvl2nix(nix, lvl - 1)))
                return NIX_ERR_TM_PRIO_EXCEEDED;

        if (parent_id == ROC_NIX_TM_NODE_ID_INVALID)
                return 0;

        memset(priorities, 0, sizeof(priorities));
        priorities[priority] = 1;

        TAILQ_FOREACH(node, list, node) {
                if (!node->parent)
                        continue;

                if (node->parent->id != parent_id)
                        continue;

                priorities[node->priority]++;
        }

        for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++)
                if (priorities[i] > 1)
                        rr_num++;

        /* At most one RR group per parent */
        if (rr_num > 1)
                return NIX_ERR_TM_MULTIPLE_RR_GROUPS;

        /* Check for previous priority to avoid holes in priorities */
        if (priority && !priorities[priority - 1])
                return NIX_ERR_TM_PRIO_ORDER;

        return 0;
}
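
/*
 * Example (sketch): with existing children at priorities {0, 1}, adding a
 * child at priority 3 fails with NIX_ERR_TM_PRIO_ORDER because priority 2
 * is unoccupied; priority 2, or a second node at priority 1 forming the
 * single allowed RR group, would pass. Duplicates at both priority 0 and
 * priority 1 would fail with NIX_ERR_TM_MULTIPLE_RR_GROUPS.
 */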
bool
nix_tm_child_res_valid(struct nix_tm_node_list *list,
                       struct nix_tm_node *parent)
{
        struct nix_tm_node *child;

        TAILQ_FOREACH(child, list, node) {
                if (child->parent != parent)
                        continue;
                if (!(child->flags & NIX_TM_NODE_HWRES))
                        return false;
        }
        return true;
}

uint8_t
nix_tm_tl1_default_prep(struct nix *nix, uint32_t schq, volatile uint64_t *reg,
                        volatile uint64_t *regval)
{
        uint8_t k = 0;

        /*
         * Default config for TL1.
         * For VF this is always ignored.
         */
        plt_tm_dbg("Default config for main root %s(%u)",
                   nix_tm_hwlvl2str(NIX_TXSCH_LVL_TL1), schq);

        /* Set DWRR quantum */
        reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
        regval[k] = NIX_TM_TL1_DFLT_RR_QTM;
        k++;

        reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
        regval[k] = (nix->tm_aggr_lvl_rr_prio << 1);
        k++;

        reg[k] = NIX_AF_TL1X_CIR(schq);
        regval[k] = 0;
        k++;

        return k;
}

uint8_t
nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
                         volatile uint64_t *reg, volatile uint64_t *regval,
                         volatile uint64_t *regval_mask)
{
        struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
        uint8_t k = 0, hw_lvl, parent_lvl;
        uint64_t parent = 0, child = 0;
        enum roc_nix_tm_tree tree;
        uint32_t rr_prio, schq;
        uint16_t link, relchan;

        tree = node->tree;
        schq = node->hw_id;
        hw_lvl = node->hw_lvl;
        parent_lvl = hw_lvl + 1;
        rr_prio = node->rr_prio;

        /* Root node will not have a parent node */
        if (hw_lvl == nix->tm_root_lvl)
                parent = node->parent_hw_id;
        else
                parent = node->parent->hw_id;

        link = nix->tx_link;
        relchan = roc_nix_is_lbk(roc_nix) ? nix_tm_lbk_relchan_get(nix) : 0;

        if (hw_lvl != NIX_TXSCH_LVL_SMQ)
                child = nix_tm_find_prio_anchor(nix, node->id, tree);

        /* Override default rr_prio when TL1
         * Static Priority is disabled
         */
        if (hw_lvl == NIX_TXSCH_LVL_TL1 && nix->tm_flags & NIX_TM_TL1_NO_SP) {
                rr_prio = nix->tm_aggr_lvl_rr_prio;
                child = 0;
        }

        plt_tm_dbg("Topology config node %s(%u)->%s(%" PRIu64 ") lvl %u, id %u"
                   " prio_anchor %" PRIu64 " rr_prio %u (%p)",
                   nix_tm_hwlvl2str(hw_lvl), schq, nix_tm_hwlvl2str(parent_lvl),
                   parent, node->lvl, node->id, child, rr_prio, node);

        /* Prepare Topology and Link config */
        switch (hw_lvl) {
        case NIX_TXSCH_LVL_SMQ:

                /* Set xoff which will be cleared later */
                reg[k] = NIX_AF_SMQX_CFG(schq);
                regval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |
                             ((nix->mtu & 0xFFFF) << 8));
                /* Maximum Vtag insertion size as a multiple of four bytes */
                if (roc_nix->hw_vlan_ins)
                        regval[k] |= (0x2ULL << 36);
                regval_mask[k] = ~(BIT_ULL(50) | GENMASK_ULL(6, 0) |
                                   GENMASK_ULL(23, 8) | GENMASK_ULL(38, 36));
                k++;

                /* Parent and schedule conf */
                reg[k] = NIX_AF_MDQX_PARENT(schq);
                regval[k] = parent << 16;
                k++;

                break;
        case NIX_TXSCH_LVL_TL4:
                /* Parent and schedule conf */
                reg[k] = NIX_AF_TL4X_PARENT(schq);
                regval[k] = parent << 16;
                k++;

                reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
                regval[k] = (child << 32) | (rr_prio << 1);
                k++;

                /* Configure TL4 to send to SDP channel instead of CGX/LBK */
                if (nix->sdp_link) {
                        relchan = nix->tx_chan_base & 0xff;
                        plt_tm_dbg("relchan=%u schq=%u tx_chan_cnt=%u", relchan,
                                   schq, nix->tx_chan_cnt);
                        reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
                        regval[k] = BIT_ULL(12);
                        regval[k] |= BIT_ULL(13);
                        regval[k] |= (uint64_t)relchan;
                        k++;
                }
                break;
        case NIX_TXSCH_LVL_TL3:
                /* Parent and schedule conf */
                reg[k] = NIX_AF_TL3X_PARENT(schq);
                regval[k] = parent << 16;
                k++;

                reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
                regval[k] = (child << 32) | (rr_prio << 1);
                k++;

                /* Link configuration */
                if (!nix->sdp_link &&
                    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
                        reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
                        regval[k] = BIT_ULL(12) | (uint64_t)relchan;
                        k++;
                }

                break;
        case NIX_TXSCH_LVL_TL2:
                /* Parent and schedule conf */
                reg[k] = NIX_AF_TL2X_PARENT(schq);
                regval[k] = parent << 16;
                k++;

                reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
                regval[k] = (child << 32) | (rr_prio << 1);
                k++;

                /* Link configuration */
                if (!nix->sdp_link &&
                    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
                        reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
                        regval[k] = BIT_ULL(12) | (uint64_t)relchan;
                        k++;
                }

                break;
        case NIX_TXSCH_LVL_TL1:
                reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
                regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
                k++;

                break;
        }

        return k;
}

static inline int
nix_tm_default_rr_weight(struct nix *nix)
{
        struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
        uint32_t max_pktlen = roc_nix_max_pkt_len(roc_nix);
        uint32_t weight;

        /* Reduce max packet length by the room kept for TX VTAG insertions */
        max_pktlen -= 8;
        weight = max_pktlen / roc_nix->dwrr_mtu;
        if (max_pktlen % roc_nix->dwrr_mtu)
                weight += 1;

        return weight;
}
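
/*
 * Worked example (sketch, with assumed numbers): if roc_nix_max_pkt_len()
 * returns 9200 and dwrr_mtu is 1500, then max_pktlen becomes 9192 and
 * 9192 / 1500 = 6 with a non-zero remainder, so the default RR weight
 * rounds up to 7 MTUs worth of credit.
 */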
uint8_t
nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
                      volatile uint64_t *reg, volatile uint64_t *regval)
{
        uint64_t strict_prio = node->priority;
        uint32_t hw_lvl = node->hw_lvl;
        uint32_t schq = node->hw_id;
        uint64_t rr_quantum;
        uint8_t k = 0;

        /* If a minimum weight is not provided, then by default RR_QUANTUM
         * should be in sync with the kernel, i.e., a single MTU value.
         */
        if (!node->weight)
                rr_quantum = nix_tm_default_rr_weight(nix);
        else
                /* For CN9K, weight needs to be converted to quantum */
                rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

        /* For children of the root, strict prio is the default if either
         * the device root is TL2 or TL1 Static Priority is disabled.
         */
        if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
            (!nix_tm_have_tl1_access(nix) || nix->tm_flags & NIX_TM_TL1_NO_SP))
                strict_prio = nix->tm_aggr_lvl_rr_prio;

        plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
                   "prio 0x%" PRIx64 ", rr_quantum/rr_wt 0x%" PRIx64 " (%p)",
                   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
                   strict_prio, rr_quantum, node);

        switch (hw_lvl) {
        case NIX_TXSCH_LVL_SMQ:
                reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
                regval[k] = (strict_prio << 24) | rr_quantum;
                k++;

                break;
        case NIX_TXSCH_LVL_TL4:
                reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
                regval[k] = (strict_prio << 24) | rr_quantum;
                k++;

                break;
        case NIX_TXSCH_LVL_TL3:
                reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
                regval[k] = (strict_prio << 24) | rr_quantum;
                k++;

                break;
        case NIX_TXSCH_LVL_TL2:
                reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
                regval[k] = (strict_prio << 24) | rr_quantum;
                k++;

                break;
        case NIX_TXSCH_LVL_TL1:
                reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
                regval[k] = rr_quantum;
                k++;

                break;
        }

        return k;
}
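
/*
 * Example (sketch): every *_SCHEDULE register below TL1 packs the strict
 * priority at bit 24 above the RR quantum/weight in the low bits, so a
 * node at priority 2 with quantum 0x40 is written as
 * (2ULL << 24) | 0x40 == 0x2000040. TL1 has no parent to arbitrate
 * against, hence only the quantum is programmed there.
 */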
uint8_t
nix_tm_shaper_reg_prep(struct nix_tm_node *node,
                       struct nix_tm_shaper_profile *profile,
                       volatile uint64_t *reg, volatile uint64_t *regval)
{
        struct nix_tm_shaper_data cir, pir;
        uint32_t schq = node->hw_id;
        uint64_t adjust = 0;
        uint8_t k = 0;

        nix_tm_shaper_conf_get(profile, &cir, &pir);

        if (profile && node->pkt_mode)
                adjust = profile->pkt_mode_adj;
        else if (profile)
                adjust = profile->pkt_len_adj;

        adjust &= 0x1FF;
        plt_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
                   "pir %" PRIu64 " (%" PRIu64 "B), "
                   "cir %" PRIu64 " (%" PRIu64 "B), "
                   "adjust 0x%" PRIx64 " (pktmode %u) (%p)",
                   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
                   pir.rate, pir.burst, cir.rate, cir.burst, adjust,
                   node->pkt_mode, node);

        switch (node->hw_lvl) {
        case NIX_TXSCH_LVL_SMQ:
                /* Configure PIR, CIR */
                reg[k] = NIX_AF_MDQX_PIR(schq);
                regval[k] = (pir.rate && pir.burst) ?
                            (nix_tm_shaper2regval(&pir) | 1) : 0;
                k++;

                reg[k] = NIX_AF_MDQX_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                            (nix_tm_shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure RED algo */
                reg[k] = NIX_AF_MDQX_SHAPE(schq);
                regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
                             (uint64_t)node->pkt_mode << 24);
                k++;
                break;
        case NIX_TXSCH_LVL_TL4:
                /* Configure PIR, CIR */
                reg[k] = NIX_AF_TL4X_PIR(schq);
                regval[k] = (pir.rate && pir.burst) ?
                            (nix_tm_shaper2regval(&pir) | 1) : 0;
                k++;

                reg[k] = NIX_AF_TL4X_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                            (nix_tm_shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure RED algo */
                reg[k] = NIX_AF_TL4X_SHAPE(schq);
                regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
                             (uint64_t)node->pkt_mode << 24);
                k++;
                break;
        case NIX_TXSCH_LVL_TL3:
                /* Configure PIR, CIR */
                reg[k] = NIX_AF_TL3X_PIR(schq);
                regval[k] = (pir.rate && pir.burst) ?
                            (nix_tm_shaper2regval(&pir) | 1) : 0;
                k++;

                reg[k] = NIX_AF_TL3X_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                            (nix_tm_shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure RED algo */
                reg[k] = NIX_AF_TL3X_SHAPE(schq);
                regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
                             (uint64_t)node->pkt_mode << 24);
                k++;

                break;
        case NIX_TXSCH_LVL_TL2:
                /* Configure PIR, CIR */
                reg[k] = NIX_AF_TL2X_PIR(schq);
                regval[k] = (pir.rate && pir.burst) ?
                            (nix_tm_shaper2regval(&pir) | 1) : 0;
                k++;

                reg[k] = NIX_AF_TL2X_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                            (nix_tm_shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure RED algo */
                reg[k] = NIX_AF_TL2X_SHAPE(schq);
                regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
                             (uint64_t)node->pkt_mode << 24);
                k++;

                break;
        case NIX_TXSCH_LVL_TL1:
                /* Configure CIR */
                reg[k] = NIX_AF_TL1X_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                            (nix_tm_shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure length disable and adjust */
                reg[k] = NIX_AF_TL1X_SHAPE(schq);
                regval[k] = (adjust | (uint64_t)node->pkt_mode << 24);
                k++;
                break;
        }

        return k;
}

uint8_t
nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
                    volatile uint64_t *reg, volatile uint64_t *regval)
{
        uint32_t hw_lvl = node->hw_lvl;
        uint32_t schq = node->hw_id;
        uint8_t k = 0;

        plt_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
                   nix_tm_hwlvl2str(hw_lvl), schq, node->lvl, node->id, enable,
                   node);

        regval[k] = enable;

        switch (hw_lvl) {
        case NIX_TXSCH_LVL_MDQ:
                reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
                k++;
                break;
        case NIX_TXSCH_LVL_TL4:
                reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
                k++;
                break;
        case NIX_TXSCH_LVL_TL3:
                reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
                k++;
                break;
        case NIX_TXSCH_LVL_TL2:
                reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
                k++;
                break;
        case NIX_TXSCH_LVL_TL1:
                reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
                k++;
                break;
        default:
                break;
        }

        return k;
}
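
/*
 * Illustrative sketch, not used by the driver: how the reg/regval pairs
 * produced by a prep helper such as nix_tm_sw_xoff_prep() could be pushed
 * to the AF, mirroring the mailbox idiom used by roc_nix_tm_node_stats_get()
 * below. The function name is hypothetical and error handling is minimal.
 */
static inline int
nix_tm_example_sw_xoff_flush(struct nix *nix, struct nix_tm_node *node,
                             bool enable)
{
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txschq_config *req, *rsp;
        int rc;

        req = mbox_alloc_msg_nix_txschq_cfg(mbox_get(mbox));
        req->lvl = node->hw_lvl;
        /* Fill req->reg[]/req->regval[] from the prep helper */
        req->num_regs = nix_tm_sw_xoff_prep(node, enable, req->reg,
                                            req->regval);

        rc = mbox_process_msg(mbox, (void **)&rsp);
        mbox_put(mbox);
        return rc;
}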

/* Search for min rate in topology */
uint64_t
nix_tm_shaper_profile_rate_min(struct nix *nix)
{
        struct nix_tm_shaper_profile *profile;
        uint64_t rate_min = 1E9; /* 1 Gbps */

        TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
                if (profile->peak.rate && profile->peak.rate < rate_min)
                        rate_min = profile->peak.rate;

                if (profile->commit.rate && profile->commit.rate < rate_min)
                        rate_min = profile->commit.rate;
        }
        return rate_min;
}

uint16_t
nix_tm_resource_avail(struct nix *nix, uint8_t hw_lvl, bool contig)
{
        uint32_t pos = 0, start_pos = 0;
        struct plt_bitmap *bmp;
        uint16_t count = 0;
        uint64_t slab = 0;

        bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
        plt_bitmap_scan_init(bmp);

        if (!plt_bitmap_scan(bmp, &pos, &slab))
                return count;

        /* Count the set bits, wrapping until the scan returns to start */
        start_pos = pos;
        do {
                count += plt_popcount64(slab);
                if (!plt_bitmap_scan(bmp, &pos, &slab))
                        break;
        } while (pos != start_pos);

        return count;
}

uint16_t
nix_tm_resource_estimate(struct nix *nix, uint16_t *schq_contig, uint16_t *schq,
                         enum roc_nix_tm_tree tree)
{
        struct nix_tm_node_list *list;
        uint8_t contig_cnt, hw_lvl;
        struct nix_tm_node *parent;
        uint16_t cnt = 0, avail;

        list = nix_tm_node_list(nix, tree);
        /* Walk through parents from TL1..TL4 */
        for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
                TAILQ_FOREACH(parent, list, node) {
                        if (hw_lvl != parent->hw_lvl)
                                continue;

                        /* Skip accounting for children whose parent
                         * has not requested reallocation.
                         */
                        if (!parent->child_realloc)
                                continue;

                        /* Count children needed */
                        schq[hw_lvl - 1] += parent->rr_num;
                        if (parent->max_prio != UINT32_MAX) {
                                contig_cnt = parent->max_prio + 1;
                                schq_contig[hw_lvl - 1] += contig_cnt;
                                /* When we have SP + DWRR at a parent,
                                 * we will always have a spare schq at rr prio
                                 * location in contiguous queues. Hence reduce
                                 * discontiguous count by 1.
                                 */
                                if (parent->max_prio > 0 && parent->rr_num)
                                        schq[hw_lvl - 1] -= 1;
                        }
                }
        }

        schq[nix->tm_root_lvl] = 1;
        if (!nix_tm_have_tl1_access(nix))
                schq[NIX_TXSCH_LVL_TL1] = 1;

        /* Now check for existing resources */
        for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
                avail = nix_tm_resource_avail(nix, hw_lvl, false);
                if (schq[hw_lvl] <= avail)
                        schq[hw_lvl] = 0;
                else
                        schq[hw_lvl] -= avail;

                /* For contiguous queues, realloc everything */
                avail = nix_tm_resource_avail(nix, hw_lvl, true);
                if (schq_contig[hw_lvl] <= avail)
                        schq_contig[hw_lvl] = 0;

                cnt += schq[hw_lvl];
                cnt += schq_contig[hw_lvl];

                plt_tm_dbg("Estimate resources needed for %s: dis %u cont %u",
                           nix_tm_hwlvl2str(hw_lvl), schq[hw_lvl],
                           schq_contig[hw_lvl]);
        }

        return cnt;
}

uint16_t
roc_nix_tm_leaf_cnt(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_node_list *list;
        struct nix_tm_node *node;
        uint16_t leaf_cnt = 0;

        /* Count leaves only in the user list */
        list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
        TAILQ_FOREACH(node, list, node) {
                if (node->id < nix->nb_tx_queues)
                        leaf_cnt++;
        }

        return leaf_cnt;
}

int
roc_nix_tm_node_lvl(struct roc_nix *roc_nix, uint32_t node_id)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_node *node;

        node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
        if (!node)
                return NIX_ERR_TM_INVALID_NODE;

        return node->lvl;
}

struct roc_nix_tm_node *
roc_nix_tm_node_get(struct roc_nix *roc_nix, uint32_t node_id)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_node *node;

        node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
        return (struct roc_nix_tm_node *)node;
}

struct roc_nix_tm_node *
roc_nix_tm_node_next(struct roc_nix *roc_nix, struct roc_nix_tm_node *__prev)
{
        struct nix_tm_node *prev = (struct nix_tm_node *)__prev;
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_node_list *list;

        list = nix_tm_node_list(nix, ROC_NIX_TM_USER);

        /* HEAD of the list */
        if (!prev)
                return (struct roc_nix_tm_node *)TAILQ_FIRST(list);

        /* Next entry */
        if (prev->tree != ROC_NIX_TM_USER)
                return NULL;

        return (struct roc_nix_tm_node *)TAILQ_NEXT(prev, node);
}
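
/*
 * Usage sketch (hypothetical caller code): roc_nix_tm_node_next() is a
 * plain cursor over the user tree, so a full walk is:
 *
 *	struct roc_nix_tm_node *n = NULL;
 *
 *	while ((n = roc_nix_tm_node_next(roc_nix, n)) != NULL)
 *		handle_node(n);
 *
 * where handle_node() stands in for whatever the caller does per node.
 */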

struct roc_nix_tm_shaper_profile *
roc_nix_tm_shaper_profile_get(struct roc_nix *roc_nix, uint32_t profile_id)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_shaper_profile *profile;

        profile = nix_tm_shaper_profile_search(nix, profile_id);
        return (struct roc_nix_tm_shaper_profile *)profile;
}

struct roc_nix_tm_shaper_profile *
roc_nix_tm_shaper_profile_next(struct roc_nix *roc_nix,
                               struct roc_nix_tm_shaper_profile *__prev)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_shaper_profile_list *list;
        struct nix_tm_shaper_profile *prev;

        prev = (struct nix_tm_shaper_profile *)__prev;
        list = &nix->shaper_profile_list;

        /* HEAD of the list */
        if (!prev)
                return (struct roc_nix_tm_shaper_profile *)TAILQ_FIRST(list);

        return (struct roc_nix_tm_shaper_profile *)TAILQ_NEXT(prev, shaper);
}

struct nix_tm_node *
nix_tm_node_alloc(void)
{
        struct nix_tm_node *node;

        node = plt_zmalloc(sizeof(struct nix_tm_node), 0);
        if (!node)
                return NULL;

        node->free_fn = plt_free;
        return node;
}

void
nix_tm_node_free(struct nix_tm_node *node)
{
        if (!node || node->free_fn == NULL)
                return;

        (node->free_fn)(node);
}

struct nix_tm_shaper_profile *
nix_tm_shaper_profile_alloc(void)
{
        struct nix_tm_shaper_profile *profile;

        profile = plt_zmalloc(sizeof(struct nix_tm_shaper_profile), 0);
        if (!profile)
                return NULL;

        profile->free_fn = plt_free;
        return profile;
}

void
nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile)
{
        if (!profile || !profile->free_fn)
                return;

        (profile->free_fn)(profile);
}
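
/*
 * Note (sketch): the free_fn indirection lets the owner of a node's or
 * profile's memory register a matching release hook instead of the default
 * plt_free, e.g. hypothetically:
 *
 *	profile->free_fn = my_profile_release;
 *
 * nix_tm_shaper_profile_free() then stays correct for either owner.
 */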

int
roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
                          struct roc_nix_tm_node_stats *n_stats)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txschq_config *req, *rsp;
        struct nix_tm_node *node;
        uint32_t schq;
        int rc, i;

        node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
        if (!node)
                return NIX_ERR_TM_INVALID_NODE;

        if (node->hw_lvl != NIX_TXSCH_LVL_TL1)
                return NIX_ERR_OP_NOTSUP;

        /* Check if node has HW resource */
        if (!(node->flags & NIX_TM_NODE_HWRES))
                return 0;

        schq = node->hw_id;
        /* Skip fetch if not requested */
        if (!n_stats)
                goto clear_stats;

        memset(n_stats, 0, sizeof(struct roc_nix_tm_node_stats));

        req = mbox_alloc_msg_nix_txschq_cfg(mbox_get(mbox));
        req->read = 1;
        req->lvl = NIX_TXSCH_LVL_TL1;

        i = 0;
        req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
        req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
        req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
        req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
        req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
        req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
        req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
        req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
        req->num_regs = i;

        rc = mbox_process_msg(mbox, (void **)&rsp);
        if (rc) {
                mbox_put(mbox);
                return rc;
        }

        /* Return stats */
        n_stats->stats[ROC_NIX_TM_NODE_PKTS_DROPPED] = rsp->regval[0];
        n_stats->stats[ROC_NIX_TM_NODE_BYTES_DROPPED] = rsp->regval[1];
        n_stats->stats[ROC_NIX_TM_NODE_GREEN_PKTS] = rsp->regval[2];
        n_stats->stats[ROC_NIX_TM_NODE_GREEN_BYTES] = rsp->regval[3];
        n_stats->stats[ROC_NIX_TM_NODE_YELLOW_PKTS] = rsp->regval[4];
        n_stats->stats[ROC_NIX_TM_NODE_YELLOW_BYTES] = rsp->regval[5];
        n_stats->stats[ROC_NIX_TM_NODE_RED_PKTS] = rsp->regval[6];
        n_stats->stats[ROC_NIX_TM_NODE_RED_BYTES] = rsp->regval[7];
        mbox_put(mbox);

clear_stats:
        if (!clear)
                return 0;

        /* Clear all the stats */
        req = mbox_alloc_msg_nix_txschq_cfg(mbox_get(mbox));
        req->lvl = NIX_TXSCH_LVL_TL1;
        i = 0;
        req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
        req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
        req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
        req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
        req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
        req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
        req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
        req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
        req->num_regs = i;

        rc = mbox_process_msg(mbox, (void **)&rsp);
        mbox_put(mbox);
        return rc;
}
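
/*
 * Usage sketch (hypothetical caller code): fetch and clear the TL1
 * color/drop counters of the root node in a single call:
 *
 *	struct roc_nix_tm_node_stats stats;
 *	int rc;
 *
 *	rc = roc_nix_tm_node_stats_get(roc_nix, root_id, true, &stats);
 *	if (!rc)
 *		dropped = stats.stats[ROC_NIX_TM_NODE_PKTS_DROPPED];
 *
 * Passing n_stats == NULL with clear == true only resets the counters.
 */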

bool
roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);

        if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) &&
            (nix->tm_tree == ROC_NIX_TM_USER))
                return true;
        return false;
}

int
roc_nix_tm_tree_type_get(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);

        return nix->tm_tree;
}

int
roc_nix_tm_max_prio(struct roc_nix *roc_nix, int lvl)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        int hw_lvl = nix_tm_lvl2nix(nix, lvl);

        return nix_tm_max_prio(nix, hw_lvl);
}

int
roc_nix_tm_lvl_is_leaf(struct roc_nix *roc_nix, int lvl)
{
        return nix_tm_is_leaf(roc_nix_to_nix_priv(roc_nix), lvl);
}

void
roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
                                   struct roc_nix_tm_shaper_profile *roc_prof)
{
        struct nix_tm_node *tm_node = (struct nix_tm_node *)node;
        struct nix_tm_shaper_profile *profile;
        struct nix_tm_shaper_data cir, pir;

        if (!roc_prof)
                return;

        profile = (struct nix_tm_shaper_profile *)roc_prof->reserved;
        tm_node->red_algo = roc_prof->red_algo;

        /* C0 doesn't support STALL when both PIR & CIR are enabled */
        if (roc_model_is_cn96_cx() || roc_model_is_cnf95xxn_a0() ||
            roc_model_is_cnf95xxo_a0() || roc_model_is_cnf95xxn_a1() ||
            roc_model_is_cnf95xxn_b0()) {
                nix_tm_shaper_conf_get(profile, &cir, &pir);

                if (pir.rate && cir.rate)
                        tm_node->red_algo = NIX_REDALG_DISCARD;
        }
}

int
roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix)
{
        if (nix_tm_have_tl1_access(roc_nix_to_nix_priv(roc_nix)))
                return NIX_TXSCH_LVL_CNT;

        return (NIX_TXSCH_LVL_CNT - 1);
}

int
roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);

        if (nix_tm_lvl2nix(nix, lvl) == NIX_TXSCH_LVL_TL1)
                return 1;

        return 0;
}