/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

static inline uint64_t
nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
{
	uint64_t regval;

	if (roc_model_is_cn9k()) {
		regval = (shaper->burst_exponent << 37);
		regval |= (shaper->burst_mantissa << 29);
		regval |= (shaper->div_exp << 13);
		regval |= (shaper->exponent << 9);
		regval |= (shaper->mantissa << 1);
		return regval;
	}

	regval = (shaper->burst_exponent << 44);
	regval |= (shaper->burst_mantissa << 29);
	regval |= (shaper->div_exp << 13);
	regval |= (shaper->exponent << 9);
	regval |= (shaper->mantissa << 1);
	return regval;
}
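/*
 * Register layout implied by the shifts above: the rate fields are common
 * to both models (mantissa at bit 1, exponent at bit 9, div_exp at bit 13);
 * only the burst encoding differs. CN9K packs an 8-bit burst mantissa at
 * bit 29 with the burst exponent at bit 37, while non-CN9K models widen
 * the burst mantissa to 15 bits (bits 43:29) and move the burst exponent
 * up to bit 44. Bit 0 is the shaper enable bit, ORed in by the
 * *_reg_prep() callers below.
 */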
uint16_t
nix_tm_lvl2nix_tl1_root(uint32_t lvl)
{
	switch (lvl) {
	case ROC_TM_LVL_ROOT:
		return NIX_TXSCH_LVL_TL1;
	case ROC_TM_LVL_SCH1:
		return NIX_TXSCH_LVL_TL2;
	case ROC_TM_LVL_SCH2:
		return NIX_TXSCH_LVL_TL3;
	case ROC_TM_LVL_SCH3:
		return NIX_TXSCH_LVL_TL4;
	case ROC_TM_LVL_SCH4:
		return NIX_TXSCH_LVL_SMQ;
	default:
		return NIX_TXSCH_LVL_CNT;
	}
}

uint16_t
nix_tm_lvl2nix_tl2_root(uint32_t lvl)
{
	switch (lvl) {
	case ROC_TM_LVL_ROOT:
		return NIX_TXSCH_LVL_TL2;
	case ROC_TM_LVL_SCH1:
		return NIX_TXSCH_LVL_TL3;
	case ROC_TM_LVL_SCH2:
		return NIX_TXSCH_LVL_TL4;
	case ROC_TM_LVL_SCH3:
		return NIX_TXSCH_LVL_SMQ;
	default:
		return NIX_TXSCH_LVL_CNT;
	}
}

uint16_t
nix_tm_lvl2nix(struct nix *nix, uint32_t lvl)
{
	if (nix_tm_have_tl1_access(nix))
		return nix_tm_lvl2nix_tl1_root(lvl);
	else
		return nix_tm_lvl2nix_tl2_root(lvl);
}

static uint8_t
nix_tm_relchan_get(struct nix *nix)
{
	return nix->tx_chan_base & 0xff;
}

static int
nix_tm_find_prio_anchor(struct nix *nix, uint32_t node_id,
			enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *child_node;
	struct nix_tm_node_list *list;

	list = nix_tm_node_list(nix, tree);

	TAILQ_FOREACH(child_node, list, node) {
		if (!child_node->parent)
			continue;
		if (!(child_node->parent->id == node_id))
			continue;
		if (child_node->priority == child_node->parent->rr_prio)
			continue;
		return child_node->hw_id - child_node->priority;
	}
	return 0;
}

struct nix_tm_shaper_profile *
nix_tm_shaper_profile_search(struct nix *nix, uint32_t id)
{
	struct nix_tm_shaper_profile *profile;

	TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
		if (profile->id == id)
			return profile;
	}
	return NULL;
}

struct nix_tm_node *
nix_tm_node_search(struct nix *nix, uint32_t node_id, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;

	list = nix_tm_node_list(nix, tree);
	TAILQ_FOREACH(node, list, node) {
		if (node->id == node_id)
			return node;
	}
	return NULL;
}

static uint64_t
nix_tm_shaper_rate_conv_floor(uint64_t value, uint64_t *exponent_p,
			      uint64_t *mantissa_p, uint64_t *div_exp_p)
{
	uint64_t div_exp, exponent, mantissa;

	/* Boundary checks */
	if (value < NIX_TM_MIN_SHAPER_RATE || value > NIX_TM_MAX_SHAPER_RATE)
		return 0;

	if (value <= NIX_TM_SHAPER_RATE(0, 0, 0)) {
		/* Calculate rate div_exp and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * (256 + mantissa)
		 *		/ ((1 << div_exp) * 256))
		 */
		div_exp = 0;
		exponent = 0;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value <= (NIX_TM_SHAPER_RATE_CONST / (1 << div_exp)))
			div_exp += 1;

		while (value <= ((NIX_TM_SHAPER_RATE_CONST * (256 + mantissa)) /
				 ((1 << div_exp) * 256)))
			mantissa -= 1;
	} else {
		/* Calculate rate exponent and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
		 *
		 */
		div_exp = 0;
		exponent = NIX_TM_MAX_RATE_EXPONENT;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value <= (NIX_TM_SHAPER_RATE_CONST * (1 << exponent)))
			exponent -= 1;

		while (value <= ((NIX_TM_SHAPER_RATE_CONST *
				  ((256 + mantissa) << exponent)) /
				 256))
			mantissa -= 1;
	}

	if (div_exp > NIX_TM_MAX_RATE_DIV_EXP ||
	    exponent > NIX_TM_MAX_RATE_EXPONENT ||
	    mantissa > NIX_TM_MAX_RATE_MANTISSA)
		return 0;

	if (div_exp_p)
		*div_exp_p = div_exp;
	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	/* Calculate real rate value */
	return NIX_TM_SHAPER_RATE(exponent, mantissa, div_exp);
}

static uint64_t
nix_tm_shaper_rate_conv_exact(uint64_t value, uint64_t *exponent_p,
			      uint64_t *mantissa_p, uint64_t *div_exp_p)
{
	uint64_t div_exp, exponent, mantissa;

	/* Boundary checks */
	if (value < NIX_TM_MIN_SHAPER_RATE || value > NIX_TM_MAX_SHAPER_RATE)
		return 0;

	if (value <= NIX_TM_SHAPER_RATE(0, 0, 0)) {
		/* Calculate rate div_exp and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * (256 + mantissa)
		 *		/ ((1 << div_exp) * 256))
		 */
		div_exp = 0;
		exponent = 0;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value < (NIX_TM_SHAPER_RATE_CONST / (1 << div_exp)))
			div_exp += 1;

		while (value < ((NIX_TM_SHAPER_RATE_CONST * (256 + mantissa)) /
				((1 << div_exp) * 256)))
			mantissa -= 1;
	} else {
		/* Calculate rate exponent and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
		 *
		 */
		div_exp = 0;
		exponent = NIX_TM_MAX_RATE_EXPONENT;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value < (NIX_TM_SHAPER_RATE_CONST * (1 << exponent)))
			exponent -= 1;

		while (value < ((NIX_TM_SHAPER_RATE_CONST *
				 ((256 + mantissa) << exponent)) /
				256))
			mantissa -= 1;
	}

	if (div_exp > NIX_TM_MAX_RATE_DIV_EXP ||
	    exponent > NIX_TM_MAX_RATE_EXPONENT ||
	    mantissa > NIX_TM_MAX_RATE_MANTISSA)
		return 0;

	if (div_exp_p)
		*div_exp_p = div_exp;
	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	/* Calculate real rate value */
	return NIX_TM_SHAPER_RATE(exponent, mantissa, div_exp);
}
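/*
 * Worked example, assuming NIX_TM_SHAPER_RATE_CONST is the 2E6 from the
 * formula comments above: for a requested rate of 1E6, the exact variant
 * stops at div_exp = 1, exponent = 0, mantissa = 0, since
 * 2E6 * (256 + 0) / ((1 << 1) * 256) == 1E6, an exact match. The floor
 * variant keeps stepping while the computed rate still equals the request
 * and lands on div_exp = 2, mantissa = 255, i.e.
 * 2E6 * (256 + 255) / ((1 << 2) * 256) ~= 998046, just below the request.
 */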
/* With zero accuracy, tune the parameters exactly as defined by HW;
 * with non-zero accuracy, keep the parameters close to the lower values
 * so that the long-term shaper rate does not exceed the requested rate.
 */
uint64_t
nix_tm_shaper_rate_conv(uint64_t value, uint64_t *exponent_p,
			uint64_t *mantissa_p, uint64_t *div_exp_p,
			int8_t accuracy)
{
	if (!accuracy)
		return nix_tm_shaper_rate_conv_exact(value, exponent_p,
						     mantissa_p, div_exp_p);

	return nix_tm_shaper_rate_conv_floor(value, exponent_p, mantissa_p,
					     div_exp_p);
}

uint64_t
nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
			 uint64_t *mantissa_p)
{
	uint64_t min_burst, max_burst;
	uint64_t exponent, mantissa;
	uint32_t max_mantissa;

	min_burst = NIX_TM_MIN_SHAPER_BURST;
	max_burst = roc_nix_tm_max_shaper_burst_get();

	if (value < min_burst || value > max_burst)
		return 0;

	max_mantissa = (roc_model_is_cn9k() ? NIX_CN9K_TM_MAX_BURST_MANTISSA :
					      NIX_TM_MAX_BURST_MANTISSA);
	/* Calculate burst exponent and mantissa using
	 * the following formula:
	 *
	 * value = (((256 + mantissa) << (exponent + 1)) / 256)
	 *
	 */
	exponent = NIX_TM_MAX_BURST_EXPONENT;
	mantissa = max_mantissa;

	while (value < (1ull << (exponent + 1)))
		exponent -= 1;

	while (value < ((256 + mantissa) << (exponent + 1)) / 256)
		mantissa -= 1;

	if (exponent > NIX_TM_MAX_BURST_EXPONENT || mantissa > max_mantissa)
		return 0;

	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	return NIX_TM_SHAPER_BURST(exponent, mantissa);
}
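/*
 * Example, assuming NIX_TM_MAX_BURST_EXPONENT is 0xf: a requested burst
 * of 4096B resolves to exponent = 11, mantissa = 0, since
 * ((256 + 0) << (11 + 1)) / 256 == 4096 exactly. The strict '<' in the
 * loops above means the returned burst never exceeds the request.
 */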
static void
nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,
		       struct nix_tm_shaper_data *cir,
		       struct nix_tm_shaper_data *pir)
{
	memset(cir, 0, sizeof(*cir));
	memset(pir, 0, sizeof(*pir));

	if (!profile)
		return;

	/* Calculate CIR exponent and mantissa */
	if (profile->commit.rate)
		cir->rate = nix_tm_shaper_rate_conv(
			profile->commit.rate, &cir->exponent, &cir->mantissa,
			&cir->div_exp, profile->accuracy);

	/* Calculate PIR exponent and mantissa */
	if (profile->peak.rate)
		pir->rate = nix_tm_shaper_rate_conv(
			profile->peak.rate, &pir->exponent, &pir->mantissa,
			&pir->div_exp, profile->accuracy);

	/* Calculate CIR burst exponent and mantissa */
	if (profile->commit.size)
		cir->burst = nix_tm_shaper_burst_conv(profile->commit.size,
						      &cir->burst_exponent,
						      &cir->burst_mantissa);

	/* Calculate PIR burst exponent and mantissa */
	if (profile->peak.size)
		pir->burst = nix_tm_shaper_burst_conv(profile->peak.size,
						      &pir->burst_exponent,
						      &pir->burst_mantissa);
}

uint32_t
nix_tm_check_rr(struct nix *nix, uint32_t parent_id, enum roc_nix_tm_tree tree,
		uint32_t *rr_prio, uint32_t *max_prio)
{
	uint32_t node_cnt[NIX_TM_TLX_SP_PRIO_MAX];
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint32_t rr_num = 0, i;
	uint32_t children = 0;
	uint32_t priority;

	memset(node_cnt, 0, sizeof(node_cnt));
	*rr_prio = 0xF;
	*max_prio = UINT32_MAX;

	list = nix_tm_node_list(nix, tree);
	TAILQ_FOREACH(node, list, node) {
		if (!node->parent)
			continue;

		if (!(node->parent->id == parent_id))
			continue;

		priority = node->priority;
		node_cnt[priority]++;
		children++;
	}

	for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++) {
		if (!node_cnt[i])
			break;

		if (node_cnt[i] > rr_num) {
			*rr_prio = i;
			rr_num = node_cnt[i];
		}
	}

	/* An RR group with a single child is considered as SP */
	if (rr_num == 1) {
		*rr_prio = 0xF;
		rr_num = 0;
	}

	/* Max prio is returned only when there is a non-zero priority
	 * or the parent has a single child.
	 */
	if (i > 1 || (children == 1))
		*max_prio = i - 1;
	return rr_num;
}
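/*
 * Illustration of the RR detection above: children at priorities
 * {0, 1, 1} give node_cnt = {1, 2, 0, ...}, so rr_prio = 1, rr_num = 2
 * (the two peers sharing priority 1 form the DWRR group) and
 * max_prio = 1. Children at {0, 1, 2} leave every count at 1, so rr_num
 * collapses to 0 and all three are treated as strict-priority children.
 */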
static uint16_t
nix_tm_max_prio(struct nix *nix, uint16_t hw_lvl)
{
	if (hw_lvl >= NIX_TXSCH_LVL_CNT)
		return 0;

	/* MDQ does not support SP */
	if (hw_lvl == NIX_TXSCH_LVL_MDQ)
		return 0;

	/* PF's TL1 with VFs enabled does not support SP */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 && (!nix_tm_have_tl1_access(nix) ||
					    (nix->tm_flags & NIX_TM_TL1_NO_SP)))
		return 0;

	return NIX_TM_TLX_SP_PRIO_MAX - 1;
}

int
nix_tm_validate_prio(struct nix *nix, uint32_t lvl, uint32_t parent_id,
		     uint32_t priority, enum roc_nix_tm_tree tree)
{
	uint8_t priorities[NIX_TM_TLX_SP_PRIO_MAX];
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint32_t rr_num = 0;
	int i;

	list = nix_tm_node_list(nix, tree);
	/* Validate priority against max */
	if (priority > nix_tm_max_prio(nix, nix_tm_lvl2nix(nix, lvl - 1)))
		return NIX_ERR_TM_PRIO_EXCEEDED;

	if (parent_id == ROC_NIX_TM_NODE_ID_INVALID)
		return 0;

	memset(priorities, 0, sizeof(priorities));
	priorities[priority] = 1;

	TAILQ_FOREACH(node, list, node) {
		if (!node->parent)
			continue;

		if (node->parent->id != parent_id)
			continue;

		priorities[node->priority]++;
	}

	for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++)
		if (priorities[i] > 1)
			rr_num++;

	/* At most one RR group per parent */
	if (rr_num > 1)
		return NIX_ERR_TM_MULTIPLE_RR_GROUPS;

	/* Check for previous priority to avoid holes in priorities */
	if (priority && !priorities[priority - 1])
		return NIX_ERR_TM_PRIO_ORDER;

	return 0;
}

bool
nix_tm_child_res_valid(struct nix_tm_node_list *list,
		       struct nix_tm_node *parent)
{
	struct nix_tm_node *child;

	TAILQ_FOREACH(child, list, node) {
		if (child->parent != parent)
			continue;
		if (!(child->flags & NIX_TM_NODE_HWRES))
			return false;
	}
	return true;
}
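/*
 * The *_prep() helpers below only fill the parallel reg[]/regval[]
 * (and, for topology, regval_mask[]) arrays and return the entry count;
 * the caller is expected to flush them to the AF in a NIX_TXSCHQ_CFG
 * mailbox request, the same reg/regval layout that
 * roc_nix_tm_node_stats_get() uses for reads.
 */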
uint8_t
nix_tm_tl1_default_prep(struct nix *nix, uint32_t schq, volatile uint64_t *reg,
			volatile uint64_t *regval)
{
	uint8_t k = 0;

	/*
	 * Default config for TL1.
	 * For VF this is always ignored.
	 */
	plt_tm_dbg("Default config for main root %s(%u)",
		   nix_tm_hwlvl2str(NIX_TXSCH_LVL_TL1), schq);

	/* Set DWRR quantum */
	reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
	regval[k] = NIX_TM_TL1_DFLT_RR_QTM;
	k++;

	reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
	regval[k] = (nix->tm_aggr_lvl_rr_prio << 1);
	k++;

	reg[k] = NIX_AF_TL1X_CIR(schq);
	regval[k] = 0;
	k++;

	return k;
}

uint8_t
nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
			 volatile uint64_t *reg, volatile uint64_t *regval,
			 volatile uint64_t *regval_mask)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	uint8_t k = 0, hw_lvl, parent_lvl;
	uint64_t parent = 0, child = 0;
	enum roc_nix_tm_tree tree;
	uint32_t rr_prio, schq;
	uint16_t link, relchan;

	tree = node->tree;
	schq = node->hw_id;
	hw_lvl = node->hw_lvl;
	parent_lvl = hw_lvl + 1;
	rr_prio = node->rr_prio;

	/* Root node will not have a parent node */
	if (hw_lvl == nix->tm_root_lvl)
		parent = node->parent_hw_id;
	else
		parent = node->parent->hw_id;

	link = nix->tx_link;
	relchan = nix_tm_relchan_get(nix);

	if (hw_lvl != NIX_TXSCH_LVL_SMQ)
		child = nix_tm_find_prio_anchor(nix, node->id, tree);

	/* Override default rr_prio when TL1
	 * Static Priority is disabled
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 && nix->tm_flags & NIX_TM_TL1_NO_SP) {
		rr_prio = nix->tm_aggr_lvl_rr_prio;
		child = 0;
	}

	plt_tm_dbg("Topology config node %s(%u)->%s(%" PRIu64 ") lvl %u, id %u"
		   " prio_anchor %" PRIu64 " rr_prio %u (%p)",
		   nix_tm_hwlvl2str(hw_lvl), schq, nix_tm_hwlvl2str(parent_lvl),
		   parent, node->lvl, node->id, child, rr_prio, node);

	/* Prepare Topology and Link config */
	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:

		/* Set xoff which will be cleared later */
		reg[k] = NIX_AF_SMQX_CFG(schq);
		regval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |
			     ((nix->mtu & 0xFFFF) << 8));
		/* Maximum Vtag insertion size as a multiple of four bytes */
		if (roc_nix->hw_vlan_ins)
			regval[k] |= (0x2ULL << 36);
		regval_mask[k] = ~(BIT_ULL(50) | GENMASK_ULL(6, 0) |
				   GENMASK_ULL(23, 8) | GENMASK_ULL(38, 36));
		k++;

		/* Parent and schedule conf */
		reg[k] = NIX_AF_MDQX_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		break;
	case NIX_TXSCH_LVL_TL4:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL4X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
		if (nix->sdp_link) {
			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
			regval[k] = BIT_ULL(12);
			k++;
		}
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL3X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!nix->sdp_link &&
		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
			regval[k] = BIT_ULL(12) | relchan;
			/* Enable BP if node is BP capable and rx_pause is set
			 */
			if (nix->rx_pause && node->bp_capa)
				regval[k] |= BIT_ULL(13);
			k++;
		}

		break;
	case NIX_TXSCH_LVL_TL2:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL2X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!nix->sdp_link &&
		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
			regval[k] = BIT_ULL(12) | relchan;
			/* Enable BP if node is BP capable and rx_pause is set
			 */
			if (nix->rx_pause && node->bp_capa)
				regval[k] |= BIT_ULL(13);
			k++;
		}

		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
		k++;

		break;
	}

	return k;
}

static inline int
nix_tm_default_rr_weight(struct nix *nix)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	uint32_t max_pktlen = roc_nix_max_pkt_len(roc_nix);
	uint32_t weight;

	/* Account for TX VTAG insertion (8B) */
	max_pktlen -= 8;
	weight = max_pktlen / roc_nix->dwrr_mtu;
	if (max_pktlen % roc_nix->dwrr_mtu)
		weight += 1;

	return weight;
}
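/*
 * Hypothetical numbers to illustrate the rounding above: with a max
 * packet length of 9212B (9204B after the 8B VTAG allowance) and a DWRR
 * MTU of 1500B, the default weight is 9204 / 1500 rounded up, i.e. 7.
 */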
uint8_t
nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node, volatile uint64_t *reg,
		      volatile uint64_t *regval)
{
	uint64_t strict_prio = node->priority;
	uint32_t hw_lvl = node->hw_lvl;
	uint32_t schq = node->hw_id;
	uint64_t rr_quantum;
	uint8_t k = 0;

	/* If a minimum weight is not provided, then by default RR_QUANTUM
	 * should be in sync with the kernel, i.e., a single MTU value
	 */
	if (!node->weight)
		rr_quantum = nix_tm_default_rr_weight(nix);
	else
		/* For CN9K, weight needs to be converted to quantum */
		rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	/* For children of the root, strict priority is the default if either
	 * the device root is TL2 or TL1 Static Priority is disabled.
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
	    (!nix_tm_have_tl1_access(nix) || nix->tm_flags & NIX_TM_TL1_NO_SP))
		strict_prio = nix->tm_aggr_lvl_rr_prio;

	plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
		   "prio 0x%" PRIx64 ", rr_quantum/rr_wt 0x%" PRIx64 " (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
		   strict_prio, rr_quantum, node);

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
		regval[k] = rr_quantum;
		k++;

		break;
	}

	return k;
}
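/*
 * Note for the shaper programming below: a PIR/CIR register value is
 * written only when both the converted rate and burst are non-zero, and
 * ORing in 1 sets the enable bit (bit 0); writing 0 leaves that shaper
 * disabled.
 */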
uint8_t
nix_tm_shaper_reg_prep(struct nix_tm_node *node,
		       struct nix_tm_shaper_profile *profile,
		       volatile uint64_t *reg, volatile uint64_t *regval)
{
	struct nix_tm_shaper_data cir, pir;
	uint32_t schq = node->hw_id;
	uint64_t adjust = 0;
	uint8_t k = 0;

	nix_tm_shaper_conf_get(profile, &cir, &pir);

	if (profile && node->pkt_mode)
		adjust = profile->pkt_mode_adj;
	else if (profile)
		adjust = profile->pkt_len_adj;

	adjust &= 0x1FF;
	plt_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
		   "pir %" PRIu64 "(%" PRIu64 "B), "
		   "cir %" PRIu64 "(%" PRIu64 "B), "
		   "adjust 0x%" PRIx64 " (pktmode %u) (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
		   pir.rate, pir.burst, cir.rate, cir.burst, adjust,
		   node->pkt_mode, node);

	switch (node->hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_MDQX_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_MDQX_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED ALG */
		reg[k] = NIX_AF_MDQX_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL4X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL4X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL4X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL3X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL3X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL3X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;

		break;
	case NIX_TXSCH_LVL_TL2:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL2X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL2X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL2X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;

		break;
	case NIX_TXSCH_LVL_TL1:
		/* Configure CIR */
		reg[k] = NIX_AF_TL1X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure length disable and adjust */
		reg[k] = NIX_AF_TL1X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	}

	return k;
}

uint8_t
nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
		    volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint32_t hw_lvl = node->hw_lvl;
	uint32_t schq = node->hw_id;
	uint8_t k = 0;

	plt_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
		   nix_tm_hwlvl2str(hw_lvl), schq, node->lvl, node->id, enable,
		   node);

	regval[k] = enable;

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_MDQ:
		reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
		k++;
		break;
	default:
		break;
	}

	return k;
}

/* Search for min rate in topology */
uint64_t
nix_tm_shaper_profile_rate_min(struct nix *nix)
{
	struct nix_tm_shaper_profile *profile;
	uint64_t rate_min = 1E9; /* 1 Gbps */

	TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
		if (profile->peak.rate && profile->peak.rate < rate_min)
			rate_min = profile->peak.rate;

		if (profile->commit.rate && profile->commit.rate < rate_min)
			rate_min = profile->commit.rate;
	}
	return rate_min;
}
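/*
 * nix_tm_resource_avail() below counts free schqs in a level's bitmap:
 * plt_bitmap_scan() hands back 64-bit slabs and wraps around, so
 * popcounts are accumulated until the scan position returns to where it
 * started.
 */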
uint16_t
nix_tm_resource_avail(struct nix *nix, uint8_t hw_lvl, bool contig)
{
	uint32_t pos = 0, start_pos = 0;
	struct plt_bitmap *bmp;
	uint16_t count = 0;
	uint64_t slab = 0;

	bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
	plt_bitmap_scan_init(bmp);

	if (!plt_bitmap_scan(bmp, &pos, &slab))
		return count;

	/* Count bits set */
	start_pos = pos;
	do {
		count += __builtin_popcountll(slab);
		if (!plt_bitmap_scan(bmp, &pos, &slab))
			break;
	} while (pos != start_pos);

	return count;
}

uint16_t
nix_tm_resource_estimate(struct nix *nix, uint16_t *schq_contig, uint16_t *schq,
			 enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	uint8_t contig_cnt, hw_lvl;
	struct nix_tm_node *parent;
	uint16_t cnt = 0, avail;

	list = nix_tm_node_list(nix, tree);
	/* Walk through parents from TL1..TL4 */
	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
		TAILQ_FOREACH(parent, list, node) {
			if (hw_lvl != parent->hw_lvl)
				continue;

			/* Skip accounting for children unless the
			 * parent requests reallocation.
			 */
			if (!parent->child_realloc)
				continue;

			/* Count children needed */
			schq[hw_lvl - 1] += parent->rr_num;
			if (parent->max_prio != UINT32_MAX) {
				contig_cnt = parent->max_prio + 1;
				schq_contig[hw_lvl - 1] += contig_cnt;
				/* When we have SP + DWRR at a parent,
				 * we will always have a spare schq at rr prio
				 * location in contiguous queues. Hence reduce
				 * discontiguous count by 1.
				 */
				if (parent->max_prio > 0 && parent->rr_num)
					schq[hw_lvl - 1] -= 1;
			}
		}
	}

	schq[nix->tm_root_lvl] = 1;
	if (!nix_tm_have_tl1_access(nix))
		schq[NIX_TXSCH_LVL_TL1] = 1;

	/* Now check for existing resources */
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		avail = nix_tm_resource_avail(nix, hw_lvl, false);
		if (schq[hw_lvl] <= avail)
			schq[hw_lvl] = 0;
		else
			schq[hw_lvl] -= avail;

		/* For contiguous queues, realloc everything */
		avail = nix_tm_resource_avail(nix, hw_lvl, true);
		if (schq_contig[hw_lvl] <= avail)
			schq_contig[hw_lvl] = 0;

		cnt += schq[hw_lvl];
		cnt += schq_contig[hw_lvl];

		plt_tm_dbg("Estimate resources needed for %s: dis %u cont %u",
			   nix_tm_hwlvl2str(hw_lvl), schq[hw_lvl],
			   schq_contig[hw_lvl]);
	}

	return cnt;
}
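/*
 * Estimate example: a parent with max_prio = 2 and rr_num = 3 first
 * books 3 discontiguous and 3 contiguous child schqs; since SP + DWRR
 * leaves a spare contiguous schq at the rr_prio slot, the discontiguous
 * demand is trimmed to 2.
 */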
uint16_t
roc_nix_tm_leaf_cnt(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint16_t leaf_cnt = 0;

	/* Count leaves only in the user list */
	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	TAILQ_FOREACH(node, list, node) {
		if (node->id < nix->nb_tx_queues)
			leaf_cnt++;
	}

	return leaf_cnt;
}

int
roc_nix_tm_node_lvl(struct roc_nix *roc_nix, uint32_t node_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	return node->lvl;
}

struct roc_nix_tm_node *
roc_nix_tm_node_get(struct roc_nix *roc_nix, uint32_t node_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	return (struct roc_nix_tm_node *)node;
}

struct roc_nix_tm_node *
roc_nix_tm_node_next(struct roc_nix *roc_nix, struct roc_nix_tm_node *__prev)
{
	struct nix_tm_node *prev = (struct nix_tm_node *)__prev;
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;

	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);

	/* HEAD of the list */
	if (!prev)
		return (struct roc_nix_tm_node *)TAILQ_FIRST(list);

	/* Next entry */
	if (prev->tree != ROC_NIX_TM_USER)
		return NULL;

	return (struct roc_nix_tm_node *)TAILQ_NEXT(prev, node);
}

struct roc_nix_tm_shaper_profile *
roc_nix_tm_shaper_profile_get(struct roc_nix *roc_nix, uint32_t profile_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;

	profile = nix_tm_shaper_profile_search(nix, profile_id);
	return (struct roc_nix_tm_shaper_profile *)profile;
}

struct roc_nix_tm_shaper_profile *
roc_nix_tm_shaper_profile_next(struct roc_nix *roc_nix,
			       struct roc_nix_tm_shaper_profile *__prev)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile_list *list;
	struct nix_tm_shaper_profile *prev;

	prev = (struct nix_tm_shaper_profile *)__prev;
	list = &nix->shaper_profile_list;

	/* HEAD of the list */
	if (!prev)
		return (struct roc_nix_tm_shaper_profile *)TAILQ_FIRST(list);

	return (struct roc_nix_tm_shaper_profile *)TAILQ_NEXT(prev, shaper);
}

struct nix_tm_node *
nix_tm_node_alloc(void)
{
	struct nix_tm_node *node;

	node = plt_zmalloc(sizeof(struct nix_tm_node), 0);
	if (!node)
		return NULL;

	node->free_fn = plt_free;
	return node;
}

void
nix_tm_node_free(struct nix_tm_node *node)
{
	if (!node || node->free_fn == NULL)
		return;

	(node->free_fn)(node);
}

struct nix_tm_shaper_profile *
nix_tm_shaper_profile_alloc(void)
{
	struct nix_tm_shaper_profile *profile;

	profile = plt_zmalloc(sizeof(struct nix_tm_shaper_profile), 0);
	if (!profile)
		return NULL;

	profile->free_fn = plt_free;
	return profile;
}

void
nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile)
{
	if (!profile || !profile->free_fn)
		return;

	(profile->free_fn)(profile);
}
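/*
 * Node stats below are read from the TL1 drop/color counters with a
 * NIX_TXSCHQ_CFG read request; the response regval[] entries come back
 * in the same order the registers were queued.
 */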
int
roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
			  struct roc_nix_tm_node_stats *n_stats)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req, *rsp;
	struct nix_tm_node *node;
	uint32_t schq;
	int rc, i;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	if (node->hw_lvl != NIX_TXSCH_LVL_TL1)
		return NIX_ERR_OP_NOTSUP;

	/* Check if node has HW resource */
	if (!(node->flags & NIX_TM_NODE_HWRES))
		return 0;

	schq = node->hw_id;
	/* Skip fetch if not requested */
	if (!n_stats)
		goto clear_stats;

	memset(n_stats, 0, sizeof(struct roc_nix_tm_node_stats));

	req = mbox_alloc_msg_nix_txschq_cfg(mbox_get(mbox));
	req->read = 1;
	req->lvl = NIX_TXSCH_LVL_TL1;

	i = 0;
	req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
	req->num_regs = i;

	rc = mbox_process_msg(mbox, (void **)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Return stats */
	n_stats->stats[ROC_NIX_TM_NODE_PKTS_DROPPED] = rsp->regval[0];
	n_stats->stats[ROC_NIX_TM_NODE_BYTES_DROPPED] = rsp->regval[1];
	n_stats->stats[ROC_NIX_TM_NODE_GREEN_PKTS] = rsp->regval[2];
	n_stats->stats[ROC_NIX_TM_NODE_GREEN_BYTES] = rsp->regval[3];
	n_stats->stats[ROC_NIX_TM_NODE_YELLOW_PKTS] = rsp->regval[4];
	n_stats->stats[ROC_NIX_TM_NODE_YELLOW_BYTES] = rsp->regval[5];
	n_stats->stats[ROC_NIX_TM_NODE_RED_PKTS] = rsp->regval[6];
	n_stats->stats[ROC_NIX_TM_NODE_RED_BYTES] = rsp->regval[7];
	mbox_put(mbox);

clear_stats:
	if (!clear)
		return 0;

	/* Clear all the stats */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox_get(mbox));
	req->lvl = NIX_TXSCH_LVL_TL1;
	i = 0;
	req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
	req->num_regs = i;

	rc = mbox_process_msg(mbox, (void **)&rsp);
	mbox_put(mbox);
	return rc;
}
bool
roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) &&
	    (nix->tm_tree == ROC_NIX_TM_USER))
		return true;
	return false;
}

int
roc_nix_tm_tree_type_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->tm_tree;
}

int
roc_nix_tm_max_prio(struct roc_nix *roc_nix, int lvl)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	int hw_lvl = nix_tm_lvl2nix(nix, lvl);

	return nix_tm_max_prio(nix, hw_lvl);
}

int
roc_nix_tm_lvl_is_leaf(struct roc_nix *roc_nix, int lvl)
{
	return nix_tm_is_leaf(roc_nix_to_nix_priv(roc_nix), lvl);
}

void
roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
				   struct roc_nix_tm_shaper_profile *roc_prof)
{
	struct nix_tm_node *tm_node = (struct nix_tm_node *)node;
	struct nix_tm_shaper_profile *profile;
	struct nix_tm_shaper_data cir, pir;

	if (!roc_prof)
		return;

	profile = (struct nix_tm_shaper_profile *)roc_prof->reserved;
	tm_node->red_algo = roc_prof->red_algo;

	/* C0 doesn't support STALL when both PIR & CIR are enabled */
	if (roc_model_is_cn96_cx()) {
		nix_tm_shaper_conf_get(profile, &cir, &pir);

		if (pir.rate && cir.rate)
			tm_node->red_algo = NIX_REDALG_DISCARD;
	}
}

int
roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix)
{
	if (nix_tm_have_tl1_access(roc_nix_to_nix_priv(roc_nix)))
		return NIX_TXSCH_LVL_CNT;

	return (NIX_TXSCH_LVL_CNT - 1);
}

int
roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix_tm_lvl2nix(nix, lvl) == NIX_TXSCH_LVL_TL1)
		return 1;

	return 0;
}