/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

static inline uint64_t
nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
{
	uint64_t regval;

	/* The rate fields (div_exp, exponent, mantissa) are packed the same
	 * way on all models; only the burst fields differ. On CN9K the burst
	 * exponent sits at bit 37, while later models have a wider burst
	 * mantissa and place the exponent at bit 44.
	 */
	if (roc_model_is_cn9k()) {
		regval = (shaper->burst_exponent << 37);
		regval |= (shaper->burst_mantissa << 29);
		regval |= (shaper->div_exp << 13);
		regval |= (shaper->exponent << 9);
		regval |= (shaper->mantissa << 1);
		return regval;
	}

	regval = (shaper->burst_exponent << 44);
	regval |= (shaper->burst_mantissa << 29);
	regval |= (shaper->div_exp << 13);
	regval |= (shaper->exponent << 9);
	regval |= (shaper->mantissa << 1);
	return regval;
}

uint16_t
nix_tm_lvl2nix_tl1_root(uint32_t lvl)
{
	switch (lvl) {
	case ROC_TM_LVL_ROOT:
		return NIX_TXSCH_LVL_TL1;
	case ROC_TM_LVL_SCH1:
		return NIX_TXSCH_LVL_TL2;
	case ROC_TM_LVL_SCH2:
		return NIX_TXSCH_LVL_TL3;
	case ROC_TM_LVL_SCH3:
		return NIX_TXSCH_LVL_TL4;
	case ROC_TM_LVL_SCH4:
		return NIX_TXSCH_LVL_SMQ;
	default:
		return NIX_TXSCH_LVL_CNT;
	}
}

uint16_t
nix_tm_lvl2nix_tl2_root(uint32_t lvl)
{
	switch (lvl) {
	case ROC_TM_LVL_ROOT:
		return NIX_TXSCH_LVL_TL2;
	case ROC_TM_LVL_SCH1:
		return NIX_TXSCH_LVL_TL3;
	case ROC_TM_LVL_SCH2:
		return NIX_TXSCH_LVL_TL4;
	case ROC_TM_LVL_SCH3:
		return NIX_TXSCH_LVL_SMQ;
	default:
		return NIX_TXSCH_LVL_CNT;
	}
}

uint16_t
nix_tm_lvl2nix(struct nix *nix, uint32_t lvl)
{
	if (nix_tm_have_tl1_access(nix))
		return nix_tm_lvl2nix_tl1_root(lvl);
	else
		return nix_tm_lvl2nix_tl2_root(lvl);
}

static uint8_t
nix_tm_relchan_get(struct nix *nix)
{
	return nix->tx_chan_base & 0xff;
}

static int
nix_tm_find_prio_anchor(struct nix *nix, uint32_t node_id,
			enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *child_node;
	struct nix_tm_node_list *list;

	list = nix_tm_node_list(nix, tree);

	TAILQ_FOREACH(child_node, list, node) {
		if (!child_node->parent)
			continue;
		if (child_node->parent->id != node_id)
			continue;
		if (child_node->priority == child_node->parent->rr_prio)
			continue;
		return child_node->hw_id - child_node->priority;
	}
	return 0;
}

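/* Illustrative example for nix_tm_find_prio_anchor() above (a sketch; it
 * assumes SP children hold contiguous hw_ids indexed by priority, which is
 * what nix_tm_resource_estimate() reserves later in this file): a child
 * with hw_id 10 at priority 2 implies the prio anchor, i.e. the hw_id of
 * the priority-0 slot, is 10 - 2 = 8. Children parked at the parent's
 * rr_prio are skipped because the whole DWRR group shares one priority.
 */
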
struct nix_tm_shaper_profile *
nix_tm_shaper_profile_search(struct nix *nix, uint32_t id)
{
	struct nix_tm_shaper_profile *profile;

	TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
		if (profile->id == id)
			return profile;
	}
	return NULL;
}

struct nix_tm_node *
nix_tm_node_search(struct nix *nix, uint32_t node_id, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;

	list = nix_tm_node_list(nix, tree);
	TAILQ_FOREACH(node, list, node) {
		if (node->id == node_id)
			return node;
	}
	return NULL;
}

uint64_t
nix_tm_shaper_rate_conv(uint64_t value, uint64_t *exponent_p,
			uint64_t *mantissa_p, uint64_t *div_exp_p)
{
	uint64_t div_exp, exponent, mantissa;

	/* Boundary checks */
	if (value < NIX_TM_MIN_SHAPER_RATE || value > NIX_TM_MAX_SHAPER_RATE)
		return 0;

	if (value <= NIX_TM_SHAPER_RATE(0, 0, 0)) {
		/* Calculate rate div_exp and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * (256 + mantissa)) /
		 *	   ((1 << div_exp) * 256)
		 */
		div_exp = 0;
		exponent = 0;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value < (NIX_TM_SHAPER_RATE_CONST / (1 << div_exp)))
			div_exp += 1;

		while (value < ((NIX_TM_SHAPER_RATE_CONST * (256 + mantissa)) /
				((1 << div_exp) * 256)))
			mantissa -= 1;
	} else {
		/* Calculate rate exponent and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
		 */
		div_exp = 0;
		exponent = NIX_TM_MAX_RATE_EXPONENT;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value < (NIX_TM_SHAPER_RATE_CONST * (1 << exponent)))
			exponent -= 1;

		while (value < ((NIX_TM_SHAPER_RATE_CONST *
				 ((256 + mantissa) << exponent)) /
				256))
			mantissa -= 1;
	}

	if (div_exp > NIX_TM_MAX_RATE_DIV_EXP ||
	    exponent > NIX_TM_MAX_RATE_EXPONENT ||
	    mantissa > NIX_TM_MAX_RATE_MANTISSA)
		return 0;

	if (div_exp_p)
		*div_exp_p = div_exp;
	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	/* Calculate real rate value */
	return NIX_TM_SHAPER_RATE(exponent, mantissa, div_exp);
}

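/* Worked example for nix_tm_shaper_rate_conv() (a sketch, taking
 * NIX_TM_SHAPER_RATE_CONST as 2E6 per the formulas above): for
 * value = 10000000 the exponent loop settles on exponent = 2, since
 * 2E6 * 2^2 = 8E6 <= 1E7 < 2E6 * 2^3, and the mantissa loop on
 * mantissa = 64, since (2E6 * ((256 + 64) << 2)) / 256 = 10000000 exactly.
 * A rate below 2E6 takes the div_exp branch instead: value = 1000000
 * yields div_exp = 1, exponent = 0, mantissa = 0, i.e.
 * (2E6 * 256) / (2 * 256) = 1000000.
 */
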
uint64_t
nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
			 uint64_t *mantissa_p)
{
	uint64_t min_burst, max_burst;
	uint64_t exponent, mantissa;
	uint32_t max_mantissa;

	min_burst = NIX_TM_MIN_SHAPER_BURST;
	max_burst = roc_nix_tm_max_shaper_burst_get();

	if (value < min_burst || value > max_burst)
		return 0;

	max_mantissa = (roc_model_is_cn9k() ? NIX_CN9K_TM_MAX_BURST_MANTISSA :
					      NIX_TM_MAX_BURST_MANTISSA);
	/* Calculate burst exponent and mantissa using
	 * the following formula:
	 *
	 * value = ((256 + mantissa) << (exponent + 1)) / 256
	 */
	exponent = NIX_TM_MAX_BURST_EXPONENT;
	mantissa = max_mantissa;

	while (value < (1ull << (exponent + 1)))
		exponent -= 1;

	while (value < ((256 + mantissa) << (exponent + 1)) / 256)
		mantissa -= 1;

	if (exponent > NIX_TM_MAX_BURST_EXPONENT || mantissa > max_mantissa)
		return 0;

	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	return NIX_TM_SHAPER_BURST(exponent, mantissa);
}

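/* Worked example for nix_tm_shaper_burst_conv() (a sketch of the formula
 * above, assuming the value passes the min/max bounds checks): for
 * value = 4096 the exponent loop stops at exponent = 11, since
 * 2^12 = 4096 <= value, and the mantissa loop at mantissa = 0, since
 * ((256 + 0) << 12) / 256 = 4096 exactly. A value of 6144 instead yields
 * exponent = 11, mantissa = 128: ((256 + 128) << 12) / 256 = 6144.
 */
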
static void
nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,
		       struct nix_tm_shaper_data *cir,
		       struct nix_tm_shaper_data *pir)
{
	memset(cir, 0, sizeof(*cir));
	memset(pir, 0, sizeof(*pir));

	if (!profile)
		return;

	/* Calculate CIR exponent and mantissa */
	if (profile->commit.rate)
		cir->rate = nix_tm_shaper_rate_conv(
			profile->commit.rate, &cir->exponent, &cir->mantissa,
			&cir->div_exp);

	/* Calculate PIR exponent and mantissa */
	if (profile->peak.rate)
		pir->rate = nix_tm_shaper_rate_conv(
			profile->peak.rate, &pir->exponent, &pir->mantissa,
			&pir->div_exp);

	/* Calculate CIR burst exponent and mantissa */
	if (profile->commit.size)
		cir->burst = nix_tm_shaper_burst_conv(profile->commit.size,
						      &cir->burst_exponent,
						      &cir->burst_mantissa);

	/* Calculate PIR burst exponent and mantissa */
	if (profile->peak.size)
		pir->burst = nix_tm_shaper_burst_conv(profile->peak.size,
						      &pir->burst_exponent,
						      &pir->burst_mantissa);
}

uint32_t
nix_tm_check_rr(struct nix *nix, uint32_t parent_id, enum roc_nix_tm_tree tree,
		uint32_t *rr_prio, uint32_t *max_prio)
{
	uint32_t node_cnt[NIX_TM_TLX_SP_PRIO_MAX];
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint32_t rr_num = 0, i;
	uint32_t children = 0;
	uint32_t priority;

	memset(node_cnt, 0, sizeof(node_cnt));
	*rr_prio = 0xF;
	*max_prio = UINT32_MAX;

	list = nix_tm_node_list(nix, tree);
	TAILQ_FOREACH(node, list, node) {
		if (!node->parent)
			continue;

		if (node->parent->id != parent_id)
			continue;

		priority = node->priority;
		node_cnt[priority]++;
		children++;
	}

	for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++) {
		if (!node_cnt[i])
			break;

		if (node_cnt[i] > rr_num) {
			*rr_prio = i;
			rr_num = node_cnt[i];
		}
	}

	/* An RR group with a single child is treated as SP */
	if (rr_num == 1) {
		*rr_prio = 0xF;
		rr_num = 0;
	}

	/* Max prio is returned only when a non-zero priority is in use
	 * or when the parent has a single child.
	 */
	if (i > 1 || (children == 1))
		*max_prio = i - 1;
	return rr_num;
}

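/* Example for nix_tm_check_rr() (illustrative): five children with
 * priorities {0, 1, 1, 1, 2} give node_cnt = {1, 3, 1}, so the function
 * returns rr_num = 3 with *rr_prio = 1 (the DWRR group) and *max_prio = 2
 * (the highest SP priority in use). Had priority 1 held only one child,
 * that group would have been demoted to SP per the rr_num == 1 check.
 */
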
static uint16_t
nix_tm_max_prio(struct nix *nix, uint16_t hw_lvl)
{
	if (hw_lvl >= NIX_TXSCH_LVL_CNT)
		return 0;

	/* MDQ does not support SP */
	if (hw_lvl == NIX_TXSCH_LVL_MDQ)
		return 0;

	/* A PF's TL1 with VFs enabled does not support SP */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 && (!nix_tm_have_tl1_access(nix) ||
					    (nix->tm_flags & NIX_TM_TL1_NO_SP)))
		return 0;

	return NIX_TM_TLX_SP_PRIO_MAX - 1;
}

int
nix_tm_validate_prio(struct nix *nix, uint32_t lvl, uint32_t parent_id,
		     uint32_t priority, enum roc_nix_tm_tree tree)
{
	uint8_t priorities[NIX_TM_TLX_SP_PRIO_MAX];
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint32_t rr_num = 0;
	int i;

	list = nix_tm_node_list(nix, tree);
	/* Validate priority against max */
	if (priority > nix_tm_max_prio(nix, nix_tm_lvl2nix(nix, lvl - 1)))
		return NIX_ERR_TM_PRIO_EXCEEDED;

	if (parent_id == ROC_NIX_TM_NODE_ID_INVALID)
		return 0;

	memset(priorities, 0, sizeof(priorities));
	priorities[priority] = 1;

	TAILQ_FOREACH(node, list, node) {
		if (!node->parent)
			continue;

		if (node->parent->id != parent_id)
			continue;

		priorities[node->priority]++;
	}

	for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++)
		if (priorities[i] > 1)
			rr_num++;

	/* At most one RR group per parent */
	if (rr_num > 1)
		return NIX_ERR_TM_MULTIPLE_RR_GROUPS;

	/* Check for previous priority to avoid holes in priorities */
	if (priority && !priorities[priority - 1])
		return NIX_ERR_TM_PRIO_ORDER;

	return 0;
}

bool
nix_tm_child_res_valid(struct nix_tm_node_list *list,
		       struct nix_tm_node *parent)
{
	struct nix_tm_node *child;

	TAILQ_FOREACH(child, list, node) {
		if (child->parent != parent)
			continue;
		if (!(child->flags & NIX_TM_NODE_HWRES))
			return false;
	}
	return true;
}

uint8_t
nix_tm_tl1_default_prep(uint32_t schq, volatile uint64_t *reg,
			volatile uint64_t *regval)
{
	uint8_t k = 0;

	/*
	 * Default config for TL1.
	 * For VF this is always ignored.
	 */
	plt_tm_dbg("Default config for main root %s(%u)",
		   nix_tm_hwlvl2str(NIX_TXSCH_LVL_TL1), schq);

	/* Set DWRR quantum */
	reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
	regval[k] = NIX_TM_TL1_DFLT_RR_QTM;
	k++;

	reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
	regval[k] = (NIX_TM_TL1_DFLT_RR_PRIO << 1);
	k++;

	reg[k] = NIX_AF_TL1X_CIR(schq);
	regval[k] = 0;
	k++;

	return k;
}

uint8_t
nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
			 volatile uint64_t *reg, volatile uint64_t *regval,
			 volatile uint64_t *regval_mask)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	uint8_t k = 0, hw_lvl, parent_lvl;
	uint64_t parent = 0, child = 0;
	enum roc_nix_tm_tree tree;
	uint32_t rr_prio, schq;
	uint16_t link, relchan;

	tree = node->tree;
	schq = node->hw_id;
	hw_lvl = node->hw_lvl;
	parent_lvl = hw_lvl + 1;
	rr_prio = node->rr_prio;

	/* Root node will not have a parent node */
	if (hw_lvl == nix->tm_root_lvl)
		parent = node->parent_hw_id;
	else
		parent = node->parent->hw_id;

	link = nix->tx_link;
	relchan = nix_tm_relchan_get(nix);

	if (hw_lvl != NIX_TXSCH_LVL_SMQ)
		child = nix_tm_find_prio_anchor(nix, node->id, tree);

	/* Override default rr_prio when TL1
	 * Static Priority is disabled
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 && nix->tm_flags & NIX_TM_TL1_NO_SP) {
		rr_prio = NIX_TM_TL1_DFLT_RR_PRIO;
		child = 0;
	}

	plt_tm_dbg("Topology config node %s(%u)->%s(%" PRIu64 ") lvl %u, id %u"
		   " prio_anchor %" PRIu64 " rr_prio %u (%p)",
		   nix_tm_hwlvl2str(hw_lvl), schq, nix_tm_hwlvl2str(parent_lvl),
		   parent, node->lvl, node->id, child, rr_prio, node);

	/* Prepare Topology and Link config */
	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:

		/* Set xoff which will be cleared later */
		reg[k] = NIX_AF_SMQX_CFG(schq);
		regval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |
			     ((nix->mtu & 0xFFFF) << 8));
		/* Maximum Vtag insertion size as a multiple of four bytes */
		if (roc_nix->hw_vlan_ins)
			regval[k] |= (0x2ULL << 36);
		regval_mask[k] = ~(BIT_ULL(50) | GENMASK_ULL(6, 0) |
				   GENMASK_ULL(23, 8) | GENMASK_ULL(38, 36));
		k++;

		/* Parent and schedule conf */
		reg[k] = NIX_AF_MDQX_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		break;
	case NIX_TXSCH_LVL_TL4:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL4X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
		if (nix->sdp_link) {
			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
			regval[k] = BIT_ULL(12);
			k++;
		}
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL3X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!nix->sdp_link &&
		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
			regval[k] = BIT_ULL(12) | relchan;
			k++;
		}

		break;
	case NIX_TXSCH_LVL_TL2:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL2X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!nix->sdp_link &&
		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
			regval[k] = BIT_ULL(12) | relchan;
			k++;
		}

		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
		k++;

		break;
	}

	return k;
}

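/* Example for nix_tm_topology_reg_prep() (illustrative values): a TL2 node
 * with hw_id 4 under a TL1 with hw_id 0, prio_anchor 8 and rr_prio 2 would
 * emit
 *   NIX_AF_TL2X_PARENT(4)   = 0x0          (parent schq 0 at bit 16)
 *   NIX_AF_TL2X_TOPOLOGY(4) = 0x800000004  ((8 << 32) | (2 << 1))
 * plus a TL3_TL2 link config entry when tm_link_cfg_lvl is TL2.
 */
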
uint8_t
nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
		      volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint64_t strict_prio = node->priority;
	uint32_t hw_lvl = node->hw_lvl;
	uint32_t schq = node->hw_id;
	uint64_t rr_quantum;
	uint8_t k = 0;

	/* For CN9K, weight needs to be converted to quantum */
	rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	/* For children of the root, strict priority is the default if
	 * either the device root is TL2 or TL1 static priority is disabled.
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
	    (!nix_tm_have_tl1_access(nix) || nix->tm_flags & NIX_TM_TL1_NO_SP))
		strict_prio = NIX_TM_TL1_DFLT_RR_PRIO;

	plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
		   "prio 0x%" PRIx64 ", rr_quantum/rr_wt 0x%" PRIx64 " (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
		   strict_prio, rr_quantum, node);

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
		regval[k] = rr_quantum;
		k++;

		break;
	}

	return k;
}

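/* Example for nix_tm_sched_reg_prep() (illustrative): a TL3 node with
 * priority 2 and rr_quantum 0x10 yields
 *   NIX_AF_TL3X_SCHEDULE = (2 << 24) | 0x10 = 0x2000010
 * TL1 has no SP field in its SCHEDULE register, so only the DWRR quantum
 * is programmed there.
 */
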
uint8_t
nix_tm_shaper_reg_prep(struct nix_tm_node *node,
		       struct nix_tm_shaper_profile *profile,
		       volatile uint64_t *reg, volatile uint64_t *regval)
{
	struct nix_tm_shaper_data cir, pir;
	uint32_t schq = node->hw_id;
	uint64_t adjust = 0;
	uint8_t k = 0;

	nix_tm_shaper_conf_get(profile, &cir, &pir);

	if (profile && node->pkt_mode)
		adjust = profile->pkt_mode_adj;
	else if (profile)
		adjust = profile->pkt_len_adj;

	plt_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
		   "pir %" PRIu64 "(%" PRIu64 "B), "
		   "cir %" PRIu64 "(%" PRIu64 "B), "
		   "adjust 0x%" PRIx64 " (pktmode %u) (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
		   pir.rate, pir.burst, cir.rate, cir.burst, adjust,
		   node->pkt_mode, node);

	switch (node->hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_MDQX_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) :
				    0;
		k++;

		reg[k] = NIX_AF_MDQX_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) :
				    0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_MDQX_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL4X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) :
				    0;
		k++;

		reg[k] = NIX_AF_TL4X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) :
				    0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL4X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL3X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) :
				    0;
		k++;

		reg[k] = NIX_AF_TL3X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) :
				    0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL3X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;

		break;
	case NIX_TXSCH_LVL_TL2:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL2X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) :
				    0;
		k++;

		reg[k] = NIX_AF_TL2X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) :
				    0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL2X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;

		break;
	case NIX_TXSCH_LVL_TL1:
		/* Configure CIR */
		reg[k] = NIX_AF_TL1X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) :
				    0;
		k++;

		/* Configure length disable and adjust */
		reg[k] = NIX_AF_TL1X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	}

	return k;
}

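/* Note on the PIR/CIR encoding above (an assumption based on
 * nix_tm_shaper2regval() packing fields from bit 1 upward): bit 0 of the
 * PIR/CIR registers acts as the shaper enable bit, hence the "| 1" when a
 * valid rate and burst were computed; writing 0 leaves that shaper
 * disabled. TL1 supports CIR only, so no PIR is programmed there.
 */
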
uint8_t
nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
		    volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint32_t hw_lvl = node->hw_lvl;
	uint32_t schq = node->hw_id;
	uint8_t k = 0;

	plt_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
		   nix_tm_hwlvl2str(hw_lvl), schq, node->lvl, node->id, enable,
		   node);

	regval[k] = enable;

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_MDQ:
		reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
		k++;
		break;
	default:
		break;
	}

	return k;
}

/* Search for the minimum shaper rate in the topology */
uint64_t
nix_tm_shaper_profile_rate_min(struct nix *nix)
{
	struct nix_tm_shaper_profile *profile;
	uint64_t rate_min = 1E9; /* 1 Gbps */

	TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
		if (profile->peak.rate && profile->peak.rate < rate_min)
			rate_min = profile->peak.rate;

		if (profile->commit.rate && profile->commit.rate < rate_min)
			rate_min = profile->commit.rate;
	}
	return rate_min;
}

uint16_t
nix_tm_resource_avail(struct nix *nix, uint8_t hw_lvl, bool contig)
{
	uint32_t pos = 0, start_pos = 0;
	struct plt_bitmap *bmp;
	uint16_t count = 0;
	uint64_t slab = 0;

	bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
	plt_bitmap_scan_init(bmp);

	if (!plt_bitmap_scan(bmp, &pos, &slab))
		return count;

	/* Count set bits */
	start_pos = pos;
	do {
		count += __builtin_popcountll(slab);
		if (!plt_bitmap_scan(bmp, &pos, &slab))
			break;
	} while (pos != start_pos);

	return count;
}

uint16_t
nix_tm_resource_estimate(struct nix *nix, uint16_t *schq_contig, uint16_t *schq,
			 enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	uint8_t contig_cnt, hw_lvl;
	struct nix_tm_node *parent;
	uint16_t cnt = 0, avail;

	list = nix_tm_node_list(nix, tree);
	/* Walk through parents from TL1..TL4 */
	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
		TAILQ_FOREACH(parent, list, node) {
			if (hw_lvl != parent->hw_lvl)
				continue;

			/* Skip accounting for children whose parent
			 * does not request child reallocation.
			 */
			if (!parent->child_realloc)
				continue;

			/* Count children needed */
			schq[hw_lvl - 1] += parent->rr_num;
			if (parent->max_prio != UINT32_MAX) {
				contig_cnt = parent->max_prio + 1;
				schq_contig[hw_lvl - 1] += contig_cnt;
				/* When we have SP + DWRR at a parent,
				 * we will always have a spare schq at rr prio
				 * location in contiguous queues. Hence reduce
				 * discontiguous count by 1.
				 */
				if (parent->max_prio > 0 && parent->rr_num)
					schq[hw_lvl - 1] -= 1;
			}
		}
	}

	schq[nix->tm_root_lvl] = 1;
	if (!nix_tm_have_tl1_access(nix))
		schq[NIX_TXSCH_LVL_TL1] = 1;

	/* Now check for existing resources */
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		avail = nix_tm_resource_avail(nix, hw_lvl, false);
		if (schq[hw_lvl] <= avail)
			schq[hw_lvl] = 0;
		else
			schq[hw_lvl] -= avail;

		/* For contiguous queues, realloc everything */
		avail = nix_tm_resource_avail(nix, hw_lvl, true);
		if (schq_contig[hw_lvl] <= avail)
			schq_contig[hw_lvl] = 0;

		cnt += schq[hw_lvl];
		cnt += schq_contig[hw_lvl];

		plt_tm_dbg("Estimate resources needed for %s: dis %u cont %u",
			   nix_tm_hwlvl2str(hw_lvl), schq[hw_lvl],
			   schq_contig[hw_lvl]);
	}

	return cnt;
}

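/* Example for nix_tm_resource_estimate() (illustrative): a parent marked
 * for child_realloc with rr_num = 3 and max_prio = 2 needs
 * max_prio + 1 = 3 contiguous schqs for the SP priorities and
 * rr_num - 1 = 2 discontiguous schqs for the remaining DWRR children,
 * since one DWRR child reuses the contiguous slot at the rr_prio location.
 */
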
uint16_t
roc_nix_tm_leaf_cnt(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint16_t leaf_cnt = 0;

	/* Count leaves only in the user list */
	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	TAILQ_FOREACH(node, list, node) {
		if (node->id < nix->nb_tx_queues)
			leaf_cnt++;
	}

	return leaf_cnt;
}

int
roc_nix_tm_node_lvl(struct roc_nix *roc_nix, uint32_t node_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	return node->lvl;
}

struct roc_nix_tm_node *
roc_nix_tm_node_get(struct roc_nix *roc_nix, uint32_t node_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	return (struct roc_nix_tm_node *)node;
}

struct roc_nix_tm_node *
roc_nix_tm_node_next(struct roc_nix *roc_nix, struct roc_nix_tm_node *__prev)
{
	struct nix_tm_node *prev = (struct nix_tm_node *)__prev;
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;

	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);

	/* HEAD of the list */
	if (!prev)
		return (struct roc_nix_tm_node *)TAILQ_FIRST(list);

	/* Next entry */
	if (prev->tree != ROC_NIX_TM_USER)
		return NULL;

	return (struct roc_nix_tm_node *)TAILQ_NEXT(prev, node);
}

struct roc_nix_tm_shaper_profile *
roc_nix_tm_shaper_profile_get(struct roc_nix *roc_nix, uint32_t profile_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;

	profile = nix_tm_shaper_profile_search(nix, profile_id);
	return (struct roc_nix_tm_shaper_profile *)profile;
}

struct roc_nix_tm_shaper_profile *
roc_nix_tm_shaper_profile_next(struct roc_nix *roc_nix,
			       struct roc_nix_tm_shaper_profile *__prev)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile_list *list;
	struct nix_tm_shaper_profile *prev;

	prev = (struct nix_tm_shaper_profile *)__prev;
	list = &nix->shaper_profile_list;

	/* HEAD of the list */
	if (!prev)
		return (struct roc_nix_tm_shaper_profile *)TAILQ_FIRST(list);

	return (struct roc_nix_tm_shaper_profile *)TAILQ_NEXT(prev, shaper);
}

struct nix_tm_node *
nix_tm_node_alloc(void)
{
	struct nix_tm_node *node;

	node = plt_zmalloc(sizeof(struct nix_tm_node), 0);
	if (!node)
		return NULL;

	node->free_fn = plt_free;
	return node;
}

void
nix_tm_node_free(struct nix_tm_node *node)
{
	if (!node || !node->free_fn)
		return;

	(node->free_fn)(node);
}

struct nix_tm_shaper_profile *
nix_tm_shaper_profile_alloc(void)
{
	struct nix_tm_shaper_profile *profile;

	profile = plt_zmalloc(sizeof(struct nix_tm_shaper_profile), 0);
	if (!profile)
		return NULL;

	profile->free_fn = plt_free;
	return profile;
}

void
nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile)
{
	if (!profile || !profile->free_fn)
		return;

	(profile->free_fn)(profile);
}

int
roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
			  struct roc_nix_tm_node_stats *n_stats)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req, *rsp;
	struct nix_tm_node *node;
	uint32_t schq;
	int rc, i;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	if (node->hw_lvl != NIX_TXSCH_LVL_TL1)
		return NIX_ERR_OP_NOTSUP;

	/* Check if node has HW resource */
	if (!(node->flags & NIX_TM_NODE_HWRES))
		return 0;

	schq = node->hw_id;
	/* Skip fetch if not requested */
	if (!n_stats)
		goto clear_stats;

	memset(n_stats, 0, sizeof(struct roc_nix_tm_node_stats));

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->read = 1;
	req->lvl = NIX_TXSCH_LVL_TL1;

	i = 0;
	req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
	req->num_regs = i;

	rc = mbox_process_msg(mbox, (void **)&rsp);
	if (rc)
		return rc;

	/* Return stats */
	n_stats->stats[ROC_NIX_TM_NODE_PKTS_DROPPED] = rsp->regval[0];
	n_stats->stats[ROC_NIX_TM_NODE_BYTES_DROPPED] = rsp->regval[1];
	n_stats->stats[ROC_NIX_TM_NODE_GREEN_PKTS] = rsp->regval[2];
	n_stats->stats[ROC_NIX_TM_NODE_GREEN_BYTES] = rsp->regval[3];
	n_stats->stats[ROC_NIX_TM_NODE_YELLOW_PKTS] = rsp->regval[4];
	n_stats->stats[ROC_NIX_TM_NODE_YELLOW_BYTES] = rsp->regval[5];
	n_stats->stats[ROC_NIX_TM_NODE_RED_PKTS] = rsp->regval[6];
	n_stats->stats[ROC_NIX_TM_NODE_RED_BYTES] = rsp->regval[7];

clear_stats:
	if (!clear)
		return 0;

	/* Clear all the stats */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_TL1;
	i = 0;
	req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
	req->num_regs = i;

	return mbox_process_msg(mbox, (void **)&rsp);
}

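/* Minimal usage sketch for roc_nix_tm_node_stats_get() (hypothetical
 * caller; root_id is an assumed node id). Only user-tree nodes holding a
 * TL1 HW resource report these stats; passing clear = true zeroes the
 * counters after the read:
 *
 *	struct roc_nix_tm_node_stats stats;
 *
 *	if (!roc_nix_tm_node_stats_get(roc_nix, root_id, true, &stats))
 *		plt_tm_dbg("dropped %" PRIu64 " pkts",
 *			   stats.stats[ROC_NIX_TM_NODE_PKTS_DROPPED]);
 *
 * Similarly, roc_nix_tm_node_next() above walks the user tree when seeded
 * with NULL, which returns the list head.
 */
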
bool
roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) &&
	    (nix->tm_tree == ROC_NIX_TM_USER))
		return true;
	return false;
}

int
roc_nix_tm_tree_type_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->tm_tree;
}

int
roc_nix_tm_max_prio(struct roc_nix *roc_nix, int lvl)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	int hw_lvl = nix_tm_lvl2nix(nix, lvl);

	return nix_tm_max_prio(nix, hw_lvl);
}

int
roc_nix_tm_lvl_is_leaf(struct roc_nix *roc_nix, int lvl)
{
	return nix_tm_is_leaf(roc_nix_to_nix_priv(roc_nix), lvl);
}

void
roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
				   struct roc_nix_tm_shaper_profile *roc_prof)
{
	struct nix_tm_node *tm_node = (struct nix_tm_node *)node;
	struct nix_tm_shaper_profile *profile;
	struct nix_tm_shaper_data cir, pir;

	profile = (struct nix_tm_shaper_profile *)roc_prof->reserved;
	tm_node->red_algo = NIX_REDALG_STD;

	/* C0 doesn't support STALL when both PIR & CIR are enabled */
	if (profile && roc_model_is_cn96_cx()) {
		nix_tm_shaper_conf_get(profile, &cir, &pir);

		if (pir.rate && cir.rate)
			tm_node->red_algo = NIX_REDALG_DISCARD;
	}
}

int
roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix)
{
	if (nix_tm_have_tl1_access(roc_nix_to_nix_priv(roc_nix)))
		return NIX_TXSCH_LVL_CNT;

	return (NIX_TXSCH_LVL_CNT - 1);
}

int
roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix_tm_lvl2nix(nix, lvl) == NIX_TXSCH_LVL_TL1)
		return 1;

	return 0;
}