/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdalign.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>
#include <rte_bitmap.h>
#include <rte_reciprocal.h>

#include "rte_sched.h"
#include "rte_sched_log.h"
#include "rte_sched_common.h"

#include "rte_approx.h"


#ifdef __INTEL_COMPILER
#pragma warning(disable:2259) /* conversion may lose significant bits */
#endif

#ifndef RTE_SCHED_PORT_N_GRINDERS
#define RTE_SCHED_PORT_N_GRINDERS 8
#endif

#define RTE_SCHED_TB_RATE_CONFIG_ERR (1e-7)
#define RTE_SCHED_WRR_SHIFT 3
#define RTE_SCHED_MAX_QUEUES_PER_TC RTE_SCHED_BE_QUEUES_PER_PIPE
#define RTE_SCHED_GRINDER_PCACHE_SIZE (64 / RTE_SCHED_QUEUES_PER_PIPE)
#define RTE_SCHED_PIPE_INVALID UINT32_MAX
#define RTE_SCHED_BMP_POS_INVALID UINT32_MAX

/* Scaling for cycles_per_byte calculation
 * Chosen so that minimum rate is 480 bit/sec
 */
#define RTE_SCHED_TIME_SHIFT 8

struct rte_sched_pipe_profile {
	/* Token bucket (TB) */
	uint64_t tb_period;
	uint64_t tb_credits_per_period;
	uint64_t tb_size;

	/* Pipe traffic classes */
	uint64_t tc_period;
	uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint8_t tc_ov_weight;

	/* Pipe best-effort traffic class queues */
	uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
};

struct __rte_cache_aligned rte_sched_pipe {
	/* Token bucket (TB) */
	uint64_t tb_time; /* time of last update */
	uint64_t tb_credits;

	/* Pipe profile and flags */
	uint32_t profile;

	/* Traffic classes (TCs) */
	uint64_t tc_time; /* time of next update */
	uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

	/* Weighted Round Robin (WRR) */
	uint8_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];

	/* TC oversubscription */
	uint64_t tc_ov_credits;
	uint8_t tc_ov_period_id;
};

struct rte_sched_queue {
	uint16_t qw;
	uint16_t qr;
};

struct rte_sched_queue_extra {
	struct rte_sched_queue_stats stats;
	union {
		struct rte_red red;
		struct rte_pie pie;
	};
};

enum grinder_state {
	e_GRINDER_PREFETCH_PIPE = 0,
	e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
	e_GRINDER_PREFETCH_MBUF,
	e_GRINDER_READ_MBUF
};

struct rte_sched_subport_profile {
	/* Token bucket (TB) */
	uint64_t tb_period;
	uint64_t tb_credits_per_period;
	uint64_t tb_size;

	uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint64_t tc_period;
};

struct rte_sched_grinder {
	/* Pipe cache */
	uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
	uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
	uint32_t pcache_w;
	uint32_t pcache_r;

	/* Current pipe */
	enum grinder_state state;
	uint32_t productive;
	uint32_t pindex;
	struct rte_sched_subport *subport;
	struct rte_sched_subport_profile *subport_params;
	struct rte_sched_pipe *pipe;
	struct rte_sched_pipe_profile *pipe_params;

	/* TC cache */
	uint8_t tccache_qmask[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tccache_qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint32_t tccache_w;
	uint32_t tccache_r;

	/* Current TC */
	uint32_t tc_index;
	struct rte_sched_queue
*queue[RTE_SCHED_MAX_QUEUES_PER_TC]; 136 struct rte_mbuf **qbase[RTE_SCHED_MAX_QUEUES_PER_TC]; 137 uint32_t qindex[RTE_SCHED_MAX_QUEUES_PER_TC]; 138 uint16_t qsize; 139 uint32_t qmask; 140 uint32_t qpos; 141 struct rte_mbuf *pkt; 142 143 /* WRR */ 144 uint16_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE]; 145 uint16_t wrr_mask[RTE_SCHED_BE_QUEUES_PER_PIPE]; 146 uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE]; 147 }; 148 149 struct __rte_cache_aligned rte_sched_subport { 150 /* Token bucket (TB) */ 151 uint64_t tb_time; /* time of last update */ 152 uint64_t tb_credits; 153 154 /* Traffic classes (TCs) */ 155 uint64_t tc_time; /* time of next update */ 156 uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 157 158 /* TC oversubscription */ 159 uint64_t tc_ov_wm; 160 uint64_t tc_ov_wm_min; 161 uint64_t tc_ov_wm_max; 162 uint8_t tc_ov_period_id; 163 uint8_t tc_ov; 164 uint32_t tc_ov_n; 165 double tc_ov_rate; 166 167 /* Statistics */ 168 alignas(RTE_CACHE_LINE_SIZE) struct rte_sched_subport_stats stats; 169 170 /* subport profile */ 171 uint32_t profile; 172 /* Subport pipes */ 173 uint32_t n_pipes_per_subport_enabled; 174 uint32_t n_pipe_profiles; 175 uint32_t n_max_pipe_profiles; 176 177 /* Pipe best-effort TC rate */ 178 uint64_t pipe_tc_be_rate_max; 179 180 /* Pipe queues size */ 181 uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 182 183 bool cman_enabled; 184 enum rte_sched_cman_mode cman; 185 186 union { 187 struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS]; 188 struct rte_pie_config pie_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 189 }; 190 191 /* Scheduling loop detection */ 192 uint32_t pipe_loop; 193 uint32_t pipe_exhaustion; 194 195 /* Bitmap */ 196 struct rte_bitmap *bmp; 197 alignas(16) uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS]; 198 199 /* Grinders */ 200 struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS]; 201 uint32_t busy_grinders; 202 203 /* Queue base calculation */ 204 uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE]; 205 uint32_t qsize_sum; 206 207 /* TC oversubscription activation */ 208 int tc_ov_enabled; 209 210 struct rte_sched_pipe *pipe; 211 struct rte_sched_queue *queue; 212 struct rte_sched_queue_extra *queue_extra; 213 struct rte_sched_pipe_profile *pipe_profiles; 214 uint8_t *bmp_array; 215 struct rte_mbuf **queue_array; 216 alignas(RTE_CACHE_LINE_SIZE) uint8_t memory[0]; 217 }; 218 219 struct __rte_cache_aligned rte_sched_port { 220 /* User parameters */ 221 uint32_t n_subports_per_port; 222 uint32_t n_pipes_per_subport; 223 uint32_t n_pipes_per_subport_log2; 224 uint16_t pipe_queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 225 uint8_t pipe_tc[RTE_SCHED_QUEUES_PER_PIPE]; 226 uint8_t tc_queue[RTE_SCHED_QUEUES_PER_PIPE]; 227 uint32_t n_subport_profiles; 228 uint32_t n_max_subport_profiles; 229 uint64_t rate; 230 uint32_t mtu; 231 uint32_t frame_overhead; 232 int socket; 233 234 /* Timing */ 235 uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cycles */ 236 uint64_t time_cpu_bytes; /* Current CPU time measured in bytes */ 237 uint64_t time; /* Current NIC TX time measured in bytes */ 238 struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */ 239 uint64_t cycles_per_byte; 240 241 /* Grinders */ 242 struct rte_mbuf **pkts_out; 243 uint32_t n_pkts_out; 244 uint32_t subport_id; 245 246 /* Large data structures */ 247 struct rte_sched_subport_profile *subport_profiles; 248 alignas(RTE_CACHE_LINE_SIZE) struct rte_sched_subport *subports[0]; 249 }; 250 251 enum rte_sched_subport_array { 252 
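	/* Descriptive note: the per-subport arrays below are laid out back to
	 * back inside the single memory block of struct rte_sched_subport.
	 * Each identifier selects the array whose byte offset (or, for
	 * _TOTAL, the overall size) is returned by
	 * rte_sched_subport_get_array_base().
	 */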
e_RTE_SCHED_SUBPORT_ARRAY_PIPE = 0, 253 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE, 254 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA, 255 e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES, 256 e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY, 257 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY, 258 e_RTE_SCHED_SUBPORT_ARRAY_TOTAL, 259 }; 260 261 static inline uint32_t 262 rte_sched_subport_pipe_queues(struct rte_sched_subport *subport) 263 { 264 return RTE_SCHED_QUEUES_PER_PIPE * subport->n_pipes_per_subport_enabled; 265 } 266 267 static inline struct rte_mbuf ** 268 rte_sched_subport_pipe_qbase(struct rte_sched_subport *subport, uint32_t qindex) 269 { 270 uint32_t pindex = qindex >> 4; 271 uint32_t qpos = qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1); 272 273 return (subport->queue_array + pindex * 274 subport->qsize_sum + subport->qsize_add[qpos]); 275 } 276 277 static inline uint16_t 278 rte_sched_subport_pipe_qsize(struct rte_sched_port *port, 279 struct rte_sched_subport *subport, uint32_t qindex) 280 { 281 uint32_t tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)]; 282 283 return subport->qsize[tc]; 284 } 285 286 static inline uint32_t 287 rte_sched_port_queues_per_port(struct rte_sched_port *port) 288 { 289 uint32_t n_queues = 0, i; 290 291 for (i = 0; i < port->n_subports_per_port; i++) 292 n_queues += rte_sched_subport_pipe_queues(port->subports[i]); 293 294 return n_queues; 295 } 296 297 static inline uint16_t 298 rte_sched_port_pipe_queue(struct rte_sched_port *port, uint32_t traffic_class) 299 { 300 uint16_t pipe_queue = port->pipe_queue[traffic_class]; 301 302 return pipe_queue; 303 } 304 305 static inline uint8_t 306 rte_sched_port_pipe_tc(struct rte_sched_port *port, uint32_t qindex) 307 { 308 uint8_t pipe_tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)]; 309 310 return pipe_tc; 311 } 312 313 static inline uint8_t 314 rte_sched_port_tc_queue(struct rte_sched_port *port, uint32_t qindex) 315 { 316 uint8_t tc_queue = port->tc_queue[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)]; 317 318 return tc_queue; 319 } 320 321 static int 322 pipe_profile_check(struct rte_sched_pipe_params *params, 323 uint64_t rate, uint16_t *qsize) 324 { 325 uint32_t i; 326 327 /* Pipe parameters */ 328 if (params == NULL) { 329 SCHED_LOG(ERR, 330 "%s: Incorrect value for parameter params", __func__); 331 return -EINVAL; 332 } 333 334 /* TB rate: non-zero, not greater than port rate */ 335 if (params->tb_rate == 0 || 336 params->tb_rate > rate) { 337 SCHED_LOG(ERR, 338 "%s: Incorrect value for tb rate", __func__); 339 return -EINVAL; 340 } 341 342 /* TB size: non-zero */ 343 if (params->tb_size == 0) { 344 SCHED_LOG(ERR, 345 "%s: Incorrect value for tb size", __func__); 346 return -EINVAL; 347 } 348 349 /* TC rate: non-zero if qsize non-zero, less than pipe rate */ 350 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { 351 if ((qsize[i] == 0 && params->tc_rate[i] != 0) || 352 (qsize[i] != 0 && (params->tc_rate[i] == 0 || 353 params->tc_rate[i] > params->tb_rate))) { 354 SCHED_LOG(ERR, 355 "%s: Incorrect value for qsize or tc_rate", __func__); 356 return -EINVAL; 357 } 358 } 359 360 if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 || 361 qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) { 362 SCHED_LOG(ERR, 363 "%s: Incorrect value for be traffic class rate", __func__); 364 return -EINVAL; 365 } 366 367 /* TC period: non-zero */ 368 if (params->tc_period == 0) { 369 SCHED_LOG(ERR, 370 "%s: Incorrect value for tc period", __func__); 371 return -EINVAL; 372 } 373 374 /* Best effort tc oversubscription weight: non-zero */ 375 if 
(params->tc_ov_weight == 0) { 376 SCHED_LOG(ERR, 377 "%s: Incorrect value for tc ov weight", __func__); 378 return -EINVAL; 379 } 380 381 /* Queue WRR weights: non-zero */ 382 for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) { 383 if (params->wrr_weights[i] == 0) { 384 SCHED_LOG(ERR, 385 "%s: Incorrect value for wrr weight", __func__); 386 return -EINVAL; 387 } 388 } 389 390 return 0; 391 } 392 393 static int 394 subport_profile_check(struct rte_sched_subport_profile_params *params, 395 uint64_t rate) 396 { 397 uint32_t i; 398 399 /* Check user parameters */ 400 if (params == NULL) { 401 SCHED_LOG(ERR, "%s: " 402 "Incorrect value for parameter params", __func__); 403 return -EINVAL; 404 } 405 406 if (params->tb_rate == 0 || params->tb_rate > rate) { 407 SCHED_LOG(ERR, "%s: " 408 "Incorrect value for tb rate", __func__); 409 return -EINVAL; 410 } 411 412 if (params->tb_size == 0) { 413 SCHED_LOG(ERR, "%s: " 414 "Incorrect value for tb size", __func__); 415 return -EINVAL; 416 } 417 418 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { 419 uint64_t tc_rate = params->tc_rate[i]; 420 421 if (tc_rate == 0 || (tc_rate > params->tb_rate)) { 422 SCHED_LOG(ERR, "%s: " 423 "Incorrect value for tc rate", __func__); 424 return -EINVAL; 425 } 426 } 427 428 if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) { 429 SCHED_LOG(ERR, "%s: " 430 "Incorrect tc rate(best effort)", __func__); 431 return -EINVAL; 432 } 433 434 if (params->tc_period == 0) { 435 SCHED_LOG(ERR, "%s: " 436 "Incorrect value for tc period", __func__); 437 return -EINVAL; 438 } 439 440 return 0; 441 } 442 443 static int 444 rte_sched_port_check_params(struct rte_sched_port_params *params) 445 { 446 uint32_t i; 447 448 if (params == NULL) { 449 SCHED_LOG(ERR, 450 "%s: Incorrect value for parameter params", __func__); 451 return -EINVAL; 452 } 453 454 /* socket */ 455 if (params->socket < 0) { 456 SCHED_LOG(ERR, 457 "%s: Incorrect value for socket id", __func__); 458 return -EINVAL; 459 } 460 461 /* rate */ 462 if (params->rate == 0) { 463 SCHED_LOG(ERR, 464 "%s: Incorrect value for rate", __func__); 465 return -EINVAL; 466 } 467 468 /* mtu */ 469 if (params->mtu == 0) { 470 SCHED_LOG(ERR, 471 "%s: Incorrect value for mtu", __func__); 472 return -EINVAL; 473 } 474 475 /* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */ 476 if (params->n_subports_per_port == 0 || 477 params->n_subports_per_port > 1u << 16 || 478 !rte_is_power_of_2(params->n_subports_per_port)) { 479 SCHED_LOG(ERR, 480 "%s: Incorrect value for number of subports", __func__); 481 return -EINVAL; 482 } 483 484 if (params->subport_profiles == NULL || 485 params->n_subport_profiles == 0 || 486 params->n_max_subport_profiles == 0 || 487 params->n_subport_profiles > params->n_max_subport_profiles) { 488 SCHED_LOG(ERR, 489 "%s: Incorrect value for subport profiles", __func__); 490 return -EINVAL; 491 } 492 493 for (i = 0; i < params->n_subport_profiles; i++) { 494 struct rte_sched_subport_profile_params *p = 495 params->subport_profiles + i; 496 int status; 497 498 status = subport_profile_check(p, params->rate); 499 if (status != 0) { 500 SCHED_LOG(ERR, 501 "%s: subport profile check failed(%d)", 502 __func__, status); 503 return -EINVAL; 504 } 505 } 506 507 /* n_pipes_per_subport: non-zero, power of 2 */ 508 if (params->n_pipes_per_subport == 0 || 509 !rte_is_power_of_2(params->n_pipes_per_subport)) { 510 SCHED_LOG(ERR, 511 "%s: Incorrect value for maximum pipes number", __func__); 512 return -EINVAL; 513 } 514 515 return 0; 516 } 517 518 static 
uint32_t 519 rte_sched_subport_get_array_base(struct rte_sched_subport_params *params, 520 enum rte_sched_subport_array array) 521 { 522 uint32_t n_pipes_per_subport = params->n_pipes_per_subport_enabled; 523 uint32_t n_subport_pipe_queues = 524 RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport; 525 526 uint32_t size_pipe = n_pipes_per_subport * sizeof(struct rte_sched_pipe); 527 uint32_t size_queue = 528 n_subport_pipe_queues * sizeof(struct rte_sched_queue); 529 uint32_t size_queue_extra 530 = n_subport_pipe_queues * sizeof(struct rte_sched_queue_extra); 531 uint32_t size_pipe_profiles = params->n_max_pipe_profiles * 532 sizeof(struct rte_sched_pipe_profile); 533 uint32_t size_bmp_array = 534 rte_bitmap_get_memory_footprint(n_subport_pipe_queues); 535 uint32_t size_per_pipe_queue_array, size_queue_array; 536 537 uint32_t base, i; 538 539 size_per_pipe_queue_array = 0; 540 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { 541 if (i < RTE_SCHED_TRAFFIC_CLASS_BE) 542 size_per_pipe_queue_array += 543 params->qsize[i] * sizeof(struct rte_mbuf *); 544 else 545 size_per_pipe_queue_array += RTE_SCHED_MAX_QUEUES_PER_TC * 546 params->qsize[i] * sizeof(struct rte_mbuf *); 547 } 548 size_queue_array = n_pipes_per_subport * size_per_pipe_queue_array; 549 550 base = 0; 551 552 if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE) 553 return base; 554 base += RTE_CACHE_LINE_ROUNDUP(size_pipe); 555 556 if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE) 557 return base; 558 base += RTE_CACHE_LINE_ROUNDUP(size_queue); 559 560 if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA) 561 return base; 562 base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra); 563 564 if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES) 565 return base; 566 base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles); 567 568 if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY) 569 return base; 570 base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array); 571 572 if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY) 573 return base; 574 base += RTE_CACHE_LINE_ROUNDUP(size_queue_array); 575 576 return base; 577 } 578 579 static void 580 rte_sched_subport_config_qsize(struct rte_sched_subport *subport) 581 { 582 uint32_t i; 583 584 subport->qsize_add[0] = 0; 585 586 /* Strict priority traffic class */ 587 for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 588 subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1]; 589 590 /* Best-effort traffic class */ 591 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] = 592 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] + 593 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE]; 594 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] = 595 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] + 596 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE]; 597 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] = 598 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] + 599 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE]; 600 601 subport->qsize_sum = subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] + 602 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE]; 603 } 604 605 static void 606 rte_sched_port_log_pipe_profile(struct rte_sched_subport *subport, uint32_t i) 607 { 608 struct rte_sched_pipe_profile *p = subport->pipe_profiles + i; 609 610 RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n" 611 " Token bucket: period = %"PRIu64", credits per period = %"PRIu64", size = %"PRIu64"\n" 612 " Traffic classes: period = %"PRIu64",\n" 613 " credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64 614 ", %"PRIu64", %"PRIu64", %"PRIu64", 
%"PRIu64", %"PRIu64", %"PRIu64 615 ", %"PRIu64", %"PRIu64", %"PRIu64"]\n" 616 " Best-effort traffic class oversubscription: weight = %hhu\n" 617 " WRR cost: [%hhu, %hhu, %hhu, %hhu]\n", 618 i, 619 620 /* Token bucket */ 621 p->tb_period, 622 p->tb_credits_per_period, 623 p->tb_size, 624 625 /* Traffic classes */ 626 p->tc_period, 627 p->tc_credits_per_period[0], 628 p->tc_credits_per_period[1], 629 p->tc_credits_per_period[2], 630 p->tc_credits_per_period[3], 631 p->tc_credits_per_period[4], 632 p->tc_credits_per_period[5], 633 p->tc_credits_per_period[6], 634 p->tc_credits_per_period[7], 635 p->tc_credits_per_period[8], 636 p->tc_credits_per_period[9], 637 p->tc_credits_per_period[10], 638 p->tc_credits_per_period[11], 639 p->tc_credits_per_period[12], 640 641 /* Best-effort traffic class oversubscription */ 642 p->tc_ov_weight, 643 644 /* WRR */ 645 p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3]); 646 } 647 648 static void 649 rte_sched_port_log_subport_profile(struct rte_sched_port *port, uint32_t i) 650 { 651 struct rte_sched_subport_profile *p = port->subport_profiles + i; 652 653 RTE_LOG(DEBUG, SCHED, "Low level config for subport profile %u:\n" 654 "Token bucket: period = %"PRIu64", credits per period = %"PRIu64"," 655 "size = %"PRIu64"\n" 656 "Traffic classes: period = %"PRIu64",\n" 657 "credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64 658 " %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64 659 " %"PRIu64", %"PRIu64", %"PRIu64"]\n", 660 i, 661 662 /* Token bucket */ 663 p->tb_period, 664 p->tb_credits_per_period, 665 p->tb_size, 666 667 /* Traffic classes */ 668 p->tc_period, 669 p->tc_credits_per_period[0], 670 p->tc_credits_per_period[1], 671 p->tc_credits_per_period[2], 672 p->tc_credits_per_period[3], 673 p->tc_credits_per_period[4], 674 p->tc_credits_per_period[5], 675 p->tc_credits_per_period[6], 676 p->tc_credits_per_period[7], 677 p->tc_credits_per_period[8], 678 p->tc_credits_per_period[9], 679 p->tc_credits_per_period[10], 680 p->tc_credits_per_period[11], 681 p->tc_credits_per_period[12]); 682 } 683 684 static inline uint64_t 685 rte_sched_time_ms_to_bytes(uint64_t time_ms, uint64_t rate) 686 { 687 uint64_t time = time_ms; 688 689 time = (time * rate) / 1000; 690 691 return time; 692 } 693 694 static void 695 rte_sched_pipe_profile_convert(struct rte_sched_subport *subport, 696 struct rte_sched_pipe_params *src, 697 struct rte_sched_pipe_profile *dst, 698 uint64_t rate) 699 { 700 uint32_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE]; 701 uint32_t lcd1, lcd2, lcd; 702 uint32_t i; 703 704 /* Token Bucket */ 705 if (src->tb_rate == rate) { 706 dst->tb_credits_per_period = 1; 707 dst->tb_period = 1; 708 } else { 709 double tb_rate = (double) src->tb_rate 710 / (double) rate; 711 double d = RTE_SCHED_TB_RATE_CONFIG_ERR; 712 713 rte_approx_64(tb_rate, d, &dst->tb_credits_per_period, 714 &dst->tb_period); 715 } 716 717 dst->tb_size = src->tb_size; 718 719 /* Traffic Classes */ 720 dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period, 721 rate); 722 723 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 724 if (subport->qsize[i]) 725 dst->tc_credits_per_period[i] 726 = rte_sched_time_ms_to_bytes(src->tc_period, 727 src->tc_rate[i]); 728 729 dst->tc_ov_weight = src->tc_ov_weight; 730 731 /* WRR queues */ 732 wrr_cost[0] = src->wrr_weights[0]; 733 wrr_cost[1] = src->wrr_weights[1]; 734 wrr_cost[2] = src->wrr_weights[2]; 735 wrr_cost[3] = src->wrr_weights[3]; 736 737 lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]); 738 lcd2 = 
rte_get_lcd(wrr_cost[2], wrr_cost[3]); 739 lcd = rte_get_lcd(lcd1, lcd2); 740 741 wrr_cost[0] = lcd / wrr_cost[0]; 742 wrr_cost[1] = lcd / wrr_cost[1]; 743 wrr_cost[2] = lcd / wrr_cost[2]; 744 wrr_cost[3] = lcd / wrr_cost[3]; 745 746 dst->wrr_cost[0] = (uint8_t) wrr_cost[0]; 747 dst->wrr_cost[1] = (uint8_t) wrr_cost[1]; 748 dst->wrr_cost[2] = (uint8_t) wrr_cost[2]; 749 dst->wrr_cost[3] = (uint8_t) wrr_cost[3]; 750 } 751 752 static void 753 rte_sched_subport_profile_convert(struct rte_sched_subport_profile_params *src, 754 struct rte_sched_subport_profile *dst, 755 uint64_t rate) 756 { 757 uint32_t i; 758 759 /* Token Bucket */ 760 if (src->tb_rate == rate) { 761 dst->tb_credits_per_period = 1; 762 dst->tb_period = 1; 763 } else { 764 double tb_rate = (double) src->tb_rate 765 / (double) rate; 766 double d = RTE_SCHED_TB_RATE_CONFIG_ERR; 767 768 rte_approx_64(tb_rate, d, &dst->tb_credits_per_period, 769 &dst->tb_period); 770 } 771 772 dst->tb_size = src->tb_size; 773 774 /* Traffic Classes */ 775 dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period, rate); 776 777 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 778 dst->tc_credits_per_period[i] 779 = rte_sched_time_ms_to_bytes(src->tc_period, 780 src->tc_rate[i]); 781 } 782 783 static void 784 rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport, 785 struct rte_sched_subport_params *params, uint64_t rate) 786 { 787 uint32_t i; 788 789 for (i = 0; i < subport->n_pipe_profiles; i++) { 790 struct rte_sched_pipe_params *src = params->pipe_profiles + i; 791 struct rte_sched_pipe_profile *dst = subport->pipe_profiles + i; 792 793 rte_sched_pipe_profile_convert(subport, src, dst, rate); 794 rte_sched_port_log_pipe_profile(subport, i); 795 } 796 797 subport->pipe_tc_be_rate_max = 0; 798 for (i = 0; i < subport->n_pipe_profiles; i++) { 799 struct rte_sched_pipe_params *src = params->pipe_profiles + i; 800 uint64_t pipe_tc_be_rate = src->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE]; 801 802 if (subport->pipe_tc_be_rate_max < pipe_tc_be_rate) 803 subport->pipe_tc_be_rate_max = pipe_tc_be_rate; 804 } 805 } 806 807 static void 808 rte_sched_port_config_subport_profile_table(struct rte_sched_port *port, 809 struct rte_sched_port_params *params, 810 uint64_t rate) 811 { 812 uint32_t i; 813 814 for (i = 0; i < port->n_subport_profiles; i++) { 815 struct rte_sched_subport_profile_params *src 816 = params->subport_profiles + i; 817 struct rte_sched_subport_profile *dst 818 = port->subport_profiles + i; 819 820 rte_sched_subport_profile_convert(src, dst, rate); 821 rte_sched_port_log_subport_profile(port, i); 822 } 823 } 824 825 static int 826 rte_sched_subport_check_params(struct rte_sched_subport_params *params, 827 uint32_t n_max_pipes_per_subport, 828 uint64_t rate) 829 { 830 uint32_t i; 831 832 /* Check user parameters */ 833 if (params == NULL) { 834 SCHED_LOG(ERR, 835 "%s: Incorrect value for parameter params", __func__); 836 return -EINVAL; 837 } 838 839 /* qsize: if non-zero, power of 2, 840 * no bigger than 32K (due to 16-bit read/write pointers) 841 */ 842 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { 843 uint16_t qsize = params->qsize[i]; 844 845 if (qsize != 0 && !rte_is_power_of_2(qsize)) { 846 SCHED_LOG(ERR, 847 "%s: Incorrect value for qsize", __func__); 848 return -EINVAL; 849 } 850 } 851 852 if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) { 853 SCHED_LOG(ERR, "%s: Incorrect qsize", __func__); 854 return -EINVAL; 855 } 856 857 /* n_pipes_per_subport: non-zero, power of 2 */ 858 if 
(params->n_pipes_per_subport_enabled == 0 ||
	    params->n_pipes_per_subport_enabled > n_max_pipes_per_subport ||
	    !rte_is_power_of_2(params->n_pipes_per_subport_enabled)) {
		SCHED_LOG(ERR,
			"%s: Incorrect value for pipes number", __func__);
		return -EINVAL;
	}

	/* pipe_profiles and n_pipe_profiles */
	if (params->pipe_profiles == NULL ||
	    params->n_pipe_profiles == 0 ||
	    params->n_max_pipe_profiles == 0 ||
	    params->n_pipe_profiles > params->n_max_pipe_profiles) {
		SCHED_LOG(ERR,
			"%s: Incorrect value for pipe profiles", __func__);
		return -EINVAL;
	}

	for (i = 0; i < params->n_pipe_profiles; i++) {
		struct rte_sched_pipe_params *p = params->pipe_profiles + i;
		int status;

		status = pipe_profile_check(p, rate, &params->qsize[0]);
		if (status != 0) {
			SCHED_LOG(ERR,
				"%s: Pipe profile check failed(%d)", __func__, status);
			return -EINVAL;
		}
	}

	return 0;
}

uint32_t
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params,
	struct rte_sched_subport_params **subport_params)
{
	uint32_t size0 = 0, size1 = 0, i;
	int status;

	status = rte_sched_port_check_params(port_params);
	if (status != 0) {
		SCHED_LOG(ERR,
			"%s: Port scheduler port params check failed (%d)",
			__func__, status);

		return 0;
	}

	for (i = 0; i < port_params->n_subports_per_port; i++) {
		struct rte_sched_subport_params *sp = subport_params[i];

		status = rte_sched_subport_check_params(sp,
				port_params->n_pipes_per_subport,
				port_params->rate);
		if (status != 0) {
			SCHED_LOG(ERR,
				"%s: Port scheduler subport params check failed (%d)",
				__func__, status);

			return 0;
		}
	}

	size0 = sizeof(struct rte_sched_port);

	for (i = 0; i < port_params->n_subports_per_port; i++) {
		struct rte_sched_subport_params *sp = subport_params[i];

		size1 += rte_sched_subport_get_array_base(sp,
				e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
	}

	return size0 + size1;
}

struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params)
{
	struct rte_sched_port *port = NULL;
	uint32_t size0, size1, size2;
	uint32_t cycles_per_byte;
	uint32_t i, j;
	int status;

	status = rte_sched_port_check_params(params);
	if (status != 0) {
		SCHED_LOG(ERR,
			"%s: Port scheduler params check failed (%d)",
			__func__, status);
		return NULL;
	}

	size0 = sizeof(struct rte_sched_port);
	size1 = params->n_subports_per_port * sizeof(struct rte_sched_subport *);
	size2 = params->n_max_subport_profiles *
		sizeof(struct rte_sched_subport_profile);

	/* Allocate memory to store the data structures */
	port = rte_zmalloc_socket("qos_params", size0 + size1,
				 RTE_CACHE_LINE_SIZE, params->socket);
	if (port == NULL) {
		SCHED_LOG(ERR, "%s: Memory allocation fails", __func__);

		return NULL;
	}

	/* Allocate memory to store the subport profile */
	port->subport_profiles = rte_zmalloc_socket("subport_profile", size2,
					RTE_CACHE_LINE_SIZE, params->socket);
	if (port->subport_profiles == NULL) {
		SCHED_LOG(ERR, "%s: Memory allocation fails", __func__);
		rte_free(port);
		return NULL;
	}

	/* User parameters */
	port->n_subports_per_port = params->n_subports_per_port;
	port->n_subport_profiles = params->n_subport_profiles;
	port->n_max_subport_profiles = params->n_max_subport_profiles;
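	/* Note on queue numbering, assuming the default constants from
	 * rte_sched.h (RTE_SCHED_QUEUES_PER_PIPE = 16,
	 * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE = 13,
	 * RTE_SCHED_TRAFFIC_CLASS_BE = 12): a queue index packs subport,
	 * pipe and queue-within-pipe (low 4 bits). The pipe_queue[],
	 * pipe_tc[] and tc_queue[] tables initialized below translate
	 * between traffic class and queue position within the pipe, e.g.
	 * pipe_tc[0..11] = 0..11, pipe_tc[12..15] = 12 (best-effort TC)
	 * and tc_queue[12..15] = 0..3.
	 */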
port->n_pipes_per_subport = params->n_pipes_per_subport; 979 port->n_pipes_per_subport_log2 = 980 rte_ctz32(params->n_pipes_per_subport); 981 port->socket = params->socket; 982 983 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 984 port->pipe_queue[i] = i; 985 986 for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) { 987 port->pipe_tc[i] = j; 988 989 if (j < RTE_SCHED_TRAFFIC_CLASS_BE) 990 j++; 991 } 992 993 for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) { 994 port->tc_queue[i] = j; 995 996 if (i >= RTE_SCHED_TRAFFIC_CLASS_BE) 997 j++; 998 } 999 port->rate = params->rate; 1000 port->mtu = params->mtu + params->frame_overhead; 1001 port->frame_overhead = params->frame_overhead; 1002 1003 /* Timing */ 1004 port->time_cpu_cycles = rte_get_tsc_cycles(); 1005 port->time_cpu_bytes = 0; 1006 port->time = 0; 1007 1008 /* Subport profile table */ 1009 rte_sched_port_config_subport_profile_table(port, params, port->rate); 1010 1011 cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT) 1012 / params->rate; 1013 port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte); 1014 port->cycles_per_byte = cycles_per_byte; 1015 1016 /* Grinders */ 1017 port->pkts_out = NULL; 1018 port->n_pkts_out = 0; 1019 port->subport_id = 0; 1020 1021 return port; 1022 } 1023 1024 static inline void 1025 rte_sched_subport_free(struct rte_sched_port *port, 1026 struct rte_sched_subport *subport) 1027 { 1028 uint32_t n_subport_pipe_queues; 1029 uint32_t qindex; 1030 1031 if (subport == NULL) 1032 return; 1033 1034 n_subport_pipe_queues = rte_sched_subport_pipe_queues(subport); 1035 1036 /* Free enqueued mbufs */ 1037 for (qindex = 0; qindex < n_subport_pipe_queues; qindex++) { 1038 struct rte_mbuf **mbufs = 1039 rte_sched_subport_pipe_qbase(subport, qindex); 1040 uint16_t qsize = rte_sched_subport_pipe_qsize(port, subport, qindex); 1041 if (qsize != 0) { 1042 struct rte_sched_queue *queue = subport->queue + qindex; 1043 uint16_t qr = queue->qr & (qsize - 1); 1044 uint16_t qw = queue->qw & (qsize - 1); 1045 1046 for (; qr != qw; qr = (qr + 1) & (qsize - 1)) 1047 rte_pktmbuf_free(mbufs[qr]); 1048 } 1049 } 1050 1051 rte_free(subport); 1052 } 1053 1054 void 1055 rte_sched_port_free(struct rte_sched_port *port) 1056 { 1057 uint32_t i; 1058 1059 /* Check user parameters */ 1060 if (port == NULL) 1061 return; 1062 1063 for (i = 0; i < port->n_subports_per_port; i++) 1064 rte_sched_subport_free(port, port->subports[i]); 1065 1066 rte_free(port->subport_profiles); 1067 rte_free(port); 1068 } 1069 1070 static void 1071 rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports) 1072 { 1073 uint32_t i; 1074 1075 for (i = 0; i < n_subports; i++) { 1076 struct rte_sched_subport *subport = port->subports[i]; 1077 1078 rte_sched_subport_free(port, subport); 1079 } 1080 1081 rte_free(port->subport_profiles); 1082 rte_free(port); 1083 } 1084 1085 static int 1086 rte_sched_red_config(struct rte_sched_port *port, 1087 struct rte_sched_subport *s, 1088 struct rte_sched_subport_params *params, 1089 uint32_t n_subports) 1090 { 1091 uint32_t i; 1092 1093 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { 1094 1095 uint32_t j; 1096 1097 for (j = 0; j < RTE_COLORS; j++) { 1098 /* if min/max are both zero, then RED is disabled */ 1099 if ((params->cman_params->red_params[i][j].min_th | 1100 params->cman_params->red_params[i][j].max_th) == 0) { 1101 continue; 1102 } 1103 1104 if (rte_red_config_init(&s->red_config[i][j], 1105 params->cman_params->red_params[i][j].wq_log2, 1106 
params->cman_params->red_params[i][j].min_th, 1107 params->cman_params->red_params[i][j].max_th, 1108 params->cman_params->red_params[i][j].maxp_inv) != 0) { 1109 rte_sched_free_memory(port, n_subports); 1110 1111 SCHED_LOG(NOTICE, 1112 "%s: RED configuration init fails", __func__); 1113 return -EINVAL; 1114 } 1115 } 1116 } 1117 s->cman = RTE_SCHED_CMAN_RED; 1118 return 0; 1119 } 1120 1121 static int 1122 rte_sched_pie_config(struct rte_sched_port *port, 1123 struct rte_sched_subport *s, 1124 struct rte_sched_subport_params *params, 1125 uint32_t n_subports) 1126 { 1127 uint32_t i; 1128 1129 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { 1130 if (params->cman_params->pie_params[i].tailq_th > params->qsize[i]) { 1131 SCHED_LOG(NOTICE, 1132 "%s: PIE tailq threshold incorrect", __func__); 1133 return -EINVAL; 1134 } 1135 1136 if (rte_pie_config_init(&s->pie_config[i], 1137 params->cman_params->pie_params[i].qdelay_ref, 1138 params->cman_params->pie_params[i].dp_update_interval, 1139 params->cman_params->pie_params[i].max_burst, 1140 params->cman_params->pie_params[i].tailq_th) != 0) { 1141 rte_sched_free_memory(port, n_subports); 1142 1143 SCHED_LOG(NOTICE, 1144 "%s: PIE configuration init fails", __func__); 1145 return -EINVAL; 1146 } 1147 } 1148 s->cman = RTE_SCHED_CMAN_PIE; 1149 return 0; 1150 } 1151 1152 static int 1153 rte_sched_cman_config(struct rte_sched_port *port, 1154 struct rte_sched_subport *s, 1155 struct rte_sched_subport_params *params, 1156 uint32_t n_subports) 1157 { 1158 if (params->cman_params->cman_mode == RTE_SCHED_CMAN_RED) 1159 return rte_sched_red_config(port, s, params, n_subports); 1160 1161 else if (params->cman_params->cman_mode == RTE_SCHED_CMAN_PIE) 1162 return rte_sched_pie_config(port, s, params, n_subports); 1163 1164 return -EINVAL; 1165 } 1166 1167 int 1168 rte_sched_subport_tc_ov_config(struct rte_sched_port *port, 1169 uint32_t subport_id, 1170 bool tc_ov_enable) 1171 { 1172 struct rte_sched_subport *s; 1173 1174 if (port == NULL) { 1175 SCHED_LOG(ERR, 1176 "%s: Incorrect value for parameter port", __func__); 1177 return -EINVAL; 1178 } 1179 1180 if (subport_id >= port->n_subports_per_port) { 1181 SCHED_LOG(ERR, 1182 "%s: Incorrect value for parameter subport id", __func__); 1183 return -EINVAL; 1184 } 1185 1186 s = port->subports[subport_id]; 1187 s->tc_ov_enabled = tc_ov_enable ? 1 : 0; 1188 1189 return 0; 1190 } 1191 1192 int 1193 rte_sched_subport_config(struct rte_sched_port *port, 1194 uint32_t subport_id, 1195 struct rte_sched_subport_params *params, 1196 uint32_t subport_profile_id) 1197 { 1198 struct rte_sched_subport *s = NULL; 1199 uint32_t n_subports = subport_id; 1200 struct rte_sched_subport_profile *profile; 1201 uint32_t n_subport_pipe_queues, i; 1202 uint32_t size0, size1, bmp_mem_size; 1203 int status; 1204 int ret; 1205 1206 /* Check user parameters */ 1207 if (port == NULL) { 1208 SCHED_LOG(ERR, 1209 "%s: Incorrect value for parameter port", __func__); 1210 return 0; 1211 } 1212 1213 if (subport_id >= port->n_subports_per_port) { 1214 SCHED_LOG(ERR, 1215 "%s: Incorrect value for subport id", __func__); 1216 ret = -EINVAL; 1217 goto out; 1218 } 1219 1220 if (subport_profile_id >= port->n_max_subport_profiles) { 1221 SCHED_LOG(ERR, "%s: " 1222 "Number of subport profile exceeds the max limit", 1223 __func__); 1224 ret = -EINVAL; 1225 goto out; 1226 } 1227 1228 /** Memory is allocated only on first invocation of the api for a 1229 * given subport. 
Subsequent invocation on same subport will just 1230 * update subport bandwidth parameter. 1231 */ 1232 if (port->subports[subport_id] == NULL) { 1233 1234 status = rte_sched_subport_check_params(params, 1235 port->n_pipes_per_subport, 1236 port->rate); 1237 if (status != 0) { 1238 SCHED_LOG(NOTICE, 1239 "%s: Port scheduler params check failed (%d)", 1240 __func__, status); 1241 ret = -EINVAL; 1242 goto out; 1243 } 1244 1245 /* Determine the amount of memory to allocate */ 1246 size0 = sizeof(struct rte_sched_subport); 1247 size1 = rte_sched_subport_get_array_base(params, 1248 e_RTE_SCHED_SUBPORT_ARRAY_TOTAL); 1249 1250 /* Allocate memory to store the data structures */ 1251 s = rte_zmalloc_socket("subport_params", size0 + size1, 1252 RTE_CACHE_LINE_SIZE, port->socket); 1253 if (s == NULL) { 1254 SCHED_LOG(ERR, 1255 "%s: Memory allocation fails", __func__); 1256 ret = -ENOMEM; 1257 goto out; 1258 } 1259 1260 n_subports++; 1261 1262 /* Port */ 1263 port->subports[subport_id] = s; 1264 1265 s->tb_time = port->time; 1266 1267 /* compile time checks */ 1268 RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0); 1269 RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS & 1270 (RTE_SCHED_PORT_N_GRINDERS - 1)); 1271 1272 /* User parameters */ 1273 s->n_pipes_per_subport_enabled = 1274 params->n_pipes_per_subport_enabled; 1275 memcpy(s->qsize, params->qsize, sizeof(params->qsize)); 1276 s->n_pipe_profiles = params->n_pipe_profiles; 1277 s->n_max_pipe_profiles = params->n_max_pipe_profiles; 1278 1279 /* TC oversubscription is enabled by default */ 1280 s->tc_ov_enabled = 1; 1281 1282 if (params->cman_params != NULL) { 1283 s->cman_enabled = true; 1284 status = rte_sched_cman_config(port, s, params, n_subports); 1285 if (status) { 1286 SCHED_LOG(NOTICE, 1287 "%s: CMAN configuration fails", __func__); 1288 return status; 1289 } 1290 } else { 1291 s->cman_enabled = false; 1292 } 1293 1294 /* Scheduling loop detection */ 1295 s->pipe_loop = RTE_SCHED_PIPE_INVALID; 1296 s->pipe_exhaustion = 0; 1297 1298 /* Grinders */ 1299 s->busy_grinders = 0; 1300 1301 /* Queue base calculation */ 1302 rte_sched_subport_config_qsize(s); 1303 1304 /* Large data structures */ 1305 s->pipe = (struct rte_sched_pipe *) 1306 (s->memory + rte_sched_subport_get_array_base(params, 1307 e_RTE_SCHED_SUBPORT_ARRAY_PIPE)); 1308 s->queue = (struct rte_sched_queue *) 1309 (s->memory + rte_sched_subport_get_array_base(params, 1310 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)); 1311 s->queue_extra = (struct rte_sched_queue_extra *) 1312 (s->memory + rte_sched_subport_get_array_base(params, 1313 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)); 1314 s->pipe_profiles = (struct rte_sched_pipe_profile *) 1315 (s->memory + rte_sched_subport_get_array_base(params, 1316 e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)); 1317 s->bmp_array = s->memory + rte_sched_subport_get_array_base( 1318 params, e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY); 1319 s->queue_array = (struct rte_mbuf **) 1320 (s->memory + rte_sched_subport_get_array_base(params, 1321 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)); 1322 1323 /* Pipe profile table */ 1324 rte_sched_subport_config_pipe_profile_table(s, params, 1325 port->rate); 1326 1327 /* Bitmap */ 1328 n_subport_pipe_queues = rte_sched_subport_pipe_queues(s); 1329 bmp_mem_size = rte_bitmap_get_memory_footprint( 1330 n_subport_pipe_queues); 1331 s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array, 1332 bmp_mem_size); 1333 if (s->bmp == NULL) { 1334 SCHED_LOG(ERR, 1335 "%s: Subport bitmap init error", __func__); 1336 ret = -EINVAL; 1337 goto out; 1338 } 1339 
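		/* No grinder holds a pipe yet: mark every grinder base
		 * bitmap position as invalid.
		 */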
1340 for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) 1341 s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID; 1342 1343 /* TC oversubscription */ 1344 s->tc_ov_wm_min = port->mtu; 1345 s->tc_ov_period_id = 0; 1346 s->tc_ov = 0; 1347 s->tc_ov_n = 0; 1348 s->tc_ov_rate = 0; 1349 } 1350 1351 { 1352 /* update subport parameters from subport profile table*/ 1353 profile = port->subport_profiles + subport_profile_id; 1354 1355 s = port->subports[subport_id]; 1356 1357 s->tb_credits = profile->tb_size / 2; 1358 1359 s->tc_time = port->time + profile->tc_period; 1360 1361 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 1362 if (s->qsize[i]) 1363 s->tc_credits[i] = 1364 profile->tc_credits_per_period[i]; 1365 else 1366 profile->tc_credits_per_period[i] = 0; 1367 1368 s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period, 1369 s->pipe_tc_be_rate_max); 1370 s->tc_ov_wm = s->tc_ov_wm_max; 1371 s->profile = subport_profile_id; 1372 1373 } 1374 1375 rte_sched_port_log_subport_profile(port, subport_profile_id); 1376 1377 return 0; 1378 1379 out: 1380 rte_sched_free_memory(port, n_subports); 1381 1382 return ret; 1383 } 1384 1385 int 1386 rte_sched_pipe_config(struct rte_sched_port *port, 1387 uint32_t subport_id, 1388 uint32_t pipe_id, 1389 int32_t pipe_profile) 1390 { 1391 struct rte_sched_subport *s; 1392 struct rte_sched_subport_profile *sp; 1393 struct rte_sched_pipe *p; 1394 struct rte_sched_pipe_profile *params; 1395 uint32_t n_subports = subport_id + 1; 1396 uint32_t deactivate, profile, i; 1397 int ret; 1398 1399 /* Check user parameters */ 1400 profile = (uint32_t) pipe_profile; 1401 deactivate = (pipe_profile < 0); 1402 1403 if (port == NULL) { 1404 SCHED_LOG(ERR, 1405 "%s: Incorrect value for parameter port", __func__); 1406 return -EINVAL; 1407 } 1408 1409 if (subport_id >= port->n_subports_per_port) { 1410 SCHED_LOG(ERR, 1411 "%s: Incorrect value for parameter subport id", __func__); 1412 ret = -EINVAL; 1413 goto out; 1414 } 1415 1416 s = port->subports[subport_id]; 1417 if (pipe_id >= s->n_pipes_per_subport_enabled) { 1418 SCHED_LOG(ERR, 1419 "%s: Incorrect value for parameter pipe id", __func__); 1420 ret = -EINVAL; 1421 goto out; 1422 } 1423 1424 if (!deactivate && profile >= s->n_pipe_profiles) { 1425 SCHED_LOG(ERR, 1426 "%s: Incorrect value for parameter pipe profile", __func__); 1427 ret = -EINVAL; 1428 goto out; 1429 } 1430 1431 sp = port->subport_profiles + s->profile; 1432 /* Handle the case when pipe already has a valid configuration */ 1433 p = s->pipe + pipe_id; 1434 if (p->tb_time) { 1435 params = s->pipe_profiles + p->profile; 1436 1437 double subport_tc_be_rate = 1438 (double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] 1439 / (double) sp->tc_period; 1440 double pipe_tc_be_rate = 1441 (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] 1442 / (double) params->tc_period; 1443 uint32_t tc_be_ov = s->tc_ov; 1444 1445 /* Unplug pipe from its subport */ 1446 s->tc_ov_n -= params->tc_ov_weight; 1447 s->tc_ov_rate -= pipe_tc_be_rate; 1448 s->tc_ov = s->tc_ov_rate > subport_tc_be_rate; 1449 1450 if (s->tc_ov != tc_be_ov) { 1451 SCHED_LOG(DEBUG, 1452 "Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)", 1453 subport_id, subport_tc_be_rate, s->tc_ov_rate); 1454 } 1455 1456 /* Reset the pipe */ 1457 memset(p, 0, sizeof(struct rte_sched_pipe)); 1458 } 1459 1460 if (deactivate) 1461 return 0; 1462 1463 /* Apply the new pipe configuration */ 1464 p->profile = profile; 1465 params = s->pipe_profiles + p->profile; 1466 1467 /* Token 
Bucket (TB) */
	p->tb_time = port->time;
	p->tb_credits = params->tb_size / 2;

	/* Traffic Classes (TCs) */
	p->tc_time = port->time + params->tc_period;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		if (s->qsize[i])
			p->tc_credits[i] = params->tc_credits_per_period[i];

	{
		/* Subport best effort tc oversubscription */
		double subport_tc_be_rate =
			(double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
			/ (double) sp->tc_period;
		double pipe_tc_be_rate =
			(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
			/ (double) params->tc_period;
		uint32_t tc_be_ov = s->tc_ov;

		s->tc_ov_n += params->tc_ov_weight;
		s->tc_ov_rate += pipe_tc_be_rate;
		s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;

		if (s->tc_ov != tc_be_ov) {
			SCHED_LOG(DEBUG,
				"Subport %u Best effort TC oversubscription is ON (%.4lf < %.4lf)",
				subport_id, subport_tc_be_rate, s->tc_ov_rate);
		}
		p->tc_ov_period_id = s->tc_ov_period_id;
		p->tc_ov_credits = s->tc_ov_wm;
	}

	return 0;

out:
	rte_sched_free_memory(port, n_subports);

	return ret;
}

int
rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
	uint32_t subport_id,
	struct rte_sched_pipe_params *params,
	uint32_t *pipe_profile_id)
{
	struct rte_sched_subport *s;
	struct rte_sched_pipe_profile *pp;
	uint32_t i;
	int status;

	/* Port */
	if (port == NULL) {
		SCHED_LOG(ERR,
			"%s: Incorrect value for parameter port", __func__);
		return -EINVAL;
	}

	/* Subport id must not exceed the max limit */
	if (subport_id >= port->n_subports_per_port) {
		SCHED_LOG(ERR,
			"%s: Incorrect value for subport id", __func__);
		return -EINVAL;
	}

	s = port->subports[subport_id];

	/* Pipe profiles must not exceed the max limit */
	if (s->n_pipe_profiles >= s->n_max_pipe_profiles) {
		SCHED_LOG(ERR,
			"%s: Number of pipe profiles exceeds the max limit", __func__);
		return -EINVAL;
	}

	/* Pipe params */
	status = pipe_profile_check(params, port->rate, &s->qsize[0]);
	if (status != 0) {
		SCHED_LOG(ERR,
			"%s: Pipe profile check failed(%d)", __func__, status);
		return -EINVAL;
	}

	pp = &s->pipe_profiles[s->n_pipe_profiles];
	rte_sched_pipe_profile_convert(s, params, pp, port->rate);

	/* Pipe profile should not already exist */
	for (i = 0; i < s->n_pipe_profiles; i++)
		if (memcmp(s->pipe_profiles + i, pp, sizeof(*pp)) == 0) {
			SCHED_LOG(ERR,
				"%s: Pipe profile exists", __func__);
			return -EINVAL;
		}

	/* Pipe profile commit */
	*pipe_profile_id = s->n_pipe_profiles;
	s->n_pipe_profiles++;

	if (s->pipe_tc_be_rate_max < params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE])
		s->pipe_tc_be_rate_max = params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];

	rte_sched_port_log_pipe_profile(s, *pipe_profile_id);

	return 0;
}

int
rte_sched_port_subport_profile_add(struct rte_sched_port *port,
	struct rte_sched_subport_profile_params *params,
	uint32_t *subport_profile_id)
{
	int status;
	uint32_t i;
	struct rte_sched_subport_profile *dst;

	/* Port */
	if (port == NULL) {
		SCHED_LOG(ERR, "%s: "
			"Incorrect value for parameter port", __func__);
		return -EINVAL;
	}

	if (params == NULL) {
		SCHED_LOG(ERR, "%s: "
			"Incorrect value for parameter profile", __func__);
		return -EINVAL;
	}

	if (subport_profile_id == NULL) {
		SCHED_LOG(ERR, "%s: "
			"Incorrect value for parameter subport_profile_id",
			__func__);
		return -EINVAL;
	}

	dst = port->subport_profiles + port->n_subport_profiles;

	/* Subport profiles must not exceed the max limit */
	if (port->n_subport_profiles >= port->n_max_subport_profiles) {
		SCHED_LOG(ERR, "%s: "
			"Number of subport profiles exceeds the max limit",
			__func__);
		return -EINVAL;
	}

	status = subport_profile_check(params, port->rate);
	if (status != 0) {
		SCHED_LOG(ERR,
			"%s: subport profile check failed(%d)", __func__, status);
		return -EINVAL;
	}

	rte_sched_subport_profile_convert(params, dst, port->rate);

	/* Subport profile should not already exist */
	for (i = 0; i < port->n_subport_profiles; i++)
		if (memcmp(port->subport_profiles + i,
		    dst, sizeof(*dst)) == 0) {
			SCHED_LOG(ERR,
				"%s: subport profile exists", __func__);
			return -EINVAL;
		}

	/* Subport profile commit */
	*subport_profile_id = port->n_subport_profiles;
	port->n_subport_profiles++;

	rte_sched_port_log_subport_profile(port, *subport_profile_id);

	return 0;
}

static inline uint32_t
rte_sched_port_qindex(struct rte_sched_port *port,
	uint32_t subport,
	uint32_t pipe,
	uint32_t traffic_class,
	uint32_t queue)
{
	return ((subport & (port->n_subports_per_port - 1)) <<
		(port->n_pipes_per_subport_log2 + 4)) |
		((pipe &
		(port->subports[subport]->n_pipes_per_subport_enabled - 1)) << 4) |
		((rte_sched_port_pipe_queue(port, traffic_class) + queue) &
		(RTE_SCHED_QUEUES_PER_PIPE - 1));
}

void
rte_sched_port_pkt_write(struct rte_sched_port *port,
			 struct rte_mbuf *pkt,
			 uint32_t subport, uint32_t pipe,
			 uint32_t traffic_class,
			 uint32_t queue, enum rte_color color)
{
	uint32_t queue_id =
		rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);

	rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
}

void
rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
				  const struct rte_mbuf *pkt,
				  uint32_t *subport, uint32_t *pipe,
				  uint32_t *traffic_class, uint32_t *queue)
{
	uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);

	*subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
	*pipe = (queue_id >> 4) &
		(port->subports[*subport]->n_pipes_per_subport_enabled - 1);
	*traffic_class = rte_sched_port_pipe_tc(port, queue_id);
	*queue = rte_sched_port_tc_queue(port, queue_id);
}

enum rte_color
rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
{
	return (enum rte_color)rte_mbuf_sched_color_get(pkt);
}

int
rte_sched_subport_read_stats(struct rte_sched_port *port,
			     uint32_t subport_id,
			     struct rte_sched_subport_stats *stats,
			     uint32_t *tc_ov)
{
	struct rte_sched_subport *s;

	/* Check user parameters */
	if (port == NULL) {
		SCHED_LOG(ERR,
			"%s: Incorrect value for parameter port", __func__);
		return -EINVAL;
	}

	if (subport_id >= port->n_subports_per_port) {
		SCHED_LOG(ERR,
			"%s: Incorrect value for subport id", __func__);
		return -EINVAL;
	}

	if (stats == NULL) {
SCHED_LOG(ERR, 1712 "%s: Incorrect value for parameter stats", __func__); 1713 return -EINVAL; 1714 } 1715 1716 if (tc_ov == NULL) { 1717 SCHED_LOG(ERR, 1718 "%s: Incorrect value for tc_ov", __func__); 1719 return -EINVAL; 1720 } 1721 1722 s = port->subports[subport_id]; 1723 1724 /* Copy subport stats and clear */ 1725 memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats)); 1726 memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats)); 1727 1728 /* Subport TC oversubscription status */ 1729 *tc_ov = s->tc_ov; 1730 1731 return 0; 1732 } 1733 1734 int 1735 rte_sched_queue_read_stats(struct rte_sched_port *port, 1736 uint32_t queue_id, 1737 struct rte_sched_queue_stats *stats, 1738 uint16_t *qlen) 1739 { 1740 struct rte_sched_subport *s; 1741 struct rte_sched_queue *q; 1742 struct rte_sched_queue_extra *qe; 1743 uint32_t subport_id, subport_qmask, subport_qindex; 1744 1745 /* Check user parameters */ 1746 if (port == NULL) { 1747 SCHED_LOG(ERR, 1748 "%s: Incorrect value for parameter port", __func__); 1749 return -EINVAL; 1750 } 1751 1752 if (queue_id >= rte_sched_port_queues_per_port(port)) { 1753 SCHED_LOG(ERR, 1754 "%s: Incorrect value for queue id", __func__); 1755 return -EINVAL; 1756 } 1757 1758 if (stats == NULL) { 1759 SCHED_LOG(ERR, 1760 "%s: Incorrect value for parameter stats", __func__); 1761 return -EINVAL; 1762 } 1763 1764 if (qlen == NULL) { 1765 SCHED_LOG(ERR, 1766 "%s: Incorrect value for parameter qlen", __func__); 1767 return -EINVAL; 1768 } 1769 subport_qmask = port->n_pipes_per_subport_log2 + 4; 1770 subport_id = (queue_id >> subport_qmask) & (port->n_subports_per_port - 1); 1771 1772 s = port->subports[subport_id]; 1773 subport_qindex = ((1 << subport_qmask) - 1) & queue_id; 1774 q = s->queue + subport_qindex; 1775 qe = s->queue_extra + subport_qindex; 1776 1777 /* Copy queue stats and clear */ 1778 memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats)); 1779 memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats)); 1780 1781 /* Queue length */ 1782 *qlen = q->qw - q->qr; 1783 1784 return 0; 1785 } 1786 1787 #ifdef RTE_SCHED_DEBUG 1788 1789 static inline int 1790 rte_sched_port_queue_is_empty(struct rte_sched_subport *subport, 1791 uint32_t qindex) 1792 { 1793 struct rte_sched_queue *queue = subport->queue + qindex; 1794 1795 return queue->qr == queue->qw; 1796 } 1797 1798 #endif /* RTE_SCHED_DEBUG */ 1799 1800 static inline void 1801 rte_sched_port_update_subport_stats(struct rte_sched_port *port, 1802 struct rte_sched_subport *subport, 1803 uint32_t qindex, 1804 struct rte_mbuf *pkt) 1805 { 1806 uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex); 1807 uint32_t pkt_len = pkt->pkt_len; 1808 1809 subport->stats.n_pkts_tc[tc_index] += 1; 1810 subport->stats.n_bytes_tc[tc_index] += pkt_len; 1811 } 1812 1813 static inline void 1814 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, 1815 struct rte_sched_subport *subport, 1816 uint32_t qindex, 1817 struct rte_mbuf *pkt, 1818 uint32_t n_pkts_cman_dropped) 1819 { 1820 uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex); 1821 uint32_t pkt_len = pkt->pkt_len; 1822 1823 subport->stats.n_pkts_tc_dropped[tc_index] += 1; 1824 subport->stats.n_bytes_tc_dropped[tc_index] += pkt_len; 1825 subport->stats.n_pkts_cman_dropped[tc_index] += n_pkts_cman_dropped; 1826 } 1827 1828 static inline void 1829 rte_sched_port_update_queue_stats(struct rte_sched_subport *subport, 1830 uint32_t qindex, 1831 struct rte_mbuf *pkt) 1832 { 1833 struct rte_sched_queue_extra *qe = 
subport->queue_extra + qindex; 1834 uint32_t pkt_len = pkt->pkt_len; 1835 1836 qe->stats.n_pkts += 1; 1837 qe->stats.n_bytes += pkt_len; 1838 } 1839 1840 static inline void 1841 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport, 1842 uint32_t qindex, 1843 struct rte_mbuf *pkt, 1844 uint32_t n_pkts_cman_dropped) 1845 { 1846 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex; 1847 uint32_t pkt_len = pkt->pkt_len; 1848 1849 qe->stats.n_pkts_dropped += 1; 1850 qe->stats.n_bytes_dropped += pkt_len; 1851 if (subport->cman_enabled) 1852 qe->stats.n_pkts_cman_dropped += n_pkts_cman_dropped; 1853 } 1854 1855 static inline int 1856 rte_sched_port_cman_drop(struct rte_sched_port *port, 1857 struct rte_sched_subport *subport, 1858 struct rte_mbuf *pkt, 1859 uint32_t qindex, 1860 uint16_t qlen) 1861 { 1862 if (!subport->cman_enabled) 1863 return 0; 1864 1865 struct rte_sched_queue_extra *qe; 1866 uint32_t tc_index; 1867 1868 tc_index = rte_sched_port_pipe_tc(port, qindex); 1869 qe = subport->queue_extra + qindex; 1870 1871 /* RED */ 1872 if (subport->cman == RTE_SCHED_CMAN_RED) { 1873 struct rte_red_config *red_cfg; 1874 struct rte_red *red; 1875 enum rte_color color; 1876 1877 color = rte_sched_port_pkt_read_color(pkt); 1878 red_cfg = &subport->red_config[tc_index][color]; 1879 1880 if ((red_cfg->min_th | red_cfg->max_th) == 0) 1881 return 0; 1882 1883 red = &qe->red; 1884 1885 return rte_red_enqueue(red_cfg, red, qlen, port->time); 1886 } 1887 1888 /* PIE */ 1889 struct rte_pie_config *pie_cfg = &subport->pie_config[tc_index]; 1890 struct rte_pie *pie = &qe->pie; 1891 1892 return rte_pie_enqueue(pie_cfg, pie, qlen, pkt->pkt_len, port->time_cpu_cycles); 1893 } 1894 1895 static inline void 1896 rte_sched_port_red_set_queue_empty_timestamp(struct rte_sched_port *port, 1897 struct rte_sched_subport *subport, uint32_t qindex) 1898 { 1899 if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_RED) { 1900 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex; 1901 struct rte_red *red = &qe->red; 1902 1903 rte_red_mark_queue_empty(red, port->time); 1904 } 1905 } 1906 1907 static inline void 1908 rte_sched_port_pie_dequeue(struct rte_sched_subport *subport, 1909 uint32_t qindex, uint32_t pkt_len, uint64_t time) { 1910 if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_PIE) { 1911 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex; 1912 struct rte_pie *pie = &qe->pie; 1913 1914 /* Update queue length */ 1915 pie->qlen -= 1; 1916 pie->qlen_bytes -= pkt_len; 1917 1918 rte_pie_dequeue(pie, pkt_len, time); 1919 } 1920 } 1921 1922 #ifdef RTE_SCHED_DEBUG 1923 1924 static inline void 1925 debug_check_queue_slab(struct rte_sched_subport *subport, uint32_t bmp_pos, 1926 uint64_t bmp_slab) 1927 { 1928 uint64_t mask; 1929 uint32_t i, panic; 1930 1931 if (bmp_slab == 0) 1932 rte_panic("Empty slab at position %u\n", bmp_pos); 1933 1934 panic = 0; 1935 for (i = 0, mask = 1; i < 64; i++, mask <<= 1) { 1936 if (mask & bmp_slab) { 1937 if (rte_sched_port_queue_is_empty(subport, bmp_pos + i)) { 1938 printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i); 1939 panic = 1; 1940 } 1941 } 1942 } 1943 1944 if (panic) 1945 rte_panic("Empty queues in slab 0x%" PRIx64 "starting at position %u\n", 1946 bmp_slab, bmp_pos); 1947 } 1948 1949 #endif /* RTE_SCHED_DEBUG */ 1950 1951 static inline struct rte_sched_subport * 1952 rte_sched_port_subport(struct rte_sched_port *port, 1953 struct rte_mbuf *pkt) 1954 { 1955 uint32_t queue_id = 
rte_mbuf_sched_queue_get(pkt); 1956 uint32_t subport_id = queue_id >> (port->n_pipes_per_subport_log2 + 4); 1957 1958 return port->subports[subport_id]; 1959 } 1960 1961 static inline uint32_t 1962 rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport, 1963 struct rte_mbuf *pkt, uint32_t subport_qmask) 1964 { 1965 struct rte_sched_queue *q; 1966 struct rte_sched_queue_extra *qe; 1967 uint32_t qindex = rte_mbuf_sched_queue_get(pkt); 1968 uint32_t subport_queue_id = subport_qmask & qindex; 1969 1970 q = subport->queue + subport_queue_id; 1971 rte_prefetch0(q); 1972 qe = subport->queue_extra + subport_queue_id; 1973 rte_prefetch0(qe); 1974 1975 return subport_queue_id; 1976 } 1977 1978 static inline void 1979 rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port, 1980 struct rte_sched_subport *subport, 1981 uint32_t qindex, 1982 struct rte_mbuf **qbase) 1983 { 1984 struct rte_sched_queue *q; 1985 struct rte_mbuf **q_qw; 1986 uint16_t qsize; 1987 1988 q = subport->queue + qindex; 1989 qsize = rte_sched_subport_pipe_qsize(port, subport, qindex); 1990 q_qw = qbase + (q->qw & (qsize - 1)); 1991 1992 rte_prefetch0(q_qw); 1993 rte_bitmap_prefetch0(subport->bmp, qindex); 1994 } 1995 1996 static inline int 1997 rte_sched_port_enqueue_qwa(struct rte_sched_port *port, 1998 struct rte_sched_subport *subport, 1999 uint32_t qindex, 2000 struct rte_mbuf **qbase, 2001 struct rte_mbuf *pkt) 2002 { 2003 struct rte_sched_queue *q; 2004 uint16_t qsize; 2005 uint16_t qlen; 2006 2007 q = subport->queue + qindex; 2008 qsize = rte_sched_subport_pipe_qsize(port, subport, qindex); 2009 qlen = q->qw - q->qr; 2010 2011 /* Drop the packet (and update drop stats) when queue is full */ 2012 if (unlikely(rte_sched_port_cman_drop(port, subport, pkt, qindex, qlen) || 2013 (qlen >= qsize))) { 2014 rte_pktmbuf_free(pkt); 2015 rte_sched_port_update_subport_stats_on_drop(port, subport, 2016 qindex, pkt, qlen < qsize); 2017 rte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt, 2018 qlen < qsize); 2019 return 0; 2020 } 2021 2022 /* Enqueue packet */ 2023 qbase[q->qw & (qsize - 1)] = pkt; 2024 q->qw++; 2025 2026 /* Activate queue in the subport bitmap */ 2027 rte_bitmap_set(subport->bmp, qindex); 2028 2029 /* Statistics */ 2030 rte_sched_port_update_subport_stats(port, subport, qindex, pkt); 2031 rte_sched_port_update_queue_stats(subport, qindex, pkt); 2032 2033 return 1; 2034 } 2035 2036 2037 /* 2038 * The enqueue function implements a 4-level pipeline with each stage 2039 * processing two different packets. The purpose of using a pipeline 2040 * is to hide the latency of prefetching the data structures. 
The 2041 * naming convention is presented in the diagram below: 2042 * 2043 * p00 _______ p10 _______ p20 _______ p30 _______ 2044 * ----->| |----->| |----->| |----->| |-----> 2045 * | 0 | | 1 | | 2 | | 3 | 2046 * ----->|_______|----->|_______|----->|_______|----->|_______|-----> 2047 * p01 p11 p21 p31 2048 */ 2049 int 2050 rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, 2051 uint32_t n_pkts) 2052 { 2053 struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21, 2054 *pkt30, *pkt31, *pkt_last; 2055 struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base, 2056 **q20_base, **q21_base, **q30_base, **q31_base, **q_last_base; 2057 struct rte_sched_subport *subport00, *subport01, *subport10, *subport11, 2058 *subport20, *subport21, *subport30, *subport31, *subport_last; 2059 uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last; 2060 uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last; 2061 uint32_t subport_qmask; 2062 uint32_t result, i; 2063 2064 result = 0; 2065 subport_qmask = (1 << (port->n_pipes_per_subport_log2 + 4)) - 1; 2066 2067 /* 2068 * Less than 6 input packets available, which is not enough to 2069 * feed the pipeline 2070 */ 2071 if (unlikely(n_pkts < 6)) { 2072 struct rte_sched_subport *subports[5]; 2073 struct rte_mbuf **q_base[5]; 2074 uint32_t q[5]; 2075 2076 /* Prefetch the mbuf structure of each packet */ 2077 for (i = 0; i < n_pkts; i++) 2078 rte_prefetch0(pkts[i]); 2079 2080 /* Prefetch the subport structure for each packet */ 2081 for (i = 0; i < n_pkts; i++) 2082 subports[i] = rte_sched_port_subport(port, pkts[i]); 2083 2084 /* Prefetch the queue structure for each queue */ 2085 for (i = 0; i < n_pkts; i++) 2086 q[i] = rte_sched_port_enqueue_qptrs_prefetch0(subports[i], 2087 pkts[i], subport_qmask); 2088 2089 /* Prefetch the write pointer location of each queue */ 2090 for (i = 0; i < n_pkts; i++) { 2091 q_base[i] = rte_sched_subport_pipe_qbase(subports[i], q[i]); 2092 rte_sched_port_enqueue_qwa_prefetch0(port, subports[i], 2093 q[i], q_base[i]); 2094 } 2095 2096 /* Write each packet to its queue */ 2097 for (i = 0; i < n_pkts; i++) 2098 result += rte_sched_port_enqueue_qwa(port, subports[i], 2099 q[i], q_base[i], pkts[i]); 2100 2101 return result; 2102 } 2103 2104 /* Feed the first 3 stages of the pipeline (6 packets needed) */ 2105 pkt20 = pkts[0]; 2106 pkt21 = pkts[1]; 2107 rte_prefetch0(pkt20); 2108 rte_prefetch0(pkt21); 2109 2110 pkt10 = pkts[2]; 2111 pkt11 = pkts[3]; 2112 rte_prefetch0(pkt10); 2113 rte_prefetch0(pkt11); 2114 2115 subport20 = rte_sched_port_subport(port, pkt20); 2116 subport21 = rte_sched_port_subport(port, pkt21); 2117 q20 = rte_sched_port_enqueue_qptrs_prefetch0(subport20, 2118 pkt20, subport_qmask); 2119 q21 = rte_sched_port_enqueue_qptrs_prefetch0(subport21, 2120 pkt21, subport_qmask); 2121 2122 pkt00 = pkts[4]; 2123 pkt01 = pkts[5]; 2124 rte_prefetch0(pkt00); 2125 rte_prefetch0(pkt01); 2126 2127 subport10 = rte_sched_port_subport(port, pkt10); 2128 subport11 = rte_sched_port_subport(port, pkt11); 2129 q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10, 2130 pkt10, subport_qmask); 2131 q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11, 2132 pkt11, subport_qmask); 2133 2134 q20_base = rte_sched_subport_pipe_qbase(subport20, q20); 2135 q21_base = rte_sched_subport_pipe_qbase(subport21, q21); 2136 rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base); 2137 rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base); 2138 2139 /* Run the pipeline */ 2140 for (i = 6; i <
(n_pkts & (~1)); i += 2) { 2141 /* Propagate stage inputs */ 2142 pkt30 = pkt20; 2143 pkt31 = pkt21; 2144 pkt20 = pkt10; 2145 pkt21 = pkt11; 2146 pkt10 = pkt00; 2147 pkt11 = pkt01; 2148 q30 = q20; 2149 q31 = q21; 2150 q20 = q10; 2151 q21 = q11; 2152 subport30 = subport20; 2153 subport31 = subport21; 2154 subport20 = subport10; 2155 subport21 = subport11; 2156 q30_base = q20_base; 2157 q31_base = q21_base; 2158 2159 /* Stage 0: Get packets in */ 2160 pkt00 = pkts[i]; 2161 pkt01 = pkts[i + 1]; 2162 rte_prefetch0(pkt00); 2163 rte_prefetch0(pkt01); 2164 2165 /* Stage 1: Prefetch subport and queue structure storing queue pointers */ 2166 subport10 = rte_sched_port_subport(port, pkt10); 2167 subport11 = rte_sched_port_subport(port, pkt11); 2168 q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10, 2169 pkt10, subport_qmask); 2170 q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11, 2171 pkt11, subport_qmask); 2172 2173 /* Stage 2: Prefetch queue write location */ 2174 q20_base = rte_sched_subport_pipe_qbase(subport20, q20); 2175 q21_base = rte_sched_subport_pipe_qbase(subport21, q21); 2176 rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base); 2177 rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base); 2178 2179 /* Stage 3: Write packet to queue and activate queue */ 2180 r30 = rte_sched_port_enqueue_qwa(port, subport30, 2181 q30, q30_base, pkt30); 2182 r31 = rte_sched_port_enqueue_qwa(port, subport31, 2183 q31, q31_base, pkt31); 2184 result += r30 + r31; 2185 } 2186 2187 /* 2188 * Drain the pipeline (exactly 6 packets). 2189 * Handle the last packet in the case 2190 * of an odd number of input packets. 2191 */ 2192 pkt_last = pkts[n_pkts - 1]; 2193 rte_prefetch0(pkt_last); 2194 2195 subport00 = rte_sched_port_subport(port, pkt00); 2196 subport01 = rte_sched_port_subport(port, pkt01); 2197 q00 = rte_sched_port_enqueue_qptrs_prefetch0(subport00, 2198 pkt00, subport_qmask); 2199 q01 = rte_sched_port_enqueue_qptrs_prefetch0(subport01, 2200 pkt01, subport_qmask); 2201 2202 q10_base = rte_sched_subport_pipe_qbase(subport10, q10); 2203 q11_base = rte_sched_subport_pipe_qbase(subport11, q11); 2204 rte_sched_port_enqueue_qwa_prefetch0(port, subport10, q10, q10_base); 2205 rte_sched_port_enqueue_qwa_prefetch0(port, subport11, q11, q11_base); 2206 2207 r20 = rte_sched_port_enqueue_qwa(port, subport20, 2208 q20, q20_base, pkt20); 2209 r21 = rte_sched_port_enqueue_qwa(port, subport21, 2210 q21, q21_base, pkt21); 2211 result += r20 + r21; 2212 2213 subport_last = rte_sched_port_subport(port, pkt_last); 2214 q_last = rte_sched_port_enqueue_qptrs_prefetch0(subport_last, 2215 pkt_last, subport_qmask); 2216 2217 q00_base = rte_sched_subport_pipe_qbase(subport00, q00); 2218 q01_base = rte_sched_subport_pipe_qbase(subport01, q01); 2219 rte_sched_port_enqueue_qwa_prefetch0(port, subport00, q00, q00_base); 2220 rte_sched_port_enqueue_qwa_prefetch0(port, subport01, q01, q01_base); 2221 2222 r10 = rte_sched_port_enqueue_qwa(port, subport10, q10, 2223 q10_base, pkt10); 2224 r11 = rte_sched_port_enqueue_qwa(port, subport11, q11, 2225 q11_base, pkt11); 2226 result += r10 + r11; 2227 2228 q_last_base = rte_sched_subport_pipe_qbase(subport_last, q_last); 2229 rte_sched_port_enqueue_qwa_prefetch0(port, subport_last, 2230 q_last, q_last_base); 2231 2232 r00 = rte_sched_port_enqueue_qwa(port, subport00, q00, 2233 q00_base, pkt00); 2234 r01 = rte_sched_port_enqueue_qwa(port, subport01, q01, 2235 q01_base, pkt01); 2236 result += r00 + r01; 2237 2238 if (n_pkts & 1) { 2239 r_last = 
rte_sched_port_enqueue_qwa(port, subport_last, 2240 q_last, q_last_base, pkt_last); 2241 result += r_last; 2242 } 2243 2244 return result; 2245 } 2246 2247 static inline uint64_t 2248 grinder_tc_ov_credits_update(struct rte_sched_port *port, 2249 struct rte_sched_subport *subport, uint32_t pos) 2250 { 2251 struct rte_sched_grinder *grinder = subport->grinder + pos; 2252 struct rte_sched_subport_profile *sp = grinder->subport_params; 2253 uint64_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 2254 uint64_t tc_consumption = 0, tc_ov_consumption_max; 2255 uint64_t tc_ov_wm = subport->tc_ov_wm; 2256 uint32_t i; 2257 2258 if (subport->tc_ov == 0) 2259 return subport->tc_ov_wm_max; 2260 2261 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) { 2262 tc_ov_consumption[i] = sp->tc_credits_per_period[i] 2263 - subport->tc_credits[i]; 2264 tc_consumption += tc_ov_consumption[i]; 2265 } 2266 2267 tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] = 2268 sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] - 2269 subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE]; 2270 2271 tc_ov_consumption_max = 2272 sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] - 2273 tc_consumption; 2274 2275 if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] > 2276 (tc_ov_consumption_max - port->mtu)) { 2277 tc_ov_wm -= tc_ov_wm >> 7; 2278 if (tc_ov_wm < subport->tc_ov_wm_min) 2279 tc_ov_wm = subport->tc_ov_wm_min; 2280 2281 return tc_ov_wm; 2282 } 2283 2284 tc_ov_wm += (tc_ov_wm >> 7) + 1; 2285 if (tc_ov_wm > subport->tc_ov_wm_max) 2286 tc_ov_wm = subport->tc_ov_wm_max; 2287 2288 return tc_ov_wm; 2289 } 2290 2291 static inline void 2292 grinder_credits_update(struct rte_sched_port *port, 2293 struct rte_sched_subport *subport, uint32_t pos) 2294 { 2295 struct rte_sched_grinder *grinder = subport->grinder + pos; 2296 struct rte_sched_pipe *pipe = grinder->pipe; 2297 struct rte_sched_pipe_profile *params = grinder->pipe_params; 2298 struct rte_sched_subport_profile *sp = grinder->subport_params; 2299 uint64_t n_periods; 2300 uint32_t i; 2301 2302 /* Subport TB */ 2303 n_periods = (port->time - subport->tb_time) / sp->tb_period; 2304 subport->tb_credits += n_periods * sp->tb_credits_per_period; 2305 subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size); 2306 subport->tb_time += n_periods * sp->tb_period; 2307 2308 /* Pipe TB */ 2309 n_periods = (port->time - pipe->tb_time) / params->tb_period; 2310 pipe->tb_credits += n_periods * params->tb_credits_per_period; 2311 pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size); 2312 pipe->tb_time += n_periods * params->tb_period; 2313 2314 /* Subport TCs */ 2315 if (unlikely(port->time >= subport->tc_time)) { 2316 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 2317 subport->tc_credits[i] = sp->tc_credits_per_period[i]; 2318 2319 subport->tc_time = port->time + sp->tc_period; 2320 } 2321 2322 /* Pipe TCs */ 2323 if (unlikely(port->time >= pipe->tc_time)) { 2324 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 2325 pipe->tc_credits[i] = params->tc_credits_per_period[i]; 2326 pipe->tc_time = port->time + params->tc_period; 2327 } 2328 } 2329 2330 static inline void 2331 grinder_credits_update_with_tc_ov(struct rte_sched_port *port, 2332 struct rte_sched_subport *subport, uint32_t pos) 2333 { 2334 struct rte_sched_grinder *grinder = subport->grinder + pos; 2335 struct rte_sched_pipe *pipe = grinder->pipe; 2336 struct rte_sched_pipe_profile *params = grinder->pipe_params; 2337 struct rte_sched_subport_profile *sp = grinder->subport_params; 2338 uint64_t 
n_periods; 2339 uint32_t i; 2340 2341 /* Subport TB */ 2342 n_periods = (port->time - subport->tb_time) / sp->tb_period; 2343 subport->tb_credits += n_periods * sp->tb_credits_per_period; 2344 subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size); 2345 subport->tb_time += n_periods * sp->tb_period; 2346 2347 /* Pipe TB */ 2348 n_periods = (port->time - pipe->tb_time) / params->tb_period; 2349 pipe->tb_credits += n_periods * params->tb_credits_per_period; 2350 pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size); 2351 pipe->tb_time += n_periods * params->tb_period; 2352 2353 /* Subport TCs */ 2354 if (unlikely(port->time >= subport->tc_time)) { 2355 subport->tc_ov_wm = 2356 grinder_tc_ov_credits_update(port, subport, pos); 2357 2358 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 2359 subport->tc_credits[i] = sp->tc_credits_per_period[i]; 2360 2361 subport->tc_time = port->time + sp->tc_period; 2362 subport->tc_ov_period_id++; 2363 } 2364 2365 /* Pipe TCs */ 2366 if (unlikely(port->time >= pipe->tc_time)) { 2367 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 2368 pipe->tc_credits[i] = params->tc_credits_per_period[i]; 2369 pipe->tc_time = port->time + params->tc_period; 2370 } 2371 2372 /* Pipe TCs - Oversubscription */ 2373 if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) { 2374 pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight; 2375 2376 pipe->tc_ov_period_id = subport->tc_ov_period_id; 2377 } 2378 } 2379 2380 static inline int 2381 grinder_credits_check(struct rte_sched_port *port, 2382 struct rte_sched_subport *subport, uint32_t pos) 2383 { 2384 struct rte_sched_grinder *grinder = subport->grinder + pos; 2385 struct rte_sched_pipe *pipe = grinder->pipe; 2386 struct rte_mbuf *pkt = grinder->pkt; 2387 uint32_t tc_index = grinder->tc_index; 2388 uint64_t pkt_len = pkt->pkt_len + port->frame_overhead; 2389 uint64_t subport_tb_credits = subport->tb_credits; 2390 uint64_t subport_tc_credits = subport->tc_credits[tc_index]; 2391 uint64_t pipe_tb_credits = pipe->tb_credits; 2392 uint64_t pipe_tc_credits = pipe->tc_credits[tc_index]; 2393 int enough_credits; 2394 2395 /* Check pipe and subport credits */ 2396 enough_credits = (pkt_len <= subport_tb_credits) && 2397 (pkt_len <= subport_tc_credits) && 2398 (pkt_len <= pipe_tb_credits) && 2399 (pkt_len <= pipe_tc_credits); 2400 2401 if (!enough_credits) 2402 return 0; 2403 2404 /* Update pipe and subport credits */ 2405 subport->tb_credits -= pkt_len; 2406 subport->tc_credits[tc_index] -= pkt_len; 2407 pipe->tb_credits -= pkt_len; 2408 pipe->tc_credits[tc_index] -= pkt_len; 2409 2410 return 1; 2411 } 2412 2413 static inline int 2414 grinder_credits_check_with_tc_ov(struct rte_sched_port *port, 2415 struct rte_sched_subport *subport, uint32_t pos) 2416 { 2417 struct rte_sched_grinder *grinder = subport->grinder + pos; 2418 struct rte_sched_pipe *pipe = grinder->pipe; 2419 struct rte_mbuf *pkt = grinder->pkt; 2420 uint32_t tc_index = grinder->tc_index; 2421 uint64_t pkt_len = pkt->pkt_len + port->frame_overhead; 2422 uint64_t subport_tb_credits = subport->tb_credits; 2423 uint64_t subport_tc_credits = subport->tc_credits[tc_index]; 2424 uint64_t pipe_tb_credits = pipe->tb_credits; 2425 uint64_t pipe_tc_credits = pipe->tc_credits[tc_index]; 2426 uint64_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 2427 uint64_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0}; 2428 uint64_t pipe_tc_ov_credits; 2429 uint32_t i; 2430 int enough_credits; 2431 2432 for (i = 0; i < 
RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 2433 pipe_tc_ov_mask1[i] = ~0LLU; 2434 2435 pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits; 2436 pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = ~0LLU; 2437 pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index]; 2438 2439 /* Check pipe and subport credits */ 2440 enough_credits = (pkt_len <= subport_tb_credits) && 2441 (pkt_len <= subport_tc_credits) && 2442 (pkt_len <= pipe_tb_credits) && 2443 (pkt_len <= pipe_tc_credits) && 2444 (pkt_len <= pipe_tc_ov_credits); 2445 2446 if (!enough_credits) 2447 return 0; 2448 2449 /* Update pipe and subport credits */ 2450 subport->tb_credits -= pkt_len; 2451 subport->tc_credits[tc_index] -= pkt_len; 2452 pipe->tb_credits -= pkt_len; 2453 pipe->tc_credits[tc_index] -= pkt_len; 2454 pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len; 2455 2456 return 1; 2457 } 2458 2459 2460 static inline int 2461 grinder_schedule(struct rte_sched_port *port, 2462 struct rte_sched_subport *subport, uint32_t pos) 2463 { 2464 struct rte_sched_grinder *grinder = subport->grinder + pos; 2465 struct rte_sched_queue *queue = grinder->queue[grinder->qpos]; 2466 uint32_t qindex = grinder->qindex[grinder->qpos]; 2467 struct rte_mbuf *pkt = grinder->pkt; 2468 uint32_t pkt_len = pkt->pkt_len + port->frame_overhead; 2469 uint32_t be_tc_active; 2470 2471 if (subport->tc_ov_enabled) { 2472 if (!grinder_credits_check_with_tc_ov(port, subport, pos)) 2473 return 0; 2474 } else { 2475 if (!grinder_credits_check(port, subport, pos)) 2476 return 0; 2477 } 2478 2479 /* Advance port time */ 2480 port->time += pkt_len; 2481 2482 /* Send packet */ 2483 port->pkts_out[port->n_pkts_out++] = pkt; 2484 queue->qr++; 2485 2486 be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE) ? ~0x0 : 0x0; 2487 grinder->wrr_tokens[grinder->qpos] += 2488 (pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active; 2489 2490 if (queue->qr == queue->qw) { 2491 rte_bitmap_clear(subport->bmp, qindex); 2492 grinder->qmask &= ~(1 << grinder->qpos); 2493 if (be_tc_active) 2494 grinder->wrr_mask[grinder->qpos] = 0; 2495 2496 rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex); 2497 } 2498 2499 rte_sched_port_pie_dequeue(subport, qindex, pkt_len, port->time_cpu_cycles); 2500 2501 /* Reset pipe loop detection */ 2502 subport->pipe_loop = RTE_SCHED_PIPE_INVALID; 2503 grinder->productive = 1; 2504 2505 return 1; 2506 } 2507 2508 static inline int 2509 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe) 2510 { 2511 uint32_t i; 2512 2513 for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) { 2514 if (subport->grinder_base_bmp_pos[i] == base_pipe) 2515 return 1; 2516 } 2517 2518 return 0; 2519 } 2520 2521 static inline void 2522 grinder_pcache_populate(struct rte_sched_subport *subport, 2523 uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab) 2524 { 2525 struct rte_sched_grinder *grinder = subport->grinder + pos; 2526 uint16_t w[4]; 2527 2528 grinder->pcache_w = 0; 2529 grinder->pcache_r = 0; 2530 2531 w[0] = (uint16_t) bmp_slab; 2532 w[1] = (uint16_t) (bmp_slab >> 16); 2533 w[2] = (uint16_t) (bmp_slab >> 32); 2534 w[3] = (uint16_t) (bmp_slab >> 48); 2535 2536 grinder->pcache_qmask[grinder->pcache_w] = w[0]; 2537 grinder->pcache_qindex[grinder->pcache_w] = bmp_pos; 2538 grinder->pcache_w += (w[0] != 0); 2539 2540 grinder->pcache_qmask[grinder->pcache_w] = w[1]; 2541 grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16; 2542 grinder->pcache_w += (w[1] != 0); 2543 2544 grinder->pcache_qmask[grinder->pcache_w] = w[2]; 
2545 grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32; 2546 grinder->pcache_w += (w[2] != 0); 2547 2548 grinder->pcache_qmask[grinder->pcache_w] = w[3]; 2549 grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48; 2550 grinder->pcache_w += (w[3] != 0); 2551 } 2552 2553 static inline void 2554 grinder_tccache_populate(struct rte_sched_subport *subport, 2555 uint32_t pos, uint32_t qindex, uint16_t qmask) 2556 { 2557 struct rte_sched_grinder *grinder = subport->grinder + pos; 2558 uint8_t b, i; 2559 2560 grinder->tccache_w = 0; 2561 grinder->tccache_r = 0; 2562 2563 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) { 2564 b = (uint8_t) ((qmask >> i) & 0x1); 2565 grinder->tccache_qmask[grinder->tccache_w] = b; 2566 grinder->tccache_qindex[grinder->tccache_w] = qindex + i; 2567 grinder->tccache_w += (b != 0); 2568 } 2569 2570 b = (uint8_t) (qmask >> (RTE_SCHED_TRAFFIC_CLASS_BE)); 2571 grinder->tccache_qmask[grinder->tccache_w] = b; 2572 grinder->tccache_qindex[grinder->tccache_w] = qindex + 2573 RTE_SCHED_TRAFFIC_CLASS_BE; 2574 grinder->tccache_w += (b != 0); 2575 } 2576 2577 static inline int 2578 grinder_next_tc(struct rte_sched_port *port, 2579 struct rte_sched_subport *subport, uint32_t pos) 2580 { 2581 struct rte_sched_grinder *grinder = subport->grinder + pos; 2582 struct rte_mbuf **qbase; 2583 uint32_t qindex; 2584 uint16_t qsize; 2585 2586 if (grinder->tccache_r == grinder->tccache_w) 2587 return 0; 2588 2589 qindex = grinder->tccache_qindex[grinder->tccache_r]; 2590 qbase = rte_sched_subport_pipe_qbase(subport, qindex); 2591 qsize = rte_sched_subport_pipe_qsize(port, subport, qindex); 2592 2593 grinder->tc_index = rte_sched_port_pipe_tc(port, qindex); 2594 grinder->qmask = grinder->tccache_qmask[grinder->tccache_r]; 2595 grinder->qsize = qsize; 2596 2597 if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) { 2598 grinder->queue[0] = subport->queue + qindex; 2599 grinder->qbase[0] = qbase; 2600 grinder->qindex[0] = qindex; 2601 grinder->tccache_r++; 2602 2603 return 1; 2604 } 2605 2606 grinder->queue[0] = subport->queue + qindex; 2607 grinder->queue[1] = subport->queue + qindex + 1; 2608 grinder->queue[2] = subport->queue + qindex + 2; 2609 grinder->queue[3] = subport->queue + qindex + 3; 2610 2611 grinder->qbase[0] = qbase; 2612 grinder->qbase[1] = qbase + qsize; 2613 grinder->qbase[2] = qbase + 2 * qsize; 2614 grinder->qbase[3] = qbase + 3 * qsize; 2615 2616 grinder->qindex[0] = qindex; 2617 grinder->qindex[1] = qindex + 1; 2618 grinder->qindex[2] = qindex + 2; 2619 grinder->qindex[3] = qindex + 3; 2620 2621 grinder->tccache_r++; 2622 return 1; 2623 } 2624 2625 static inline int 2626 grinder_next_pipe(struct rte_sched_port *port, 2627 struct rte_sched_subport *subport, uint32_t pos) 2628 { 2629 struct rte_sched_grinder *grinder = subport->grinder + pos; 2630 uint32_t pipe_qindex; 2631 uint16_t pipe_qmask; 2632 2633 if (grinder->pcache_r < grinder->pcache_w) { 2634 pipe_qmask = grinder->pcache_qmask[grinder->pcache_r]; 2635 pipe_qindex = grinder->pcache_qindex[grinder->pcache_r]; 2636 grinder->pcache_r++; 2637 } else { 2638 uint64_t bmp_slab = 0; 2639 uint32_t bmp_pos = 0; 2640 2641 /* Get another non-empty pipe group */ 2642 if (unlikely(rte_bitmap_scan(subport->bmp, &bmp_pos, &bmp_slab) <= 0)) 2643 return 0; 2644 2645 #ifdef RTE_SCHED_DEBUG 2646 debug_check_queue_slab(subport, bmp_pos, bmp_slab); 2647 #endif 2648 2649 /* Return if pipe group already in one of the other grinders */ 2650 subport->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID; 2651 if 
(unlikely(grinder_pipe_exists(subport, bmp_pos))) 2652 return 0; 2653 2654 subport->grinder_base_bmp_pos[pos] = bmp_pos; 2655 2656 /* Install new pipe group into grinder's pipe cache */ 2657 grinder_pcache_populate(subport, pos, bmp_pos, bmp_slab); 2658 2659 pipe_qmask = grinder->pcache_qmask[0]; 2660 pipe_qindex = grinder->pcache_qindex[0]; 2661 grinder->pcache_r = 1; 2662 } 2663 2664 /* Install new pipe in the grinder */ 2665 grinder->pindex = pipe_qindex >> 4; 2666 grinder->subport = subport; 2667 grinder->pipe = subport->pipe + grinder->pindex; 2668 grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */ 2669 grinder->productive = 0; 2670 2671 grinder_tccache_populate(subport, pos, pipe_qindex, pipe_qmask); 2672 grinder_next_tc(port, subport, pos); 2673 2674 /* Check for pipe exhaustion */ 2675 if (grinder->pindex == subport->pipe_loop) { 2676 subport->pipe_exhaustion = 1; 2677 subport->pipe_loop = RTE_SCHED_PIPE_INVALID; 2678 } 2679 2680 return 1; 2681 } 2682 2683 2684 static inline void 2685 grinder_wrr_load(struct rte_sched_subport *subport, uint32_t pos) 2686 { 2687 struct rte_sched_grinder *grinder = subport->grinder + pos; 2688 struct rte_sched_pipe *pipe = grinder->pipe; 2689 struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params; 2690 uint32_t qmask = grinder->qmask; 2691 2692 grinder->wrr_tokens[0] = 2693 ((uint16_t) pipe->wrr_tokens[0]) << RTE_SCHED_WRR_SHIFT; 2694 grinder->wrr_tokens[1] = 2695 ((uint16_t) pipe->wrr_tokens[1]) << RTE_SCHED_WRR_SHIFT; 2696 grinder->wrr_tokens[2] = 2697 ((uint16_t) pipe->wrr_tokens[2]) << RTE_SCHED_WRR_SHIFT; 2698 grinder->wrr_tokens[3] = 2699 ((uint16_t) pipe->wrr_tokens[3]) << RTE_SCHED_WRR_SHIFT; 2700 2701 grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF; 2702 grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF; 2703 grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF; 2704 grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF; 2705 2706 grinder->wrr_cost[0] = pipe_params->wrr_cost[0]; 2707 grinder->wrr_cost[1] = pipe_params->wrr_cost[1]; 2708 grinder->wrr_cost[2] = pipe_params->wrr_cost[2]; 2709 grinder->wrr_cost[3] = pipe_params->wrr_cost[3]; 2710 } 2711 2712 static inline void 2713 grinder_wrr_store(struct rte_sched_subport *subport, uint32_t pos) 2714 { 2715 struct rte_sched_grinder *grinder = subport->grinder + pos; 2716 struct rte_sched_pipe *pipe = grinder->pipe; 2717 2718 pipe->wrr_tokens[0] = 2719 (grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >> 2720 RTE_SCHED_WRR_SHIFT; 2721 pipe->wrr_tokens[1] = 2722 (grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >> 2723 RTE_SCHED_WRR_SHIFT; 2724 pipe->wrr_tokens[2] = 2725 (grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >> 2726 RTE_SCHED_WRR_SHIFT; 2727 pipe->wrr_tokens[3] = 2728 (grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >> 2729 RTE_SCHED_WRR_SHIFT; 2730 } 2731 2732 static inline void 2733 grinder_wrr(struct rte_sched_subport *subport, uint32_t pos) 2734 { 2735 struct rte_sched_grinder *grinder = subport->grinder + pos; 2736 uint16_t wrr_tokens_min; 2737 2738 grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0]; 2739 grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1]; 2740 grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2]; 2741 grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3]; 2742 2743 grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens); 2744 wrr_tokens_min = grinder->wrr_tokens[grinder->qpos]; 2745 2746 grinder->wrr_tokens[0] -= wrr_tokens_min; 2747 grinder->wrr_tokens[1] -= wrr_tokens_min; 2748 grinder->wrr_tokens[2] -= wrr_tokens_min; 2749 
grinder->wrr_tokens[3] -= wrr_tokens_min; 2750 } 2751 2752 2753 #define grinder_evict(subport, pos) 2754 2755 static inline void 2756 grinder_prefetch_pipe(struct rte_sched_subport *subport, uint32_t pos) 2757 { 2758 struct rte_sched_grinder *grinder = subport->grinder + pos; 2759 2760 rte_prefetch0(grinder->pipe); 2761 rte_prefetch0(grinder->queue[0]); 2762 } 2763 2764 static inline void 2765 grinder_prefetch_tc_queue_arrays(struct rte_sched_subport *subport, uint32_t pos) 2766 { 2767 struct rte_sched_grinder *grinder = subport->grinder + pos; 2768 uint16_t qsize, qr[RTE_SCHED_MAX_QUEUES_PER_TC]; 2769 2770 qsize = grinder->qsize; 2771 grinder->qpos = 0; 2772 2773 if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) { 2774 qr[0] = grinder->queue[0]->qr & (qsize - 1); 2775 2776 rte_prefetch0(grinder->qbase[0] + qr[0]); 2777 return; 2778 } 2779 2780 qr[0] = grinder->queue[0]->qr & (qsize - 1); 2781 qr[1] = grinder->queue[1]->qr & (qsize - 1); 2782 qr[2] = grinder->queue[2]->qr & (qsize - 1); 2783 qr[3] = grinder->queue[3]->qr & (qsize - 1); 2784 2785 rte_prefetch0(grinder->qbase[0] + qr[0]); 2786 rte_prefetch0(grinder->qbase[1] + qr[1]); 2787 2788 grinder_wrr_load(subport, pos); 2789 grinder_wrr(subport, pos); 2790 2791 rte_prefetch0(grinder->qbase[2] + qr[2]); 2792 rte_prefetch0(grinder->qbase[3] + qr[3]); 2793 } 2794 2795 static inline void 2796 grinder_prefetch_mbuf(struct rte_sched_subport *subport, uint32_t pos) 2797 { 2798 struct rte_sched_grinder *grinder = subport->grinder + pos; 2799 uint32_t qpos = grinder->qpos; 2800 struct rte_mbuf **qbase = grinder->qbase[qpos]; 2801 uint16_t qsize = grinder->qsize; 2802 uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1); 2803 2804 grinder->pkt = qbase[qr]; 2805 rte_prefetch0(grinder->pkt); 2806 2807 if (unlikely((qr & 0x7) == 7)) { 2808 uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1); 2809 2810 rte_prefetch0(qbase + qr_next); 2811 } 2812 } 2813 2814 static inline uint32_t 2815 grinder_handle(struct rte_sched_port *port, 2816 struct rte_sched_subport *subport, uint32_t pos) 2817 { 2818 struct rte_sched_grinder *grinder = subport->grinder + pos; 2819 2820 switch (grinder->state) { 2821 case e_GRINDER_PREFETCH_PIPE: 2822 { 2823 if (grinder_next_pipe(port, subport, pos)) { 2824 grinder_prefetch_pipe(subport, pos); 2825 subport->busy_grinders++; 2826 2827 grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS; 2828 return 0; 2829 } 2830 2831 return 0; 2832 } 2833 2834 case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS: 2835 { 2836 struct rte_sched_pipe *pipe = grinder->pipe; 2837 2838 grinder->pipe_params = subport->pipe_profiles + pipe->profile; 2839 grinder->subport_params = port->subport_profiles + 2840 subport->profile; 2841 2842 grinder_prefetch_tc_queue_arrays(subport, pos); 2843 2844 if (subport->tc_ov_enabled) 2845 grinder_credits_update_with_tc_ov(port, subport, pos); 2846 else 2847 grinder_credits_update(port, subport, pos); 2848 2849 grinder->state = e_GRINDER_PREFETCH_MBUF; 2850 return 0; 2851 } 2852 2853 case e_GRINDER_PREFETCH_MBUF: 2854 { 2855 grinder_prefetch_mbuf(subport, pos); 2856 2857 grinder->state = e_GRINDER_READ_MBUF; 2858 return 0; 2859 } 2860 2861 case e_GRINDER_READ_MBUF: 2862 { 2863 uint32_t wrr_active, result = 0; 2864 2865 result = grinder_schedule(port, subport, pos); 2866 2867 wrr_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE); 2868 2869 /* Look for next packet within the same TC */ 2870 if (result && grinder->qmask) { 2871 if (wrr_active) 2872 grinder_wrr(subport, pos); 2873 2874 
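/* Prefetch the next packet of the queue just selected: the WRR winner for the best-effort TC, or the single queue of a higher priority TC */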
grinder_prefetch_mbuf(subport, pos); 2875 2876 return 1; 2877 } 2878 2879 if (wrr_active) 2880 grinder_wrr_store(subport, pos); 2881 2882 /* Look for another active TC within same pipe */ 2883 if (grinder_next_tc(port, subport, pos)) { 2884 grinder_prefetch_tc_queue_arrays(subport, pos); 2885 2886 grinder->state = e_GRINDER_PREFETCH_MBUF; 2887 return result; 2888 } 2889 2890 if (grinder->productive == 0 && 2891 subport->pipe_loop == RTE_SCHED_PIPE_INVALID) 2892 subport->pipe_loop = grinder->pindex; 2893 2894 grinder_evict(subport, pos); 2895 2896 /* Look for another active pipe */ 2897 if (grinder_next_pipe(port, subport, pos)) { 2898 grinder_prefetch_pipe(subport, pos); 2899 2900 grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS; 2901 return result; 2902 } 2903 2904 /* No active pipe found */ 2905 subport->busy_grinders--; 2906 2907 grinder->state = e_GRINDER_PREFETCH_PIPE; 2908 return result; 2909 } 2910 2911 default: 2912 rte_panic("Algorithmic error (invalid state)\n"); 2913 return 0; 2914 } 2915 } 2916 2917 static inline void 2918 rte_sched_port_time_resync(struct rte_sched_port *port) 2919 { 2920 uint64_t cycles = rte_get_tsc_cycles(); 2921 uint64_t cycles_diff; 2922 uint64_t bytes_diff; 2923 uint32_t i; 2924 2925 if (cycles < port->time_cpu_cycles) 2926 port->time_cpu_cycles = 0; 2927 2928 cycles_diff = cycles - port->time_cpu_cycles; 2929 /* Compute elapsed time in bytes */ 2930 bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT, 2931 port->inv_cycles_per_byte); 2932 2933 /* Advance port time */ 2934 port->time_cpu_cycles += 2935 (bytes_diff * port->cycles_per_byte) >> RTE_SCHED_TIME_SHIFT; 2936 port->time_cpu_bytes += bytes_diff; 2937 if (port->time < port->time_cpu_bytes) 2938 port->time = port->time_cpu_bytes; 2939 2940 /* Reset pipe loop detection */ 2941 for (i = 0; i < port->n_subports_per_port; i++) 2942 port->subports[i]->pipe_loop = RTE_SCHED_PIPE_INVALID; 2943 } 2944 2945 static inline int 2946 rte_sched_port_exceptions(struct rte_sched_subport *subport, int second_pass) 2947 { 2948 int exceptions; 2949 2950 /* Check if any exception flag is set */ 2951 exceptions = (second_pass && subport->busy_grinders == 0) || 2952 (subport->pipe_exhaustion == 1); 2953 2954 /* Clear exception flags */ 2955 subport->pipe_exhaustion = 0; 2956 2957 return exceptions; 2958 } 2959 2960 int 2961 rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts) 2962 { 2963 struct rte_sched_subport *subport; 2964 uint32_t subport_id = port->subport_id; 2965 uint32_t i, n_subports = 0, count; 2966 2967 port->pkts_out = pkts; 2968 port->n_pkts_out = 0; 2969 2970 rte_sched_port_time_resync(port); 2971 2972 /* Take each queue in the grinder one step further */ 2973 for (i = 0, count = 0; ; i++) { 2974 subport = port->subports[subport_id]; 2975 2976 count += grinder_handle(port, subport, 2977 i & (RTE_SCHED_PORT_N_GRINDERS - 1)); 2978 2979 if (count == n_pkts) { 2980 subport_id++; 2981 2982 if (subport_id == port->n_subports_per_port) 2983 subport_id = 0; 2984 2985 port->subport_id = subport_id; 2986 break; 2987 } 2988 2989 if (rte_sched_port_exceptions(subport, i >= RTE_SCHED_PORT_N_GRINDERS)) { 2990 i = 0; 2991 subport_id++; 2992 n_subports++; 2993 } 2994 2995 if (subport_id == port->n_subports_per_port) 2996 subport_id = 0; 2997 2998 if (n_subports == port->n_subports_per_port) { 2999 port->subport_id = subport_id; 3000 break; 3001 } 3002 } 3003 3004 return count; 3005 } 3006 3007 RTE_LOG_REGISTER_DEFAULT(sched_logtype, INFO); 3008
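/*
 * Illustrative usage sketch (a minimal example, not part of the library):
 * one way an application lcore might drive the enqueue/dequeue API above in
 * a run-to-completion loop. The identifiers sched_port, eth_port_id,
 * rx_queue_id and tx_queue_id are placeholders supplied by the application,
 * and each mbuf is assumed to have been classified and tagged with its
 * scheduler queue (e.g. via rte_sched_port_pkt_write()) before enqueue.
 *
 *	struct rte_mbuf *pkts[64];
 *	uint16_t n_rx;
 *	int n_deq;
 *
 *	n_rx = rte_eth_rx_burst(eth_port_id, rx_queue_id, pkts, 64);
 *	if (n_rx > 0)
 *		rte_sched_port_enqueue(sched_port, pkts, n_rx);
 *
 *	n_deq = rte_sched_port_dequeue(sched_port, pkts, 32);
 *	if (n_deq > 0)
 *		rte_eth_tx_burst(eth_port_id, tx_queue_id, pkts, (uint16_t)n_deq);
 */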