1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation 3 */ 4 5 #include <stdio.h> 6 #include <string.h> 7 8 #include <rte_common.h> 9 #include <rte_log.h> 10 #include <rte_malloc.h> 11 #include <rte_cycles.h> 12 #include <rte_prefetch.h> 13 #include <rte_branch_prediction.h> 14 #include <rte_mbuf.h> 15 #include <rte_bitmap.h> 16 #include <rte_reciprocal.h> 17 18 #include "rte_sched.h" 19 #include "rte_sched_common.h" 20 #include "rte_approx.h" 21 22 #ifdef __INTEL_COMPILER 23 #pragma warning(disable:2259) /* conversion may lose significant bits */ 24 #endif 25 26 #ifndef RTE_SCHED_PORT_N_GRINDERS 27 #define RTE_SCHED_PORT_N_GRINDERS 8 28 #endif 29 30 #define RTE_SCHED_TB_RATE_CONFIG_ERR (1e-7) 31 #define RTE_SCHED_WRR_SHIFT 3 32 #define RTE_SCHED_MAX_QUEUES_PER_TC RTE_SCHED_BE_QUEUES_PER_PIPE 33 #define RTE_SCHED_GRINDER_PCACHE_SIZE (64 / RTE_SCHED_QUEUES_PER_PIPE) 34 #define RTE_SCHED_PIPE_INVALID UINT32_MAX 35 #define RTE_SCHED_BMP_POS_INVALID UINT32_MAX 36 37 /* Scaling for cycles_per_byte calculation 38 * Chosen so that minimum rate is 480 bit/sec 39 */ 40 #define RTE_SCHED_TIME_SHIFT 8 41 42 struct rte_sched_pipe_profile { 43 /* Token bucket (TB) */ 44 uint64_t tb_period; 45 uint64_t tb_credits_per_period; 46 uint64_t tb_size; 47 48 /* Pipe traffic classes */ 49 uint64_t tc_period; 50 uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 51 uint8_t tc_ov_weight; 52 53 /* Pipe best-effort traffic class queues */ 54 uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE]; 55 }; 56 57 struct rte_sched_pipe { 58 /* Token bucket (TB) */ 59 uint64_t tb_time; /* time of last update */ 60 uint64_t tb_credits; 61 62 /* Pipe profile and flags */ 63 uint32_t profile; 64 65 /* Traffic classes (TCs) */ 66 uint64_t tc_time; /* time of next update */ 67 uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 68 69 /* Weighted Round Robin (WRR) */ 70 uint8_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE]; 71 72 /* TC oversubscription */ 73 uint64_t tc_ov_credits; 74 uint8_t tc_ov_period_id; 75 } __rte_cache_aligned; 76 77 struct rte_sched_queue { 78 uint16_t qw; 79 uint16_t qr; 80 }; 81 82 struct rte_sched_queue_extra { 83 struct rte_sched_queue_stats stats; 84 RTE_STD_C11 85 union { 86 struct rte_red red; 87 struct rte_pie pie; 88 }; 89 }; 90 91 enum grinder_state { 92 e_GRINDER_PREFETCH_PIPE = 0, 93 e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS, 94 e_GRINDER_PREFETCH_MBUF, 95 e_GRINDER_READ_MBUF 96 }; 97 98 struct rte_sched_subport_profile { 99 /* Token bucket (TB) */ 100 uint64_t tb_period; 101 uint64_t tb_credits_per_period; 102 uint64_t tb_size; 103 104 uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 105 uint64_t tc_period; 106 }; 107 108 struct rte_sched_grinder { 109 /* Pipe cache */ 110 uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE]; 111 uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE]; 112 uint32_t pcache_w; 113 uint32_t pcache_r; 114 115 /* Current pipe */ 116 enum grinder_state state; 117 uint32_t productive; 118 uint32_t pindex; 119 struct rte_sched_subport *subport; 120 struct rte_sched_subport_profile *subport_params; 121 struct rte_sched_pipe *pipe; 122 struct rte_sched_pipe_profile *pipe_params; 123 124 /* TC cache */ 125 uint8_t tccache_qmask[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 126 uint32_t tccache_qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 127 uint32_t tccache_w; 128 uint32_t tccache_r; 129 130 /* Current TC */ 131 uint32_t tc_index; 132 struct rte_sched_queue *queue[RTE_SCHED_MAX_QUEUES_PER_TC]; 133 struct rte_mbuf 
**qbase[RTE_SCHED_MAX_QUEUES_PER_TC]; 134 uint32_t qindex[RTE_SCHED_MAX_QUEUES_PER_TC]; 135 uint16_t qsize; 136 uint32_t qmask; 137 uint32_t qpos; 138 struct rte_mbuf *pkt; 139 140 /* WRR */ 141 uint16_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE]; 142 uint16_t wrr_mask[RTE_SCHED_BE_QUEUES_PER_PIPE]; 143 uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE]; 144 }; 145 146 struct rte_sched_subport { 147 /* Token bucket (TB) */ 148 uint64_t tb_time; /* time of last update */ 149 uint64_t tb_credits; 150 151 /* Traffic classes (TCs) */ 152 uint64_t tc_time; /* time of next update */ 153 uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 154 155 /* TC oversubscription */ 156 uint64_t tc_ov_wm; 157 uint64_t tc_ov_wm_min; 158 uint64_t tc_ov_wm_max; 159 uint8_t tc_ov_period_id; 160 uint8_t tc_ov; 161 uint32_t tc_ov_n; 162 double tc_ov_rate; 163 164 /* Statistics */ 165 struct rte_sched_subport_stats stats __rte_cache_aligned; 166 167 /* subport profile */ 168 uint32_t profile; 169 /* Subport pipes */ 170 uint32_t n_pipes_per_subport_enabled; 171 uint32_t n_pipe_profiles; 172 uint32_t n_max_pipe_profiles; 173 174 /* Pipe best-effort TC rate */ 175 uint64_t pipe_tc_be_rate_max; 176 177 /* Pipe queues size */ 178 uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 179 180 bool cman_enabled; 181 enum rte_sched_cman_mode cman; 182 183 RTE_STD_C11 184 union { 185 struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS]; 186 struct rte_pie_config pie_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 187 }; 188 189 /* Scheduling loop detection */ 190 uint32_t pipe_loop; 191 uint32_t pipe_exhaustion; 192 193 /* Bitmap */ 194 struct rte_bitmap *bmp; 195 uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16; 196 197 /* Grinders */ 198 struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS]; 199 uint32_t busy_grinders; 200 201 /* Queue base calculation */ 202 uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE]; 203 uint32_t qsize_sum; 204 205 struct rte_sched_pipe *pipe; 206 struct rte_sched_queue *queue; 207 struct rte_sched_queue_extra *queue_extra; 208 struct rte_sched_pipe_profile *pipe_profiles; 209 uint8_t *bmp_array; 210 struct rte_mbuf **queue_array; 211 uint8_t memory[0] __rte_cache_aligned; 212 213 /* TC oversubscription activation */ 214 int tc_ov_enabled; 215 } __rte_cache_aligned; 216 217 struct rte_sched_port { 218 /* User parameters */ 219 uint32_t n_subports_per_port; 220 uint32_t n_pipes_per_subport; 221 uint32_t n_pipes_per_subport_log2; 222 uint16_t pipe_queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; 223 uint8_t pipe_tc[RTE_SCHED_QUEUES_PER_PIPE]; 224 uint8_t tc_queue[RTE_SCHED_QUEUES_PER_PIPE]; 225 uint32_t n_subport_profiles; 226 uint32_t n_max_subport_profiles; 227 uint64_t rate; 228 uint32_t mtu; 229 uint32_t frame_overhead; 230 int socket; 231 232 /* Timing */ 233 uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cycles */ 234 uint64_t time_cpu_bytes; /* Current CPU time measured in bytes */ 235 uint64_t time; /* Current NIC TX time measured in bytes */ 236 struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */ 237 uint64_t cycles_per_byte; 238 239 /* Grinders */ 240 struct rte_mbuf **pkts_out; 241 uint32_t n_pkts_out; 242 uint32_t subport_id; 243 244 /* Large data structures */ 245 struct rte_sched_subport_profile *subport_profiles; 246 struct rte_sched_subport *subports[0] __rte_cache_aligned; 247 } __rte_cache_aligned; 248 249 enum rte_sched_subport_array { 250 e_RTE_SCHED_SUBPORT_ARRAY_PIPE = 0, 251 
e_RTE_SCHED_SUBPORT_ARRAY_QUEUE, 252 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA, 253 e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES, 254 e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY, 255 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY, 256 e_RTE_SCHED_SUBPORT_ARRAY_TOTAL, 257 }; 258 259 static inline uint32_t 260 rte_sched_subport_pipe_queues(struct rte_sched_subport *subport) 261 { 262 return RTE_SCHED_QUEUES_PER_PIPE * subport->n_pipes_per_subport_enabled; 263 } 264 265 static inline struct rte_mbuf ** 266 rte_sched_subport_pipe_qbase(struct rte_sched_subport *subport, uint32_t qindex) 267 { 268 uint32_t pindex = qindex >> 4; 269 uint32_t qpos = qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1); 270 271 return (subport->queue_array + pindex * 272 subport->qsize_sum + subport->qsize_add[qpos]); 273 } 274 275 static inline uint16_t 276 rte_sched_subport_pipe_qsize(struct rte_sched_port *port, 277 struct rte_sched_subport *subport, uint32_t qindex) 278 { 279 uint32_t tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)]; 280 281 return subport->qsize[tc]; 282 } 283 284 static inline uint32_t 285 rte_sched_port_queues_per_port(struct rte_sched_port *port) 286 { 287 uint32_t n_queues = 0, i; 288 289 for (i = 0; i < port->n_subports_per_port; i++) 290 n_queues += rte_sched_subport_pipe_queues(port->subports[i]); 291 292 return n_queues; 293 } 294 295 static inline uint16_t 296 rte_sched_port_pipe_queue(struct rte_sched_port *port, uint32_t traffic_class) 297 { 298 uint16_t pipe_queue = port->pipe_queue[traffic_class]; 299 300 return pipe_queue; 301 } 302 303 static inline uint8_t 304 rte_sched_port_pipe_tc(struct rte_sched_port *port, uint32_t qindex) 305 { 306 uint8_t pipe_tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)]; 307 308 return pipe_tc; 309 } 310 311 static inline uint8_t 312 rte_sched_port_tc_queue(struct rte_sched_port *port, uint32_t qindex) 313 { 314 uint8_t tc_queue = port->tc_queue[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)]; 315 316 return tc_queue; 317 } 318 319 static int 320 pipe_profile_check(struct rte_sched_pipe_params *params, 321 uint64_t rate, uint16_t *qsize) 322 { 323 uint32_t i; 324 325 /* Pipe parameters */ 326 if (params == NULL) { 327 RTE_LOG(ERR, SCHED, 328 "%s: Incorrect value for parameter params\n", __func__); 329 return -EINVAL; 330 } 331 332 /* TB rate: non-zero, not greater than port rate */ 333 if (params->tb_rate == 0 || 334 params->tb_rate > rate) { 335 RTE_LOG(ERR, SCHED, 336 "%s: Incorrect value for tb rate\n", __func__); 337 return -EINVAL; 338 } 339 340 /* TB size: non-zero */ 341 if (params->tb_size == 0) { 342 RTE_LOG(ERR, SCHED, 343 "%s: Incorrect value for tb size\n", __func__); 344 return -EINVAL; 345 } 346 347 /* TC rate: non-zero if qsize non-zero, less than pipe rate */ 348 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { 349 if ((qsize[i] == 0 && params->tc_rate[i] != 0) || 350 (qsize[i] != 0 && (params->tc_rate[i] == 0 || 351 params->tc_rate[i] > params->tb_rate))) { 352 RTE_LOG(ERR, SCHED, 353 "%s: Incorrect value for qsize or tc_rate\n", __func__); 354 return -EINVAL; 355 } 356 } 357 358 if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 || 359 qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) { 360 RTE_LOG(ERR, SCHED, 361 "%s: Incorrect value for be traffic class rate\n", __func__); 362 return -EINVAL; 363 } 364 365 /* TC period: non-zero */ 366 if (params->tc_period == 0) { 367 RTE_LOG(ERR, SCHED, 368 "%s: Incorrect value for tc period\n", __func__); 369 return -EINVAL; 370 } 371 372 /* Best effort tc oversubscription weight: non-zero */ 373 if 
(params->tc_ov_weight == 0) { 374 RTE_LOG(ERR, SCHED, 375 "%s: Incorrect value for tc ov weight\n", __func__); 376 return -EINVAL; 377 } 378 379 /* Queue WRR weights: non-zero */ 380 for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) { 381 if (params->wrr_weights[i] == 0) { 382 RTE_LOG(ERR, SCHED, 383 "%s: Incorrect value for wrr weight\n", __func__); 384 return -EINVAL; 385 } 386 } 387 388 return 0; 389 } 390 391 static int 392 subport_profile_check(struct rte_sched_subport_profile_params *params, 393 uint64_t rate) 394 { 395 uint32_t i; 396 397 /* Check user parameters */ 398 if (params == NULL) { 399 RTE_LOG(ERR, SCHED, "%s: " 400 "Incorrect value for parameter params\n", __func__); 401 return -EINVAL; 402 } 403 404 if (params->tb_rate == 0 || params->tb_rate > rate) { 405 RTE_LOG(ERR, SCHED, "%s: " 406 "Incorrect value for tb rate\n", __func__); 407 return -EINVAL; 408 } 409 410 if (params->tb_size == 0) { 411 RTE_LOG(ERR, SCHED, "%s: " 412 "Incorrect value for tb size\n", __func__); 413 return -EINVAL; 414 } 415 416 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { 417 uint64_t tc_rate = params->tc_rate[i]; 418 419 if (tc_rate == 0 || (tc_rate > params->tb_rate)) { 420 RTE_LOG(ERR, SCHED, "%s: " 421 "Incorrect value for tc rate\n", __func__); 422 return -EINVAL; 423 } 424 } 425 426 if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) { 427 RTE_LOG(ERR, SCHED, "%s: " 428 "Incorrect tc rate(best effort)\n", __func__); 429 return -EINVAL; 430 } 431 432 if (params->tc_period == 0) { 433 RTE_LOG(ERR, SCHED, "%s: " 434 "Incorrect value for tc period\n", __func__); 435 return -EINVAL; 436 } 437 438 return 0; 439 } 440 441 static int 442 rte_sched_port_check_params(struct rte_sched_port_params *params) 443 { 444 uint32_t i; 445 446 if (params == NULL) { 447 RTE_LOG(ERR, SCHED, 448 "%s: Incorrect value for parameter params\n", __func__); 449 return -EINVAL; 450 } 451 452 /* socket */ 453 if (params->socket < 0) { 454 RTE_LOG(ERR, SCHED, 455 "%s: Incorrect value for socket id\n", __func__); 456 return -EINVAL; 457 } 458 459 /* rate */ 460 if (params->rate == 0) { 461 RTE_LOG(ERR, SCHED, 462 "%s: Incorrect value for rate\n", __func__); 463 return -EINVAL; 464 } 465 466 /* mtu */ 467 if (params->mtu == 0) { 468 RTE_LOG(ERR, SCHED, 469 "%s: Incorrect value for mtu\n", __func__); 470 return -EINVAL; 471 } 472 473 /* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */ 474 if (params->n_subports_per_port == 0 || 475 params->n_subports_per_port > 1u << 16 || 476 !rte_is_power_of_2(params->n_subports_per_port)) { 477 RTE_LOG(ERR, SCHED, 478 "%s: Incorrect value for number of subports\n", __func__); 479 return -EINVAL; 480 } 481 482 if (params->subport_profiles == NULL || 483 params->n_subport_profiles == 0 || 484 params->n_max_subport_profiles == 0 || 485 params->n_subport_profiles > params->n_max_subport_profiles) { 486 RTE_LOG(ERR, SCHED, 487 "%s: Incorrect value for subport profiles\n", __func__); 488 return -EINVAL; 489 } 490 491 for (i = 0; i < params->n_subport_profiles; i++) { 492 struct rte_sched_subport_profile_params *p = 493 params->subport_profiles + i; 494 int status; 495 496 status = subport_profile_check(p, params->rate); 497 if (status != 0) { 498 RTE_LOG(ERR, SCHED, 499 "%s: subport profile check failed(%d)\n", 500 __func__, status); 501 return -EINVAL; 502 } 503 } 504 505 /* n_pipes_per_subport: non-zero, power of 2 */ 506 if (params->n_pipes_per_subport == 0 || 507 !rte_is_power_of_2(params->n_pipes_per_subport)) { 508 RTE_LOG(ERR, SCHED, 509 "%s: Incorrect 
value for maximum pipes number\n", __func__); 510 return -EINVAL; 511 } 512 513 return 0; 514 } 515 516 static uint32_t 517 rte_sched_subport_get_array_base(struct rte_sched_subport_params *params, 518 enum rte_sched_subport_array array) 519 { 520 uint32_t n_pipes_per_subport = params->n_pipes_per_subport_enabled; 521 uint32_t n_subport_pipe_queues = 522 RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport; 523 524 uint32_t size_pipe = n_pipes_per_subport * sizeof(struct rte_sched_pipe); 525 uint32_t size_queue = 526 n_subport_pipe_queues * sizeof(struct rte_sched_queue); 527 uint32_t size_queue_extra 528 = n_subport_pipe_queues * sizeof(struct rte_sched_queue_extra); 529 uint32_t size_pipe_profiles = params->n_max_pipe_profiles * 530 sizeof(struct rte_sched_pipe_profile); 531 uint32_t size_bmp_array = 532 rte_bitmap_get_memory_footprint(n_subport_pipe_queues); 533 uint32_t size_per_pipe_queue_array, size_queue_array; 534 535 uint32_t base, i; 536 537 size_per_pipe_queue_array = 0; 538 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { 539 if (i < RTE_SCHED_TRAFFIC_CLASS_BE) 540 size_per_pipe_queue_array += 541 params->qsize[i] * sizeof(struct rte_mbuf *); 542 else 543 size_per_pipe_queue_array += RTE_SCHED_MAX_QUEUES_PER_TC * 544 params->qsize[i] * sizeof(struct rte_mbuf *); 545 } 546 size_queue_array = n_pipes_per_subport * size_per_pipe_queue_array; 547 548 base = 0; 549 550 if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE) 551 return base; 552 base += RTE_CACHE_LINE_ROUNDUP(size_pipe); 553 554 if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE) 555 return base; 556 base += RTE_CACHE_LINE_ROUNDUP(size_queue); 557 558 if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA) 559 return base; 560 base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra); 561 562 if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES) 563 return base; 564 base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles); 565 566 if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY) 567 return base; 568 base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array); 569 570 if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY) 571 return base; 572 base += RTE_CACHE_LINE_ROUNDUP(size_queue_array); 573 574 return base; 575 } 576 577 static void 578 rte_sched_subport_config_qsize(struct rte_sched_subport *subport) 579 { 580 uint32_t i; 581 582 subport->qsize_add[0] = 0; 583 584 /* Strict priority traffic class */ 585 for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 586 subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1]; 587 588 /* Best-effort traffic class */ 589 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] = 590 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] + 591 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE]; 592 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] = 593 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] + 594 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE]; 595 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] = 596 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] + 597 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE]; 598 599 subport->qsize_sum = subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] + 600 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE]; 601 } 602 603 static void 604 rte_sched_port_log_pipe_profile(struct rte_sched_subport *subport, uint32_t i) 605 { 606 struct rte_sched_pipe_profile *p = subport->pipe_profiles + i; 607 608 RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n" 609 " Token bucket: period = %"PRIu64", credits per period = %"PRIu64", size = %"PRIu64"\n" 610 " Traffic classes: period = %"PRIu64",\n" 
611 " credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64 612 ", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64 613 ", %"PRIu64", %"PRIu64", %"PRIu64"]\n" 614 " Best-effort traffic class oversubscription: weight = %hhu\n" 615 " WRR cost: [%hhu, %hhu, %hhu, %hhu]\n", 616 i, 617 618 /* Token bucket */ 619 p->tb_period, 620 p->tb_credits_per_period, 621 p->tb_size, 622 623 /* Traffic classes */ 624 p->tc_period, 625 p->tc_credits_per_period[0], 626 p->tc_credits_per_period[1], 627 p->tc_credits_per_period[2], 628 p->tc_credits_per_period[3], 629 p->tc_credits_per_period[4], 630 p->tc_credits_per_period[5], 631 p->tc_credits_per_period[6], 632 p->tc_credits_per_period[7], 633 p->tc_credits_per_period[8], 634 p->tc_credits_per_period[9], 635 p->tc_credits_per_period[10], 636 p->tc_credits_per_period[11], 637 p->tc_credits_per_period[12], 638 639 /* Best-effort traffic class oversubscription */ 640 p->tc_ov_weight, 641 642 /* WRR */ 643 p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3]); 644 } 645 646 static void 647 rte_sched_port_log_subport_profile(struct rte_sched_port *port, uint32_t i) 648 { 649 struct rte_sched_subport_profile *p = port->subport_profiles + i; 650 651 RTE_LOG(DEBUG, SCHED, "Low level config for subport profile %u:\n" 652 "Token bucket: period = %"PRIu64", credits per period = %"PRIu64"," 653 "size = %"PRIu64"\n" 654 "Traffic classes: period = %"PRIu64",\n" 655 "credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64 656 " %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64 657 " %"PRIu64", %"PRIu64", %"PRIu64"]\n", 658 i, 659 660 /* Token bucket */ 661 p->tb_period, 662 p->tb_credits_per_period, 663 p->tb_size, 664 665 /* Traffic classes */ 666 p->tc_period, 667 p->tc_credits_per_period[0], 668 p->tc_credits_per_period[1], 669 p->tc_credits_per_period[2], 670 p->tc_credits_per_period[3], 671 p->tc_credits_per_period[4], 672 p->tc_credits_per_period[5], 673 p->tc_credits_per_period[6], 674 p->tc_credits_per_period[7], 675 p->tc_credits_per_period[8], 676 p->tc_credits_per_period[9], 677 p->tc_credits_per_period[10], 678 p->tc_credits_per_period[11], 679 p->tc_credits_per_period[12]); 680 } 681 682 static inline uint64_t 683 rte_sched_time_ms_to_bytes(uint64_t time_ms, uint64_t rate) 684 { 685 uint64_t time = time_ms; 686 687 time = (time * rate) / 1000; 688 689 return time; 690 } 691 692 static void 693 rte_sched_pipe_profile_convert(struct rte_sched_subport *subport, 694 struct rte_sched_pipe_params *src, 695 struct rte_sched_pipe_profile *dst, 696 uint64_t rate) 697 { 698 uint32_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE]; 699 uint32_t lcd1, lcd2, lcd; 700 uint32_t i; 701 702 /* Token Bucket */ 703 if (src->tb_rate == rate) { 704 dst->tb_credits_per_period = 1; 705 dst->tb_period = 1; 706 } else { 707 double tb_rate = (double) src->tb_rate 708 / (double) rate; 709 double d = RTE_SCHED_TB_RATE_CONFIG_ERR; 710 711 rte_approx_64(tb_rate, d, &dst->tb_credits_per_period, 712 &dst->tb_period); 713 } 714 715 dst->tb_size = src->tb_size; 716 717 /* Traffic Classes */ 718 dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period, 719 rate); 720 721 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 722 if (subport->qsize[i]) 723 dst->tc_credits_per_period[i] 724 = rte_sched_time_ms_to_bytes(src->tc_period, 725 src->tc_rate[i]); 726 727 dst->tc_ov_weight = src->tc_ov_weight; 728 729 /* WRR queues */ 730 wrr_cost[0] = src->wrr_weights[0]; 731 wrr_cost[1] = src->wrr_weights[1]; 732 wrr_cost[2] = src->wrr_weights[2]; 
733 wrr_cost[3] = src->wrr_weights[3]; 734 735 lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]); 736 lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]); 737 lcd = rte_get_lcd(lcd1, lcd2); 738 739 wrr_cost[0] = lcd / wrr_cost[0]; 740 wrr_cost[1] = lcd / wrr_cost[1]; 741 wrr_cost[2] = lcd / wrr_cost[2]; 742 wrr_cost[3] = lcd / wrr_cost[3]; 743 744 dst->wrr_cost[0] = (uint8_t) wrr_cost[0]; 745 dst->wrr_cost[1] = (uint8_t) wrr_cost[1]; 746 dst->wrr_cost[2] = (uint8_t) wrr_cost[2]; 747 dst->wrr_cost[3] = (uint8_t) wrr_cost[3]; 748 } 749 750 static void 751 rte_sched_subport_profile_convert(struct rte_sched_subport_profile_params *src, 752 struct rte_sched_subport_profile *dst, 753 uint64_t rate) 754 { 755 uint32_t i; 756 757 /* Token Bucket */ 758 if (src->tb_rate == rate) { 759 dst->tb_credits_per_period = 1; 760 dst->tb_period = 1; 761 } else { 762 double tb_rate = (double) src->tb_rate 763 / (double) rate; 764 double d = RTE_SCHED_TB_RATE_CONFIG_ERR; 765 766 rte_approx_64(tb_rate, d, &dst->tb_credits_per_period, 767 &dst->tb_period); 768 } 769 770 dst->tb_size = src->tb_size; 771 772 /* Traffic Classes */ 773 dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period, rate); 774 775 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 776 dst->tc_credits_per_period[i] 777 = rte_sched_time_ms_to_bytes(src->tc_period, 778 src->tc_rate[i]); 779 } 780 781 static void 782 rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport, 783 struct rte_sched_subport_params *params, uint64_t rate) 784 { 785 uint32_t i; 786 787 for (i = 0; i < subport->n_pipe_profiles; i++) { 788 struct rte_sched_pipe_params *src = params->pipe_profiles + i; 789 struct rte_sched_pipe_profile *dst = subport->pipe_profiles + i; 790 791 rte_sched_pipe_profile_convert(subport, src, dst, rate); 792 rte_sched_port_log_pipe_profile(subport, i); 793 } 794 795 subport->pipe_tc_be_rate_max = 0; 796 for (i = 0; i < subport->n_pipe_profiles; i++) { 797 struct rte_sched_pipe_params *src = params->pipe_profiles + i; 798 uint64_t pipe_tc_be_rate = src->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE]; 799 800 if (subport->pipe_tc_be_rate_max < pipe_tc_be_rate) 801 subport->pipe_tc_be_rate_max = pipe_tc_be_rate; 802 } 803 } 804 805 static void 806 rte_sched_port_config_subport_profile_table(struct rte_sched_port *port, 807 struct rte_sched_port_params *params, 808 uint64_t rate) 809 { 810 uint32_t i; 811 812 for (i = 0; i < port->n_subport_profiles; i++) { 813 struct rte_sched_subport_profile_params *src 814 = params->subport_profiles + i; 815 struct rte_sched_subport_profile *dst 816 = port->subport_profiles + i; 817 818 rte_sched_subport_profile_convert(src, dst, rate); 819 rte_sched_port_log_subport_profile(port, i); 820 } 821 } 822 823 static int 824 rte_sched_subport_check_params(struct rte_sched_subport_params *params, 825 uint32_t n_max_pipes_per_subport, 826 uint64_t rate) 827 { 828 uint32_t i; 829 830 /* Check user parameters */ 831 if (params == NULL) { 832 RTE_LOG(ERR, SCHED, 833 "%s: Incorrect value for parameter params\n", __func__); 834 return -EINVAL; 835 } 836 837 /* qsize: if non-zero, power of 2, 838 * no bigger than 32K (due to 16-bit read/write pointers) 839 */ 840 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { 841 uint16_t qsize = params->qsize[i]; 842 843 if (qsize != 0 && !rte_is_power_of_2(qsize)) { 844 RTE_LOG(ERR, SCHED, 845 "%s: Incorrect value for qsize\n", __func__); 846 return -EINVAL; 847 } 848 } 849 850 if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) { 851 RTE_LOG(ERR, SCHED, "%s: Incorrect 
qsize\n", __func__); 852 return -EINVAL; 853 } 854 855 /* n_pipes_per_subport: non-zero, power of 2 */ 856 if (params->n_pipes_per_subport_enabled == 0 || 857 params->n_pipes_per_subport_enabled > n_max_pipes_per_subport || 858 !rte_is_power_of_2(params->n_pipes_per_subport_enabled)) { 859 RTE_LOG(ERR, SCHED, 860 "%s: Incorrect value for pipes number\n", __func__); 861 return -EINVAL; 862 } 863 864 /* pipe_profiles and n_pipe_profiles */ 865 if (params->pipe_profiles == NULL || 866 params->n_pipe_profiles == 0 || 867 params->n_max_pipe_profiles == 0 || 868 params->n_pipe_profiles > params->n_max_pipe_profiles) { 869 RTE_LOG(ERR, SCHED, 870 "%s: Incorrect value for pipe profiles\n", __func__); 871 return -EINVAL; 872 } 873 874 for (i = 0; i < params->n_pipe_profiles; i++) { 875 struct rte_sched_pipe_params *p = params->pipe_profiles + i; 876 int status; 877 878 status = pipe_profile_check(p, rate, ¶ms->qsize[0]); 879 if (status != 0) { 880 RTE_LOG(ERR, SCHED, 881 "%s: Pipe profile check failed(%d)\n", __func__, status); 882 return -EINVAL; 883 } 884 } 885 886 return 0; 887 } 888 889 uint32_t 890 rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params, 891 struct rte_sched_subport_params **subport_params) 892 { 893 uint32_t size0 = 0, size1 = 0, i; 894 int status; 895 896 status = rte_sched_port_check_params(port_params); 897 if (status != 0) { 898 RTE_LOG(ERR, SCHED, 899 "%s: Port scheduler port params check failed (%d)\n", 900 __func__, status); 901 902 return 0; 903 } 904 905 for (i = 0; i < port_params->n_subports_per_port; i++) { 906 struct rte_sched_subport_params *sp = subport_params[i]; 907 908 status = rte_sched_subport_check_params(sp, 909 port_params->n_pipes_per_subport, 910 port_params->rate); 911 if (status != 0) { 912 RTE_LOG(ERR, SCHED, 913 "%s: Port scheduler subport params check failed (%d)\n", 914 __func__, status); 915 916 return 0; 917 } 918 } 919 920 size0 = sizeof(struct rte_sched_port); 921 922 for (i = 0; i < port_params->n_subports_per_port; i++) { 923 struct rte_sched_subport_params *sp = subport_params[i]; 924 925 size1 += rte_sched_subport_get_array_base(sp, 926 e_RTE_SCHED_SUBPORT_ARRAY_TOTAL); 927 } 928 929 return size0 + size1; 930 } 931 932 struct rte_sched_port * 933 rte_sched_port_config(struct rte_sched_port_params *params) 934 { 935 struct rte_sched_port *port = NULL; 936 uint32_t size0, size1, size2; 937 uint32_t cycles_per_byte; 938 uint32_t i, j; 939 int status; 940 941 status = rte_sched_port_check_params(params); 942 if (status != 0) { 943 RTE_LOG(ERR, SCHED, 944 "%s: Port scheduler params check failed (%d)\n", 945 __func__, status); 946 return NULL; 947 } 948 949 size0 = sizeof(struct rte_sched_port); 950 size1 = params->n_subports_per_port * sizeof(struct rte_sched_subport *); 951 size2 = params->n_max_subport_profiles * 952 sizeof(struct rte_sched_subport_profile); 953 954 /* Allocate memory to store the data structures */ 955 port = rte_zmalloc_socket("qos_params", size0 + size1, 956 RTE_CACHE_LINE_SIZE, params->socket); 957 if (port == NULL) { 958 RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__); 959 960 return NULL; 961 } 962 963 /* Allocate memory to store the subport profile */ 964 port->subport_profiles = rte_zmalloc_socket("subport_profile", size2, 965 RTE_CACHE_LINE_SIZE, params->socket); 966 if (port->subport_profiles == NULL) { 967 RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__); 968 rte_free(port); 969 return NULL; 970 } 971 972 /* User parameters */ 973 port->n_subports_per_port = 
params->n_subports_per_port; 974 port->n_subport_profiles = params->n_subport_profiles; 975 port->n_max_subport_profiles = params->n_max_subport_profiles; 976 port->n_pipes_per_subport = params->n_pipes_per_subport; 977 port->n_pipes_per_subport_log2 = 978 __builtin_ctz(params->n_pipes_per_subport); 979 port->socket = params->socket; 980 981 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 982 port->pipe_queue[i] = i; 983 984 for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) { 985 port->pipe_tc[i] = j; 986 987 if (j < RTE_SCHED_TRAFFIC_CLASS_BE) 988 j++; 989 } 990 991 for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) { 992 port->tc_queue[i] = j; 993 994 if (i >= RTE_SCHED_TRAFFIC_CLASS_BE) 995 j++; 996 } 997 port->rate = params->rate; 998 port->mtu = params->mtu + params->frame_overhead; 999 port->frame_overhead = params->frame_overhead; 1000 1001 /* Timing */ 1002 port->time_cpu_cycles = rte_get_tsc_cycles(); 1003 port->time_cpu_bytes = 0; 1004 port->time = 0; 1005 1006 /* Subport profile table */ 1007 rte_sched_port_config_subport_profile_table(port, params, port->rate); 1008 1009 cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT) 1010 / params->rate; 1011 port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte); 1012 port->cycles_per_byte = cycles_per_byte; 1013 1014 /* Grinders */ 1015 port->pkts_out = NULL; 1016 port->n_pkts_out = 0; 1017 port->subport_id = 0; 1018 1019 return port; 1020 } 1021 1022 static inline void 1023 rte_sched_subport_free(struct rte_sched_port *port, 1024 struct rte_sched_subport *subport) 1025 { 1026 uint32_t n_subport_pipe_queues; 1027 uint32_t qindex; 1028 1029 if (subport == NULL) 1030 return; 1031 1032 n_subport_pipe_queues = rte_sched_subport_pipe_queues(subport); 1033 1034 /* Free enqueued mbufs */ 1035 for (qindex = 0; qindex < n_subport_pipe_queues; qindex++) { 1036 struct rte_mbuf **mbufs = 1037 rte_sched_subport_pipe_qbase(subport, qindex); 1038 uint16_t qsize = rte_sched_subport_pipe_qsize(port, subport, qindex); 1039 if (qsize != 0) { 1040 struct rte_sched_queue *queue = subport->queue + qindex; 1041 uint16_t qr = queue->qr & (qsize - 1); 1042 uint16_t qw = queue->qw & (qsize - 1); 1043 1044 for (; qr != qw; qr = (qr + 1) & (qsize - 1)) 1045 rte_pktmbuf_free(mbufs[qr]); 1046 } 1047 } 1048 1049 rte_free(subport); 1050 } 1051 1052 void 1053 rte_sched_port_free(struct rte_sched_port *port) 1054 { 1055 uint32_t i; 1056 1057 /* Check user parameters */ 1058 if (port == NULL) 1059 return; 1060 1061 for (i = 0; i < port->n_subports_per_port; i++) 1062 rte_sched_subport_free(port, port->subports[i]); 1063 1064 rte_free(port->subport_profiles); 1065 rte_free(port); 1066 } 1067 1068 static void 1069 rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports) 1070 { 1071 uint32_t i; 1072 1073 for (i = 0; i < n_subports; i++) { 1074 struct rte_sched_subport *subport = port->subports[i]; 1075 1076 rte_sched_subport_free(port, subport); 1077 } 1078 1079 rte_free(port->subport_profiles); 1080 rte_free(port); 1081 } 1082 1083 static int 1084 rte_sched_red_config(struct rte_sched_port *port, 1085 struct rte_sched_subport *s, 1086 struct rte_sched_subport_params *params, 1087 uint32_t n_subports) 1088 { 1089 uint32_t i; 1090 1091 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { 1092 1093 uint32_t j; 1094 1095 for (j = 0; j < RTE_COLORS; j++) { 1096 /* if min/max are both zero, then RED is disabled */ 1097 if ((params->cman_params->red_params[i][j].min_th | 1098 params->cman_params->red_params[i][j].max_th) 
== 0) { 1099 continue; 1100 } 1101 1102 if (rte_red_config_init(&s->red_config[i][j], 1103 params->cman_params->red_params[i][j].wq_log2, 1104 params->cman_params->red_params[i][j].min_th, 1105 params->cman_params->red_params[i][j].max_th, 1106 params->cman_params->red_params[i][j].maxp_inv) != 0) { 1107 rte_sched_free_memory(port, n_subports); 1108 1109 RTE_LOG(NOTICE, SCHED, 1110 "%s: RED configuration init fails\n", __func__); 1111 return -EINVAL; 1112 } 1113 } 1114 } 1115 s->cman = RTE_SCHED_CMAN_RED; 1116 return 0; 1117 } 1118 1119 static int 1120 rte_sched_pie_config(struct rte_sched_port *port, 1121 struct rte_sched_subport *s, 1122 struct rte_sched_subport_params *params, 1123 uint32_t n_subports) 1124 { 1125 uint32_t i; 1126 1127 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { 1128 if (params->cman_params->pie_params[i].tailq_th > params->qsize[i]) { 1129 RTE_LOG(NOTICE, SCHED, 1130 "%s: PIE tailq threshold incorrect\n", __func__); 1131 return -EINVAL; 1132 } 1133 1134 if (rte_pie_config_init(&s->pie_config[i], 1135 params->cman_params->pie_params[i].qdelay_ref, 1136 params->cman_params->pie_params[i].dp_update_interval, 1137 params->cman_params->pie_params[i].max_burst, 1138 params->cman_params->pie_params[i].tailq_th) != 0) { 1139 rte_sched_free_memory(port, n_subports); 1140 1141 RTE_LOG(NOTICE, SCHED, 1142 "%s: PIE configuration init fails\n", __func__); 1143 return -EINVAL; 1144 } 1145 } 1146 s->cman = RTE_SCHED_CMAN_PIE; 1147 return 0; 1148 } 1149 1150 static int 1151 rte_sched_cman_config(struct rte_sched_port *port, 1152 struct rte_sched_subport *s, 1153 struct rte_sched_subport_params *params, 1154 uint32_t n_subports) 1155 { 1156 if (params->cman_params->cman_mode == RTE_SCHED_CMAN_RED) 1157 return rte_sched_red_config(port, s, params, n_subports); 1158 1159 else if (params->cman_params->cman_mode == RTE_SCHED_CMAN_PIE) 1160 return rte_sched_pie_config(port, s, params, n_subports); 1161 1162 return -EINVAL; 1163 } 1164 1165 int 1166 rte_sched_subport_tc_ov_config(struct rte_sched_port *port, 1167 uint32_t subport_id, 1168 bool tc_ov_enable) 1169 { 1170 struct rte_sched_subport *s; 1171 1172 if (port == NULL) { 1173 RTE_LOG(ERR, SCHED, 1174 "%s: Incorrect value for parameter port\n", __func__); 1175 return -EINVAL; 1176 } 1177 1178 if (subport_id >= port->n_subports_per_port) { 1179 RTE_LOG(ERR, SCHED, 1180 "%s: Incorrect value for parameter subport id\n", __func__); 1181 return -EINVAL; 1182 } 1183 1184 s = port->subports[subport_id]; 1185 s->tc_ov_enabled = tc_ov_enable ? 
1 : 0; 1186 1187 return 0; 1188 } 1189 1190 int 1191 rte_sched_subport_config(struct rte_sched_port *port, 1192 uint32_t subport_id, 1193 struct rte_sched_subport_params *params, 1194 uint32_t subport_profile_id) 1195 { 1196 struct rte_sched_subport *s = NULL; 1197 uint32_t n_subports = subport_id; 1198 struct rte_sched_subport_profile *profile; 1199 uint32_t n_subport_pipe_queues, i; 1200 uint32_t size0, size1, bmp_mem_size; 1201 int status; 1202 int ret; 1203 1204 /* Check user parameters */ 1205 if (port == NULL) { 1206 RTE_LOG(ERR, SCHED, 1207 "%s: Incorrect value for parameter port\n", __func__); 1208 return 0; 1209 } 1210 1211 if (subport_id >= port->n_subports_per_port) { 1212 RTE_LOG(ERR, SCHED, 1213 "%s: Incorrect value for subport id\n", __func__); 1214 ret = -EINVAL; 1215 goto out; 1216 } 1217 1218 if (subport_profile_id >= port->n_max_subport_profiles) { 1219 RTE_LOG(ERR, SCHED, "%s: " 1220 "Number of subport profile exceeds the max limit\n", 1221 __func__); 1222 ret = -EINVAL; 1223 goto out; 1224 } 1225 1226 /** Memory is allocated only on first invocation of the api for a 1227 * given subport. Subsequent invocation on same subport will just 1228 * update subport bandwidth parameter. 1229 **/ 1230 if (port->subports[subport_id] == NULL) { 1231 1232 status = rte_sched_subport_check_params(params, 1233 port->n_pipes_per_subport, 1234 port->rate); 1235 if (status != 0) { 1236 RTE_LOG(NOTICE, SCHED, 1237 "%s: Port scheduler params check failed (%d)\n", 1238 __func__, status); 1239 ret = -EINVAL; 1240 goto out; 1241 } 1242 1243 /* Determine the amount of memory to allocate */ 1244 size0 = sizeof(struct rte_sched_subport); 1245 size1 = rte_sched_subport_get_array_base(params, 1246 e_RTE_SCHED_SUBPORT_ARRAY_TOTAL); 1247 1248 /* Allocate memory to store the data structures */ 1249 s = rte_zmalloc_socket("subport_params", size0 + size1, 1250 RTE_CACHE_LINE_SIZE, port->socket); 1251 if (s == NULL) { 1252 RTE_LOG(ERR, SCHED, 1253 "%s: Memory allocation fails\n", __func__); 1254 ret = -ENOMEM; 1255 goto out; 1256 } 1257 1258 n_subports++; 1259 1260 /* Port */ 1261 port->subports[subport_id] = s; 1262 1263 s->tb_time = port->time; 1264 1265 /* compile time checks */ 1266 RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0); 1267 RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS & 1268 (RTE_SCHED_PORT_N_GRINDERS - 1)); 1269 1270 /* User parameters */ 1271 s->n_pipes_per_subport_enabled = 1272 params->n_pipes_per_subport_enabled; 1273 memcpy(s->qsize, params->qsize, sizeof(params->qsize)); 1274 s->n_pipe_profiles = params->n_pipe_profiles; 1275 s->n_max_pipe_profiles = params->n_max_pipe_profiles; 1276 1277 /* TC oversubscription is enabled by default */ 1278 s->tc_ov_enabled = 1; 1279 1280 if (params->cman_params != NULL) { 1281 s->cman_enabled = true; 1282 status = rte_sched_cman_config(port, s, params, n_subports); 1283 if (status) { 1284 RTE_LOG(NOTICE, SCHED, 1285 "%s: CMAN configuration fails\n", __func__); 1286 return status; 1287 } 1288 } else { 1289 s->cman_enabled = false; 1290 } 1291 1292 /* Scheduling loop detection */ 1293 s->pipe_loop = RTE_SCHED_PIPE_INVALID; 1294 s->pipe_exhaustion = 0; 1295 1296 /* Grinders */ 1297 s->busy_grinders = 0; 1298 1299 /* Queue base calculation */ 1300 rte_sched_subport_config_qsize(s); 1301 1302 /* Large data structures */ 1303 s->pipe = (struct rte_sched_pipe *) 1304 (s->memory + rte_sched_subport_get_array_base(params, 1305 e_RTE_SCHED_SUBPORT_ARRAY_PIPE)); 1306 s->queue = (struct rte_sched_queue *) 1307 (s->memory + 
rte_sched_subport_get_array_base(params, 1308 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)); 1309 s->queue_extra = (struct rte_sched_queue_extra *) 1310 (s->memory + rte_sched_subport_get_array_base(params, 1311 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)); 1312 s->pipe_profiles = (struct rte_sched_pipe_profile *) 1313 (s->memory + rte_sched_subport_get_array_base(params, 1314 e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)); 1315 s->bmp_array = s->memory + rte_sched_subport_get_array_base( 1316 params, e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY); 1317 s->queue_array = (struct rte_mbuf **) 1318 (s->memory + rte_sched_subport_get_array_base(params, 1319 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)); 1320 1321 /* Pipe profile table */ 1322 rte_sched_subport_config_pipe_profile_table(s, params, 1323 port->rate); 1324 1325 /* Bitmap */ 1326 n_subport_pipe_queues = rte_sched_subport_pipe_queues(s); 1327 bmp_mem_size = rte_bitmap_get_memory_footprint( 1328 n_subport_pipe_queues); 1329 s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array, 1330 bmp_mem_size); 1331 if (s->bmp == NULL) { 1332 RTE_LOG(ERR, SCHED, 1333 "%s: Subport bitmap init error\n", __func__); 1334 ret = -EINVAL; 1335 goto out; 1336 } 1337 1338 for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) 1339 s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID; 1340 1341 /* TC oversubscription */ 1342 s->tc_ov_wm_min = port->mtu; 1343 s->tc_ov_period_id = 0; 1344 s->tc_ov = 0; 1345 s->tc_ov_n = 0; 1346 s->tc_ov_rate = 0; 1347 } 1348 1349 { 1350 /* update subport parameters from subport profile table*/ 1351 profile = port->subport_profiles + subport_profile_id; 1352 1353 s = port->subports[subport_id]; 1354 1355 s->tb_credits = profile->tb_size / 2; 1356 1357 s->tc_time = port->time + profile->tc_period; 1358 1359 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 1360 if (s->qsize[i]) 1361 s->tc_credits[i] = 1362 profile->tc_credits_per_period[i]; 1363 else 1364 profile->tc_credits_per_period[i] = 0; 1365 1366 s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period, 1367 s->pipe_tc_be_rate_max); 1368 s->tc_ov_wm = s->tc_ov_wm_max; 1369 s->profile = subport_profile_id; 1370 1371 } 1372 1373 rte_sched_port_log_subport_profile(port, subport_profile_id); 1374 1375 return 0; 1376 1377 out: 1378 rte_sched_free_memory(port, n_subports); 1379 1380 return ret; 1381 } 1382 1383 int 1384 rte_sched_pipe_config(struct rte_sched_port *port, 1385 uint32_t subport_id, 1386 uint32_t pipe_id, 1387 int32_t pipe_profile) 1388 { 1389 struct rte_sched_subport *s; 1390 struct rte_sched_subport_profile *sp; 1391 struct rte_sched_pipe *p; 1392 struct rte_sched_pipe_profile *params; 1393 uint32_t n_subports = subport_id + 1; 1394 uint32_t deactivate, profile, i; 1395 int ret; 1396 1397 /* Check user parameters */ 1398 profile = (uint32_t) pipe_profile; 1399 deactivate = (pipe_profile < 0); 1400 1401 if (port == NULL) { 1402 RTE_LOG(ERR, SCHED, 1403 "%s: Incorrect value for parameter port\n", __func__); 1404 return -EINVAL; 1405 } 1406 1407 if (subport_id >= port->n_subports_per_port) { 1408 RTE_LOG(ERR, SCHED, 1409 "%s: Incorrect value for parameter subport id\n", __func__); 1410 ret = -EINVAL; 1411 goto out; 1412 } 1413 1414 s = port->subports[subport_id]; 1415 if (pipe_id >= s->n_pipes_per_subport_enabled) { 1416 RTE_LOG(ERR, SCHED, 1417 "%s: Incorrect value for parameter pipe id\n", __func__); 1418 ret = -EINVAL; 1419 goto out; 1420 } 1421 1422 if (!deactivate && profile >= s->n_pipe_profiles) { 1423 RTE_LOG(ERR, SCHED, 1424 "%s: Incorrect value for parameter pipe profile\n", 
__func__); 1425 ret = -EINVAL; 1426 goto out; 1427 } 1428 1429 sp = port->subport_profiles + s->profile; 1430 /* Handle the case when pipe already has a valid configuration */ 1431 p = s->pipe + pipe_id; 1432 if (p->tb_time) { 1433 params = s->pipe_profiles + p->profile; 1434 1435 double subport_tc_be_rate = 1436 (double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] 1437 / (double) sp->tc_period; 1438 double pipe_tc_be_rate = 1439 (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] 1440 / (double) params->tc_period; 1441 uint32_t tc_be_ov = s->tc_ov; 1442 1443 /* Unplug pipe from its subport */ 1444 s->tc_ov_n -= params->tc_ov_weight; 1445 s->tc_ov_rate -= pipe_tc_be_rate; 1446 s->tc_ov = s->tc_ov_rate > subport_tc_be_rate; 1447 1448 if (s->tc_ov != tc_be_ov) { 1449 RTE_LOG(DEBUG, SCHED, 1450 "Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n", 1451 subport_id, subport_tc_be_rate, s->tc_ov_rate); 1452 } 1453 1454 /* Reset the pipe */ 1455 memset(p, 0, sizeof(struct rte_sched_pipe)); 1456 } 1457 1458 if (deactivate) 1459 return 0; 1460 1461 /* Apply the new pipe configuration */ 1462 p->profile = profile; 1463 params = s->pipe_profiles + p->profile; 1464 1465 /* Token Bucket (TB) */ 1466 p->tb_time = port->time; 1467 p->tb_credits = params->tb_size / 2; 1468 1469 /* Traffic Classes (TCs) */ 1470 p->tc_time = port->time + params->tc_period; 1471 1472 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) 1473 if (s->qsize[i]) 1474 p->tc_credits[i] = params->tc_credits_per_period[i]; 1475 1476 { 1477 /* Subport best effort tc oversubscription */ 1478 double subport_tc_be_rate = 1479 (double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] 1480 / (double) sp->tc_period; 1481 double pipe_tc_be_rate = 1482 (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] 1483 / (double) params->tc_period; 1484 uint32_t tc_be_ov = s->tc_ov; 1485 1486 s->tc_ov_n += params->tc_ov_weight; 1487 s->tc_ov_rate += pipe_tc_be_rate; 1488 s->tc_ov = s->tc_ov_rate > subport_tc_be_rate; 1489 1490 if (s->tc_ov != tc_be_ov) { 1491 RTE_LOG(DEBUG, SCHED, 1492 "Subport %u Best effort TC oversubscription is ON (%.4lf < %.4lf)\n", 1493 subport_id, subport_tc_be_rate, s->tc_ov_rate); 1494 } 1495 p->tc_ov_period_id = s->tc_ov_period_id; 1496 p->tc_ov_credits = s->tc_ov_wm; 1497 } 1498 1499 return 0; 1500 1501 out: 1502 rte_sched_free_memory(port, n_subports); 1503 1504 return ret; 1505 } 1506 1507 int 1508 rte_sched_subport_pipe_profile_add(struct rte_sched_port *port, 1509 uint32_t subport_id, 1510 struct rte_sched_pipe_params *params, 1511 uint32_t *pipe_profile_id) 1512 { 1513 struct rte_sched_subport *s; 1514 struct rte_sched_pipe_profile *pp; 1515 uint32_t i; 1516 int status; 1517 1518 /* Port */ 1519 if (port == NULL) { 1520 RTE_LOG(ERR, SCHED, 1521 "%s: Incorrect value for parameter port\n", __func__); 1522 return -EINVAL; 1523 } 1524 1525 /* Subport id must not exceed the max limit */ 1526 if (subport_id >= port->n_subports_per_port) { 1527 RTE_LOG(ERR, SCHED, 1528 "%s: Incorrect value for subport id\n", __func__); 1529 return -EINVAL; 1530 } 1531 1532 s = port->subports[subport_id]; 1533 1534 /* Pipe profiles must not exceed the max limit */ 1535 if (s->n_pipe_profiles >= s->n_max_pipe_profiles) { 1536 RTE_LOG(ERR, SCHED, 1537 "%s: Number of pipe profiles exceeds the max limit\n", __func__); 1538 return -EINVAL; 1539 } 1540 1541 /* Pipe params */ 1542 status = pipe_profile_check(params, port->rate, &s->qsize[0]); 1543 if (status != 0) { 1544 RTE_LOG(ERR, SCHED, 1545 "%s:
Pipe profile check failed(%d)\n", __func__, status); 1546 return -EINVAL; 1547 } 1548 1549 pp = &s->pipe_profiles[s->n_pipe_profiles]; 1550 rte_sched_pipe_profile_convert(s, params, pp, port->rate); 1551 1552 /* Pipe profile must not already exist */ 1553 for (i = 0; i < s->n_pipe_profiles; i++) 1554 if (memcmp(s->pipe_profiles + i, pp, sizeof(*pp)) == 0) { 1555 RTE_LOG(ERR, SCHED, 1556 "%s: Pipe profile exists\n", __func__); 1557 return -EINVAL; 1558 } 1559 1560 /* Pipe profile commit */ 1561 *pipe_profile_id = s->n_pipe_profiles; 1562 s->n_pipe_profiles++; 1563 1564 if (s->pipe_tc_be_rate_max < params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE]) 1565 s->pipe_tc_be_rate_max = params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE]; 1566 1567 rte_sched_port_log_pipe_profile(s, *pipe_profile_id); 1568 1569 return 0; 1570 } 1571 1572 int 1573 rte_sched_port_subport_profile_add(struct rte_sched_port *port, 1574 struct rte_sched_subport_profile_params *params, 1575 uint32_t *subport_profile_id) 1576 { 1577 int status; 1578 uint32_t i; 1579 struct rte_sched_subport_profile *dst; 1580 1581 /* Port */ 1582 if (port == NULL) { 1583 RTE_LOG(ERR, SCHED, "%s: " 1584 "Incorrect value for parameter port\n", __func__); 1585 return -EINVAL; 1586 } 1587 1588 if (params == NULL) { 1589 RTE_LOG(ERR, SCHED, "%s: " 1590 "Incorrect value for parameter profile\n", __func__); 1591 return -EINVAL; 1592 } 1593 1594 if (subport_profile_id == NULL) { 1595 RTE_LOG(ERR, SCHED, "%s: " 1596 "Incorrect value for parameter subport_profile_id\n", 1597 __func__); 1598 return -EINVAL; 1599 } 1600 1601 dst = port->subport_profiles + port->n_subport_profiles; 1602 1603 /* Subport profiles must not exceed the max limit */ 1604 if (port->n_subport_profiles >= port->n_max_subport_profiles) { 1605 RTE_LOG(ERR, SCHED, "%s: " 1606 "Number of subport profiles exceeds the max limit\n", 1607 __func__); 1608 return -EINVAL; 1609 } 1610 1611 status = subport_profile_check(params, port->rate); 1612 if (status != 0) { 1613 RTE_LOG(ERR, SCHED, 1614 "%s: subport profile check failed(%d)\n", __func__, status); 1615 return -EINVAL; 1616 } 1617 1618 rte_sched_subport_profile_convert(params, dst, port->rate); 1619 1620 /* Subport profile must not already exist */ 1621 for (i = 0; i < port->n_subport_profiles; i++) 1622 if (memcmp(port->subport_profiles + i, 1623 dst, sizeof(*dst)) == 0) { 1624 RTE_LOG(ERR, SCHED, 1625 "%s: subport profile exists\n", __func__); 1626 return -EINVAL; 1627 } 1628 1629 /* Subport profile commit */ 1630 *subport_profile_id = port->n_subport_profiles; 1631 port->n_subport_profiles++; 1632 1633 rte_sched_port_log_subport_profile(port, *subport_profile_id); 1634 1635 return 0; 1636 } 1637 1638 static inline uint32_t 1639 rte_sched_port_qindex(struct rte_sched_port *port, 1640 uint32_t subport, 1641 uint32_t pipe, 1642 uint32_t traffic_class, 1643 uint32_t queue) 1644 { 1645 return ((subport & (port->n_subports_per_port - 1)) << 1646 (port->n_pipes_per_subport_log2 + 4)) | 1647 ((pipe & 1648 (port->subports[subport]->n_pipes_per_subport_enabled - 1)) << 4) | 1649 ((rte_sched_port_pipe_queue(port, traffic_class) + queue) & 1650 (RTE_SCHED_QUEUES_PER_PIPE - 1)); 1651 } 1652 1653 void 1654 rte_sched_port_pkt_write(struct rte_sched_port *port, 1655 struct rte_mbuf *pkt, 1656 uint32_t subport, uint32_t pipe, 1657 uint32_t traffic_class, 1658 uint32_t queue, enum rte_color color) 1659 { 1660 uint32_t queue_id = 1661 rte_sched_port_qindex(port, subport, pipe, traffic_class, queue); 1662 1663 rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color); 1664 }
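/*
 * Illustrative usage sketch (not part of the library): an application
 * typically pairs rte_sched_port_pkt_write() above with
 * rte_sched_port_enqueue() defined later in this file. The helper
 * app_classify() and the variables port, pkts and n_pkts below are
 * hypothetical application-side names, shown only to indicate the
 * intended call order.
 *
 *	uint32_t subport, pipe, tc, queue, i;
 *	int n_enqueued;
 *
 *	for (i = 0; i < n_pkts; i++) {
 *		app_classify(pkts[i], &subport, &pipe, &tc, &queue);
 *		rte_sched_port_pkt_write(port, pkts[i], subport, pipe,
 *			tc, queue, RTE_COLOR_GREEN);
 *	}
 *	n_enqueued = rte_sched_port_enqueue(port, pkts, n_pkts);
 */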
1665 1666 void 1667 rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port, 1668 const struct rte_mbuf *pkt, 1669 uint32_t *subport, uint32_t *pipe, 1670 uint32_t *traffic_class, uint32_t *queue) 1671 { 1672 uint32_t queue_id = rte_mbuf_sched_queue_get(pkt); 1673 1674 *subport = queue_id >> (port->n_pipes_per_subport_log2 + 4); 1675 *pipe = (queue_id >> 4) & 1676 (port->subports[*subport]->n_pipes_per_subport_enabled - 1); 1677 *traffic_class = rte_sched_port_pipe_tc(port, queue_id); 1678 *queue = rte_sched_port_tc_queue(port, queue_id); 1679 } 1680 1681 enum rte_color 1682 rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt) 1683 { 1684 return (enum rte_color)rte_mbuf_sched_color_get(pkt); 1685 } 1686 1687 int 1688 rte_sched_subport_read_stats(struct rte_sched_port *port, 1689 uint32_t subport_id, 1690 struct rte_sched_subport_stats *stats, 1691 uint32_t *tc_ov) 1692 { 1693 struct rte_sched_subport *s; 1694 1695 /* Check user parameters */ 1696 if (port == NULL) { 1697 RTE_LOG(ERR, SCHED, 1698 "%s: Incorrect value for parameter port\n", __func__); 1699 return -EINVAL; 1700 } 1701 1702 if (subport_id >= port->n_subports_per_port) { 1703 RTE_LOG(ERR, SCHED, 1704 "%s: Incorrect value for subport id\n", __func__); 1705 return -EINVAL; 1706 } 1707 1708 if (stats == NULL) { 1709 RTE_LOG(ERR, SCHED, 1710 "%s: Incorrect value for parameter stats\n", __func__); 1711 return -EINVAL; 1712 } 1713 1714 if (tc_ov == NULL) { 1715 RTE_LOG(ERR, SCHED, 1716 "%s: Incorrect value for tc_ov\n", __func__); 1717 return -EINVAL; 1718 } 1719 1720 s = port->subports[subport_id]; 1721 1722 /* Copy subport stats and clear */ 1723 memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats)); 1724 memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats)); 1725 1726 /* Subport TC oversubscription status */ 1727 *tc_ov = s->tc_ov; 1728 1729 return 0; 1730 } 1731 1732 int 1733 rte_sched_queue_read_stats(struct rte_sched_port *port, 1734 uint32_t queue_id, 1735 struct rte_sched_queue_stats *stats, 1736 uint16_t *qlen) 1737 { 1738 struct rte_sched_subport *s; 1739 struct rte_sched_queue *q; 1740 struct rte_sched_queue_extra *qe; 1741 uint32_t subport_id, subport_qmask, subport_qindex; 1742 1743 /* Check user parameters */ 1744 if (port == NULL) { 1745 RTE_LOG(ERR, SCHED, 1746 "%s: Incorrect value for parameter port\n", __func__); 1747 return -EINVAL; 1748 } 1749 1750 if (queue_id >= rte_sched_port_queues_per_port(port)) { 1751 RTE_LOG(ERR, SCHED, 1752 "%s: Incorrect value for queue id\n", __func__); 1753 return -EINVAL; 1754 } 1755 1756 if (stats == NULL) { 1757 RTE_LOG(ERR, SCHED, 1758 "%s: Incorrect value for parameter stats\n", __func__); 1759 return -EINVAL; 1760 } 1761 1762 if (qlen == NULL) { 1763 RTE_LOG(ERR, SCHED, 1764 "%s: Incorrect value for parameter qlen\n", __func__); 1765 return -EINVAL; 1766 } 1767 subport_qmask = port->n_pipes_per_subport_log2 + 4; 1768 subport_id = (queue_id >> subport_qmask) & (port->n_subports_per_port - 1); 1769 1770 s = port->subports[subport_id]; 1771 subport_qindex = ((1 << subport_qmask) - 1) & queue_id; 1772 q = s->queue + subport_qindex; 1773 qe = s->queue_extra + subport_qindex; 1774 1775 /* Copy queue stats and clear */ 1776 memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats)); 1777 memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats)); 1778 1779 /* Queue length */ 1780 *qlen = q->qw - q->qr; 1781 1782 return 0; 1783 } 1784 1785 #ifdef RTE_SCHED_DEBUG 1786 1787 static inline int 1788 rte_sched_port_queue_is_empty(struct rte_sched_subport 
*subport, 1789 uint32_t qindex) 1790 { 1791 struct rte_sched_queue *queue = subport->queue + qindex; 1792 1793 return queue->qr == queue->qw; 1794 } 1795 1796 #endif /* RTE_SCHED_DEBUG */ 1797 1798 static inline void 1799 rte_sched_port_update_subport_stats(struct rte_sched_port *port, 1800 struct rte_sched_subport *subport, 1801 uint32_t qindex, 1802 struct rte_mbuf *pkt) 1803 { 1804 uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex); 1805 uint32_t pkt_len = pkt->pkt_len; 1806 1807 subport->stats.n_pkts_tc[tc_index] += 1; 1808 subport->stats.n_bytes_tc[tc_index] += pkt_len; 1809 } 1810 1811 static inline void 1812 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, 1813 struct rte_sched_subport *subport, 1814 uint32_t qindex, 1815 struct rte_mbuf *pkt, 1816 uint32_t n_pkts_cman_dropped) 1817 { 1818 uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex); 1819 uint32_t pkt_len = pkt->pkt_len; 1820 1821 subport->stats.n_pkts_tc_dropped[tc_index] += 1; 1822 subport->stats.n_bytes_tc_dropped[tc_index] += pkt_len; 1823 subport->stats.n_pkts_cman_dropped[tc_index] += n_pkts_cman_dropped; 1824 } 1825 1826 static inline void 1827 rte_sched_port_update_queue_stats(struct rte_sched_subport *subport, 1828 uint32_t qindex, 1829 struct rte_mbuf *pkt) 1830 { 1831 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex; 1832 uint32_t pkt_len = pkt->pkt_len; 1833 1834 qe->stats.n_pkts += 1; 1835 qe->stats.n_bytes += pkt_len; 1836 } 1837 1838 static inline void 1839 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport, 1840 uint32_t qindex, 1841 struct rte_mbuf *pkt, 1842 uint32_t n_pkts_cman_dropped) 1843 { 1844 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex; 1845 uint32_t pkt_len = pkt->pkt_len; 1846 1847 qe->stats.n_pkts_dropped += 1; 1848 qe->stats.n_bytes_dropped += pkt_len; 1849 if (subport->cman_enabled) 1850 qe->stats.n_pkts_cman_dropped += n_pkts_cman_dropped; 1851 } 1852 1853 static inline int 1854 rte_sched_port_cman_drop(struct rte_sched_port *port, 1855 struct rte_sched_subport *subport, 1856 struct rte_mbuf *pkt, 1857 uint32_t qindex, 1858 uint16_t qlen) 1859 { 1860 if (!subport->cman_enabled) 1861 return 0; 1862 1863 struct rte_sched_queue_extra *qe; 1864 uint32_t tc_index; 1865 1866 tc_index = rte_sched_port_pipe_tc(port, qindex); 1867 qe = subport->queue_extra + qindex; 1868 1869 /* RED */ 1870 if (subport->cman == RTE_SCHED_CMAN_RED) { 1871 struct rte_red_config *red_cfg; 1872 struct rte_red *red; 1873 enum rte_color color; 1874 1875 color = rte_sched_port_pkt_read_color(pkt); 1876 red_cfg = &subport->red_config[tc_index][color]; 1877 1878 if ((red_cfg->min_th | red_cfg->max_th) == 0) 1879 return 0; 1880 1881 red = &qe->red; 1882 1883 return rte_red_enqueue(red_cfg, red, qlen, port->time); 1884 } 1885 1886 /* PIE */ 1887 struct rte_pie_config *pie_cfg = &subport->pie_config[tc_index]; 1888 struct rte_pie *pie = &qe->pie; 1889 1890 return rte_pie_enqueue(pie_cfg, pie, qlen, pkt->pkt_len, port->time_cpu_cycles); 1891 } 1892 1893 static inline void 1894 rte_sched_port_red_set_queue_empty_timestamp(struct rte_sched_port *port, 1895 struct rte_sched_subport *subport, uint32_t qindex) 1896 { 1897 if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_RED) { 1898 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex; 1899 struct rte_red *red = &qe->red; 1900 1901 rte_red_mark_queue_empty(red, port->time); 1902 } 1903 } 1904 1905 static inline void 1906 rte_sched_port_pie_dequeue(struct 
rte_sched_subport *subport, 1907 uint32_t qindex, uint32_t pkt_len, uint64_t time) { 1908 if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_PIE) { 1909 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex; 1910 struct rte_pie *pie = &qe->pie; 1911 1912 /* Update queue length */ 1913 pie->qlen -= 1; 1914 pie->qlen_bytes -= pkt_len; 1915 1916 rte_pie_dequeue(pie, pkt_len, time); 1917 } 1918 } 1919 1920 #ifdef RTE_SCHED_DEBUG 1921 1922 static inline void 1923 debug_check_queue_slab(struct rte_sched_subport *subport, uint32_t bmp_pos, 1924 uint64_t bmp_slab) 1925 { 1926 uint64_t mask; 1927 uint32_t i, panic; 1928 1929 if (bmp_slab == 0) 1930 rte_panic("Empty slab at position %u\n", bmp_pos); 1931 1932 panic = 0; 1933 for (i = 0, mask = 1; i < 64; i++, mask <<= 1) { 1934 if (mask & bmp_slab) { 1935 if (rte_sched_port_queue_is_empty(subport, bmp_pos + i)) { 1936 printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i); 1937 panic = 1; 1938 } 1939 } 1940 } 1941 1942 if (panic) 1943 rte_panic("Empty queues in slab 0x%" PRIx64 "starting at position %u\n", 1944 bmp_slab, bmp_pos); 1945 } 1946 1947 #endif /* RTE_SCHED_DEBUG */ 1948 1949 static inline struct rte_sched_subport * 1950 rte_sched_port_subport(struct rte_sched_port *port, 1951 struct rte_mbuf *pkt) 1952 { 1953 uint32_t queue_id = rte_mbuf_sched_queue_get(pkt); 1954 uint32_t subport_id = queue_id >> (port->n_pipes_per_subport_log2 + 4); 1955 1956 return port->subports[subport_id]; 1957 } 1958 1959 static inline uint32_t 1960 rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport, 1961 struct rte_mbuf *pkt, uint32_t subport_qmask) 1962 { 1963 struct rte_sched_queue *q; 1964 struct rte_sched_queue_extra *qe; 1965 uint32_t qindex = rte_mbuf_sched_queue_get(pkt); 1966 uint32_t subport_queue_id = subport_qmask & qindex; 1967 1968 q = subport->queue + subport_queue_id; 1969 rte_prefetch0(q); 1970 qe = subport->queue_extra + subport_queue_id; 1971 rte_prefetch0(qe); 1972 1973 return subport_queue_id; 1974 } 1975 1976 static inline void 1977 rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port, 1978 struct rte_sched_subport *subport, 1979 uint32_t qindex, 1980 struct rte_mbuf **qbase) 1981 { 1982 struct rte_sched_queue *q; 1983 struct rte_mbuf **q_qw; 1984 uint16_t qsize; 1985 1986 q = subport->queue + qindex; 1987 qsize = rte_sched_subport_pipe_qsize(port, subport, qindex); 1988 q_qw = qbase + (q->qw & (qsize - 1)); 1989 1990 rte_prefetch0(q_qw); 1991 rte_bitmap_prefetch0(subport->bmp, qindex); 1992 } 1993 1994 static inline int 1995 rte_sched_port_enqueue_qwa(struct rte_sched_port *port, 1996 struct rte_sched_subport *subport, 1997 uint32_t qindex, 1998 struct rte_mbuf **qbase, 1999 struct rte_mbuf *pkt) 2000 { 2001 struct rte_sched_queue *q; 2002 uint16_t qsize; 2003 uint16_t qlen; 2004 2005 q = subport->queue + qindex; 2006 qsize = rte_sched_subport_pipe_qsize(port, subport, qindex); 2007 qlen = q->qw - q->qr; 2008 2009 /* Drop the packet (and update drop stats) when queue is full */ 2010 if (unlikely(rte_sched_port_cman_drop(port, subport, pkt, qindex, qlen) || 2011 (qlen >= qsize))) { 2012 rte_pktmbuf_free(pkt); 2013 rte_sched_port_update_subport_stats_on_drop(port, subport, 2014 qindex, pkt, qlen < qsize); 2015 rte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt, 2016 qlen < qsize); 2017 return 0; 2018 } 2019 2020 /* Enqueue packet */ 2021 qbase[q->qw & (qsize - 1)] = pkt; 2022 q->qw++; 2023 2024 /* Activate queue in the subport bitmap */ 2025 rte_bitmap_set(subport->bmp, 

/*
 * The enqueue function implements a 4-level pipeline with each stage
 * processing two different packets. The purpose of using a pipeline
 * is to hide the latency of prefetching the data structures. The
 * naming convention is presented in the diagram below:
 *
 *   p00  _______   p10  _______   p20  _______   p30  _______
 * ----->|       |----->|       |----->|       |----->|       |----->
 *       |   0   |      |   1   |      |   2   |      |   3   |
 * ----->|_______|----->|_______|----->|_______|----->|_______|----->
 *   p01            p11            p21            p31
 *
 */
int
rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
		       uint32_t n_pkts)
{
	struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
		*pkt30, *pkt31, *pkt_last;
	struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base,
		**q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
	struct rte_sched_subport *subport00, *subport01, *subport10, *subport11,
		*subport20, *subport21, *subport30, *subport31, *subport_last;
	uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
	uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
	uint32_t subport_qmask;
	uint32_t result, i;

	result = 0;
	subport_qmask = (1 << (port->n_pipes_per_subport_log2 + 4)) - 1;

	/*
	 * Less than 6 input packets available, which is not enough to
	 * feed the pipeline
	 */
	if (unlikely(n_pkts < 6)) {
		struct rte_sched_subport *subports[5];
		struct rte_mbuf **q_base[5];
		uint32_t q[5];

		/* Prefetch the mbuf structure of each packet */
		for (i = 0; i < n_pkts; i++)
			rte_prefetch0(pkts[i]);

		/* Prefetch the subport structure for each packet */
		for (i = 0; i < n_pkts; i++)
			subports[i] = rte_sched_port_subport(port, pkts[i]);

		/* Prefetch the queue structure for each queue */
		for (i = 0; i < n_pkts; i++)
			q[i] = rte_sched_port_enqueue_qptrs_prefetch0(subports[i],
					pkts[i], subport_qmask);

		/* Prefetch the write pointer location of each queue */
		for (i = 0; i < n_pkts; i++) {
			q_base[i] = rte_sched_subport_pipe_qbase(subports[i], q[i]);
			rte_sched_port_enqueue_qwa_prefetch0(port, subports[i],
				q[i], q_base[i]);
		}

		/* Write each packet to its queue */
		for (i = 0; i < n_pkts; i++)
			result += rte_sched_port_enqueue_qwa(port, subports[i],
						q[i], q_base[i], pkts[i]);

		return result;
	}

	/* Feed the first 3 stages of the pipeline (6 packets needed) */
	pkt20 = pkts[0];
	pkt21 = pkts[1];
	rte_prefetch0(pkt20);
	rte_prefetch0(pkt21);

	pkt10 = pkts[2];
	pkt11 = pkts[3];
	rte_prefetch0(pkt10);
	rte_prefetch0(pkt11);

	subport20 = rte_sched_port_subport(port, pkt20);
	subport21 = rte_sched_port_subport(port, pkt21);
	q20 = rte_sched_port_enqueue_qptrs_prefetch0(subport20,
			pkt20, subport_qmask);
	q21 = rte_sched_port_enqueue_qptrs_prefetch0(subport21,
			pkt21, subport_qmask);

	pkt00 = pkts[4];
	pkt01 = pkts[5];
	rte_prefetch0(pkt00);
	rte_prefetch0(pkt01);

	subport10 = rte_sched_port_subport(port, pkt10);
	subport11 = rte_sched_port_subport(port, pkt11);
	q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
			pkt10, subport_qmask);
	q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
			pkt11, subport_qmask);

	q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
	q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);

	/* Run the pipeline */
	for (i = 6; i < (n_pkts & (~1)); i += 2) {
		/* Propagate stage inputs */
		pkt30 = pkt20;
		pkt31 = pkt21;
		pkt20 = pkt10;
		pkt21 = pkt11;
		pkt10 = pkt00;
		pkt11 = pkt01;
		q30 = q20;
		q31 = q21;
		q20 = q10;
		q21 = q11;
		subport30 = subport20;
		subport31 = subport21;
		subport20 = subport10;
		subport21 = subport11;
		q30_base = q20_base;
		q31_base = q21_base;

		/* Stage 0: Get packets in */
		pkt00 = pkts[i];
		pkt01 = pkts[i + 1];
		rte_prefetch0(pkt00);
		rte_prefetch0(pkt01);

		/* Stage 1: Prefetch subport and queue structure storing queue pointers */
		subport10 = rte_sched_port_subport(port, pkt10);
		subport11 = rte_sched_port_subport(port, pkt11);
		q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
				pkt10, subport_qmask);
		q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
				pkt11, subport_qmask);

		/* Stage 2: Prefetch queue write location */
		q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
		q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
		rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
		rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);

		/* Stage 3: Write packet to queue and activate queue */
		r30 = rte_sched_port_enqueue_qwa(port, subport30,
				q30, q30_base, pkt30);
		r31 = rte_sched_port_enqueue_qwa(port, subport31,
				q31, q31_base, pkt31);
		result += r30 + r31;
	}

	/*
	 * Drain the pipeline (exactly 6 packets).
	 * Handle the last packet in the case
	 * of an odd number of input packets.
	 */
	pkt_last = pkts[n_pkts - 1];
	rte_prefetch0(pkt_last);

	subport00 = rte_sched_port_subport(port, pkt00);
	subport01 = rte_sched_port_subport(port, pkt01);
	q00 = rte_sched_port_enqueue_qptrs_prefetch0(subport00,
			pkt00, subport_qmask);
	q01 = rte_sched_port_enqueue_qptrs_prefetch0(subport01,
			pkt01, subport_qmask);

	q10_base = rte_sched_subport_pipe_qbase(subport10, q10);
	q11_base = rte_sched_subport_pipe_qbase(subport11, q11);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport10, q10, q10_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport11, q11, q11_base);

	r20 = rte_sched_port_enqueue_qwa(port, subport20,
			q20, q20_base, pkt20);
	r21 = rte_sched_port_enqueue_qwa(port, subport21,
			q21, q21_base, pkt21);
	result += r20 + r21;

	subport_last = rte_sched_port_subport(port, pkt_last);
	q_last = rte_sched_port_enqueue_qptrs_prefetch0(subport_last,
			pkt_last, subport_qmask);

	q00_base = rte_sched_subport_pipe_qbase(subport00, q00);
	q01_base = rte_sched_subport_pipe_qbase(subport01, q01);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport00, q00, q00_base);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport01, q01, q01_base);

	r10 = rte_sched_port_enqueue_qwa(port, subport10, q10,
			q10_base, pkt10);
	r11 = rte_sched_port_enqueue_qwa(port, subport11, q11,
			q11_base, pkt11);
	result += r10 + r11;

	q_last_base = rte_sched_subport_pipe_qbase(subport_last, q_last);
	rte_sched_port_enqueue_qwa_prefetch0(port, subport_last,
		q_last, q_last_base);

	r00 = rte_sched_port_enqueue_qwa(port, subport00, q00,
			q00_base, pkt00);
	r01 = rte_sched_port_enqueue_qwa(port, subport01, q01,
			q01_base, pkt01);
	result += r00 + r01;

	if (n_pkts & 1) {
		r_last = rte_sched_port_enqueue_qwa(port, subport_last,
					q_last, q_last_base, pkt_last);
		result += r_last;
	}

	return result;
}

static inline uint64_t
grinder_tc_ov_credits_update(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_subport_profile *sp = grinder->subport_params;
	uint64_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint64_t tc_consumption = 0, tc_ov_consumption_max;
	uint64_t tc_ov_wm = subport->tc_ov_wm;
	uint32_t i;

	if (subport->tc_ov == 0)
		return subport->tc_ov_wm_max;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
		tc_ov_consumption[i] = sp->tc_credits_per_period[i]
					- subport->tc_credits[i];
		tc_consumption += tc_ov_consumption[i];
	}

	tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] =
		sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
		subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE];

	tc_ov_consumption_max =
		sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
		tc_consumption;

	if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] >
		(tc_ov_consumption_max - port->mtu)) {
		tc_ov_wm -= tc_ov_wm >> 7;
		if (tc_ov_wm < subport->tc_ov_wm_min)
			tc_ov_wm = subport->tc_ov_wm_min;

		return tc_ov_wm;
	}

	tc_ov_wm += (tc_ov_wm >> 7) + 1;
	if (tc_ov_wm > subport->tc_ov_wm_max)
		tc_ov_wm = subport->tc_ov_wm_max;

	return tc_ov_wm;
}
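
/*
 * Illustration (not part of the library): the watermark returned above is
 * adapted by roughly 1/128 of its current value per subport TC period.
 * For example, assuming tc_ov_wm = 12800, tc_ov_wm_min = 100 and
 * tc_ov_wm_max = 20000:
 *
 *	uint64_t wm = 12800;
 *
 *	// BE class consumed more than its fair share: back off
 *	wm -= wm >> 7;		// 12800 - 100 = 12700
 *	if (wm < 100)
 *		wm = 100;
 *
 *	// BE class stayed within its share: grow slowly
 *	wm += (wm >> 7) + 1;	// 12700 + 99 + 1 = 12800
 *	if (wm > 20000)
 *		wm = 20000;
 *
 * The +1 term guarantees forward progress even when wm >> 7 is zero.
 */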

static inline void
grinder_credits_update(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *params = grinder->pipe_params;
	struct rte_sched_subport_profile *sp = grinder->subport_params;
	uint64_t n_periods;
	uint32_t i;

	/* Subport TB */
	n_periods = (port->time - subport->tb_time) / sp->tb_period;
	subport->tb_credits += n_periods * sp->tb_credits_per_period;
	subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
	subport->tb_time += n_periods * sp->tb_period;

	/* Pipe TB */
	n_periods = (port->time - pipe->tb_time) / params->tb_period;
	pipe->tb_credits += n_periods * params->tb_credits_per_period;
	pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
	pipe->tb_time += n_periods * params->tb_period;

	/* Subport TCs */
	if (unlikely(port->time >= subport->tc_time)) {
		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
			subport->tc_credits[i] = sp->tc_credits_per_period[i];

		subport->tc_time = port->time + sp->tc_period;
	}

	/* Pipe TCs */
	if (unlikely(port->time >= pipe->tc_time)) {
		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
			pipe->tc_credits[i] = params->tc_credits_per_period[i];
		pipe->tc_time = port->time + params->tc_period;
	}
}

static inline void
grinder_credits_update_with_tc_ov(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *params = grinder->pipe_params;
	struct rte_sched_subport_profile *sp = grinder->subport_params;
	uint64_t n_periods;
	uint32_t i;

	/* Subport TB */
	n_periods = (port->time - subport->tb_time) / sp->tb_period;
	subport->tb_credits += n_periods * sp->tb_credits_per_period;
	subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
	subport->tb_time += n_periods * sp->tb_period;

	/* Pipe TB */
	n_periods = (port->time - pipe->tb_time) / params->tb_period;
	pipe->tb_credits += n_periods * params->tb_credits_per_period;
	pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
	pipe->tb_time += n_periods * params->tb_period;

	/* Subport TCs */
	if (unlikely(port->time >= subport->tc_time)) {
		subport->tc_ov_wm =
			grinder_tc_ov_credits_update(port, subport, pos);

		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
			subport->tc_credits[i] = sp->tc_credits_per_period[i];

		subport->tc_time = port->time + sp->tc_period;
		subport->tc_ov_period_id++;
	}

	/* Pipe TCs */
	if (unlikely(port->time >= pipe->tc_time)) {
		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
			pipe->tc_credits[i] = params->tc_credits_per_period[i];
		pipe->tc_time = port->time + params->tc_period;
	}

	/* Pipe TCs - Oversubscription */
	if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
		pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;

		pipe->tc_ov_period_id = subport->tc_ov_period_id;
	}
}
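
/*
 * Illustration (not part of the library): token bucket refill above is
 * done in whole periods, so the remainder of the division is carried over
 * to the next update instead of being lost. For example, with port time
 * measured in bytes, tb_period = 10, tb_credits_per_period = 4 and
 * tb_size = 1000:
 *
 *	uint64_t tb_time = 100, tb_credits = 10;
 *	uint64_t now = 137;				// port->time
 *	uint64_t n_periods = (now - tb_time) / 10;	// 3 full periods
 *
 *	tb_credits += n_periods * 4;	// 10 + 12 = 22
 *	if (tb_credits > 1000)
 *		tb_credits = 1000;	// clamp to bucket size
 *	tb_time += n_periods * 10;	// 130; the 7 leftover bytes of
 *					// elapsed time stay pending
 */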

static inline int
grinder_credits_check(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t tc_index = grinder->tc_index;
	uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint64_t subport_tb_credits = subport->tb_credits;
	uint64_t subport_tc_credits = subport->tc_credits[tc_index];
	uint64_t pipe_tb_credits = pipe->tb_credits;
	uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
	int enough_credits;

	/* Check pipe and subport credits */
	enough_credits = (pkt_len <= subport_tb_credits) &&
		(pkt_len <= subport_tc_credits) &&
		(pkt_len <= pipe_tb_credits) &&
		(pkt_len <= pipe_tc_credits);

	if (!enough_credits)
		return 0;

	/* Update pipe and subport credits */
	subport->tb_credits -= pkt_len;
	subport->tc_credits[tc_index] -= pkt_len;
	pipe->tb_credits -= pkt_len;
	pipe->tc_credits[tc_index] -= pkt_len;

	return 1;
}

static inline int
grinder_credits_check_with_tc_ov(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t tc_index = grinder->tc_index;
	uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint64_t subport_tb_credits = subport->tb_credits;
	uint64_t subport_tc_credits = subport->tc_credits[tc_index];
	uint64_t pipe_tb_credits = pipe->tb_credits;
	uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
	uint64_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint64_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0};
	uint64_t pipe_tc_ov_credits;
	uint32_t i;
	int enough_credits;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		pipe_tc_ov_mask1[i] = ~0LLU;

	pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits;
	pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = ~0LLU;
	pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];

	/* Check pipe and subport credits */
	enough_credits = (pkt_len <= subport_tb_credits) &&
		(pkt_len <= subport_tc_credits) &&
		(pkt_len <= pipe_tb_credits) &&
		(pkt_len <= pipe_tc_credits) &&
		(pkt_len <= pipe_tc_ov_credits);

	if (!enough_credits)
		return 0;

	/* Update pipe and subport credits */
	subport->tb_credits -= pkt_len;
	subport->tc_credits[tc_index] -= pkt_len;
	pipe->tb_credits -= pkt_len;
	pipe->tc_credits[tc_index] -= pkt_len;
	pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;

	return 1;
}
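
/*
 * Illustration (not part of the library): the two mask arrays above make
 * the oversubscription test and update branchless. For a non-BE traffic
 * class pipe_tc_ov_credits evaluates to ~0 (the check always passes) and
 * the masked subtraction removes nothing; for the BE class it evaluates
 * to pipe->tc_ov_credits and the full packet length is subtracted:
 *
 *	// tc_index != RTE_SCHED_TRAFFIC_CLASS_BE
 *	ov_credits = UINT64_MAX;		// pkt_len <= ov_credits always true
 *	tc_ov_credits -= 0 & pkt_len;		// no change
 *
 *	// tc_index == RTE_SCHED_TRAFFIC_CLASS_BE
 *	ov_credits = tc_ov_credits;		// real limit is enforced
 *	tc_ov_credits -= UINT64_MAX & pkt_len;	// i.e. -= pkt_len
 */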

static inline int
grinder_schedule(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
	uint32_t qindex = grinder->qindex[grinder->qpos];
	struct rte_mbuf *pkt = grinder->pkt;
	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
	uint32_t be_tc_active;

	if (subport->tc_ov_enabled) {
		if (!grinder_credits_check_with_tc_ov(port, subport, pos))
			return 0;
	} else {
		if (!grinder_credits_check(port, subport, pos))
			return 0;
	}

	/* Advance port time */
	port->time += pkt_len;

	/* Send packet */
	port->pkts_out[port->n_pkts_out++] = pkt;
	queue->qr++;

	be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE) ? ~0x0 : 0x0;
	grinder->wrr_tokens[grinder->qpos] +=
		(pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active;

	if (queue->qr == queue->qw) {
		rte_bitmap_clear(subport->bmp, qindex);
		grinder->qmask &= ~(1 << grinder->qpos);
		if (be_tc_active)
			grinder->wrr_mask[grinder->qpos] = 0;

		rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex);
	}

	rte_sched_port_pie_dequeue(subport, qindex, pkt_len, port->time_cpu_cycles);

	/* Reset pipe loop detection */
	subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
	grinder->productive = 1;

	return 1;
}

static inline int
grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
{
	uint32_t i;

	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
		if (subport->grinder_base_bmp_pos[i] == base_pipe)
			return 1;
	}

	return 0;
}

static inline void
grinder_pcache_populate(struct rte_sched_subport *subport,
	uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	uint16_t w[4];

	grinder->pcache_w = 0;
	grinder->pcache_r = 0;

	w[0] = (uint16_t) bmp_slab;
	w[1] = (uint16_t) (bmp_slab >> 16);
	w[2] = (uint16_t) (bmp_slab >> 32);
	w[3] = (uint16_t) (bmp_slab >> 48);

	grinder->pcache_qmask[grinder->pcache_w] = w[0];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
	grinder->pcache_w += (w[0] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[1];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
	grinder->pcache_w += (w[1] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[2];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
	grinder->pcache_w += (w[2] != 0);

	grinder->pcache_qmask[grinder->pcache_w] = w[3];
	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
	grinder->pcache_w += (w[3] != 0);
}

static inline void
grinder_tccache_populate(struct rte_sched_subport *subport,
	uint32_t pos, uint32_t qindex, uint16_t qmask)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	uint8_t b, i;

	grinder->tccache_w = 0;
	grinder->tccache_r = 0;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
		b = (uint8_t) ((qmask >> i) & 0x1);
		grinder->tccache_qmask[grinder->tccache_w] = b;
		grinder->tccache_qindex[grinder->tccache_w] = qindex + i;
		grinder->tccache_w += (b != 0);
	}

	b = (uint8_t) (qmask >> (RTE_SCHED_TRAFFIC_CLASS_BE));
	grinder->tccache_qmask[grinder->tccache_w] = b;
	grinder->tccache_qindex[grinder->tccache_w] = qindex +
		RTE_SCHED_TRAFFIC_CLASS_BE;
	grinder->tccache_w += (b != 0);
}
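
/*
 * Illustration (not part of the library): a 64-bit bitmap slab covers four
 * pipes of 16 queues each, so grinder_pcache_populate() splits it into four
 * 16-bit words and pushes only the non-zero words into the pipe cache. For
 * example, with bmp_pos = 256 and bmp_slab = 0x0000800100000003:
 *
 *	w[0] = 0x0003;	// queues 256..271, two queues active
 *	w[1] = 0x0000;	// queues 272..287, skipped
 *	w[2] = 0x8001;	// queues 288..303, two queues active
 *	w[3] = 0x0000;	// queues 304..319, skipped
 *
 * resulting in the pcache entries {qmask 0x0003, qindex 256} and
 * {qmask 0x8001, qindex 288}, i.e. pcache_w = 2.
 */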

static inline int
grinder_next_tc(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_mbuf **qbase;
	uint32_t qindex;
	uint16_t qsize;

	if (grinder->tccache_r == grinder->tccache_w)
		return 0;

	qindex = grinder->tccache_qindex[grinder->tccache_r];
	qbase = rte_sched_subport_pipe_qbase(subport, qindex);
	qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);

	grinder->tc_index = rte_sched_port_pipe_tc(port, qindex);
	grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
	grinder->qsize = qsize;

	if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
		grinder->queue[0] = subport->queue + qindex;
		grinder->qbase[0] = qbase;
		grinder->qindex[0] = qindex;
		grinder->tccache_r++;

		return 1;
	}

	grinder->queue[0] = subport->queue + qindex;
	grinder->queue[1] = subport->queue + qindex + 1;
	grinder->queue[2] = subport->queue + qindex + 2;
	grinder->queue[3] = subport->queue + qindex + 3;

	grinder->qbase[0] = qbase;
	grinder->qbase[1] = qbase + qsize;
	grinder->qbase[2] = qbase + 2 * qsize;
	grinder->qbase[3] = qbase + 3 * qsize;

	grinder->qindex[0] = qindex;
	grinder->qindex[1] = qindex + 1;
	grinder->qindex[2] = qindex + 2;
	grinder->qindex[3] = qindex + 3;

	grinder->tccache_r++;
	return 1;
}

static inline int
grinder_next_pipe(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	uint32_t pipe_qindex;
	uint16_t pipe_qmask;

	if (grinder->pcache_r < grinder->pcache_w) {
		pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
		pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
		grinder->pcache_r++;
	} else {
		uint64_t bmp_slab = 0;
		uint32_t bmp_pos = 0;

		/* Get another non-empty pipe group */
		if (unlikely(rte_bitmap_scan(subport->bmp, &bmp_pos, &bmp_slab) <= 0))
			return 0;

#ifdef RTE_SCHED_DEBUG
		debug_check_queue_slab(subport, bmp_pos, bmp_slab);
#endif

		/* Return if pipe group already in one of the other grinders */
		subport->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
		if (unlikely(grinder_pipe_exists(subport, bmp_pos)))
			return 0;

		subport->grinder_base_bmp_pos[pos] = bmp_pos;

		/* Install new pipe group into grinder's pipe cache */
		grinder_pcache_populate(subport, pos, bmp_pos, bmp_slab);

		pipe_qmask = grinder->pcache_qmask[0];
		pipe_qindex = grinder->pcache_qindex[0];
		grinder->pcache_r = 1;
	}

	/* Install new pipe in the grinder */
	grinder->pindex = pipe_qindex >> 4;
	grinder->subport = subport;
	grinder->pipe = subport->pipe + grinder->pindex;
	grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
	grinder->productive = 0;

	grinder_tccache_populate(subport, pos, pipe_qindex, pipe_qmask);
	grinder_next_tc(port, subport, pos);

	/* Check for pipe exhaustion */
	if (grinder->pindex == subport->pipe_loop) {
		subport->pipe_exhaustion = 1;
		subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
	}

	return 1;
}
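
/*
 * Illustration (not part of the library): a port-level queue index packs
 * the subport, the pipe within the subport and the queue within the pipe,
 * with the 4 least significant bits selecting one of the 16 pipe queues.
 * Assuming n_pipes_per_subport_log2 = 10 (1024 pipes per subport):
 *
 *	uint32_t qindex = (3 << 14) | (57 << 4) | 11;
 *
 *	uint32_t subport_id = qindex >> (10 + 4);	// 3
 *	uint32_t pindex     = (qindex >> 4) & 1023;	// 57, pipe inside subport
 *	uint32_t qpos       = qindex & 0xF;		// 11, queue inside pipe
 *
 * grinder_next_pipe() works with subport-local queue indexes, which is why
 * it only needs pipe_qindex >> 4 to locate the pipe.
 */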

static inline void
grinder_wrr_load(struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;
	struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
	uint32_t qmask = grinder->qmask;

	grinder->wrr_tokens[0] =
		((uint16_t) pipe->wrr_tokens[0]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[1] =
		((uint16_t) pipe->wrr_tokens[1]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[2] =
		((uint16_t) pipe->wrr_tokens[2]) << RTE_SCHED_WRR_SHIFT;
	grinder->wrr_tokens[3] =
		((uint16_t) pipe->wrr_tokens[3]) << RTE_SCHED_WRR_SHIFT;

	grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
	grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
	grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
	grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;

	grinder->wrr_cost[0] = pipe_params->wrr_cost[0];
	grinder->wrr_cost[1] = pipe_params->wrr_cost[1];
	grinder->wrr_cost[2] = pipe_params->wrr_cost[2];
	grinder->wrr_cost[3] = pipe_params->wrr_cost[3];
}

static inline void
grinder_wrr_store(struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	struct rte_sched_pipe *pipe = grinder->pipe;

	pipe->wrr_tokens[0] =
		(grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >>
			RTE_SCHED_WRR_SHIFT;
	pipe->wrr_tokens[1] =
		(grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >>
			RTE_SCHED_WRR_SHIFT;
	pipe->wrr_tokens[2] =
		(grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >>
			RTE_SCHED_WRR_SHIFT;
	pipe->wrr_tokens[3] =
		(grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >>
			RTE_SCHED_WRR_SHIFT;
}

static inline void
grinder_wrr(struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	uint16_t wrr_tokens_min;

	grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
	grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
	grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
	grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];

	grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
	wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];

	grinder->wrr_tokens[0] -= wrr_tokens_min;
	grinder->wrr_tokens[1] -= wrr_tokens_min;
	grinder->wrr_tokens[2] -= wrr_tokens_min;
	grinder->wrr_tokens[3] -= wrr_tokens_min;
}


#define grinder_evict(subport, pos)

static inline void
grinder_prefetch_pipe(struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;

	rte_prefetch0(grinder->pipe);
	rte_prefetch0(grinder->queue[0]);
}

static inline void
grinder_prefetch_tc_queue_arrays(struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	uint16_t qsize, qr[RTE_SCHED_MAX_QUEUES_PER_TC];

	qsize = grinder->qsize;
	grinder->qpos = 0;

	if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
		qr[0] = grinder->queue[0]->qr & (qsize - 1);

		rte_prefetch0(grinder->qbase[0] + qr[0]);
		return;
	}

	qr[0] = grinder->queue[0]->qr & (qsize - 1);
	qr[1] = grinder->queue[1]->qr & (qsize - 1);
	qr[2] = grinder->queue[2]->qr & (qsize - 1);
	qr[3] = grinder->queue[3]->qr & (qsize - 1);

	rte_prefetch0(grinder->qbase[0] + qr[0]);
	rte_prefetch0(grinder->qbase[1] + qr[1]);

	grinder_wrr_load(subport, pos);
	grinder_wrr(subport, pos);

	rte_prefetch0(grinder->qbase[2] + qr[2]);
	rte_prefetch0(grinder->qbase[3] + qr[3]);
}
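
/*
 * Illustration (not part of the library): in grinder_wrr() inactive queues
 * have wrr_mask 0, so OR-ing with ~mask pins their token count to 0xFFFF
 * and they can never win the minimum search. Subtracting the winner's
 * token count from every lane keeps the values small while preserving the
 * relative order. For example, with queues 0 and 2 active:
 *
 *	tokens = {40, 25, 10, 90}, mask = {0xFFFF, 0, 0xFFFF, 0}
 *
 *	tokens |= ~mask		->  {40, 0xFFFF, 10, 0xFFFF}
 *	qpos = min position	->  2
 *	tokens -= tokens[qpos]	->  {30, 0xFFF5, 0, 0xFFF5}	// queue 2 serves next
 */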

static inline void
grinder_prefetch_mbuf(struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;
	uint32_t qpos = grinder->qpos;
	struct rte_mbuf **qbase = grinder->qbase[qpos];
	uint16_t qsize = grinder->qsize;
	uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);

	grinder->pkt = qbase[qr];
	rte_prefetch0(grinder->pkt);

	if (unlikely((qr & 0x7) == 7)) {
		uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);

		rte_prefetch0(qbase + qr_next);
	}
}

static inline uint32_t
grinder_handle(struct rte_sched_port *port,
	struct rte_sched_subport *subport, uint32_t pos)
{
	struct rte_sched_grinder *grinder = subport->grinder + pos;

	switch (grinder->state) {
	case e_GRINDER_PREFETCH_PIPE:
	{
		if (grinder_next_pipe(port, subport, pos)) {
			grinder_prefetch_pipe(subport, pos);
			subport->busy_grinders++;

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return 0;
		}

		return 0;
	}

	case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
	{
		struct rte_sched_pipe *pipe = grinder->pipe;

		grinder->pipe_params = subport->pipe_profiles + pipe->profile;
		grinder->subport_params = port->subport_profiles +
						subport->profile;

		grinder_prefetch_tc_queue_arrays(subport, pos);

		if (subport->tc_ov_enabled)
			grinder_credits_update_with_tc_ov(port, subport, pos);
		else
			grinder_credits_update(port, subport, pos);

		grinder->state = e_GRINDER_PREFETCH_MBUF;
		return 0;
	}

	case e_GRINDER_PREFETCH_MBUF:
	{
		grinder_prefetch_mbuf(subport, pos);

		grinder->state = e_GRINDER_READ_MBUF;
		return 0;
	}

	case e_GRINDER_READ_MBUF:
	{
		uint32_t wrr_active, result = 0;

		result = grinder_schedule(port, subport, pos);

		wrr_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE);

		/* Look for next packet within the same TC */
		if (result && grinder->qmask) {
			if (wrr_active)
				grinder_wrr(subport, pos);

			grinder_prefetch_mbuf(subport, pos);

			return 1;
		}

		if (wrr_active)
			grinder_wrr_store(subport, pos);

		/* Look for another active TC within same pipe */
		if (grinder_next_tc(port, subport, pos)) {
			grinder_prefetch_tc_queue_arrays(subport, pos);

			grinder->state = e_GRINDER_PREFETCH_MBUF;
			return result;
		}

		if (grinder->productive == 0 &&
			subport->pipe_loop == RTE_SCHED_PIPE_INVALID)
			subport->pipe_loop = grinder->pindex;

		grinder_evict(subport, pos);

		/* Look for another active pipe */
		if (grinder_next_pipe(port, subport, pos)) {
			grinder_prefetch_pipe(subport, pos);

			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
			return result;
		}

		/* No active pipe found */
		subport->busy_grinders--;

		grinder->state = e_GRINDER_PREFETCH_PIPE;
		return result;
	}

	default:
		rte_panic("Algorithmic error (invalid state)\n");
		return 0;
	}
}
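
/*
 * Illustration (not part of the library): each grinder is a small state
 * machine that spreads the work of dequeuing one packet across several
 * grinder_handle() invocations, so the prefetches issued in one call have
 * time to complete before the data is touched in the next call:
 *
 *	PREFETCH_PIPE            - pick a pipe, prefetch pipe + first queue
 *	PREFETCH_TC_QUEUE_ARRAYS - refresh credits, prefetch queue arrays
 *	PREFETCH_MBUF            - prefetch the head-of-line packet
 *	READ_MBUF                - schedule the packet; stay here while the
 *	                           current TC still has packets, otherwise
 *	                           fall back to an earlier state
 *
 * Because the dequeue loop below rotates over RTE_SCHED_PORT_N_GRINDERS
 * grinders, several pipes of a subport are typically being processed at
 * different stages at the same time.
 */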

static inline void
rte_sched_port_time_resync(struct rte_sched_port *port)
{
	uint64_t cycles = rte_get_tsc_cycles();
	uint64_t cycles_diff;
	uint64_t bytes_diff;
	uint32_t i;

	if (cycles < port->time_cpu_cycles)
		port->time_cpu_cycles = 0;

	cycles_diff = cycles - port->time_cpu_cycles;
	/* Compute elapsed time in bytes */
	bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
					   port->inv_cycles_per_byte);

	/* Advance port time */
	port->time_cpu_cycles +=
		(bytes_diff * port->cycles_per_byte) >> RTE_SCHED_TIME_SHIFT;
	port->time_cpu_bytes += bytes_diff;
	if (port->time < port->time_cpu_bytes)
		port->time = port->time_cpu_bytes;

	/* Reset pipe loop detection */
	for (i = 0; i < port->n_subports_per_port; i++)
		port->subports[i]->pipe_loop = RTE_SCHED_PIPE_INVALID;
}

static inline int
rte_sched_port_exceptions(struct rte_sched_subport *subport, int second_pass)
{
	int exceptions;

	/* Check if any exception flag is set */
	exceptions = (second_pass && subport->busy_grinders == 0) ||
		(subport->pipe_exhaustion == 1);

	/* Clear exception flags */
	subport->pipe_exhaustion = 0;

	return exceptions;
}

int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_sched_subport *subport;
	uint32_t subport_id = port->subport_id;
	uint32_t i, n_subports = 0, count;

	port->pkts_out = pkts;
	port->n_pkts_out = 0;

	rte_sched_port_time_resync(port);

	/* Take each queue in the grinder one step further */
	for (i = 0, count = 0; ; i++) {
		subport = port->subports[subport_id];

		count += grinder_handle(port, subport,
				i & (RTE_SCHED_PORT_N_GRINDERS - 1));

		if (count == n_pkts) {
			subport_id++;

			if (subport_id == port->n_subports_per_port)
				subport_id = 0;

			port->subport_id = subport_id;
			break;
		}

		if (rte_sched_port_exceptions(subport, i >= RTE_SCHED_PORT_N_GRINDERS)) {
			i = 0;
			subport_id++;
			n_subports++;
		}

		if (subport_id == port->n_subports_per_port)
			subport_id = 0;

		if (n_subports == port->n_subports_per_port) {
			port->subport_id = subport_id;
			break;
		}
	}

	return count;
}
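
/*
 * Illustration (not part of the library): a typical run-to-completion user
 * of this API classifies each packet with rte_sched_port_pkt_write(), hands
 * a burst to rte_sched_port_enqueue() and then pulls at most the same
 * number of packets back with rte_sched_port_dequeue() for transmission.
 * The port ids, BURST and the classification parameters below are
 * hypothetical:
 *
 *	struct rte_mbuf *rx[BURST], *tx[BURST];
 *	uint16_t n_rx, n_tx;
 *
 *	n_rx = rte_eth_rx_burst(rx_port_id, 0, rx, BURST);
 *	for (uint16_t j = 0; j < n_rx; j++)
 *		rte_sched_port_pkt_write(sched_port, rx[j], subport, pipe,
 *					 tc, queue, RTE_COLOR_GREEN);
 *	rte_sched_port_enqueue(sched_port, rx, n_rx);
 *
 *	n_tx = rte_sched_port_dequeue(sched_port, tx, BURST);
 *	if (n_tx > 0)
 *		rte_eth_tx_burst(tx_port_id, 0, tx, n_tx);
 */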