1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <string.h>
7 
8 #include <rte_common.h>
9 #include <rte_log.h>
10 #include <rte_malloc.h>
11 #include <rte_cycles.h>
12 #include <rte_prefetch.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mbuf.h>
15 #include <rte_bitmap.h>
16 #include <rte_reciprocal.h>
17 
18 #include "rte_sched.h"
19 #include "rte_sched_common.h"
20 #include "rte_approx.h"
21 
22 #ifdef __INTEL_COMPILER
23 #pragma warning(disable:2259) /* conversion may lose significant bits */
24 #endif
25 
26 #ifndef RTE_SCHED_PORT_N_GRINDERS
27 #define RTE_SCHED_PORT_N_GRINDERS 8
28 #endif
29 
30 #define RTE_SCHED_TB_RATE_CONFIG_ERR          (1e-7)
31 #define RTE_SCHED_WRR_SHIFT                   3
32 #define RTE_SCHED_MAX_QUEUES_PER_TC           RTE_SCHED_BE_QUEUES_PER_PIPE
33 #define RTE_SCHED_GRINDER_PCACHE_SIZE         (64 / RTE_SCHED_QUEUES_PER_PIPE)
34 #define RTE_SCHED_PIPE_INVALID                UINT32_MAX
35 #define RTE_SCHED_BMP_POS_INVALID             UINT32_MAX
36 
37 /* Scaling for cycles_per_byte calculation
38  * Chosen so that minimum rate is 480 bit/sec
39  */
40 #define RTE_SCHED_TIME_SHIFT		      8
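/*
 * Worked example for the scaling above (illustrative, assuming a ~1 GHz TSC):
 * rte_sched_port_config() computes
 *	cycles_per_byte = (tsc_hz << RTE_SCHED_TIME_SHIFT) / rate
 * and feeds it to rte_reciprocal_value(), whose divisor is 32-bit. The largest
 * usable divisor is therefore about 2^32, which bounds the minimum rate:
 *	rate_min = tsc_hz * 2^8 / 2^32 = tsc_hz / 2^24
 * With tsc_hz = 10^9 Hz this is ~60 bytes/sec, i.e. the ~480 bit/sec quoted above.
 */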
41 
42 struct rte_sched_pipe_profile {
43 	/* Token bucket (TB) */
44 	uint64_t tb_period;
45 	uint64_t tb_credits_per_period;
46 	uint64_t tb_size;
47 
48 	/* Pipe traffic classes */
49 	uint64_t tc_period;
50 	uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
51 	uint8_t tc_ov_weight;
52 
53 	/* Pipe best-effort traffic class queues */
54 	uint8_t  wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
55 };
56 
57 struct rte_sched_pipe {
58 	/* Token bucket (TB) */
59 	uint64_t tb_time; /* time of last update */
60 	uint64_t tb_credits;
61 
62 	/* Pipe profile and flags */
63 	uint32_t profile;
64 
65 	/* Traffic classes (TCs) */
66 	uint64_t tc_time; /* time of next update */
67 	uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
68 
69 	/* Weighted Round Robin (WRR) */
70 	uint8_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
71 
72 	/* TC oversubscription */
73 	uint64_t tc_ov_credits;
74 	uint8_t tc_ov_period_id;
75 } __rte_cache_aligned;
76 
77 struct rte_sched_queue {
78 	uint16_t qw; /* Write (enqueue) position */
79 	uint16_t qr; /* Read (dequeue) position */
80 };
81 
82 struct rte_sched_queue_extra {
83 	struct rte_sched_queue_stats stats;
84 	union {
85 		struct rte_red red;
86 		struct rte_pie pie;
87 	};
88 };
89 
90 enum grinder_state {
91 	e_GRINDER_PREFETCH_PIPE = 0,
92 	e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
93 	e_GRINDER_PREFETCH_MBUF,
94 	e_GRINDER_READ_MBUF
95 };
96 
97 struct rte_sched_subport_profile {
98 	/* Token bucket (TB) */
99 	uint64_t tb_period;
100 	uint64_t tb_credits_per_period;
101 	uint64_t tb_size;
102 
103 	uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
104 	uint64_t tc_period;
105 };
106 
107 struct rte_sched_grinder {
108 	/* Pipe cache */
109 	uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
110 	uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
111 	uint32_t pcache_w;
112 	uint32_t pcache_r;
113 
114 	/* Current pipe */
115 	enum grinder_state state;
116 	uint32_t productive;
117 	uint32_t pindex;
118 	struct rte_sched_subport *subport;
119 	struct rte_sched_subport_profile *subport_params;
120 	struct rte_sched_pipe *pipe;
121 	struct rte_sched_pipe_profile *pipe_params;
122 
123 	/* TC cache */
124 	uint8_t tccache_qmask[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
125 	uint32_t tccache_qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
126 	uint32_t tccache_w;
127 	uint32_t tccache_r;
128 
129 	/* Current TC */
130 	uint32_t tc_index;
131 	struct rte_sched_queue *queue[RTE_SCHED_MAX_QUEUES_PER_TC];
132 	struct rte_mbuf **qbase[RTE_SCHED_MAX_QUEUES_PER_TC];
133 	uint32_t qindex[RTE_SCHED_MAX_QUEUES_PER_TC];
134 	uint16_t qsize;
135 	uint32_t qmask;
136 	uint32_t qpos;
137 	struct rte_mbuf *pkt;
138 
139 	/* WRR */
140 	uint16_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
141 	uint16_t wrr_mask[RTE_SCHED_BE_QUEUES_PER_PIPE];
142 	uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
143 };
144 
145 struct rte_sched_subport {
146 	/* Token bucket (TB) */
147 	uint64_t tb_time; /* time of last update */
148 	uint64_t tb_credits;
149 
150 	/* Traffic classes (TCs) */
151 	uint64_t tc_time; /* time of next update */
152 	uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
153 
154 	/* TC oversubscription */
155 	uint64_t tc_ov_wm;
156 	uint64_t tc_ov_wm_min;
157 	uint64_t tc_ov_wm_max;
158 	uint8_t tc_ov_period_id;
159 	uint8_t tc_ov;
160 	uint32_t tc_ov_n;
161 	double tc_ov_rate;
162 
163 	/* Statistics */
164 	struct rte_sched_subport_stats stats __rte_cache_aligned;
165 
166 	/* subport profile */
167 	uint32_t profile;
168 	/* Subport pipes */
169 	uint32_t n_pipes_per_subport_enabled;
170 	uint32_t n_pipe_profiles;
171 	uint32_t n_max_pipe_profiles;
172 
173 	/* Pipe best-effort TC rate */
174 	uint64_t pipe_tc_be_rate_max;
175 
176 	/* Pipe queues size */
177 	uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
178 
179 	bool cman_enabled;
180 	enum rte_sched_cman_mode cman;
181 
182 	union {
183 		struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
184 		struct rte_pie_config pie_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
185 	};
186 
187 	/* Scheduling loop detection */
188 	uint32_t pipe_loop;
189 	uint32_t pipe_exhaustion;
190 
191 	/* Bitmap */
192 	struct rte_bitmap *bmp;
193 	uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned(16);
194 
195 	/* Grinders */
196 	struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
197 	uint32_t busy_grinders;
198 
199 	/* Queue base calculation */
200 	uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
201 	uint32_t qsize_sum;
202 
203 	/* TC oversubscription activation */
204 	int tc_ov_enabled;
205 
206 	struct rte_sched_pipe *pipe;
207 	struct rte_sched_queue *queue;
208 	struct rte_sched_queue_extra *queue_extra;
209 	struct rte_sched_pipe_profile *pipe_profiles;
210 	uint8_t *bmp_array;
211 	struct rte_mbuf **queue_array;
212 	uint8_t memory[0] __rte_cache_aligned;
213 } __rte_cache_aligned;
214 
215 struct rte_sched_port {
216 	/* User parameters */
217 	uint32_t n_subports_per_port;
218 	uint32_t n_pipes_per_subport;
219 	uint32_t n_pipes_per_subport_log2;
220 	uint16_t pipe_queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
221 	uint8_t pipe_tc[RTE_SCHED_QUEUES_PER_PIPE];
222 	uint8_t tc_queue[RTE_SCHED_QUEUES_PER_PIPE];
223 	uint32_t n_subport_profiles;
224 	uint32_t n_max_subport_profiles;
225 	uint64_t rate;
226 	uint32_t mtu;
227 	uint32_t frame_overhead;
228 	int socket;
229 
230 	/* Timing */
231 	uint64_t time_cpu_cycles;     /* Current CPU time measured in CPU cycles */
232 	uint64_t time_cpu_bytes;      /* Current CPU time measured in bytes */
233 	uint64_t time;                /* Current NIC TX time measured in bytes */
234 	struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */
235 	uint64_t cycles_per_byte;
236 
237 	/* Grinders */
238 	struct rte_mbuf **pkts_out;
239 	uint32_t n_pkts_out;
240 	uint32_t subport_id;
241 
242 	/* Large data structures */
243 	struct rte_sched_subport_profile *subport_profiles;
244 	struct rte_sched_subport *subports[0] __rte_cache_aligned;
245 } __rte_cache_aligned;
246 
247 enum rte_sched_subport_array {
248 	e_RTE_SCHED_SUBPORT_ARRAY_PIPE = 0,
249 	e_RTE_SCHED_SUBPORT_ARRAY_QUEUE,
250 	e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA,
251 	e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES,
252 	e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY,
253 	e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY,
254 	e_RTE_SCHED_SUBPORT_ARRAY_TOTAL,
255 };
256 
257 static inline uint32_t
258 rte_sched_subport_pipe_queues(struct rte_sched_subport *subport)
259 {
260 	return RTE_SCHED_QUEUES_PER_PIPE * subport->n_pipes_per_subport_enabled;
261 }
262 
263 static inline struct rte_mbuf **
264 rte_sched_subport_pipe_qbase(struct rte_sched_subport *subport, uint32_t qindex)
265 {
266 	uint32_t pindex = qindex >> 4;
267 	uint32_t qpos = qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1);
268 
269 	return (subport->queue_array + pindex *
270 		subport->qsize_sum + subport->qsize_add[qpos]);
271 }
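/*
 * Illustrative sketch of the queue index decomposition used above, assuming
 * RTE_SCHED_QUEUES_PER_PIPE == 16 (hence the ">> 4"):
 *
 *	uint32_t qindex = 37;			// pipe 2, queue 5 within the pipe
 *	uint32_t pindex = qindex >> 4;		// 2: pipe index inside the subport
 *	uint32_t qpos = qindex & 0xF;		// 5: queue index inside the pipe
 *	struct rte_mbuf **qbase = subport->queue_array +
 *		pindex * subport->qsize_sum +	// skip the queue arrays of pipes 0..1
 *		subport->qsize_add[qpos];	// offset of queue 5 within its pipe
 */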
272 
273 static inline uint16_t
274 rte_sched_subport_pipe_qsize(struct rte_sched_port *port,
275 struct rte_sched_subport *subport, uint32_t qindex)
276 {
277 	uint32_t tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
278 
279 	return subport->qsize[tc];
280 }
281 
282 static inline uint32_t
283 rte_sched_port_queues_per_port(struct rte_sched_port *port)
284 {
285 	uint32_t n_queues = 0, i;
286 
287 	for (i = 0; i < port->n_subports_per_port; i++)
288 		n_queues += rte_sched_subport_pipe_queues(port->subports[i]);
289 
290 	return n_queues;
291 }
292 
293 static inline uint16_t
294 rte_sched_port_pipe_queue(struct rte_sched_port *port, uint32_t traffic_class)
295 {
296 	uint16_t pipe_queue = port->pipe_queue[traffic_class];
297 
298 	return pipe_queue;
299 }
300 
301 static inline uint8_t
302 rte_sched_port_pipe_tc(struct rte_sched_port *port, uint32_t qindex)
303 {
304 	uint8_t pipe_tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
305 
306 	return pipe_tc;
307 }
308 
309 static inline uint8_t
310 rte_sched_port_tc_queue(struct rte_sched_port *port, uint32_t qindex)
311 {
312 	uint8_t tc_queue = port->tc_queue[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
313 
314 	return tc_queue;
315 }
316 
317 static int
318 pipe_profile_check(struct rte_sched_pipe_params *params,
319 	uint64_t rate, uint16_t *qsize)
320 {
321 	uint32_t i;
322 
323 	/* Pipe parameters */
324 	if (params == NULL) {
325 		RTE_LOG(ERR, SCHED,
326 			"%s: Incorrect value for parameter params\n", __func__);
327 		return -EINVAL;
328 	}
329 
330 	/* TB rate: non-zero, not greater than port rate */
331 	if (params->tb_rate == 0 ||
332 		params->tb_rate > rate) {
333 		RTE_LOG(ERR, SCHED,
334 			"%s: Incorrect value for tb rate\n", __func__);
335 		return -EINVAL;
336 	}
337 
338 	/* TB size: non-zero */
339 	if (params->tb_size == 0) {
340 		RTE_LOG(ERR, SCHED,
341 			"%s: Incorrect value for tb size\n", __func__);
342 		return -EINVAL;
343 	}
344 
345 	/* TC rate: non-zero if qsize non-zero, less than pipe rate */
346 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
347 		if ((qsize[i] == 0 && params->tc_rate[i] != 0) ||
348 			(qsize[i] != 0 && (params->tc_rate[i] == 0 ||
349 			params->tc_rate[i] > params->tb_rate))) {
350 			RTE_LOG(ERR, SCHED,
351 				"%s: Incorrect value for qsize or tc_rate\n", __func__);
352 			return -EINVAL;
353 		}
354 	}
355 
356 	if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
357 		qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
358 		RTE_LOG(ERR, SCHED,
359 			"%s: Incorrect value for be traffic class rate\n", __func__);
360 		return -EINVAL;
361 	}
362 
363 	/* TC period: non-zero */
364 	if (params->tc_period == 0) {
365 		RTE_LOG(ERR, SCHED,
366 			"%s: Incorrect value for tc period\n", __func__);
367 		return -EINVAL;
368 	}
369 
370 	/* Best-effort TC oversubscription weight: non-zero */
371 	if (params->tc_ov_weight == 0) {
372 		RTE_LOG(ERR, SCHED,
373 			"%s: Incorrect value for tc ov weight\n", __func__);
374 		return -EINVAL;
375 	}
376 
377 	/* Queue WRR weights: non-zero */
378 	for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
379 		if (params->wrr_weights[i] == 0) {
380 			RTE_LOG(ERR, SCHED,
381 				"%s: Incorrect value for wrr weight\n", __func__);
382 			return -EINVAL;
383 		}
384 	}
385 
386 	return 0;
387 }
388 
389 static int
390 subport_profile_check(struct rte_sched_subport_profile_params *params,
391 	uint64_t rate)
392 {
393 	uint32_t i;
394 
395 	/* Check user parameters */
396 	if (params == NULL) {
397 		RTE_LOG(ERR, SCHED, "%s: "
398 		"Incorrect value for parameter params\n", __func__);
399 		return -EINVAL;
400 	}
401 
402 	if (params->tb_rate == 0 || params->tb_rate > rate) {
403 		RTE_LOG(ERR, SCHED, "%s: "
404 		"Incorrect value for tb rate\n", __func__);
405 		return -EINVAL;
406 	}
407 
408 	if (params->tb_size == 0) {
409 		RTE_LOG(ERR, SCHED, "%s: "
410 		"Incorrect value for tb size\n", __func__);
411 		return -EINVAL;
412 	}
413 
414 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
415 		uint64_t tc_rate = params->tc_rate[i];
416 
417 		if (tc_rate == 0 || (tc_rate > params->tb_rate)) {
418 			RTE_LOG(ERR, SCHED, "%s: "
419 			"Incorrect value for tc rate\n", __func__);
420 			return -EINVAL;
421 		}
422 	}
423 
424 	if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
425 		RTE_LOG(ERR, SCHED, "%s: "
426 		"Incorrect tc rate(best effort)\n", __func__);
427 		return -EINVAL;
428 	}
429 
430 	if (params->tc_period == 0) {
431 		RTE_LOG(ERR, SCHED, "%s: "
432 		"Incorrect value for tc period\n", __func__);
433 		return -EINVAL;
434 	}
435 
436 	return 0;
437 }
438 
439 static int
440 rte_sched_port_check_params(struct rte_sched_port_params *params)
441 {
442 	uint32_t i;
443 
444 	if (params == NULL) {
445 		RTE_LOG(ERR, SCHED,
446 			"%s: Incorrect value for parameter params\n", __func__);
447 		return -EINVAL;
448 	}
449 
450 	/* socket */
451 	if (params->socket < 0) {
452 		RTE_LOG(ERR, SCHED,
453 			"%s: Incorrect value for socket id\n", __func__);
454 		return -EINVAL;
455 	}
456 
457 	/* rate */
458 	if (params->rate == 0) {
459 		RTE_LOG(ERR, SCHED,
460 			"%s: Incorrect value for rate\n", __func__);
461 		return -EINVAL;
462 	}
463 
464 	/* mtu */
465 	if (params->mtu == 0) {
466 		RTE_LOG(ERR, SCHED,
467 			"%s: Incorrect value for mtu\n", __func__);
468 		return -EINVAL;
469 	}
470 
471 	/* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */
472 	if (params->n_subports_per_port == 0 ||
473 	    params->n_subports_per_port > 1u << 16 ||
474 	    !rte_is_power_of_2(params->n_subports_per_port)) {
475 		RTE_LOG(ERR, SCHED,
476 			"%s: Incorrect value for number of subports\n", __func__);
477 		return -EINVAL;
478 	}
479 
480 	if (params->subport_profiles == NULL ||
481 		params->n_subport_profiles == 0 ||
482 		params->n_max_subport_profiles == 0 ||
483 		params->n_subport_profiles > params->n_max_subport_profiles) {
484 		RTE_LOG(ERR, SCHED,
485 		"%s: Incorrect value for subport profiles\n", __func__);
486 		return -EINVAL;
487 	}
488 
489 	for (i = 0; i < params->n_subport_profiles; i++) {
490 		struct rte_sched_subport_profile_params *p =
491 						params->subport_profiles + i;
492 		int status;
493 
494 		status = subport_profile_check(p, params->rate);
495 		if (status != 0) {
496 			RTE_LOG(ERR, SCHED,
497 			"%s: subport profile check failed(%d)\n",
498 			__func__, status);
499 			return -EINVAL;
500 		}
501 	}
502 
503 	/* n_pipes_per_subport: non-zero, power of 2 */
504 	if (params->n_pipes_per_subport == 0 ||
505 	    !rte_is_power_of_2(params->n_pipes_per_subport)) {
506 		RTE_LOG(ERR, SCHED,
507 			"%s: Incorrect value for maximum pipes number\n", __func__);
508 		return -EINVAL;
509 	}
510 
511 	return 0;
512 }
513 
514 static uint32_t
515 rte_sched_subport_get_array_base(struct rte_sched_subport_params *params,
516 	enum rte_sched_subport_array array)
517 {
518 	uint32_t n_pipes_per_subport = params->n_pipes_per_subport_enabled;
519 	uint32_t n_subport_pipe_queues =
520 		RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport;
521 
522 	uint32_t size_pipe = n_pipes_per_subport * sizeof(struct rte_sched_pipe);
523 	uint32_t size_queue =
524 		n_subport_pipe_queues * sizeof(struct rte_sched_queue);
525 	uint32_t size_queue_extra
526 		= n_subport_pipe_queues * sizeof(struct rte_sched_queue_extra);
527 	uint32_t size_pipe_profiles = params->n_max_pipe_profiles *
528 		sizeof(struct rte_sched_pipe_profile);
529 	uint32_t size_bmp_array =
530 		rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
531 	uint32_t size_per_pipe_queue_array, size_queue_array;
532 
533 	uint32_t base, i;
534 
535 	size_per_pipe_queue_array = 0;
536 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
537 		if (i < RTE_SCHED_TRAFFIC_CLASS_BE)
538 			size_per_pipe_queue_array +=
539 				params->qsize[i] * sizeof(struct rte_mbuf *);
540 		else
541 			size_per_pipe_queue_array += RTE_SCHED_MAX_QUEUES_PER_TC *
542 				params->qsize[i] * sizeof(struct rte_mbuf *);
543 	}
544 	size_queue_array = n_pipes_per_subport * size_per_pipe_queue_array;
545 
546 	base = 0;
547 
548 	if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE)
549 		return base;
550 	base += RTE_CACHE_LINE_ROUNDUP(size_pipe);
551 
552 	if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)
553 		return base;
554 	base += RTE_CACHE_LINE_ROUNDUP(size_queue);
555 
556 	if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)
557 		return base;
558 	base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);
559 
560 	if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)
561 		return base;
562 	base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);
563 
564 	if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY)
565 		return base;
566 	base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);
567 
568 	if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)
569 		return base;
570 	base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);
571 
572 	return base;
573 }
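/*
 * Memory layout produced by the running base above (sketch): each subport is
 * one allocation whose flexible "memory[]" field is carved into cache-line
 * rounded regions, always in this order:
 *
 *	base 0                              -> pipe[]           (size_pipe)
 *	+ RTE_CACHE_LINE_ROUNDUP(size_pipe) -> queue[]          (size_queue)
 *	+ ...                               -> queue_extra[]
 *	+ ...                               -> pipe_profiles[]
 *	+ ...                               -> bmp_array
 *	+ ...                               -> queue_array (mbuf pointers)
 *
 * Passing e_RTE_SCHED_SUBPORT_ARRAY_TOTAL returns the total size of all
 * regions, which is what rte_sched_port_get_memory_footprint() sums up.
 */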
574 
575 static void
576 rte_sched_subport_config_qsize(struct rte_sched_subport *subport)
577 {
578 	uint32_t i;
579 
580 	subport->qsize_add[0] = 0;
581 
582 	/* Strict priority traffic class */
583 	for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
584 		subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1];
585 
586 	/* Best-effort traffic class */
587 	subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] =
588 		subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] +
589 		subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
590 	subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] =
591 		subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] +
592 		subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
593 	subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] =
594 		subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] +
595 		subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
596 
597 	subport->qsize_sum = subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] +
598 		subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
599 }
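/*
 * Example of the resulting offsets (illustrative qsize values): with
 * qsize[] = {64, 64, ..., 64, 1024}, i.e. 12 strict-priority TCs of one queue
 * each plus the 4-queue best-effort TC, qsize_add[] becomes
 *	{0, 64, 128, ..., 704, 768, 1792, 2816, 3840}
 * and qsize_sum = 4864, the per-pipe total number of mbuf pointer slots.
 */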
600 
601 static void
602 rte_sched_port_log_pipe_profile(struct rte_sched_subport *subport, uint32_t i)
603 {
604 	struct rte_sched_pipe_profile *p = subport->pipe_profiles + i;
605 
606 	RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n"
607 		"	Token bucket: period = %"PRIu64", credits per period = %"PRIu64", size = %"PRIu64"\n"
608 		"	Traffic classes: period = %"PRIu64",\n"
609 		"	credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
610 		", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
611 		", %"PRIu64", %"PRIu64", %"PRIu64"]\n"
612 		"	Best-effort traffic class oversubscription: weight = %hhu\n"
613 		"	WRR cost: [%hhu, %hhu, %hhu, %hhu]\n",
614 		i,
615 
616 		/* Token bucket */
617 		p->tb_period,
618 		p->tb_credits_per_period,
619 		p->tb_size,
620 
621 		/* Traffic classes */
622 		p->tc_period,
623 		p->tc_credits_per_period[0],
624 		p->tc_credits_per_period[1],
625 		p->tc_credits_per_period[2],
626 		p->tc_credits_per_period[3],
627 		p->tc_credits_per_period[4],
628 		p->tc_credits_per_period[5],
629 		p->tc_credits_per_period[6],
630 		p->tc_credits_per_period[7],
631 		p->tc_credits_per_period[8],
632 		p->tc_credits_per_period[9],
633 		p->tc_credits_per_period[10],
634 		p->tc_credits_per_period[11],
635 		p->tc_credits_per_period[12],
636 
637 		/* Best-effort traffic class oversubscription */
638 		p->tc_ov_weight,
639 
640 		/* WRR */
641 		p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3]);
642 }
643 
644 static void
645 rte_sched_port_log_subport_profile(struct rte_sched_port *port, uint32_t i)
646 {
647 	struct rte_sched_subport_profile *p = port->subport_profiles + i;
648 
649 	RTE_LOG(DEBUG, SCHED, "Low level config for subport profile %u:\n"
650 	"Token bucket: period = %"PRIu64", credits per period = %"PRIu64","
651 	"size = %"PRIu64"\n"
652 	"Traffic classes: period = %"PRIu64",\n"
653 	"credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
654 	" %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
655 	" %"PRIu64", %"PRIu64", %"PRIu64"]\n",
656 	i,
657 
658 	/* Token bucket */
659 	p->tb_period,
660 	p->tb_credits_per_period,
661 	p->tb_size,
662 
663 	/* Traffic classes */
664 	p->tc_period,
665 	p->tc_credits_per_period[0],
666 	p->tc_credits_per_period[1],
667 	p->tc_credits_per_period[2],
668 	p->tc_credits_per_period[3],
669 	p->tc_credits_per_period[4],
670 	p->tc_credits_per_period[5],
671 	p->tc_credits_per_period[6],
672 	p->tc_credits_per_period[7],
673 	p->tc_credits_per_period[8],
674 	p->tc_credits_per_period[9],
675 	p->tc_credits_per_period[10],
676 	p->tc_credits_per_period[11],
677 	p->tc_credits_per_period[12]);
678 }
679 
680 static inline uint64_t
681 rte_sched_time_ms_to_bytes(uint64_t time_ms, uint64_t rate)
682 {
683 	uint64_t time = time_ms;
684 
685 	time = (time * rate) / 1000;
686 
687 	return time;
688 }
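/*
 * Example (illustrative numbers): with tc_period = 10 ms and a 10 GbE port,
 * rate = 1.25e9 bytes/sec, so the period expressed in bytes is
 *	10 * 1250000000 / 1000 = 12500000 bytes.
 * The same helper is also called with a TC rate as the "rate" argument to
 * turn a period into a per-period credit amount.
 */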
689 
690 static void
691 rte_sched_pipe_profile_convert(struct rte_sched_subport *subport,
692 	struct rte_sched_pipe_params *src,
693 	struct rte_sched_pipe_profile *dst,
694 	uint64_t rate)
695 {
696 	uint32_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
697 	uint32_t lcd1, lcd2, lcd;
698 	uint32_t i;
699 
700 	/* Token Bucket */
701 	if (src->tb_rate == rate) {
702 		dst->tb_credits_per_period = 1;
703 		dst->tb_period = 1;
704 	} else {
705 		double tb_rate = (double) src->tb_rate
706 				/ (double) rate;
707 		double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
708 
709 		rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
710 			&dst->tb_period);
711 	}
712 
713 	dst->tb_size = src->tb_size;
714 
715 	/* Traffic Classes */
716 	dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
717 						rate);
718 
719 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
720 		if (subport->qsize[i])
721 			dst->tc_credits_per_period[i]
722 				= rte_sched_time_ms_to_bytes(src->tc_period,
723 					src->tc_rate[i]);
724 
725 	dst->tc_ov_weight = src->tc_ov_weight;
726 
727 	/* WRR queues */
728 	wrr_cost[0] = src->wrr_weights[0];
729 	wrr_cost[1] = src->wrr_weights[1];
730 	wrr_cost[2] = src->wrr_weights[2];
731 	wrr_cost[3] = src->wrr_weights[3];
732 
733 	lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
734 	lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
735 	lcd = rte_get_lcd(lcd1, lcd2);
736 
737 	wrr_cost[0] = lcd / wrr_cost[0];
738 	wrr_cost[1] = lcd / wrr_cost[1];
739 	wrr_cost[2] = lcd / wrr_cost[2];
740 	wrr_cost[3] = lcd / wrr_cost[3];
741 
742 	dst->wrr_cost[0] = (uint8_t) wrr_cost[0];
743 	dst->wrr_cost[1] = (uint8_t) wrr_cost[1];
744 	dst->wrr_cost[2] = (uint8_t) wrr_cost[2];
745 	dst->wrr_cost[3] = (uint8_t) wrr_cost[3];
746 }
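/*
 * WRR cost example (illustrative weights): for the 4 best-effort queues with
 * wrr_weights = {1, 2, 4, 8}, rte_get_lcd() yields a common denominator of 8,
 * so
 *	wrr_cost = {8/1, 8/2, 8/4, 8/8} = {8, 4, 2, 1}.
 * A queue with a higher weight thus pays a lower per-packet token cost and
 * receives a proportionally larger share of the best-effort bandwidth.
 */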
747 
748 static void
749 rte_sched_subport_profile_convert(struct rte_sched_subport_profile_params *src,
750 	struct rte_sched_subport_profile *dst,
751 	uint64_t rate)
752 {
753 	uint32_t i;
754 
755 	/* Token Bucket */
756 	if (src->tb_rate == rate) {
757 		dst->tb_credits_per_period = 1;
758 		dst->tb_period = 1;
759 	} else {
760 		double tb_rate = (double) src->tb_rate
761 				/ (double) rate;
762 		double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
763 
764 		rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
765 			&dst->tb_period);
766 	}
767 
768 	dst->tb_size = src->tb_size;
769 
770 	/* Traffic Classes */
771 	dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period, rate);
772 
773 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
774 		dst->tc_credits_per_period[i]
775 			= rte_sched_time_ms_to_bytes(src->tc_period,
776 				src->tc_rate[i]);
777 }
778 
779 static void
780 rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport,
781 	struct rte_sched_subport_params *params, uint64_t rate)
782 {
783 	uint32_t i;
784 
785 	for (i = 0; i < subport->n_pipe_profiles; i++) {
786 		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
787 		struct rte_sched_pipe_profile *dst = subport->pipe_profiles + i;
788 
789 		rte_sched_pipe_profile_convert(subport, src, dst, rate);
790 		rte_sched_port_log_pipe_profile(subport, i);
791 	}
792 
793 	subport->pipe_tc_be_rate_max = 0;
794 	for (i = 0; i < subport->n_pipe_profiles; i++) {
795 		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
796 		uint64_t pipe_tc_be_rate = src->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
797 
798 		if (subport->pipe_tc_be_rate_max < pipe_tc_be_rate)
799 			subport->pipe_tc_be_rate_max = pipe_tc_be_rate;
800 	}
801 }
802 
803 static void
804 rte_sched_port_config_subport_profile_table(struct rte_sched_port *port,
805 	struct rte_sched_port_params *params,
806 	uint64_t rate)
807 {
808 	uint32_t i;
809 
810 	for (i = 0; i < port->n_subport_profiles; i++) {
811 		struct rte_sched_subport_profile_params *src
812 				= params->subport_profiles + i;
813 		struct rte_sched_subport_profile *dst
814 				= port->subport_profiles + i;
815 
816 		rte_sched_subport_profile_convert(src, dst, rate);
817 		rte_sched_port_log_subport_profile(port, i);
818 	}
819 }
820 
821 static int
822 rte_sched_subport_check_params(struct rte_sched_subport_params *params,
823 	uint32_t n_max_pipes_per_subport,
824 	uint64_t rate)
825 {
826 	uint32_t i;
827 
828 	/* Check user parameters */
829 	if (params == NULL) {
830 		RTE_LOG(ERR, SCHED,
831 			"%s: Incorrect value for parameter params\n", __func__);
832 		return -EINVAL;
833 	}
834 
835 	/* qsize: if non-zero, power of 2,
836 	 * no bigger than 32K (due to 16-bit read/write pointers)
837 	 */
838 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
839 		uint16_t qsize = params->qsize[i];
840 
841 		if (qsize != 0 && !rte_is_power_of_2(qsize)) {
842 			RTE_LOG(ERR, SCHED,
843 				"%s: Incorrect value for qsize\n", __func__);
844 			return -EINVAL;
845 		}
846 	}
847 
848 	if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
849 		RTE_LOG(ERR, SCHED, "%s: Incorrect qsize\n", __func__);
850 		return -EINVAL;
851 	}
852 
853 	/* n_pipes_per_subport: non-zero, power of 2 */
854 	if (params->n_pipes_per_subport_enabled == 0 ||
855 		params->n_pipes_per_subport_enabled > n_max_pipes_per_subport ||
856 	    !rte_is_power_of_2(params->n_pipes_per_subport_enabled)) {
857 		RTE_LOG(ERR, SCHED,
858 			"%s: Incorrect value for pipes number\n", __func__);
859 		return -EINVAL;
860 	}
861 
862 	/* pipe_profiles and n_pipe_profiles */
863 	if (params->pipe_profiles == NULL ||
864 	    params->n_pipe_profiles == 0 ||
865 		params->n_max_pipe_profiles == 0 ||
866 		params->n_pipe_profiles > params->n_max_pipe_profiles) {
867 		RTE_LOG(ERR, SCHED,
868 			"%s: Incorrect value for pipe profiles\n", __func__);
869 		return -EINVAL;
870 	}
871 
872 	for (i = 0; i < params->n_pipe_profiles; i++) {
873 		struct rte_sched_pipe_params *p = params->pipe_profiles + i;
874 		int status;
875 
876 		status = pipe_profile_check(p, rate, &params->qsize[0]);
877 		if (status != 0) {
878 			RTE_LOG(ERR, SCHED,
879 				"%s: Pipe profile check failed(%d)\n", __func__, status);
880 			return -EINVAL;
881 		}
882 	}
883 
884 	return 0;
885 }
886 
887 uint32_t
888 rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params,
889 	struct rte_sched_subport_params **subport_params)
890 {
891 	uint32_t size0 = 0, size1 = 0, i;
892 	int status;
893 
894 	status = rte_sched_port_check_params(port_params);
895 	if (status != 0) {
896 		RTE_LOG(ERR, SCHED,
897 			"%s: Port scheduler port params check failed (%d)\n",
898 			__func__, status);
899 
900 		return 0;
901 	}
902 
903 	for (i = 0; i < port_params->n_subports_per_port; i++) {
904 		struct rte_sched_subport_params *sp = subport_params[i];
905 
906 		status = rte_sched_subport_check_params(sp,
907 				port_params->n_pipes_per_subport,
908 				port_params->rate);
909 		if (status != 0) {
910 			RTE_LOG(ERR, SCHED,
911 				"%s: Port scheduler subport params check failed (%d)\n",
912 				__func__, status);
913 
914 			return 0;
915 		}
916 	}
917 
918 	size0 = sizeof(struct rte_sched_port);
919 
920 	for (i = 0; i < port_params->n_subports_per_port; i++) {
921 		struct rte_sched_subport_params *sp = subport_params[i];
922 
923 		size1 += rte_sched_subport_get_array_base(sp,
924 					e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
925 	}
926 
927 	return size0 + size1;
928 }
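/*
 * Usage sketch (port_params/subport_params are hypothetical application
 * structures): size the scheduler before creating it.
 *
 *	uint32_t mem = rte_sched_port_get_memory_footprint(&port_params,
 *							   subport_params);
 *	if (mem == 0)
 *		return -1;	// parameter check failed
 *	// mem bytes will be spread across the port and per-subport allocations
 */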
929 
930 struct rte_sched_port *
931 rte_sched_port_config(struct rte_sched_port_params *params)
932 {
933 	struct rte_sched_port *port = NULL;
934 	uint32_t size0, size1, size2;
935 	uint32_t cycles_per_byte;
936 	uint32_t i, j;
937 	int status;
938 
939 	status = rte_sched_port_check_params(params);
940 	if (status != 0) {
941 		RTE_LOG(ERR, SCHED,
942 			"%s: Port scheduler params check failed (%d)\n",
943 			__func__, status);
944 		return NULL;
945 	}
946 
947 	size0 = sizeof(struct rte_sched_port);
948 	size1 = params->n_subports_per_port * sizeof(struct rte_sched_subport *);
949 	size2 = params->n_max_subport_profiles *
950 		sizeof(struct rte_sched_subport_profile);
951 
952 	/* Allocate memory to store the data structures */
953 	port = rte_zmalloc_socket("qos_params", size0 + size1,
954 				 RTE_CACHE_LINE_SIZE, params->socket);
955 	if (port == NULL) {
956 		RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);
957 
958 		return NULL;
959 	}
960 
961 	/* Allocate memory to store the subport profile */
962 	port->subport_profiles  = rte_zmalloc_socket("subport_profile", size2,
963 					RTE_CACHE_LINE_SIZE, params->socket);
964 	if (port->subport_profiles == NULL) {
965 		RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);
966 		rte_free(port);
967 		return NULL;
968 	}
969 
970 	/* User parameters */
971 	port->n_subports_per_port = params->n_subports_per_port;
972 	port->n_subport_profiles = params->n_subport_profiles;
973 	port->n_max_subport_profiles = params->n_max_subport_profiles;
974 	port->n_pipes_per_subport = params->n_pipes_per_subport;
975 	port->n_pipes_per_subport_log2 =
976 			rte_ctz32(params->n_pipes_per_subport);
977 	port->socket = params->socket;
978 
979 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
980 		port->pipe_queue[i] = i;
981 
982 	for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
983 		port->pipe_tc[i] = j;
984 
985 		if (j < RTE_SCHED_TRAFFIC_CLASS_BE)
986 			j++;
987 	}
988 
989 	for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
990 		port->tc_queue[i] = j;
991 
992 		if (i >= RTE_SCHED_TRAFFIC_CLASS_BE)
993 			j++;
994 	}
995 	port->rate = params->rate;
996 	port->mtu = params->mtu + params->frame_overhead;
997 	port->frame_overhead = params->frame_overhead;
998 
999 	/* Timing */
1000 	port->time_cpu_cycles = rte_get_tsc_cycles();
1001 	port->time_cpu_bytes = 0;
1002 	port->time = 0;
1003 
1004 	/* Subport profile table */
1005 	rte_sched_port_config_subport_profile_table(port, params, port->rate);
1006 
1007 	cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT)
1008 		/ params->rate;
1009 	port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);
1010 	port->cycles_per_byte = cycles_per_byte;
1011 
1012 	/* Grinders */
1013 	port->pkts_out = NULL;
1014 	port->n_pkts_out = 0;
1015 	port->subport_id = 0;
1016 
1017 	return port;
1018 }
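/*
 * Typical configuration sequence (sketch; parameter structure contents are
 * application-specific and error handling is elided):
 *
 *	struct rte_sched_port *port = rte_sched_port_config(&port_params);
 *	uint32_t subport_profile_id, pipe_profile_id;
 *
 *	rte_sched_port_subport_profile_add(port, &subport_profile_params,
 *					   &subport_profile_id);
 *	rte_sched_subport_config(port, 0, &subport_params, subport_profile_id);
 *	rte_sched_subport_pipe_profile_add(port, 0, &pipe_params,
 *					   &pipe_profile_id);
 *	rte_sched_pipe_config(port, 0, 0, pipe_profile_id);
 *
 * After this, rte_sched_port_enqueue()/rte_sched_port_dequeue() can be used
 * on the fast path.
 */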
1019 
1020 static inline void
1021 rte_sched_subport_free(struct rte_sched_port *port,
1022 	struct rte_sched_subport *subport)
1023 {
1024 	uint32_t n_subport_pipe_queues;
1025 	uint32_t qindex;
1026 
1027 	if (subport == NULL)
1028 		return;
1029 
1030 	n_subport_pipe_queues = rte_sched_subport_pipe_queues(subport);
1031 
1032 	/* Free enqueued mbufs */
1033 	for (qindex = 0; qindex < n_subport_pipe_queues; qindex++) {
1034 		struct rte_mbuf **mbufs =
1035 			rte_sched_subport_pipe_qbase(subport, qindex);
1036 		uint16_t qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
1037 		if (qsize != 0) {
1038 			struct rte_sched_queue *queue = subport->queue + qindex;
1039 			uint16_t qr = queue->qr & (qsize - 1);
1040 			uint16_t qw = queue->qw & (qsize - 1);
1041 
1042 			for (; qr != qw; qr = (qr + 1) & (qsize - 1))
1043 				rte_pktmbuf_free(mbufs[qr]);
1044 		}
1045 	}
1046 
1047 	rte_free(subport);
1048 }
1049 
1050 void
1051 rte_sched_port_free(struct rte_sched_port *port)
1052 {
1053 	uint32_t i;
1054 
1055 	/* Check user parameters */
1056 	if (port == NULL)
1057 		return;
1058 
1059 	for (i = 0; i < port->n_subports_per_port; i++)
1060 		rte_sched_subport_free(port, port->subports[i]);
1061 
1062 	rte_free(port->subport_profiles);
1063 	rte_free(port);
1064 }
1065 
1066 static void
1067 rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
1068 {
1069 	uint32_t i;
1070 
1071 	for (i = 0; i < n_subports; i++) {
1072 		struct rte_sched_subport *subport = port->subports[i];
1073 
1074 		rte_sched_subport_free(port, subport);
1075 	}
1076 
1077 	rte_free(port->subport_profiles);
1078 	rte_free(port);
1079 }
1080 
1081 static int
1082 rte_sched_red_config(struct rte_sched_port *port,
1083 	struct rte_sched_subport *s,
1084 	struct rte_sched_subport_params *params,
1085 	uint32_t n_subports)
1086 {
1087 	uint32_t i;
1088 
1089 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
1090 
1091 		uint32_t j;
1092 
1093 		for (j = 0; j < RTE_COLORS; j++) {
1094 			/* if min/max are both zero, then RED is disabled */
1095 			if ((params->cman_params->red_params[i][j].min_th |
1096 				 params->cman_params->red_params[i][j].max_th) == 0) {
1097 				continue;
1098 			}
1099 
1100 			if (rte_red_config_init(&s->red_config[i][j],
1101 				params->cman_params->red_params[i][j].wq_log2,
1102 				params->cman_params->red_params[i][j].min_th,
1103 				params->cman_params->red_params[i][j].max_th,
1104 				params->cman_params->red_params[i][j].maxp_inv) != 0) {
1105 				rte_sched_free_memory(port, n_subports);
1106 
1107 				RTE_LOG(NOTICE, SCHED,
1108 				"%s: RED configuration init fails\n", __func__);
1109 				return -EINVAL;
1110 			}
1111 		}
1112 	}
1113 	s->cman = RTE_SCHED_CMAN_RED;
1114 	return 0;
1115 }
1116 
1117 static int
1118 rte_sched_pie_config(struct rte_sched_port *port,
1119 	struct rte_sched_subport *s,
1120 	struct rte_sched_subport_params *params,
1121 	uint32_t n_subports)
1122 {
1123 	uint32_t i;
1124 
1125 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
1126 		if (params->cman_params->pie_params[i].tailq_th > params->qsize[i]) {
1127 			RTE_LOG(NOTICE, SCHED,
1128 			"%s: PIE tailq threshold incorrect\n", __func__);
1129 			return -EINVAL;
1130 		}
1131 
1132 		if (rte_pie_config_init(&s->pie_config[i],
1133 			params->cman_params->pie_params[i].qdelay_ref,
1134 			params->cman_params->pie_params[i].dp_update_interval,
1135 			params->cman_params->pie_params[i].max_burst,
1136 			params->cman_params->pie_params[i].tailq_th) != 0) {
1137 			rte_sched_free_memory(port, n_subports);
1138 
1139 			RTE_LOG(NOTICE, SCHED,
1140 			"%s: PIE configuration init fails\n", __func__);
1141 			return -EINVAL;
1142 		}
1143 	}
1144 	s->cman = RTE_SCHED_CMAN_PIE;
1145 	return 0;
1146 }
1147 
1148 static int
1149 rte_sched_cman_config(struct rte_sched_port *port,
1150 	struct rte_sched_subport *s,
1151 	struct rte_sched_subport_params *params,
1152 	uint32_t n_subports)
1153 {
1154 	if (params->cman_params->cman_mode == RTE_SCHED_CMAN_RED)
1155 		return rte_sched_red_config(port, s, params, n_subports);
1156 
1157 	else if (params->cman_params->cman_mode == RTE_SCHED_CMAN_PIE)
1158 		return rte_sched_pie_config(port, s, params, n_subports);
1159 
1160 	return -EINVAL;
1161 }
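/*
 * Congestion management is selected per subport via
 * subport_params->cman_params. Minimal RED sketch (thresholds are
 * illustrative; a TC/color entry with min_th == max_th == 0 leaves RED
 * disabled for that entry):
 *
 *	static struct rte_sched_cman_params cman = {
 *		.cman_mode = RTE_SCHED_CMAN_RED,
 *		.red_params = {
 *			[RTE_SCHED_TRAFFIC_CLASS_BE] = {
 *				[RTE_COLOR_GREEN] = { .min_th = 48, .max_th = 64,
 *						      .maxp_inv = 10, .wq_log2 = 9 },
 *			},
 *		},
 *	};
 *	subport_params.cman_params = &cman;
 */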
1162 
1163 int
1164 rte_sched_subport_tc_ov_config(struct rte_sched_port *port,
1165 	uint32_t subport_id,
1166 	bool tc_ov_enable)
1167 {
1168 	struct rte_sched_subport *s;
1169 
1170 	if (port == NULL) {
1171 		RTE_LOG(ERR, SCHED,
1172 			"%s: Incorrect value for parameter port\n", __func__);
1173 		return -EINVAL;
1174 	}
1175 
1176 	if (subport_id >= port->n_subports_per_port) {
1177 		RTE_LOG(ERR, SCHED,
1178 			"%s: Incorrect value for parameter subport id\n", __func__);
1179 		return  -EINVAL;
1180 	}
1181 
1182 	s = port->subports[subport_id];
1183 	s->tc_ov_enabled = tc_ov_enable ? 1 : 0;
1184 
1185 	return 0;
1186 }
1187 
1188 int
1189 rte_sched_subport_config(struct rte_sched_port *port,
1190 	uint32_t subport_id,
1191 	struct rte_sched_subport_params *params,
1192 	uint32_t subport_profile_id)
1193 {
1194 	struct rte_sched_subport *s = NULL;
1195 	uint32_t n_subports = subport_id;
1196 	struct rte_sched_subport_profile *profile;
1197 	uint32_t n_subport_pipe_queues, i;
1198 	uint32_t size0, size1, bmp_mem_size;
1199 	int status;
1200 	int ret;
1201 
1202 	/* Check user parameters */
1203 	if (port == NULL) {
1204 		RTE_LOG(ERR, SCHED,
1205 			"%s: Incorrect value for parameter port\n", __func__);
1206 		return -EINVAL;
1207 	}
1208 
1209 	if (subport_id >= port->n_subports_per_port) {
1210 		RTE_LOG(ERR, SCHED,
1211 			"%s: Incorrect value for subport id\n", __func__);
1212 		ret = -EINVAL;
1213 		goto out;
1214 	}
1215 
1216 	if (subport_profile_id >= port->n_max_subport_profiles) {
1217 		RTE_LOG(ERR, SCHED, "%s: "
1218 			"Number of subport profiles exceeds the max limit\n",
1219 			__func__);
1220 		ret = -EINVAL;
1221 		goto out;
1222 	}
1223 
1224 	/* Memory is allocated only on the first invocation of this API for a
1225 	 * given subport. Subsequent invocations on the same subport only
1226 	 * update the subport bandwidth parameters.
1227 	 */
1228 	if (port->subports[subport_id] == NULL) {
1229 
1230 		status = rte_sched_subport_check_params(params,
1231 			port->n_pipes_per_subport,
1232 			port->rate);
1233 		if (status != 0) {
1234 			RTE_LOG(NOTICE, SCHED,
1235 				"%s: Port scheduler params check failed (%d)\n",
1236 				__func__, status);
1237 			ret = -EINVAL;
1238 			goto out;
1239 		}
1240 
1241 		/* Determine the amount of memory to allocate */
1242 		size0 = sizeof(struct rte_sched_subport);
1243 		size1 = rte_sched_subport_get_array_base(params,
1244 					e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
1245 
1246 		/* Allocate memory to store the data structures */
1247 		s = rte_zmalloc_socket("subport_params", size0 + size1,
1248 			RTE_CACHE_LINE_SIZE, port->socket);
1249 		if (s == NULL) {
1250 			RTE_LOG(ERR, SCHED,
1251 				"%s: Memory allocation fails\n", __func__);
1252 			ret = -ENOMEM;
1253 			goto out;
1254 		}
1255 
1256 		n_subports++;
1257 
1258 		/* Port */
1259 		port->subports[subport_id] = s;
1260 
1261 		s->tb_time = port->time;
1262 
1263 		/* compile time checks */
1264 		RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
1265 		RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS &
1266 			(RTE_SCHED_PORT_N_GRINDERS - 1));
1267 
1268 		/* User parameters */
1269 		s->n_pipes_per_subport_enabled =
1270 				params->n_pipes_per_subport_enabled;
1271 		memcpy(s->qsize, params->qsize, sizeof(params->qsize));
1272 		s->n_pipe_profiles = params->n_pipe_profiles;
1273 		s->n_max_pipe_profiles = params->n_max_pipe_profiles;
1274 
1275 		/* TC oversubscription is enabled by default */
1276 		s->tc_ov_enabled = 1;
1277 
1278 		if (params->cman_params != NULL) {
1279 			s->cman_enabled = true;
1280 			status = rte_sched_cman_config(port, s, params, n_subports);
1281 			if (status) {
1282 				RTE_LOG(NOTICE, SCHED,
1283 					"%s: CMAN configuration fails\n", __func__);
1284 				return status;
1285 			}
1286 		} else {
1287 			s->cman_enabled = false;
1288 		}
1289 
1290 		/* Scheduling loop detection */
1291 		s->pipe_loop = RTE_SCHED_PIPE_INVALID;
1292 		s->pipe_exhaustion = 0;
1293 
1294 		/* Grinders */
1295 		s->busy_grinders = 0;
1296 
1297 		/* Queue base calculation */
1298 		rte_sched_subport_config_qsize(s);
1299 
1300 		/* Large data structures */
1301 		s->pipe = (struct rte_sched_pipe *)
1302 			(s->memory + rte_sched_subport_get_array_base(params,
1303 			e_RTE_SCHED_SUBPORT_ARRAY_PIPE));
1304 		s->queue = (struct rte_sched_queue *)
1305 			(s->memory + rte_sched_subport_get_array_base(params,
1306 			e_RTE_SCHED_SUBPORT_ARRAY_QUEUE));
1307 		s->queue_extra = (struct rte_sched_queue_extra *)
1308 			(s->memory + rte_sched_subport_get_array_base(params,
1309 			e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA));
1310 		s->pipe_profiles = (struct rte_sched_pipe_profile *)
1311 			(s->memory + rte_sched_subport_get_array_base(params,
1312 			e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
1313 		s->bmp_array =  s->memory + rte_sched_subport_get_array_base(
1314 				params, e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
1315 		s->queue_array = (struct rte_mbuf **)
1316 			(s->memory + rte_sched_subport_get_array_base(params,
1317 			e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));
1318 
1319 		/* Pipe profile table */
1320 		rte_sched_subport_config_pipe_profile_table(s, params,
1321 							    port->rate);
1322 
1323 		/* Bitmap */
1324 		n_subport_pipe_queues = rte_sched_subport_pipe_queues(s);
1325 		bmp_mem_size = rte_bitmap_get_memory_footprint(
1326 						n_subport_pipe_queues);
1327 		s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array,
1328 					bmp_mem_size);
1329 		if (s->bmp == NULL) {
1330 			RTE_LOG(ERR, SCHED,
1331 				"%s: Subport bitmap init error\n", __func__);
1332 			ret = -EINVAL;
1333 			goto out;
1334 		}
1335 
1336 		for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
1337 			s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
1338 
1339 		/* TC oversubscription */
1340 		s->tc_ov_wm_min = port->mtu;
1341 		s->tc_ov_period_id = 0;
1342 		s->tc_ov = 0;
1343 		s->tc_ov_n = 0;
1344 		s->tc_ov_rate = 0;
1345 	}
1346 
1347 	{
1348 	/* Update subport parameters from the subport profile table */
1349 		profile = port->subport_profiles + subport_profile_id;
1350 
1351 		s = port->subports[subport_id];
1352 
1353 		s->tb_credits = profile->tb_size / 2;
1354 
1355 		s->tc_time = port->time + profile->tc_period;
1356 
1357 		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
1358 			if (s->qsize[i])
1359 				s->tc_credits[i] =
1360 					profile->tc_credits_per_period[i];
1361 			else
1362 				profile->tc_credits_per_period[i] = 0;
1363 
1364 		s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period,
1365 							s->pipe_tc_be_rate_max);
1366 		s->tc_ov_wm = s->tc_ov_wm_max;
1367 		s->profile = subport_profile_id;
1368 
1369 	}
1370 
1371 	rte_sched_port_log_subport_profile(port, subport_profile_id);
1372 
1373 	return 0;
1374 
1375 out:
1376 	rte_sched_free_memory(port, n_subports);
1377 
1378 	return ret;
1379 }
1380 
1381 int
1382 rte_sched_pipe_config(struct rte_sched_port *port,
1383 	uint32_t subport_id,
1384 	uint32_t pipe_id,
1385 	int32_t pipe_profile)
1386 {
1387 	struct rte_sched_subport *s;
1388 	struct rte_sched_subport_profile *sp;
1389 	struct rte_sched_pipe *p;
1390 	struct rte_sched_pipe_profile *params;
1391 	uint32_t n_subports = subport_id + 1;
1392 	uint32_t deactivate, profile, i;
1393 	int ret;
1394 
1395 	/* Check user parameters */
1396 	profile = (uint32_t) pipe_profile;
1397 	deactivate = (pipe_profile < 0);
1398 
1399 	if (port == NULL) {
1400 		RTE_LOG(ERR, SCHED,
1401 			"%s: Incorrect value for parameter port\n", __func__);
1402 		return -EINVAL;
1403 	}
1404 
1405 	if (subport_id >= port->n_subports_per_port) {
1406 		RTE_LOG(ERR, SCHED,
1407 			"%s: Incorrect value for parameter subport id\n", __func__);
1408 		ret = -EINVAL;
1409 		goto out;
1410 	}
1411 
1412 	s = port->subports[subport_id];
1413 	if (pipe_id >= s->n_pipes_per_subport_enabled) {
1414 		RTE_LOG(ERR, SCHED,
1415 			"%s: Incorrect value for parameter pipe id\n", __func__);
1416 		ret = -EINVAL;
1417 		goto out;
1418 	}
1419 
1420 	if (!deactivate && profile >= s->n_pipe_profiles) {
1421 		RTE_LOG(ERR, SCHED,
1422 			"%s: Incorrect value for parameter pipe profile\n", __func__);
1423 		ret = -EINVAL;
1424 		goto out;
1425 	}
1426 
1427 	sp = port->subport_profiles + s->profile;
1428 	/* Handle the case when pipe already has a valid configuration */
1429 	p = s->pipe + pipe_id;
1430 	if (p->tb_time) {
1431 		params = s->pipe_profiles + p->profile;
1432 
1433 		double subport_tc_be_rate =
1434 		(double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1435 			/ (double) sp->tc_period;
1436 		double pipe_tc_be_rate =
1437 			(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1438 			/ (double) params->tc_period;
1439 		uint32_t tc_be_ov = s->tc_ov;
1440 
1441 		/* Unplug pipe from its subport */
1442 		s->tc_ov_n -= params->tc_ov_weight;
1443 		s->tc_ov_rate -= pipe_tc_be_rate;
1444 		s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
1445 
1446 		if (s->tc_ov != tc_be_ov) {
1447 			RTE_LOG(DEBUG, SCHED,
1448 				"Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n",
1449 				subport_id, subport_tc_be_rate, s->tc_ov_rate);
1450 		}
1451 
1452 		/* Reset the pipe */
1453 		memset(p, 0, sizeof(struct rte_sched_pipe));
1454 	}
1455 
1456 	if (deactivate)
1457 		return 0;
1458 
1459 	/* Apply the new pipe configuration */
1460 	p->profile = profile;
1461 	params = s->pipe_profiles + p->profile;
1462 
1463 	/* Token Bucket (TB) */
1464 	p->tb_time = port->time;
1465 	p->tb_credits = params->tb_size / 2;
1466 
1467 	/* Traffic Classes (TCs) */
1468 	p->tc_time = port->time + params->tc_period;
1469 
1470 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
1471 		if (s->qsize[i])
1472 			p->tc_credits[i] = params->tc_credits_per_period[i];
1473 
1474 	{
1475 		/* Subport best effort tc oversubscription */
1476 		double subport_tc_be_rate =
1477 		(double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1478 			/ (double) sp->tc_period;
1479 		double pipe_tc_be_rate =
1480 			(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1481 			/ (double) params->tc_period;
1482 		uint32_t tc_be_ov = s->tc_ov;
1483 
1484 		s->tc_ov_n += params->tc_ov_weight;
1485 		s->tc_ov_rate += pipe_tc_be_rate;
1486 		s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
1487 
1488 		if (s->tc_ov != tc_be_ov) {
1489 			RTE_LOG(DEBUG, SCHED,
1490 				"Subport %u Best effort TC oversubscription is ON (%.4lf < %.4lf)\n",
1491 				subport_id, subport_tc_be_rate, s->tc_ov_rate);
1492 		}
1493 		p->tc_ov_period_id = s->tc_ov_period_id;
1494 		p->tc_ov_credits = s->tc_ov_wm;
1495 	}
1496 
1497 	return 0;
1498 
1499 out:
1500 	rte_sched_free_memory(port, n_subports);
1501 
1502 	return ret;
1503 }
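/*
 * Usage sketch: a pipe is (re)attached to a profile by id, and detached by
 * passing a negative profile, which resets the pipe after returning its
 * best-effort rate to the subport oversubscription accounting:
 *
 *	rte_sched_pipe_config(port, subport_id, pipe_id, pipe_profile_id);
 *	...
 *	rte_sched_pipe_config(port, subport_id, pipe_id, -1);	// deactivate
 */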
1504 
1505 int
1506 rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
1507 	uint32_t subport_id,
1508 	struct rte_sched_pipe_params *params,
1509 	uint32_t *pipe_profile_id)
1510 {
1511 	struct rte_sched_subport *s;
1512 	struct rte_sched_pipe_profile *pp;
1513 	uint32_t i;
1514 	int status;
1515 
1516 	/* Port */
1517 	if (port == NULL) {
1518 		RTE_LOG(ERR, SCHED,
1519 			"%s: Incorrect value for parameter port\n", __func__);
1520 		return -EINVAL;
1521 	}
1522 
1523 	/* Subport id must not exceed the max limit */
1524 	if (subport_id >= port->n_subports_per_port) {
1525 		RTE_LOG(ERR, SCHED,
1526 			"%s: Incorrect value for subport id\n", __func__);
1527 		return -EINVAL;
1528 	}
1529 
1530 	s = port->subports[subport_id];
1531 
1532 	/* Number of pipe profiles must not exceed the max limit */
1533 	if (s->n_pipe_profiles >= s->n_max_pipe_profiles) {
1534 		RTE_LOG(ERR, SCHED,
1535 			"%s: Number of pipe profiles exceeds the max limit\n", __func__);
1536 		return -EINVAL;
1537 	}
1538 
1539 	/* Pipe params */
1540 	status = pipe_profile_check(params, port->rate, &s->qsize[0]);
1541 	if (status != 0) {
1542 		RTE_LOG(ERR, SCHED,
1543 			"%s: Pipe profile check failed(%d)\n", __func__, status);
1544 		return -EINVAL;
1545 	}
1546 
1547 	pp = &s->pipe_profiles[s->n_pipe_profiles];
1548 	rte_sched_pipe_profile_convert(s, params, pp, port->rate);
1549 
1550 	/* Pipe profile must not already exist */
1551 	for (i = 0; i < s->n_pipe_profiles; i++)
1552 		if (memcmp(s->pipe_profiles + i, pp, sizeof(*pp)) == 0) {
1553 			RTE_LOG(ERR, SCHED,
1554 				"%s: Pipe profile exists\n", __func__);
1555 			return -EINVAL;
1556 		}
1557 
1558 	/* Pipe profile commit */
1559 	*pipe_profile_id = s->n_pipe_profiles;
1560 	s->n_pipe_profiles++;
1561 
1562 	if (s->pipe_tc_be_rate_max < params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE])
1563 		s->pipe_tc_be_rate_max = params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
1564 
1565 	rte_sched_port_log_pipe_profile(s, *pipe_profile_id);
1566 
1567 	return 0;
1568 }
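/*
 * Runtime profile addition sketch (new_pipe_params is a hypothetical
 * parameter struct): profiles can be added after the subport is configured,
 * up to n_max_pipe_profiles, and the returned id is then usable with
 * rte_sched_pipe_config():
 *
 *	uint32_t id;
 *	if (rte_sched_subport_pipe_profile_add(port, subport_id,
 *						&new_pipe_params, &id) == 0)
 *		rte_sched_pipe_config(port, subport_id, pipe_id, (int32_t)id);
 */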
1569 
1570 int
1571 rte_sched_port_subport_profile_add(struct rte_sched_port *port,
1572 	struct rte_sched_subport_profile_params *params,
1573 	uint32_t *subport_profile_id)
1574 {
1575 	int status;
1576 	uint32_t i;
1577 	struct rte_sched_subport_profile *dst;
1578 
1579 	/* Port */
1580 	if (port == NULL) {
1581 		RTE_LOG(ERR, SCHED, "%s: "
1582 		"Incorrect value for parameter port\n", __func__);
1583 		return -EINVAL;
1584 	}
1585 
1586 	if (params == NULL) {
1587 		RTE_LOG(ERR, SCHED, "%s: "
1588 		"Incorrect value for parameter profile\n", __func__);
1589 		return -EINVAL;
1590 	}
1591 
1592 	if (subport_profile_id == NULL) {
1593 		RTE_LOG(ERR, SCHED, "%s: "
1594 		"Incorrect value for parameter subport_profile_id\n",
1595 		__func__);
1596 		return -EINVAL;
1597 	}
1598 
1599 	dst = port->subport_profiles + port->n_subport_profiles;
1600 
1601 	/* Number of subport profiles must not exceed the max limit */
1602 	if (port->n_subport_profiles >= port->n_max_subport_profiles) {
1603 		RTE_LOG(ERR, SCHED, "%s: "
1604 		"Number of subport profiles exceeds the max limit\n",
1605 		 __func__);
1606 		return -EINVAL;
1607 	}
1608 
1609 	status = subport_profile_check(params, port->rate);
1610 	if (status != 0) {
1611 		RTE_LOG(ERR, SCHED,
1612 		"%s: subport profile check failed(%d)\n", __func__, status);
1613 		return -EINVAL;
1614 	}
1615 
1616 	rte_sched_subport_profile_convert(params, dst, port->rate);
1617 
1618 	/* Subport profile must not already exist */
1619 	for (i = 0; i < port->n_subport_profiles; i++)
1620 		if (memcmp(port->subport_profiles + i,
1621 		    dst, sizeof(*dst)) == 0) {
1622 			RTE_LOG(ERR, SCHED,
1623 			"%s: subport profile exists\n", __func__);
1624 			return -EINVAL;
1625 		}
1626 
1627 	/* Subport profile commit */
1628 	*subport_profile_id = port->n_subport_profiles;
1629 	port->n_subport_profiles++;
1630 
1631 	rte_sched_port_log_subport_profile(port, *subport_profile_id);
1632 
1633 	return 0;
1634 }
1635 
1636 static inline uint32_t
1637 rte_sched_port_qindex(struct rte_sched_port *port,
1638 	uint32_t subport,
1639 	uint32_t pipe,
1640 	uint32_t traffic_class,
1641 	uint32_t queue)
1642 {
1643 	return ((subport & (port->n_subports_per_port - 1)) <<
1644 		(port->n_pipes_per_subport_log2 + 4)) |
1645 		((pipe &
1646 		(port->subports[subport]->n_pipes_per_subport_enabled - 1)) << 4) |
1647 		((rte_sched_port_pipe_queue(port, traffic_class) + queue) &
1648 		(RTE_SCHED_QUEUES_PER_PIPE - 1));
1649 }
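/*
 * Queue index bit layout built above (sketch, assuming 16 queues per pipe):
 *
 *	| subport id | pipe id (n_pipes_per_subport_log2 bits) | queue (4 bits) |
 *
 * The low 4 bits combine the traffic class and the queue within the class:
 * TCs 0..11 map to queues 0..11, while the best-effort TC occupies queues
 * 12..15. For example, subport 1, pipe 3, best-effort TC, queue 2 on a port
 * with 4096 pipes per subport gives (1 << 16) | (3 << 4) | 14 = 65598.
 */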
1650 
1651 void
1652 rte_sched_port_pkt_write(struct rte_sched_port *port,
1653 			 struct rte_mbuf *pkt,
1654 			 uint32_t subport, uint32_t pipe,
1655 			 uint32_t traffic_class,
1656 			 uint32_t queue, enum rte_color color)
1657 {
1658 	uint32_t queue_id =
1659 		rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
1660 
1661 	rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
1662 }
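/*
 * Enqueue-side usage sketch (classify() is a hypothetical, app-specific
 * step): the scheduling fields are stored in the mbuf before
 * rte_sched_port_enqueue() is called.
 *
 *	uint32_t subport, pipe, tc, queue;
 *	enum rte_color color;
 *
 *	classify(pkt, &subport, &pipe, &tc, &queue, &color);
 *	rte_sched_port_pkt_write(port, pkt, subport, pipe, tc, queue, color);
 *	rte_sched_port_enqueue(port, &pkt, 1);
 */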
1663 
1664 void
1665 rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
1666 				  const struct rte_mbuf *pkt,
1667 				  uint32_t *subport, uint32_t *pipe,
1668 				  uint32_t *traffic_class, uint32_t *queue)
1669 {
1670 	uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1671 
1672 	*subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1673 	*pipe = (queue_id >> 4) &
1674 		(port->subports[*subport]->n_pipes_per_subport_enabled - 1);
1675 	*traffic_class = rte_sched_port_pipe_tc(port, queue_id);
1676 	*queue = rte_sched_port_tc_queue(port, queue_id);
1677 }
1678 
1679 enum rte_color
1680 rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
1681 {
1682 	return (enum rte_color)rte_mbuf_sched_color_get(pkt);
1683 }
1684 
1685 int
1686 rte_sched_subport_read_stats(struct rte_sched_port *port,
1687 			     uint32_t subport_id,
1688 			     struct rte_sched_subport_stats *stats,
1689 			     uint32_t *tc_ov)
1690 {
1691 	struct rte_sched_subport *s;
1692 
1693 	/* Check user parameters */
1694 	if (port == NULL) {
1695 		RTE_LOG(ERR, SCHED,
1696 			"%s: Incorrect value for parameter port\n", __func__);
1697 		return -EINVAL;
1698 	}
1699 
1700 	if (subport_id >= port->n_subports_per_port) {
1701 		RTE_LOG(ERR, SCHED,
1702 			"%s: Incorrect value for subport id\n", __func__);
1703 		return -EINVAL;
1704 	}
1705 
1706 	if (stats == NULL) {
1707 		RTE_LOG(ERR, SCHED,
1708 			"%s: Incorrect value for parameter stats\n", __func__);
1709 		return -EINVAL;
1710 	}
1711 
1712 	if (tc_ov == NULL) {
1713 		RTE_LOG(ERR, SCHED,
1714 			"%s: Incorrect value for tc_ov\n", __func__);
1715 		return -EINVAL;
1716 	}
1717 
1718 	s = port->subports[subport_id];
1719 
1720 	/* Copy subport stats and clear */
1721 	memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
1722 	memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
1723 
1724 	/* Subport TC oversubscription status */
1725 	*tc_ov = s->tc_ov;
1726 
1727 	return 0;
1728 }
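/*
 * Polling sketch: the stats are cleared on read, so periodic calls yield
 * per-interval counters rather than running totals.
 *
 *	struct rte_sched_subport_stats stats;
 *	uint32_t tc_ov;
 *
 *	if (rte_sched_subport_read_stats(port, subport_id, &stats, &tc_ov) == 0)
 *		printf("pkts TC0: %" PRIu64 ", oversubscription: %u\n",
 *		       stats.n_pkts_tc[0], tc_ov);
 */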
1729 
1730 int
1731 rte_sched_queue_read_stats(struct rte_sched_port *port,
1732 	uint32_t queue_id,
1733 	struct rte_sched_queue_stats *stats,
1734 	uint16_t *qlen)
1735 {
1736 	struct rte_sched_subport *s;
1737 	struct rte_sched_queue *q;
1738 	struct rte_sched_queue_extra *qe;
1739 	uint32_t subport_id, subport_qmask, subport_qindex;
1740 
1741 	/* Check user parameters */
1742 	if (port == NULL) {
1743 		RTE_LOG(ERR, SCHED,
1744 			"%s: Incorrect value for parameter port\n", __func__);
1745 		return -EINVAL;
1746 	}
1747 
1748 	if (queue_id >= rte_sched_port_queues_per_port(port)) {
1749 		RTE_LOG(ERR, SCHED,
1750 			"%s: Incorrect value for queue id\n", __func__);
1751 		return -EINVAL;
1752 	}
1753 
1754 	if (stats == NULL) {
1755 		RTE_LOG(ERR, SCHED,
1756 			"%s: Incorrect value for parameter stats\n", __func__);
1757 		return -EINVAL;
1758 	}
1759 
1760 	if (qlen == NULL) {
1761 		RTE_LOG(ERR, SCHED,
1762 			"%s: Incorrect value for parameter qlen\n", __func__);
1763 		return -EINVAL;
1764 	}
1765 	subport_qmask = port->n_pipes_per_subport_log2 + 4;
1766 	subport_id = (queue_id >> subport_qmask) & (port->n_subports_per_port - 1);
1767 
1768 	s = port->subports[subport_id];
1769 	subport_qindex = ((1 << subport_qmask) - 1) & queue_id;
1770 	q = s->queue + subport_qindex;
1771 	qe = s->queue_extra + subport_qindex;
1772 
1773 	/* Copy queue stats and clear */
1774 	memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
1775 	memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));
1776 
1777 	/* Queue length */
1778 	*qlen = q->qw - q->qr;
1779 
1780 	return 0;
1781 }
1782 
1783 #ifdef RTE_SCHED_DEBUG
1784 
1785 static inline int
1786 rte_sched_port_queue_is_empty(struct rte_sched_subport *subport,
1787 	uint32_t qindex)
1788 {
1789 	struct rte_sched_queue *queue = subport->queue + qindex;
1790 
1791 	return queue->qr == queue->qw;
1792 }
1793 
1794 #endif /* RTE_SCHED_DEBUG */
1795 
1796 static inline void
1797 rte_sched_port_update_subport_stats(struct rte_sched_port *port,
1798 	struct rte_sched_subport *subport,
1799 	uint32_t qindex,
1800 	struct rte_mbuf *pkt)
1801 {
1802 	uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1803 	uint32_t pkt_len = pkt->pkt_len;
1804 
1805 	subport->stats.n_pkts_tc[tc_index] += 1;
1806 	subport->stats.n_bytes_tc[tc_index] += pkt_len;
1807 }
1808 
1809 static inline void
1810 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
1811 	struct rte_sched_subport *subport,
1812 	uint32_t qindex,
1813 	struct rte_mbuf *pkt,
1814 	uint32_t n_pkts_cman_dropped)
1815 {
1816 	uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1817 	uint32_t pkt_len = pkt->pkt_len;
1818 
1819 	subport->stats.n_pkts_tc_dropped[tc_index] += 1;
1820 	subport->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
1821 	subport->stats.n_pkts_cman_dropped[tc_index] += n_pkts_cman_dropped;
1822 }
1823 
1824 static inline void
1825 rte_sched_port_update_queue_stats(struct rte_sched_subport *subport,
1826 	uint32_t qindex,
1827 	struct rte_mbuf *pkt)
1828 {
1829 	struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1830 	uint32_t pkt_len = pkt->pkt_len;
1831 
1832 	qe->stats.n_pkts += 1;
1833 	qe->stats.n_bytes += pkt_len;
1834 }
1835 
1836 static inline void
1837 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
1838 	uint32_t qindex,
1839 	struct rte_mbuf *pkt,
1840 	uint32_t n_pkts_cman_dropped)
1841 {
1842 	struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1843 	uint32_t pkt_len = pkt->pkt_len;
1844 
1845 	qe->stats.n_pkts_dropped += 1;
1846 	qe->stats.n_bytes_dropped += pkt_len;
1847 	if (subport->cman_enabled)
1848 		qe->stats.n_pkts_cman_dropped += n_pkts_cman_dropped;
1849 }
1850 
1851 static inline int
1852 rte_sched_port_cman_drop(struct rte_sched_port *port,
1853 	struct rte_sched_subport *subport,
1854 	struct rte_mbuf *pkt,
1855 	uint32_t qindex,
1856 	uint16_t qlen)
1857 {
1858 	if (!subport->cman_enabled)
1859 		return 0;
1860 
1861 	struct rte_sched_queue_extra *qe;
1862 	uint32_t tc_index;
1863 
1864 	tc_index = rte_sched_port_pipe_tc(port, qindex);
1865 	qe = subport->queue_extra + qindex;
1866 
1867 	/* RED */
1868 	if (subport->cman == RTE_SCHED_CMAN_RED) {
1869 		struct rte_red_config *red_cfg;
1870 		struct rte_red *red;
1871 		enum rte_color color;
1872 
1873 		color = rte_sched_port_pkt_read_color(pkt);
1874 		red_cfg = &subport->red_config[tc_index][color];
1875 
1876 		if ((red_cfg->min_th | red_cfg->max_th) == 0)
1877 			return 0;
1878 
1879 		red = &qe->red;
1880 
1881 		return rte_red_enqueue(red_cfg, red, qlen, port->time);
1882 	}
1883 
1884 	/* PIE */
1885 	struct rte_pie_config *pie_cfg = &subport->pie_config[tc_index];
1886 	struct rte_pie *pie = &qe->pie;
1887 
1888 	return rte_pie_enqueue(pie_cfg, pie, qlen, pkt->pkt_len, port->time_cpu_cycles);
1889 }
1890 
1891 static inline void
1892 rte_sched_port_red_set_queue_empty_timestamp(struct rte_sched_port *port,
1893 	struct rte_sched_subport *subport, uint32_t qindex)
1894 {
1895 	if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_RED) {
1896 		struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1897 		struct rte_red *red = &qe->red;
1898 
1899 		rte_red_mark_queue_empty(red, port->time);
1900 	}
1901 }
1902 
1903 static inline void
1904 rte_sched_port_pie_dequeue(struct rte_sched_subport *subport,
1905 	uint32_t qindex, uint32_t pkt_len, uint64_t time)
{
1906 	if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_PIE) {
1907 		struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1908 		struct rte_pie *pie = &qe->pie;
1909 
1910 		/* Update queue length */
1911 		pie->qlen -= 1;
1912 		pie->qlen_bytes -= pkt_len;
1913 
1914 		rte_pie_dequeue(pie, pkt_len, time);
1915 	}
1916 }
1917 
1918 #ifdef RTE_SCHED_DEBUG
1919 
1920 static inline void
1921 debug_check_queue_slab(struct rte_sched_subport *subport, uint32_t bmp_pos,
1922 		       uint64_t bmp_slab)
1923 {
1924 	uint64_t mask;
1925 	uint32_t i, panic;
1926 
1927 	if (bmp_slab == 0)
1928 		rte_panic("Empty slab at position %u\n", bmp_pos);
1929 
1930 	panic = 0;
1931 	for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
1932 		if (mask & bmp_slab) {
1933 			if (rte_sched_port_queue_is_empty(subport, bmp_pos + i)) {
1934 				printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
1935 				panic = 1;
1936 			}
1937 		}
1938 	}
1939 
1940 	if (panic)
1941 		rte_panic("Empty queues in slab 0x%" PRIx64 " starting at position %u\n",
1942 			bmp_slab, bmp_pos);
1943 }
1944 
1945 #endif /* RTE_SCHED_DEBUG */
1946 
1947 static inline struct rte_sched_subport *
1948 rte_sched_port_subport(struct rte_sched_port *port,
1949 	struct rte_mbuf *pkt)
1950 {
1951 	uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1952 	uint32_t subport_id = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1953 
1954 	return port->subports[subport_id];
1955 }
1956 
1957 static inline uint32_t
1958 rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport,
1959 	struct rte_mbuf *pkt, uint32_t subport_qmask)
1960 {
1961 	struct rte_sched_queue *q;
1962 	struct rte_sched_queue_extra *qe;
1963 	uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
1964 	uint32_t subport_queue_id = subport_qmask & qindex;
1965 
1966 	q = subport->queue + subport_queue_id;
1967 	rte_prefetch0(q);
1968 	qe = subport->queue_extra + subport_queue_id;
1969 	rte_prefetch0(qe);
1970 
1971 	return subport_queue_id;
1972 }
1973 
1974 static inline void
1975 rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
1976 	struct rte_sched_subport *subport,
1977 	uint32_t qindex,
1978 	struct rte_mbuf **qbase)
1979 {
1980 	struct rte_sched_queue *q;
1981 	struct rte_mbuf **q_qw;
1982 	uint16_t qsize;
1983 
1984 	q = subport->queue + qindex;
1985 	qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
1986 	q_qw = qbase + (q->qw & (qsize - 1));
1987 
1988 	rte_prefetch0(q_qw);
1989 	rte_bitmap_prefetch0(subport->bmp, qindex);
1990 }
1991 
1992 static inline int
1993 rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
1994 	struct rte_sched_subport *subport,
1995 	uint32_t qindex,
1996 	struct rte_mbuf **qbase,
1997 	struct rte_mbuf *pkt)
1998 {
1999 	struct rte_sched_queue *q;
2000 	uint16_t qsize;
2001 	uint16_t qlen;
2002 
2003 	q = subport->queue + qindex;
2004 	qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2005 	qlen = q->qw - q->qr;
2006 
2007 	/* Drop the packet (and update drop stats) on queue full or CMAN drop */
2008 	if (unlikely(rte_sched_port_cman_drop(port, subport, pkt, qindex, qlen) ||
2009 		     (qlen >= qsize))) {
2010 		rte_sched_port_update_subport_stats_on_drop(port, subport,
2011 			qindex, pkt, qlen < qsize);
2012 		rte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt,
2013 			qlen < qsize);
2014 		rte_pktmbuf_free(pkt);
2015 		return 0;
2016 	}
2017 
2018 	/* Enqueue packet */
2019 	qbase[q->qw & (qsize - 1)] = pkt;
2020 	q->qw++;
2021 
2022 	/* Activate queue in the subport bitmap */
2023 	rte_bitmap_set(subport->bmp, qindex);
2024 
2025 	/* Statistics */
2026 	rte_sched_port_update_subport_stats(port, subport, qindex, pkt);
2027 	rte_sched_port_update_queue_stats(subport, qindex, pkt);
2028 
2029 	return 1;
2030 }
2031 
2032 
2033 /*
2034  * The enqueue function implements a 4-level pipeline with each stage
2035  * processing two different packets. The purpose of using a pipeline
2036  * is to hide the latency of prefetching the data structures. The
2037  * naming convention is presented in the diagram below:
2038  *
2039  *   p00  _______   p10  _______   p20  _______   p30  _______
2040  * ----->|       |----->|       |----->|       |----->|       |----->
2041  *       |   0   |      |   1   |      |   2   |      |   3   |
2042  * ----->|_______|----->|_______|----->|_______|----->|_______|----->
2043  *   p01            p11            p21            p31
2044  */
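/*
 * pXY above denotes the packet handled by pipeline stage X in slot Y (two
 * packets per stage). With fewer than 6 input packets the pipeline cannot be
 * filled, so the function falls back to the simple per-packet loop at the
 * top of its body.
 */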
2045 int
2046 rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
2047 		       uint32_t n_pkts)
2048 {
2049 	struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
2050 		*pkt30, *pkt31, *pkt_last;
2051 	struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base,
2052 		**q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
2053 	struct rte_sched_subport *subport00, *subport01, *subport10, *subport11,
2054 		*subport20, *subport21, *subport30, *subport31, *subport_last;
2055 	uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
2056 	uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
2057 	uint32_t subport_qmask;
2058 	uint32_t result, i;
2059 
2060 	result = 0;
2061 	subport_qmask = (1 << (port->n_pipes_per_subport_log2 + 4)) - 1;
2062 
2063 	/*
2064 	 * Less than 6 input packets available, which is not enough to
2065 	 * feed the pipeline
2066 	 */
2067 	if (unlikely(n_pkts < 6)) {
2068 		struct rte_sched_subport *subports[5];
2069 		struct rte_mbuf **q_base[5];
2070 		uint32_t q[5];
2071 
2072 		/* Prefetch the mbuf structure of each packet */
2073 		for (i = 0; i < n_pkts; i++)
2074 			rte_prefetch0(pkts[i]);
2075 
2076 		/* Prefetch the subport structure for each packet */
2077 		for (i = 0; i < n_pkts; i++)
2078 			subports[i] = rte_sched_port_subport(port, pkts[i]);
2079 
2080 		/* Prefetch the queue structure for each queue */
2081 		for (i = 0; i < n_pkts; i++)
2082 			q[i] = rte_sched_port_enqueue_qptrs_prefetch0(subports[i],
2083 					pkts[i], subport_qmask);
2084 
2085 		/* Prefetch the write pointer location of each queue */
2086 		for (i = 0; i < n_pkts; i++) {
2087 			q_base[i] = rte_sched_subport_pipe_qbase(subports[i], q[i]);
2088 			rte_sched_port_enqueue_qwa_prefetch0(port, subports[i],
2089 				q[i], q_base[i]);
2090 		}
2091 
2092 		/* Write each packet to its queue */
2093 		for (i = 0; i < n_pkts; i++)
2094 			result += rte_sched_port_enqueue_qwa(port, subports[i],
2095 						q[i], q_base[i], pkts[i]);
2096 
2097 		return result;
2098 	}
2099 
2100 	/* Feed the first 3 stages of the pipeline (6 packets needed) */
2101 	pkt20 = pkts[0];
2102 	pkt21 = pkts[1];
2103 	rte_prefetch0(pkt20);
2104 	rte_prefetch0(pkt21);
2105 
2106 	pkt10 = pkts[2];
2107 	pkt11 = pkts[3];
2108 	rte_prefetch0(pkt10);
2109 	rte_prefetch0(pkt11);
2110 
2111 	subport20 = rte_sched_port_subport(port, pkt20);
2112 	subport21 = rte_sched_port_subport(port, pkt21);
2113 	q20 = rte_sched_port_enqueue_qptrs_prefetch0(subport20,
2114 			pkt20, subport_qmask);
2115 	q21 = rte_sched_port_enqueue_qptrs_prefetch0(subport21,
2116 			pkt21, subport_qmask);
2117 
2118 	pkt00 = pkts[4];
2119 	pkt01 = pkts[5];
2120 	rte_prefetch0(pkt00);
2121 	rte_prefetch0(pkt01);
2122 
2123 	subport10 = rte_sched_port_subport(port, pkt10);
2124 	subport11 = rte_sched_port_subport(port, pkt11);
2125 	q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2126 			pkt10, subport_qmask);
2127 	q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2128 			pkt11, subport_qmask);
2129 
2130 	q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2131 	q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2132 	rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2133 	rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2134 
2135 	/* Run the pipeline */
2136 	for (i = 6; i < (n_pkts & (~1)); i += 2) {
2137 		/* Propagate stage inputs */
2138 		pkt30 = pkt20;
2139 		pkt31 = pkt21;
2140 		pkt20 = pkt10;
2141 		pkt21 = pkt11;
2142 		pkt10 = pkt00;
2143 		pkt11 = pkt01;
2144 		q30 = q20;
2145 		q31 = q21;
2146 		q20 = q10;
2147 		q21 = q11;
2148 		subport30 = subport20;
2149 		subport31 = subport21;
2150 		subport20 = subport10;
2151 		subport21 = subport11;
2152 		q30_base = q20_base;
2153 		q31_base = q21_base;
2154 
2155 		/* Stage 0: Get packets in */
2156 		pkt00 = pkts[i];
2157 		pkt01 = pkts[i + 1];
2158 		rte_prefetch0(pkt00);
2159 		rte_prefetch0(pkt01);
2160 
2161 		/* Stage 1: Prefetch subport and queue structure storing queue pointers */
2162 		subport10 = rte_sched_port_subport(port, pkt10);
2163 		subport11 = rte_sched_port_subport(port, pkt11);
2164 		q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2165 				pkt10, subport_qmask);
2166 		q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2167 				pkt11, subport_qmask);
2168 
2169 		/* Stage 2: Prefetch queue write location */
2170 		q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2171 		q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2172 		rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2173 		rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2174 
2175 		/* Stage 3: Write packet to queue and activate queue */
2176 		r30 = rte_sched_port_enqueue_qwa(port, subport30,
2177 				q30, q30_base, pkt30);
2178 		r31 = rte_sched_port_enqueue_qwa(port, subport31,
2179 				q31, q31_base, pkt31);
2180 		result += r30 + r31;
2181 	}
2182 
2183 	/*
2184 	 * Drain the pipeline (exactly 6 packets).
2185 	 * Handle the last packet in the case
2186 	 * of an odd number of input packets.
2187 	 */
2188 	pkt_last = pkts[n_pkts - 1];
2189 	rte_prefetch0(pkt_last);
2190 
2191 	subport00 = rte_sched_port_subport(port, pkt00);
2192 	subport01 = rte_sched_port_subport(port, pkt01);
2193 	q00 = rte_sched_port_enqueue_qptrs_prefetch0(subport00,
2194 			pkt00, subport_qmask);
2195 	q01 = rte_sched_port_enqueue_qptrs_prefetch0(subport01,
2196 			pkt01, subport_qmask);
2197 
2198 	q10_base = rte_sched_subport_pipe_qbase(subport10, q10);
2199 	q11_base = rte_sched_subport_pipe_qbase(subport11, q11);
2200 	rte_sched_port_enqueue_qwa_prefetch0(port, subport10, q10, q10_base);
2201 	rte_sched_port_enqueue_qwa_prefetch0(port, subport11, q11, q11_base);
2202 
2203 	r20 = rte_sched_port_enqueue_qwa(port, subport20,
2204 			q20, q20_base, pkt20);
2205 	r21 = rte_sched_port_enqueue_qwa(port, subport21,
2206 			q21, q21_base, pkt21);
2207 	result += r20 + r21;
2208 
2209 	subport_last = rte_sched_port_subport(port, pkt_last);
2210 	q_last = rte_sched_port_enqueue_qptrs_prefetch0(subport_last,
2211 				pkt_last, subport_qmask);
2212 
2213 	q00_base = rte_sched_subport_pipe_qbase(subport00, q00);
2214 	q01_base = rte_sched_subport_pipe_qbase(subport01, q01);
2215 	rte_sched_port_enqueue_qwa_prefetch0(port, subport00, q00, q00_base);
2216 	rte_sched_port_enqueue_qwa_prefetch0(port, subport01, q01, q01_base);
2217 
2218 	r10 = rte_sched_port_enqueue_qwa(port, subport10, q10,
2219 			q10_base, pkt10);
2220 	r11 = rte_sched_port_enqueue_qwa(port, subport11, q11,
2221 			q11_base, pkt11);
2222 	result += r10 + r11;
2223 
2224 	q_last_base = rte_sched_subport_pipe_qbase(subport_last, q_last);
2225 	rte_sched_port_enqueue_qwa_prefetch0(port, subport_last,
2226 		q_last, q_last_base);
2227 
2228 	r00 = rte_sched_port_enqueue_qwa(port, subport00, q00,
2229 			q00_base, pkt00);
2230 	r01 = rte_sched_port_enqueue_qwa(port, subport01, q01,
2231 			q01_base, pkt01);
2232 	result += r00 + r01;
2233 
2234 	if (n_pkts & 1) {
2235 		r_last = rte_sched_port_enqueue_qwa(port, subport_last,
2236 					q_last,	q_last_base, pkt_last);
2237 		result += r_last;
2238 	}
2239 
2240 	return result;
2241 }
2242 
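/*
 * Best-effort (BE) oversubscription watermark adaptation, evaluated once per
 * subport TC window. When oversubscription is inactive (tc_ov == 0) the
 * watermark simply returns to its maximum. Otherwise it is nudged down by
 * ~1/128 of its value whenever the BE class consumed more than its
 * per-period budget reduced by what the higher priority classes consumed
 * (less one MTU of slack), and nudged up by ~1/128 otherwise, clamped to
 * [tc_ov_wm_min, tc_ov_wm_max]. Illustrative example: tc_ov_wm = 12800
 * moves in steps of 12800 >> 7 = 100 credits per window.
 */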
2243 static inline uint64_t
2244 grinder_tc_ov_credits_update(struct rte_sched_port *port,
2245 	struct rte_sched_subport *subport, uint32_t pos)
2246 {
2247 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2248 	struct rte_sched_subport_profile *sp = grinder->subport_params;
2249 	uint64_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2250 	uint64_t tc_consumption = 0, tc_ov_consumption_max;
2251 	uint64_t tc_ov_wm = subport->tc_ov_wm;
2252 	uint32_t i;
2253 
2254 	if (subport->tc_ov == 0)
2255 		return subport->tc_ov_wm_max;
2256 
2257 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2258 		tc_ov_consumption[i] = sp->tc_credits_per_period[i]
2259 					-  subport->tc_credits[i];
2260 		tc_consumption += tc_ov_consumption[i];
2261 	}
2262 
2263 	tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] =
2264 	sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2265 		subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE];
2266 
2267 	tc_ov_consumption_max =
2268 	sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2269 			tc_consumption;
2270 
2271 	if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] >
2272 		(tc_ov_consumption_max - port->mtu)) {
2273 		tc_ov_wm  -= tc_ov_wm >> 7;
2274 		if (tc_ov_wm < subport->tc_ov_wm_min)
2275 			tc_ov_wm = subport->tc_ov_wm_min;
2276 
2277 		return tc_ov_wm;
2278 	}
2279 
2280 	tc_ov_wm += (tc_ov_wm >> 7) + 1;
2281 	if (tc_ov_wm > subport->tc_ov_wm_max)
2282 		tc_ov_wm = subport->tc_ov_wm_max;
2283 
2284 	return tc_ov_wm;
2285 }
2286 
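/*
 * Token bucket refill (also used by the tc_ov variant below): credits are
 * added only for the whole number of tb_period intervals elapsed since the
 * last update, so tb_time advances by n_periods * tb_period rather than to
 * port->time and the fractional remainder is carried into the next refill.
 * Illustrative example: tb_period = 10, tb_credits_per_period = 8 and an
 * elapsed time of 35 add 3 * 8 = 24 credits and leave 5 time units pending.
 */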
2287 static inline void
2288 grinder_credits_update(struct rte_sched_port *port,
2289 	struct rte_sched_subport *subport, uint32_t pos)
2290 {
2291 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2292 	struct rte_sched_pipe *pipe = grinder->pipe;
2293 	struct rte_sched_pipe_profile *params = grinder->pipe_params;
2294 	struct rte_sched_subport_profile *sp = grinder->subport_params;
2295 	uint64_t n_periods;
2296 	uint32_t i;
2297 
2298 	/* Subport TB */
2299 	n_periods = (port->time - subport->tb_time) / sp->tb_period;
2300 	subport->tb_credits += n_periods * sp->tb_credits_per_period;
2301 	subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
2302 	subport->tb_time += n_periods * sp->tb_period;
2303 
2304 	/* Pipe TB */
2305 	n_periods = (port->time - pipe->tb_time) / params->tb_period;
2306 	pipe->tb_credits += n_periods * params->tb_credits_per_period;
2307 	pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2308 	pipe->tb_time += n_periods * params->tb_period;
2309 
2310 	/* Subport TCs */
2311 	if (unlikely(port->time >= subport->tc_time)) {
2312 		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2313 			subport->tc_credits[i] = sp->tc_credits_per_period[i];
2314 
2315 		subport->tc_time = port->time + sp->tc_period;
2316 	}
2317 
2318 	/* Pipe TCs */
2319 	if (unlikely(port->time >= pipe->tc_time)) {
2320 		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2321 			pipe->tc_credits[i] = params->tc_credits_per_period[i];
2322 		pipe->tc_time = port->time + params->tc_period;
2323 	}
2324 }
2325 
2326 static inline void
2327 grinder_credits_update_with_tc_ov(struct rte_sched_port *port,
2328 	struct rte_sched_subport *subport, uint32_t pos)
2329 {
2330 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2331 	struct rte_sched_pipe *pipe = grinder->pipe;
2332 	struct rte_sched_pipe_profile *params = grinder->pipe_params;
2333 	struct rte_sched_subport_profile *sp = grinder->subport_params;
2334 	uint64_t n_periods;
2335 	uint32_t i;
2336 
2337 	/* Subport TB */
2338 	n_periods = (port->time - subport->tb_time) / sp->tb_period;
2339 	subport->tb_credits += n_periods * sp->tb_credits_per_period;
2340 	subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
2341 	subport->tb_time += n_periods * sp->tb_period;
2342 
2343 	/* Pipe TB */
2344 	n_periods = (port->time - pipe->tb_time) / params->tb_period;
2345 	pipe->tb_credits += n_periods * params->tb_credits_per_period;
2346 	pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2347 	pipe->tb_time += n_periods * params->tb_period;
2348 
2349 	/* Subport TCs */
2350 	if (unlikely(port->time >= subport->tc_time)) {
2351 		subport->tc_ov_wm =
2352 			grinder_tc_ov_credits_update(port, subport, pos);
2353 
2354 		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2355 			subport->tc_credits[i] = sp->tc_credits_per_period[i];
2356 
2357 		subport->tc_time = port->time + sp->tc_period;
2358 		subport->tc_ov_period_id++;
2359 	}
2360 
2361 	/* Pipe TCs */
2362 	if (unlikely(port->time >= pipe->tc_time)) {
2363 		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2364 			pipe->tc_credits[i] = params->tc_credits_per_period[i];
2365 		pipe->tc_time = port->time + params->tc_period;
2366 	}
2367 
2368 	/* Pipe TCs - Oversubscription */
2369 	if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
2370 		pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;
2371 
2372 		pipe->tc_ov_period_id = subport->tc_ov_period_id;
2373 	}
2374 }
2375 
2376 static inline int
2377 grinder_credits_check(struct rte_sched_port *port,
2378 	struct rte_sched_subport *subport, uint32_t pos)
2379 {
2380 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2381 	struct rte_sched_pipe *pipe = grinder->pipe;
2382 	struct rte_mbuf *pkt = grinder->pkt;
2383 	uint32_t tc_index = grinder->tc_index;
2384 	uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2385 	uint64_t subport_tb_credits = subport->tb_credits;
2386 	uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2387 	uint64_t pipe_tb_credits = pipe->tb_credits;
2388 	uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2389 	int enough_credits;
2390 
2391 	/* Check pipe and subport credits */
2392 	enough_credits = (pkt_len <= subport_tb_credits) &&
2393 		(pkt_len <= subport_tc_credits) &&
2394 		(pkt_len <= pipe_tb_credits) &&
2395 		(pkt_len <= pipe_tc_credits);
2396 
2397 	if (!enough_credits)
2398 		return 0;
2399 
2400 	/* Update pipe and subport credits */
2401 	subport->tb_credits -= pkt_len;
2402 	subport->tc_credits[tc_index] -= pkt_len;
2403 	pipe->tb_credits -= pkt_len;
2404 	pipe->tc_credits[tc_index] -= pkt_len;
2405 
2406 	return 1;
2407 }
2408 
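/*
 * Credit check for the oversubscription-enabled case. The two mask arrays
 * make the extra BE check branch-free: for non-BE traffic classes
 * pipe_tc_ov_mask1[] is all-ones (the pkt_len <= pipe_tc_ov_credits test
 * always passes) and pipe_tc_ov_mask2[] is zero (tc_ov_credits is left
 * untouched); only for the BE class do the masks expose the real
 * tc_ov_credits value and enable its decrement.
 */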
2409 static inline int
2410 grinder_credits_check_with_tc_ov(struct rte_sched_port *port,
2411 	struct rte_sched_subport *subport, uint32_t pos)
2412 {
2413 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2414 	struct rte_sched_pipe *pipe = grinder->pipe;
2415 	struct rte_mbuf *pkt = grinder->pkt;
2416 	uint32_t tc_index = grinder->tc_index;
2417 	uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2418 	uint64_t subport_tb_credits = subport->tb_credits;
2419 	uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2420 	uint64_t pipe_tb_credits = pipe->tb_credits;
2421 	uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2422 	uint64_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2423 	uint64_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0};
2424 	uint64_t pipe_tc_ov_credits;
2425 	uint32_t i;
2426 	int enough_credits;
2427 
2428 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2429 		pipe_tc_ov_mask1[i] = ~0LLU;
2430 
2431 	pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits;
2432 	pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = ~0LLU;
2433 	pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
2434 
2435 	/* Check pipe and subport credits */
2436 	enough_credits = (pkt_len <= subport_tb_credits) &&
2437 		(pkt_len <= subport_tc_credits) &&
2438 		(pkt_len <= pipe_tb_credits) &&
2439 		(pkt_len <= pipe_tc_credits) &&
2440 		(pkt_len <= pipe_tc_ov_credits);
2441 
2442 	if (!enough_credits)
2443 		return 0;
2444 
2445 	/* Update pipe and subport credits */
2446 	subport->tb_credits -= pkt_len;
2447 	subport->tc_credits[tc_index] -= pkt_len;
2448 	pipe->tb_credits -= pkt_len;
2449 	pipe->tc_credits[tc_index] -= pkt_len;
2450 	pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;
2451 
2452 	return 1;
2453 }
2454 
2455 
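/*
 * Note on the WRR token update below: be_tc_active is ~0 when the current
 * traffic class is best-effort and 0 otherwise, so the token increment is
 * masked out without a branch for the strict priority classes.
 */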
2456 static inline int
2457 grinder_schedule(struct rte_sched_port *port,
2458 	struct rte_sched_subport *subport, uint32_t pos)
2459 {
2460 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2461 	struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
2462 	uint32_t qindex = grinder->qindex[grinder->qpos];
2463 	struct rte_mbuf *pkt = grinder->pkt;
2464 	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
2465 	uint32_t be_tc_active;
2466 
2467 	if (subport->tc_ov_enabled) {
2468 		if (!grinder_credits_check_with_tc_ov(port, subport, pos))
2469 			return 0;
2470 	} else {
2471 		if (!grinder_credits_check(port, subport, pos))
2472 			return 0;
2473 	}
2474 
2475 	/* Advance port time */
2476 	port->time += pkt_len;
2477 
2478 	/* Send packet */
2479 	port->pkts_out[port->n_pkts_out++] = pkt;
2480 	queue->qr++;
2481 
2482 	be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE) ? ~0x0 : 0x0;
2483 	grinder->wrr_tokens[grinder->qpos] +=
2484 		(pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active;
2485 
2486 	if (queue->qr == queue->qw) {
2487 		rte_bitmap_clear(subport->bmp, qindex);
2488 		grinder->qmask &= ~(1 << grinder->qpos);
2489 		if (be_tc_active)
2490 			grinder->wrr_mask[grinder->qpos] = 0;
2491 
2492 		rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex);
2493 	}
2494 
2495 	rte_sched_port_pie_dequeue(subport, qindex, pkt_len, port->time_cpu_cycles);
2496 
2497 	/* Reset pipe loop detection */
2498 	subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2499 	grinder->productive = 1;
2500 
2501 	return 1;
2502 }
2503 
2504 static inline int
2505 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2506 {
2507 	uint32_t i;
2508 
2509 	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
2510 		if (subport->grinder_base_bmp_pos[i] == base_pipe)
2511 			return 1;
2512 	}
2513 
2514 	return 0;
2515 }
2516 
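/*
 * The active-queue bitmap is scanned one 64-bit slab at a time. With 16
 * queues per pipe, each slab covers 4 consecutive pipes, so the slab is
 * split into four 16-bit queue masks and only the non-empty ones are pushed
 * into the grinder's pipe cache.
 */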
2517 static inline void
2518 grinder_pcache_populate(struct rte_sched_subport *subport,
2519 	uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
2520 {
2521 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2522 	uint16_t w[4];
2523 
2524 	grinder->pcache_w = 0;
2525 	grinder->pcache_r = 0;
2526 
2527 	w[0] = (uint16_t) bmp_slab;
2528 	w[1] = (uint16_t) (bmp_slab >> 16);
2529 	w[2] = (uint16_t) (bmp_slab >> 32);
2530 	w[3] = (uint16_t) (bmp_slab >> 48);
2531 
2532 	grinder->pcache_qmask[grinder->pcache_w] = w[0];
2533 	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
2534 	grinder->pcache_w += (w[0] != 0);
2535 
2536 	grinder->pcache_qmask[grinder->pcache_w] = w[1];
2537 	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
2538 	grinder->pcache_w += (w[1] != 0);
2539 
2540 	grinder->pcache_qmask[grinder->pcache_w] = w[2];
2541 	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
2542 	grinder->pcache_w += (w[2] != 0);
2543 
2544 	grinder->pcache_qmask[grinder->pcache_w] = w[3];
2545 	grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
2546 	grinder->pcache_w += (w[3] != 0);
2547 }
2548 
2549 static inline void
2550 grinder_tccache_populate(struct rte_sched_subport *subport,
2551 	uint32_t pos, uint32_t qindex, uint16_t qmask)
2552 {
2553 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2554 	uint8_t b, i;
2555 
2556 	grinder->tccache_w = 0;
2557 	grinder->tccache_r = 0;
2558 
2559 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2560 		b = (uint8_t) ((qmask >> i) & 0x1);
2561 		grinder->tccache_qmask[grinder->tccache_w] = b;
2562 		grinder->tccache_qindex[grinder->tccache_w] = qindex + i;
2563 		grinder->tccache_w += (b != 0);
2564 	}
2565 
2566 	b = (uint8_t) (qmask >> (RTE_SCHED_TRAFFIC_CLASS_BE));
2567 	grinder->tccache_qmask[grinder->tccache_w] = b;
2568 	grinder->tccache_qindex[grinder->tccache_w] = qindex +
2569 		RTE_SCHED_TRAFFIC_CLASS_BE;
2570 	grinder->tccache_w += (b != 0);
2571 }
2572 
2573 static inline int
2574 grinder_next_tc(struct rte_sched_port *port,
2575 	struct rte_sched_subport *subport, uint32_t pos)
2576 {
2577 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2578 	struct rte_mbuf **qbase;
2579 	uint32_t qindex;
2580 	uint16_t qsize;
2581 
2582 	if (grinder->tccache_r == grinder->tccache_w)
2583 		return 0;
2584 
2585 	qindex = grinder->tccache_qindex[grinder->tccache_r];
2586 	qbase = rte_sched_subport_pipe_qbase(subport, qindex);
2587 	qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2588 
2589 	grinder->tc_index = rte_sched_port_pipe_tc(port, qindex);
2590 	grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
2591 	grinder->qsize = qsize;
2592 
2593 	if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2594 		grinder->queue[0] = subport->queue + qindex;
2595 		grinder->qbase[0] = qbase;
2596 		grinder->qindex[0] = qindex;
2597 		grinder->tccache_r++;
2598 
2599 		return 1;
2600 	}
2601 
2602 	grinder->queue[0] = subport->queue + qindex;
2603 	grinder->queue[1] = subport->queue + qindex + 1;
2604 	grinder->queue[2] = subport->queue + qindex + 2;
2605 	grinder->queue[3] = subport->queue + qindex + 3;
2606 
2607 	grinder->qbase[0] = qbase;
2608 	grinder->qbase[1] = qbase + qsize;
2609 	grinder->qbase[2] = qbase + 2 * qsize;
2610 	grinder->qbase[3] = qbase + 3 * qsize;
2611 
2612 	grinder->qindex[0] = qindex;
2613 	grinder->qindex[1] = qindex + 1;
2614 	grinder->qindex[2] = qindex + 2;
2615 	grinder->qindex[3] = qindex + 3;
2616 
2617 	grinder->tccache_r++;
2618 	return 1;
2619 }
2620 
2621 static inline int
2622 grinder_next_pipe(struct rte_sched_port *port,
2623 	struct rte_sched_subport *subport, uint32_t pos)
2624 {
2625 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2626 	uint32_t pipe_qindex;
2627 	uint16_t pipe_qmask;
2628 
2629 	if (grinder->pcache_r < grinder->pcache_w) {
2630 		pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
2631 		pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
2632 		grinder->pcache_r++;
2633 	} else {
2634 		uint64_t bmp_slab = 0;
2635 		uint32_t bmp_pos = 0;
2636 
2637 		/* Get another non-empty pipe group */
2638 		if (unlikely(rte_bitmap_scan(subport->bmp, &bmp_pos, &bmp_slab) <= 0))
2639 			return 0;
2640 
2641 #ifdef RTE_SCHED_DEBUG
2642 		debug_check_queue_slab(subport, bmp_pos, bmp_slab);
2643 #endif
2644 
2645 		/* Return if pipe group already in one of the other grinders */
2646 		subport->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
2647 		if (unlikely(grinder_pipe_exists(subport, bmp_pos)))
2648 			return 0;
2649 
2650 		subport->grinder_base_bmp_pos[pos] = bmp_pos;
2651 
2652 		/* Install new pipe group into grinder's pipe cache */
2653 		grinder_pcache_populate(subport, pos, bmp_pos, bmp_slab);
2654 
2655 		pipe_qmask = grinder->pcache_qmask[0];
2656 		pipe_qindex = grinder->pcache_qindex[0];
2657 		grinder->pcache_r = 1;
2658 	}
2659 
2660 	/* Install new pipe in the grinder */
2661 	grinder->pindex = pipe_qindex >> 4;
2662 	grinder->subport = subport;
2663 	grinder->pipe = subport->pipe + grinder->pindex;
2664 	grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
2665 	grinder->productive = 0;
2666 
2667 	grinder_tccache_populate(subport, pos, pipe_qindex, pipe_qmask);
2668 	grinder_next_tc(port, subport, pos);
2669 
2670 	/* Check for pipe exhaustion */
2671 	if (grinder->pindex == subport->pipe_loop) {
2672 		subport->pipe_exhaustion = 1;
2673 		subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2674 	}
2675 
2676 	return 1;
2677 }
2678 
2679 
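/*
 * Best-effort WRR state handling. Queue weights are expressed as per-byte
 * costs: on every transmission the served queue's token count grows by
 * pkt_len * wrr_cost, and grinder_wrr() selects the queue with the lowest
 * token count (rte_min_pos_4_u16) after subtracting that minimum from all
 * four counters. Inactive queues have a zero wrr_mask, which forces their
 * tokens to 0xFFFF so they are never selected. Tokens are stored back into
 * the pipe with RTE_SCHED_WRR_SHIFT bits of precision dropped.
 */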
2680 static inline void
2681 grinder_wrr_load(struct rte_sched_subport *subport, uint32_t pos)
2682 {
2683 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2684 	struct rte_sched_pipe *pipe = grinder->pipe;
2685 	struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
2686 	uint32_t qmask = grinder->qmask;
2687 
2688 	grinder->wrr_tokens[0] =
2689 		((uint16_t) pipe->wrr_tokens[0]) << RTE_SCHED_WRR_SHIFT;
2690 	grinder->wrr_tokens[1] =
2691 		((uint16_t) pipe->wrr_tokens[1]) << RTE_SCHED_WRR_SHIFT;
2692 	grinder->wrr_tokens[2] =
2693 		((uint16_t) pipe->wrr_tokens[2]) << RTE_SCHED_WRR_SHIFT;
2694 	grinder->wrr_tokens[3] =
2695 		((uint16_t) pipe->wrr_tokens[3]) << RTE_SCHED_WRR_SHIFT;
2696 
2697 	grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
2698 	grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
2699 	grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
2700 	grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;
2701 
2702 	grinder->wrr_cost[0] = pipe_params->wrr_cost[0];
2703 	grinder->wrr_cost[1] = pipe_params->wrr_cost[1];
2704 	grinder->wrr_cost[2] = pipe_params->wrr_cost[2];
2705 	grinder->wrr_cost[3] = pipe_params->wrr_cost[3];
2706 }
2707 
2708 static inline void
2709 grinder_wrr_store(struct rte_sched_subport *subport, uint32_t pos)
2710 {
2711 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2712 	struct rte_sched_pipe *pipe = grinder->pipe;
2713 
2714 	pipe->wrr_tokens[0] =
2715 			(grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >>
2716 				RTE_SCHED_WRR_SHIFT;
2717 	pipe->wrr_tokens[1] =
2718 			(grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >>
2719 				RTE_SCHED_WRR_SHIFT;
2720 	pipe->wrr_tokens[2] =
2721 			(grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >>
2722 				RTE_SCHED_WRR_SHIFT;
2723 	pipe->wrr_tokens[3] =
2724 			(grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >>
2725 				RTE_SCHED_WRR_SHIFT;
2726 }
2727 
2728 static inline void
2729 grinder_wrr(struct rte_sched_subport *subport, uint32_t pos)
2730 {
2731 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2732 	uint16_t wrr_tokens_min;
2733 
2734 	grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
2735 	grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
2736 	grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
2737 	grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];
2738 
2739 	grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
2740 	wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];
2741 
2742 	grinder->wrr_tokens[0] -= wrr_tokens_min;
2743 	grinder->wrr_tokens[1] -= wrr_tokens_min;
2744 	grinder->wrr_tokens[2] -= wrr_tokens_min;
2745 	grinder->wrr_tokens[3] -= wrr_tokens_min;
2746 }
2747 
2748 
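/* Pipe eviction hook; expands to nothing in this implementation. */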
2749 #define grinder_evict(subport, pos)
2750 
2751 static inline void
2752 grinder_prefetch_pipe(struct rte_sched_subport *subport, uint32_t pos)
2753 {
2754 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2755 
2756 	rte_prefetch0(grinder->pipe);
2757 	rte_prefetch0(grinder->queue[0]);
2758 }
2759 
2760 static inline void
2761 grinder_prefetch_tc_queue_arrays(struct rte_sched_subport *subport, uint32_t pos)
2762 {
2763 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2764 	uint16_t qsize, qr[RTE_SCHED_MAX_QUEUES_PER_TC];
2765 
2766 	qsize = grinder->qsize;
2767 	grinder->qpos = 0;
2768 
2769 	if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2770 		qr[0] = grinder->queue[0]->qr & (qsize - 1);
2771 
2772 		rte_prefetch0(grinder->qbase[0] + qr[0]);
2773 		return;
2774 	}
2775 
2776 	qr[0] = grinder->queue[0]->qr & (qsize - 1);
2777 	qr[1] = grinder->queue[1]->qr & (qsize - 1);
2778 	qr[2] = grinder->queue[2]->qr & (qsize - 1);
2779 	qr[3] = grinder->queue[3]->qr & (qsize - 1);
2780 
2781 	rte_prefetch0(grinder->qbase[0] + qr[0]);
2782 	rte_prefetch0(grinder->qbase[1] + qr[1]);
2783 
2784 	grinder_wrr_load(subport, pos);
2785 	grinder_wrr(subport, pos);
2786 
2787 	rte_prefetch0(grinder->qbase[2] + qr[2]);
2788 	rte_prefetch0(grinder->qbase[3] + qr[3]);
2789 }
2790 
2791 static inline void
2792 grinder_prefetch_mbuf(struct rte_sched_subport *subport, uint32_t pos)
2793 {
2794 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2795 	uint32_t qpos = grinder->qpos;
2796 	struct rte_mbuf **qbase = grinder->qbase[qpos];
2797 	uint16_t qsize = grinder->qsize;
2798 	uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);
2799 
2800 	grinder->pkt = qbase[qr];
2801 	rte_prefetch0(grinder->pkt);
2802 
2803 	if (unlikely((qr & 0x7) == 7)) {
2804 		uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);
2805 
2806 		rte_prefetch0(qbase + qr_next);
2807 	}
2808 }
2809 
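/*
 * Grinder state machine. Each grinder steps through
 * PREFETCH_PIPE -> PREFETCH_TC_QUEUE_ARRAYS -> PREFETCH_MBUF -> READ_MBUF,
 * issuing prefetches one step ahead of the data it is about to touch; the
 * dequeue loop round-robins over RTE_SCHED_PORT_N_GRINDERS grinders so that
 * one grinder's prefetch latency is hidden behind work done on the others.
 * Only the READ_MBUF state can actually emit a packet.
 */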
2810 static inline uint32_t
2811 grinder_handle(struct rte_sched_port *port,
2812 	struct rte_sched_subport *subport, uint32_t pos)
2813 {
2814 	struct rte_sched_grinder *grinder = subport->grinder + pos;
2815 
2816 	switch (grinder->state) {
2817 	case e_GRINDER_PREFETCH_PIPE:
2818 	{
2819 		if (grinder_next_pipe(port, subport, pos)) {
2820 			grinder_prefetch_pipe(subport, pos);
2821 			subport->busy_grinders++;
2822 
2823 			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
2824 			return 0;
2825 		}
2826 
2827 		return 0;
2828 	}
2829 
2830 	case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
2831 	{
2832 		struct rte_sched_pipe *pipe = grinder->pipe;
2833 
2834 		grinder->pipe_params = subport->pipe_profiles + pipe->profile;
2835 		grinder->subport_params = port->subport_profiles +
2836 						subport->profile;
2837 
2838 		grinder_prefetch_tc_queue_arrays(subport, pos);
2839 
2840 		if (subport->tc_ov_enabled)
2841 			grinder_credits_update_with_tc_ov(port, subport, pos);
2842 		else
2843 			grinder_credits_update(port, subport, pos);
2844 
2845 		grinder->state = e_GRINDER_PREFETCH_MBUF;
2846 		return 0;
2847 	}
2848 
2849 	case e_GRINDER_PREFETCH_MBUF:
2850 	{
2851 		grinder_prefetch_mbuf(subport, pos);
2852 
2853 		grinder->state = e_GRINDER_READ_MBUF;
2854 		return 0;
2855 	}
2856 
2857 	case e_GRINDER_READ_MBUF:
2858 	{
2859 		uint32_t wrr_active, result = 0;
2860 
2861 		result = grinder_schedule(port, subport, pos);
2862 
2863 		wrr_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE);
2864 
2865 		/* Look for next packet within the same TC */
2866 		if (result && grinder->qmask) {
2867 			if (wrr_active)
2868 				grinder_wrr(subport, pos);
2869 
2870 			grinder_prefetch_mbuf(subport, pos);
2871 
2872 			return 1;
2873 		}
2874 
2875 		if (wrr_active)
2876 			grinder_wrr_store(subport, pos);
2877 
2878 		/* Look for another active TC within same pipe */
2879 		if (grinder_next_tc(port, subport, pos)) {
2880 			grinder_prefetch_tc_queue_arrays(subport, pos);
2881 
2882 			grinder->state = e_GRINDER_PREFETCH_MBUF;
2883 			return result;
2884 		}
2885 
2886 		if (grinder->productive == 0 &&
2887 		    subport->pipe_loop == RTE_SCHED_PIPE_INVALID)
2888 			subport->pipe_loop = grinder->pindex;
2889 
2890 		grinder_evict(subport, pos);
2891 
2892 		/* Look for another active pipe */
2893 		if (grinder_next_pipe(port, subport, pos)) {
2894 			grinder_prefetch_pipe(subport, pos);
2895 
2896 			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
2897 			return result;
2898 		}
2899 
2900 		/* No active pipe found */
2901 		subport->busy_grinders--;
2902 
2903 		grinder->state = e_GRINDER_PREFETCH_PIPE;
2904 		return result;
2905 	}
2906 
2907 	default:
2908 		rte_panic("Algorithmic error (invalid state)\n");
2909 		return 0;
2910 	}
2911 }
2912 
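/*
 * Re-synchronise the port byte clock with the CPU TSC. The elapsed cycle
 * count is converted to bytes using the pre-computed reciprocal divider,
 * scaled by RTE_SCHED_TIME_SHIFT to keep precision at low rates; the stored
 * cycle timestamp is then advanced only by the cycles corresponding to the
 * whole bytes accounted for, carrying the remainder forward. The initial
 * comparison resets the timestamp if the TSC value appears to have gone
 * backwards.
 */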
2913 static inline void
2914 rte_sched_port_time_resync(struct rte_sched_port *port)
2915 {
2916 	uint64_t cycles = rte_get_tsc_cycles();
2917 	uint64_t cycles_diff;
2918 	uint64_t bytes_diff;
2919 	uint32_t i;
2920 
2921 	if (cycles < port->time_cpu_cycles)
2922 		port->time_cpu_cycles = 0;
2923 
2924 	cycles_diff = cycles - port->time_cpu_cycles;
2925 	/* Compute elapsed time in bytes */
2926 	bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
2927 					   port->inv_cycles_per_byte);
2928 
2929 	/* Advance port time */
2930 	port->time_cpu_cycles +=
2931 		(bytes_diff * port->cycles_per_byte) >> RTE_SCHED_TIME_SHIFT;
2932 	port->time_cpu_bytes += bytes_diff;
2933 	if (port->time < port->time_cpu_bytes)
2934 		port->time = port->time_cpu_bytes;
2935 
2936 	/* Reset pipe loop detection */
2937 	for (i = 0; i < port->n_subports_per_port; i++)
2938 		port->subports[i]->pipe_loop = RTE_SCHED_PIPE_INVALID;
2939 }
2940 
2941 static inline int
2942 rte_sched_port_exceptions(struct rte_sched_subport *subport, int second_pass)
2943 {
2944 	int exceptions;
2945 
2946 	/* Check if any exception flag is set */
2947 	exceptions = (second_pass && subport->busy_grinders == 0) ||
2948 		(subport->pipe_exhaustion == 1);
2949 
2950 	/* Clear exception flags */
2951 	subport->pipe_exhaustion = 0;
2952 
2953 	return exceptions;
2954 }
2955 
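/*
 * Illustrative usage (not part of the library): a run-to-completion thread
 * typically wraps the enqueue and dequeue calls around NIC RX/TX bursts,
 * with packets classified (queue id written into the mbuf sched field)
 * before enqueue. Names such as sched, port_id and tx_queue are
 * application-side assumptions.
 *
 *	struct rte_mbuf *pkts[64];
 *	uint16_t nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 64);
 *	if (nb_rx > 0)
 *		rte_sched_port_enqueue(sched, pkts, nb_rx);
 *	uint32_t nb_tx = rte_sched_port_dequeue(sched, pkts, 32);
 *	if (nb_tx > 0)
 *		rte_eth_tx_burst(port_id, tx_queue, pkts, nb_tx);
 */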
2956 int
2957 rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
2958 {
2959 	struct rte_sched_subport *subport;
2960 	uint32_t subport_id = port->subport_id;
2961 	uint32_t i, n_subports = 0, count;
2962 
2963 	port->pkts_out = pkts;
2964 	port->n_pkts_out = 0;
2965 
2966 	rte_sched_port_time_resync(port);
2967 
2968 	/* Take each grinder one step further */
2969 	for (i = 0, count = 0; ; i++)  {
2970 		subport = port->subports[subport_id];
2971 
2972 		count += grinder_handle(port, subport,
2973 				i & (RTE_SCHED_PORT_N_GRINDERS - 1));
2974 
2975 		if (count == n_pkts) {
2976 			subport_id++;
2977 
2978 			if (subport_id == port->n_subports_per_port)
2979 				subport_id = 0;
2980 
2981 			port->subport_id = subport_id;
2982 			break;
2983 		}
2984 
2985 		if (rte_sched_port_exceptions(subport, i >= RTE_SCHED_PORT_N_GRINDERS)) {
2986 			i = 0;
2987 			subport_id++;
2988 			n_subports++;
2989 		}
2990 
2991 		if (subport_id == port->n_subports_per_port)
2992 			subport_id = 0;
2993 
2994 		if (n_subports == port->n_subports_per_port) {
2995 			port->subport_id = subport_id;
2996 			break;
2997 		}
2998 	}
2999 
3000 	return count;
3001 }
3002