/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

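/* Pack a shaper rate/burst config into the hardware PIR/CIR register
 * layout. The field positions follow the shifts below: rate mantissa
 * at bit 1, rate exponent at bit 9, rate divider exponent at bit 13
 * and burst mantissa at bit 29 on all models; only the burst exponent
 * position differs, bit 37 on CN9K and bit 44 otherwise (the CN9K
 * burst mantissa field is narrower, hence the different position).
 * Bit 0 is the shaper enable bit, OR-ed in by the callers.
 */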
static inline uint64_t
nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
{
	uint64_t regval;

	if (roc_model_is_cn9k()) {
		regval = (shaper->burst_exponent << 37);
		regval |= (shaper->burst_mantissa << 29);
		regval |= (shaper->div_exp << 13);
		regval |= (shaper->exponent << 9);
		regval |= (shaper->mantissa << 1);
		return regval;
	}

	regval = (shaper->burst_exponent << 44);
	regval |= (shaper->burst_mantissa << 29);
	regval |= (shaper->div_exp << 13);
	regval |= (shaper->exponent << 9);
	regval |= (shaper->mantissa << 1);
	return regval;
}

uint16_t
nix_tm_lvl2nix_tl1_root(uint32_t lvl)
{
	switch (lvl) {
	case ROC_TM_LVL_ROOT:
		return NIX_TXSCH_LVL_TL1;
	case ROC_TM_LVL_SCH1:
		return NIX_TXSCH_LVL_TL2;
	case ROC_TM_LVL_SCH2:
		return NIX_TXSCH_LVL_TL3;
	case ROC_TM_LVL_SCH3:
		return NIX_TXSCH_LVL_TL4;
	case ROC_TM_LVL_SCH4:
		return NIX_TXSCH_LVL_SMQ;
	default:
		return NIX_TXSCH_LVL_CNT;
	}
}

uint16_t
nix_tm_lvl2nix_tl2_root(uint32_t lvl)
{
	switch (lvl) {
	case ROC_TM_LVL_ROOT:
		return NIX_TXSCH_LVL_TL2;
	case ROC_TM_LVL_SCH1:
		return NIX_TXSCH_LVL_TL3;
	case ROC_TM_LVL_SCH2:
		return NIX_TXSCH_LVL_TL4;
	case ROC_TM_LVL_SCH3:
		return NIX_TXSCH_LVL_SMQ;
	default:
		return NIX_TXSCH_LVL_CNT;
	}
}

uint16_t
nix_tm_lvl2nix(struct nix *nix, uint32_t lvl)
{
	if (nix_tm_have_tl1_access(nix))
		return nix_tm_lvl2nix_tl1_root(lvl);
	else
		return nix_tm_lvl2nix_tl2_root(lvl);
}

static uint8_t
nix_tm_relchan_get(struct nix *nix)
{
	return nix->tx_chan_base & 0xff;
}

static int
nix_tm_find_prio_anchor(struct nix *nix, uint32_t node_id,
			enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *child_node;
	struct nix_tm_node_list *list;

	list = nix_tm_node_list(nix, tree);

	TAILQ_FOREACH(child_node, list, node) {
		if (!child_node->parent)
			continue;
		if (child_node->parent->id != node_id)
			continue;
		if (child_node->priority == child_node->parent->rr_prio)
			continue;
		return child_node->hw_id - child_node->priority;
	}
	return 0;
}

struct nix_tm_shaper_profile *
nix_tm_shaper_profile_search(struct nix *nix, uint32_t id)
{
	struct nix_tm_shaper_profile *profile;

	TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
		if (profile->id == id)
			return profile;
	}
	return NULL;
}

struct nix_tm_node *
nix_tm_node_search(struct nix *nix, uint32_t node_id, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;

	list = nix_tm_node_list(nix, tree);
	TAILQ_FOREACH(node, list, node) {
		if (node->id == node_id)
			return node;
	}
	return NULL;
}

uint64_t
nix_tm_shaper_rate_conv(uint64_t value, uint64_t *exponent_p,
			uint64_t *mantissa_p, uint64_t *div_exp_p)
{
	uint64_t div_exp, exponent, mantissa;

	/* Boundary checks */
	if (value < NIX_TM_MIN_SHAPER_RATE || value > NIX_TM_MAX_SHAPER_RATE)
		return 0;

	if (value <= NIX_TM_SHAPER_RATE(0, 0, 0)) {
		/* Calculate rate div_exp and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * (256 + mantissa)
		 *              / ((1 << div_exp) * 256))
		 */
		div_exp = 0;
		exponent = 0;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value < (NIX_TM_SHAPER_RATE_CONST / (1 << div_exp)))
			div_exp += 1;

		while (value < ((NIX_TM_SHAPER_RATE_CONST * (256 + mantissa)) /
				((1 << div_exp) * 256)))
			mantissa -= 1;
	} else {
		/* Calculate rate exponent and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
		 */
		div_exp = 0;
		exponent = NIX_TM_MAX_RATE_EXPONENT;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value < (NIX_TM_SHAPER_RATE_CONST * (1 << exponent)))
			exponent -= 1;

		while (value < ((NIX_TM_SHAPER_RATE_CONST *
				 ((256 + mantissa) << exponent)) /
				256))
			mantissa -= 1;
	}

	if (div_exp > NIX_TM_MAX_RATE_DIV_EXP ||
	    exponent > NIX_TM_MAX_RATE_EXPONENT ||
	    mantissa > NIX_TM_MAX_RATE_MANTISSA)
		return 0;

	if (div_exp_p)
		*div_exp_p = div_exp;
	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	/* Calculate real rate value */
	return NIX_TM_SHAPER_RATE(exponent, mantissa, div_exp);
}
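
/* Worked example (with the 2E6 constant from the formulas above):
 * for value = 1E9, the exponent loop settles at 8, since
 * 2E6 * (1 << 8) = 5.12E8 <= 1E9 < 2E6 * (1 << 9), and the mantissa
 * loop at 244, since 2E6 * ((256 + 244) << 8) / 256 = 1E9 exactly,
 * so the realized rate returned equals the requested rate.
 */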

uint64_t
nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
			 uint64_t *mantissa_p)
{
	uint64_t min_burst, max_burst;
	uint64_t exponent, mantissa;
	uint32_t max_mantissa;

	min_burst = NIX_TM_MIN_SHAPER_BURST;
	max_burst = roc_nix_tm_max_shaper_burst_get();

	if (value < min_burst || value > max_burst)
		return 0;

	max_mantissa = (roc_model_is_cn9k() ? NIX_CN9K_TM_MAX_BURST_MANTISSA :
					      NIX_TM_MAX_BURST_MANTISSA);
	/* Calculate burst exponent and mantissa using
	 * the following formula:
	 *
	 * value = ((256 + mantissa) << (exponent + 1)) / 256
	 */
	exponent = NIX_TM_MAX_BURST_EXPONENT;
	mantissa = max_mantissa;

	while (value < (1ull << (exponent + 1)))
		exponent -= 1;

	while (value < ((256 + mantissa) << (exponent + 1)) / 256)
		mantissa -= 1;

	if (exponent > NIX_TM_MAX_BURST_EXPONENT || mantissa > max_mantissa)
		return 0;

	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	return NIX_TM_SHAPER_BURST(exponent, mantissa);
}
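
/* Worked example: for value = 4096 the exponent loop stops at 11
 * (1 << 12 = 4096 <= value) and the mantissa loop at 0, since
 * ((256 + 0) << 12) / 256 = 4096 exactly.
 */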

static void
nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,
		       struct nix_tm_shaper_data *cir,
		       struct nix_tm_shaper_data *pir)
{
	memset(cir, 0, sizeof(*cir));
	memset(pir, 0, sizeof(*pir));

	if (!profile)
		return;

	/* Calculate CIR exponent and mantissa */
	if (profile->commit.rate)
		cir->rate = nix_tm_shaper_rate_conv(
			profile->commit.rate, &cir->exponent, &cir->mantissa,
			&cir->div_exp);

	/* Calculate PIR exponent and mantissa */
	if (profile->peak.rate)
		pir->rate = nix_tm_shaper_rate_conv(
			profile->peak.rate, &pir->exponent, &pir->mantissa,
			&pir->div_exp);

	/* Calculate CIR burst exponent and mantissa */
	if (profile->commit.size)
		cir->burst = nix_tm_shaper_burst_conv(profile->commit.size,
						      &cir->burst_exponent,
						      &cir->burst_mantissa);

	/* Calculate PIR burst exponent and mantissa */
	if (profile->peak.size)
		pir->burst = nix_tm_shaper_burst_conv(profile->peak.size,
						      &pir->burst_exponent,
						      &pir->burst_mantissa);
}

uint32_t
nix_tm_check_rr(struct nix *nix, uint32_t parent_id, enum roc_nix_tm_tree tree,
		uint32_t *rr_prio, uint32_t *max_prio)
{
	uint32_t node_cnt[NIX_TM_TLX_SP_PRIO_MAX];
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint32_t rr_num = 0, i;
	uint32_t children = 0;
	uint32_t priority;

	memset(node_cnt, 0, sizeof(node_cnt));
	*rr_prio = 0xF;
	*max_prio = UINT32_MAX;

	list = nix_tm_node_list(nix, tree);
	TAILQ_FOREACH(node, list, node) {
		if (!node->parent)
			continue;

		if (node->parent->id != parent_id)
			continue;

		priority = node->priority;
		node_cnt[priority]++;
		children++;
	}

	for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++) {
		if (!node_cnt[i])
			break;

		if (node_cnt[i] > rr_num) {
			*rr_prio = i;
			rr_num = node_cnt[i];
		}
	}

	/* An RR group with a single child is treated as SP */
	if (rr_num == 1) {
		*rr_prio = 0xF;
		rr_num = 0;
	}

	/* Max prio is returned only when there is a non-zero priority
	 * or when the parent has a single child.
	 */
	if (i > 1 || (children == 1))
		*max_prio = i - 1;
	return rr_num;
}
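
/* Example: children at priorities {0, 1, 1, 1, 2} give rr_prio = 1
 * (the priority shared by three children), rr_num = 3 and
 * max_prio = 2.
 */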

static uint16_t
nix_tm_max_prio(struct nix *nix, uint16_t hw_lvl)
{
	if (hw_lvl >= NIX_TXSCH_LVL_CNT)
		return 0;

	/* MDQ does not support SP */
	if (hw_lvl == NIX_TXSCH_LVL_MDQ)
		return 0;

	/* A PF's TL1 with VFs enabled does not support SP */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 && (!nix_tm_have_tl1_access(nix) ||
					    (nix->tm_flags & NIX_TM_TL1_NO_SP)))
		return 0;

	return NIX_TM_TLX_SP_PRIO_MAX - 1;
}

int
nix_tm_validate_prio(struct nix *nix, uint32_t lvl, uint32_t parent_id,
		     uint32_t priority, enum roc_nix_tm_tree tree)
{
	uint8_t priorities[NIX_TM_TLX_SP_PRIO_MAX];
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint32_t rr_num = 0;
	int i;

	list = nix_tm_node_list(nix, tree);
	/* Validate priority against max */
	if (priority > nix_tm_max_prio(nix, nix_tm_lvl2nix(nix, lvl - 1)))
		return NIX_ERR_TM_PRIO_EXCEEDED;

	if (parent_id == ROC_NIX_TM_NODE_ID_INVALID)
		return 0;

	memset(priorities, 0, sizeof(priorities));
	priorities[priority] = 1;

	TAILQ_FOREACH(node, list, node) {
		if (!node->parent)
			continue;

		if (node->parent->id != parent_id)
			continue;

		priorities[node->priority]++;
	}

	for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++)
		if (priorities[i] > 1)
			rr_num++;

	/* At most one RR group per parent */
	if (rr_num > 1)
		return NIX_ERR_TM_MULTIPLE_RR_GROUPS;

	/* Check the previous priority to avoid holes in the priorities */
	if (priority && !priorities[priority - 1])
		return NIX_ERR_TM_PRIO_ORDER;

	return 0;
}
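
/* Example: with existing siblings at priorities {0, 1, 1, 2}, adding a
 * child at priority 3 is valid, while priority 4 fails with
 * NIX_ERR_TM_PRIO_ORDER because priority 3 would be left unused.
 */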

bool
nix_tm_child_res_valid(struct nix_tm_node_list *list,
		       struct nix_tm_node *parent)
{
	struct nix_tm_node *child;

	TAILQ_FOREACH(child, list, node) {
		if (child->parent != parent)
			continue;
		if (!(child->flags & NIX_TM_NODE_HWRES))
			return false;
	}
	return true;
}

uint8_t
nix_tm_tl1_default_prep(uint32_t schq, volatile uint64_t *reg,
			volatile uint64_t *regval)
{
	uint8_t k = 0;

	/*
	 * Default config for TL1.
	 * For VF this is always ignored.
	 */
	plt_tm_dbg("Default config for main root %s(%u)",
		   nix_tm_hwlvl2str(NIX_TXSCH_LVL_TL1), schq);

	/* Set DWRR quantum */
	reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
	regval[k] = NIX_TM_TL1_DFLT_RR_QTM;
	k++;

	reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
	regval[k] = (NIX_TM_TL1_DFLT_RR_PRIO << 1);
	k++;

	reg[k] = NIX_AF_TL1X_CIR(schq);
	regval[k] = 0;
	k++;

	return k;
}

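/* The *_reg_prep helpers below fill the caller-provided reg[]/regval[]
 * (and optionally regval_mask[]) arrays with register offset/value
 * pairs and return the number of entries written; callers then flush
 * the entries to the AF via the NIX_TXSCHQ_CFG mailbox (the same
 * req->reg[]/req->regval[] layout used by roc_nix_tm_node_stats_get()
 * below).
 */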
uint8_t
nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
			 volatile uint64_t *reg, volatile uint64_t *regval,
			 volatile uint64_t *regval_mask)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	uint8_t k = 0, hw_lvl, parent_lvl;
	uint64_t parent = 0, child = 0;
	enum roc_nix_tm_tree tree;
	uint32_t rr_prio, schq;
	uint16_t link, relchan;

	tree = node->tree;
	schq = node->hw_id;
	hw_lvl = node->hw_lvl;
	parent_lvl = hw_lvl + 1;
	rr_prio = node->rr_prio;

	/* Root node will not have a parent node */
	if (hw_lvl == nix->tm_root_lvl)
		parent = node->parent_hw_id;
	else
		parent = node->parent->hw_id;

	link = nix->tx_link;
	relchan = nix_tm_relchan_get(nix);

	if (hw_lvl != NIX_TXSCH_LVL_SMQ)
		child = nix_tm_find_prio_anchor(nix, node->id, tree);

	/* Override default rr_prio when TL1
	 * Static Priority is disabled
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 && nix->tm_flags & NIX_TM_TL1_NO_SP) {
		rr_prio = NIX_TM_TL1_DFLT_RR_PRIO;
		child = 0;
	}

	plt_tm_dbg("Topology config node %s(%u)->%s(%" PRIu64 ") lvl %u, id %u"
		   " prio_anchor %" PRIu64 " rr_prio %u (%p)",
		   nix_tm_hwlvl2str(hw_lvl), schq, nix_tm_hwlvl2str(parent_lvl),
		   parent, node->lvl, node->id, child, rr_prio, node);

	/* Prepare Topology and Link config */
	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:

		/* Set xoff which will be cleared later */
		reg[k] = NIX_AF_SMQX_CFG(schq);
		regval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |
			     ((nix->mtu & 0xFFFF) << 8));
		/* Maximum Vtag insertion size as a multiple of four bytes */
		if (roc_nix->hw_vlan_ins)
			regval[k] |= (0x2ULL << 36);
		regval_mask[k] = ~(BIT_ULL(50) | GENMASK_ULL(6, 0) |
				   GENMASK_ULL(23, 8) | GENMASK_ULL(38, 36));
		k++;

		/* Parent and schedule conf */
		reg[k] = NIX_AF_MDQX_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		break;
	case NIX_TXSCH_LVL_TL4:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL4X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
		if (nix->sdp_link) {
			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
			regval[k] = BIT_ULL(12);
			k++;
		}
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL3X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!nix->sdp_link &&
		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
			regval[k] = BIT_ULL(12) | relchan;
			/* Enable BP if node is BP capable and rx_pause is set
			 */
			if (nix->rx_pause && node->bp_capa)
				regval[k] |= BIT_ULL(13);
			k++;
		}

		break;
	case NIX_TXSCH_LVL_TL2:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL2X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!nix->sdp_link &&
		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
			regval[k] = BIT_ULL(12) | relchan;
			/* Enable BP if node is BP capable and rx_pause is set
			 */
			if (nix->rx_pause && node->bp_capa)
				regval[k] |= BIT_ULL(13);
			k++;
		}

		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
		k++;

		break;
	}

	return k;
}

uint8_t
nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
		      volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint64_t strict_prio = node->priority;
	uint32_t hw_lvl = node->hw_lvl;
	uint32_t schq = node->hw_id;
	uint64_t rr_quantum;
	uint8_t k = 0;

	/* For CN9K, weight needs to be converted to quantum */
	rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	/* For children of the root, strict priority is the default when
	 * either the device root is TL2 or TL1 static priority is disabled.
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
	    (!nix_tm_have_tl1_access(nix) || nix->tm_flags & NIX_TM_TL1_NO_SP))
		strict_prio = NIX_TM_TL1_DFLT_RR_PRIO;

	plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
		   "prio 0x%" PRIx64 ", rr_quantum/rr_wt 0x%" PRIx64 " (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
		   strict_prio, rr_quantum, node);

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
		regval[k] = rr_quantum;
		k++;

		break;
	}

	return k;
}

uint8_t
nix_tm_shaper_reg_prep(struct nix_tm_node *node,
		       struct nix_tm_shaper_profile *profile,
		       volatile uint64_t *reg, volatile uint64_t *regval)
{
	struct nix_tm_shaper_data cir, pir;
	uint32_t schq = node->hw_id;
	uint64_t adjust = 0;
	uint8_t k = 0;

	nix_tm_shaper_conf_get(profile, &cir, &pir);

	if (profile && node->pkt_mode)
		adjust = profile->pkt_mode_adj;
	else if (profile)
		adjust = profile->pkt_len_adj;

	plt_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
		   "pir %" PRIu64 "(%" PRIu64 "B),"
		   " cir %" PRIu64 "(%" PRIu64 "B)"
		   " adjust 0x%" PRIx64 "(pktmode %u) (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
		   pir.rate, pir.burst, cir.rate, cir.burst, adjust,
		   node->pkt_mode, node);

	switch (node->hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_MDQX_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
					  (nix_tm_shaper2regval(&pir) | 1) :
					  0;
		k++;

		reg[k] = NIX_AF_MDQX_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
					  (nix_tm_shaper2regval(&cir) | 1) :
					  0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_MDQX_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL4X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
					  (nix_tm_shaper2regval(&pir) | 1) :
					  0;
		k++;

		reg[k] = NIX_AF_TL4X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
					  (nix_tm_shaper2regval(&cir) | 1) :
					  0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL4X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL3X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
					  (nix_tm_shaper2regval(&pir) | 1) :
					  0;
		k++;

		reg[k] = NIX_AF_TL3X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
					  (nix_tm_shaper2regval(&cir) | 1) :
					  0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL3X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;

		break;
	case NIX_TXSCH_LVL_TL2:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL2X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
					  (nix_tm_shaper2regval(&pir) | 1) :
					  0;
		k++;

		reg[k] = NIX_AF_TL2X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
					  (nix_tm_shaper2regval(&cir) | 1) :
					  0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL2X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;

		break;
	case NIX_TXSCH_LVL_TL1:
		/* Configure CIR */
		reg[k] = NIX_AF_TL1X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
					  (nix_tm_shaper2regval(&cir) | 1) :
					  0;
		k++;

		/* Configure length disable and adjust */
		reg[k] = NIX_AF_TL1X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	}

	return k;
}

uint8_t
nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
		    volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint32_t hw_lvl = node->hw_lvl;
	uint32_t schq = node->hw_id;
	uint8_t k = 0;

	plt_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
		   nix_tm_hwlvl2str(hw_lvl), schq, node->lvl, node->id, enable,
		   node);

	regval[k] = enable;

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_MDQ:
		reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
		k++;
		break;
	default:
		break;
	}

	return k;
}

/* Search for min rate in topology */
uint64_t
nix_tm_shaper_profile_rate_min(struct nix *nix)
{
	struct nix_tm_shaper_profile *profile;
	uint64_t rate_min = 1E9; /* 1 Gbps */

	TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
		if (profile->peak.rate && profile->peak.rate < rate_min)
			rate_min = profile->peak.rate;

		if (profile->commit.rate && profile->commit.rate < rate_min)
			rate_min = profile->commit.rate;
	}
	return rate_min;
}

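/* Count how many schedule queues are still available at a level by
 * summing the set bits in the corresponding free-resource bitmap.
 */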
uint16_t
nix_tm_resource_avail(struct nix *nix, uint8_t hw_lvl, bool contig)
{
	uint32_t pos = 0, start_pos = 0;
	struct plt_bitmap *bmp;
	uint16_t count = 0;
	uint64_t slab = 0;

	bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
	plt_bitmap_scan_init(bmp);

	if (!plt_bitmap_scan(bmp, &pos, &slab))
		return count;

	/* Count set bits, stopping once the scan wraps around */
	start_pos = pos;
	do {
		count += __builtin_popcountll(slab);
		if (!plt_bitmap_scan(bmp, &pos, &slab))
			break;
	} while (pos != start_pos);

	return count;
}

uint16_t
nix_tm_resource_estimate(struct nix *nix, uint16_t *schq_contig, uint16_t *schq,
			 enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	uint8_t contig_cnt, hw_lvl;
	struct nix_tm_node *parent;
	uint16_t cnt = 0, avail;

	list = nix_tm_node_list(nix, tree);
	/* Walk through parents from TL1..TL4 */
	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
		TAILQ_FOREACH(parent, list, node) {
			if (hw_lvl != parent->hw_lvl)
				continue;

			/* Skip accounting for children unless their
			 * parent has requested a reallocation.
			 */
			if (!parent->child_realloc)
				continue;

			/* Count children needed */
			schq[hw_lvl - 1] += parent->rr_num;
			if (parent->max_prio != UINT32_MAX) {
				contig_cnt = parent->max_prio + 1;
				schq_contig[hw_lvl - 1] += contig_cnt;
				/* When we have SP + DWRR at a parent,
				 * we will always have a spare schq at rr prio
				 * location in contiguous queues. Hence reduce
				 * discontiguous count by 1.
				 */
				if (parent->max_prio > 0 && parent->rr_num)
					schq[hw_lvl - 1] -= 1;
			}
		}
	}

	schq[nix->tm_root_lvl] = 1;
	if (!nix_tm_have_tl1_access(nix))
		schq[NIX_TXSCH_LVL_TL1] = 1;

	/* Now check for existing resources */
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		avail = nix_tm_resource_avail(nix, hw_lvl, false);
		if (schq[hw_lvl] <= avail)
			schq[hw_lvl] = 0;
		else
			schq[hw_lvl] -= avail;

		/* For contiguous queues, realloc everything */
		avail = nix_tm_resource_avail(nix, hw_lvl, true);
		if (schq_contig[hw_lvl] <= avail)
			schq_contig[hw_lvl] = 0;

		cnt += schq[hw_lvl];
		cnt += schq_contig[hw_lvl];

		plt_tm_dbg("Estimate resources needed for %s: dis %u cont %u",
			   nix_tm_hwlvl2str(hw_lvl), schq[hw_lvl],
			   schq_contig[hw_lvl]);
	}

	return cnt;
}

uint16_t
roc_nix_tm_leaf_cnt(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint16_t leaf_cnt = 0;

	/* Count leaves only in the user list */
	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	TAILQ_FOREACH(node, list, node) {
		if (node->id < nix->nb_tx_queues)
			leaf_cnt++;
	}

	return leaf_cnt;
}

int
roc_nix_tm_node_lvl(struct roc_nix *roc_nix, uint32_t node_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	return node->lvl;
}

struct roc_nix_tm_node *
roc_nix_tm_node_get(struct roc_nix *roc_nix, uint32_t node_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	return (struct roc_nix_tm_node *)node;
}

struct roc_nix_tm_node *
roc_nix_tm_node_next(struct roc_nix *roc_nix, struct roc_nix_tm_node *__prev)
{
	struct nix_tm_node *prev = (struct nix_tm_node *)__prev;
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;

	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);

	/* HEAD of the list */
	if (!prev)
		return (struct roc_nix_tm_node *)TAILQ_FIRST(list);

	/* Next entry */
	if (prev->tree != ROC_NIX_TM_USER)
		return NULL;

	return (struct roc_nix_tm_node *)TAILQ_NEXT(prev, node);
}
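
/* This iterator (and roc_nix_tm_shaper_profile_next() below) follows
 * the usual TAILQ walk, e.g.:
 *
 *	struct roc_nix_tm_node *n;
 *
 *	for (n = roc_nix_tm_node_next(roc_nix, NULL); n;
 *	     n = roc_nix_tm_node_next(roc_nix, n))
 *		use(n);
 */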

struct roc_nix_tm_shaper_profile *
roc_nix_tm_shaper_profile_get(struct roc_nix *roc_nix, uint32_t profile_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;

	profile = nix_tm_shaper_profile_search(nix, profile_id);
	return (struct roc_nix_tm_shaper_profile *)profile;
}

struct roc_nix_tm_shaper_profile *
roc_nix_tm_shaper_profile_next(struct roc_nix *roc_nix,
			       struct roc_nix_tm_shaper_profile *__prev)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile_list *list;
	struct nix_tm_shaper_profile *prev;

	prev = (struct nix_tm_shaper_profile *)__prev;
	list = &nix->shaper_profile_list;

	/* HEAD of the list */
	if (!prev)
		return (struct roc_nix_tm_shaper_profile *)TAILQ_FIRST(list);

	return (struct roc_nix_tm_shaper_profile *)TAILQ_NEXT(prev, shaper);
}

struct nix_tm_node *
nix_tm_node_alloc(void)
{
	struct nix_tm_node *node;

	node = plt_zmalloc(sizeof(struct nix_tm_node), 0);
	if (!node)
		return NULL;

	node->free_fn = plt_free;
	return node;
}

void
nix_tm_node_free(struct nix_tm_node *node)
{
	if (!node || node->free_fn == NULL)
		return;

	(node->free_fn)(node);
}

struct nix_tm_shaper_profile *
nix_tm_shaper_profile_alloc(void)
{
	struct nix_tm_shaper_profile *profile;

	profile = plt_zmalloc(sizeof(struct nix_tm_shaper_profile), 0);
	if (!profile)
		return NULL;

	profile->free_fn = plt_free;
	return profile;
}

void
nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile)
{
	if (!profile || !profile->free_fn)
		return;

	(profile->free_fn)(profile);
}

int
roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
			  struct roc_nix_tm_node_stats *n_stats)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req, *rsp;
	struct nix_tm_node *node;
	uint32_t schq;
	int rc, i;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	if (node->hw_lvl != NIX_TXSCH_LVL_TL1)
		return NIX_ERR_OP_NOTSUP;

	/* Check if node has HW resource */
	if (!(node->flags & NIX_TM_NODE_HWRES))
		return 0;

	schq = node->hw_id;
	/* Skip fetch if not requested */
	if (!n_stats)
		goto clear_stats;

	memset(n_stats, 0, sizeof(struct roc_nix_tm_node_stats));

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->read = 1;
	req->lvl = NIX_TXSCH_LVL_TL1;

	i = 0;
	req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
	req->num_regs = i;

	rc = mbox_process_msg(mbox, (void **)&rsp);
	if (rc)
		return rc;

	/* Return stats */
	n_stats->stats[ROC_NIX_TM_NODE_PKTS_DROPPED] = rsp->regval[0];
	n_stats->stats[ROC_NIX_TM_NODE_BYTES_DROPPED] = rsp->regval[1];
	n_stats->stats[ROC_NIX_TM_NODE_GREEN_PKTS] = rsp->regval[2];
	n_stats->stats[ROC_NIX_TM_NODE_GREEN_BYTES] = rsp->regval[3];
	n_stats->stats[ROC_NIX_TM_NODE_YELLOW_PKTS] = rsp->regval[4];
	n_stats->stats[ROC_NIX_TM_NODE_YELLOW_BYTES] = rsp->regval[5];
	n_stats->stats[ROC_NIX_TM_NODE_RED_PKTS] = rsp->regval[6];
	n_stats->stats[ROC_NIX_TM_NODE_RED_BYTES] = rsp->regval[7];

clear_stats:
	if (!clear)
		return 0;

	/* Clear all the stats */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_TL1;
	i = 0;
	req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
	req->num_regs = i;

	return mbox_process_msg(mbox, (void **)&rsp);
}
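
/* Illustrative use: read the TL1 root stats without clearing them,
 * where root_id is assumed to be the user-tree id of the root node:
 *
 *	struct roc_nix_tm_node_stats stats;
 *	int rc;
 *
 *	rc = roc_nix_tm_node_stats_get(roc_nix, root_id, false, &stats);
 *	if (!rc)
 *		dropped = stats.stats[ROC_NIX_TM_NODE_PKTS_DROPPED];
 */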

bool
roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) &&
	    (nix->tm_tree == ROC_NIX_TM_USER))
		return true;
	return false;
}

int
roc_nix_tm_tree_type_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->tm_tree;
}

int
roc_nix_tm_max_prio(struct roc_nix *roc_nix, int lvl)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	int hw_lvl = nix_tm_lvl2nix(nix, lvl);

	return nix_tm_max_prio(nix, hw_lvl);
}

int
roc_nix_tm_lvl_is_leaf(struct roc_nix *roc_nix, int lvl)
{
	return nix_tm_is_leaf(roc_nix_to_nix_priv(roc_nix), lvl);
}

void
roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
				   struct roc_nix_tm_shaper_profile *roc_prof)
{
	struct nix_tm_node *tm_node = (struct nix_tm_node *)node;
	struct nix_tm_shaper_profile *profile;
	struct nix_tm_shaper_data cir, pir;

	profile = (struct nix_tm_shaper_profile *)roc_prof->reserved;
	tm_node->red_algo = NIX_REDALG_STD;

	/* C0 silicon doesn't support STALL when both PIR & CIR are enabled */
	if (profile && roc_model_is_cn96_cx()) {
		nix_tm_shaper_conf_get(profile, &cir, &pir);

		if (pir.rate && cir.rate)
			tm_node->red_algo = NIX_REDALG_DISCARD;
	}
}

int
roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix)
{
	if (nix_tm_have_tl1_access(roc_nix_to_nix_priv(roc_nix)))
		return NIX_TXSCH_LVL_CNT;

	return (NIX_TXSCH_LVL_CNT - 1);
}

int
roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix_tm_lvl2nix(nix, lvl) == NIX_TXSCH_LVL_TL1)
		return 1;

	return 0;
}
1189