xref: /dpdk/drivers/common/cnxk/roc_nix_tm.c (revision 4785c406c26df4f8c29a84ea89712aa95acde44d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include "roc_api.h"
6 #include "roc_priv.h"
7 
8 static inline int
9 bitmap_ctzll(uint64_t slab)
10 {
11 	if (slab == 0)
12 		return 0;
13 
14 	return plt_ctz64(slab);
15 }
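/*
 * Typical usage pattern, as in nix_tm_release_resources() and
 * nix_tm_assign_resources() below:
 *
 *	plt_bitmap_scan_init(bmp);
 *	if (plt_bitmap_scan(bmp, &pos, &slab)) {
 *		schq = bitmap_ctzll(slab);	// bit index within the 64-bit slab
 *		slab &= ~(1ULL << schq);	// consume that bit
 *		schq += pos;			// absolute schq number
 *	}
 */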
16 
17 void
18 nix_tm_clear_shaper_profiles(struct nix *nix)
19 {
20 	struct nix_tm_shaper_profile *shaper_profile, *tmp;
21 	struct nix_tm_shaper_profile_list *list;
22 
23 	list = &nix->shaper_profile_list;
24 	PLT_TAILQ_FOREACH_SAFE(shaper_profile, list, shaper, tmp) {
25 		if (shaper_profile->ref_cnt)
26 			plt_warn("Shaper profile %u has non-zero references",
27 				 shaper_profile->id);
28 		TAILQ_REMOVE(&nix->shaper_profile_list, shaper_profile, shaper);
29 		nix_tm_shaper_profile_free(shaper_profile);
30 	}
31 }
32 
33 static int
34 nix_tm_node_reg_conf(struct nix *nix, struct nix_tm_node *node)
35 {
36 	uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
37 	uint64_t regval[MAX_REGS_PER_MBOX_MSG];
38 	struct nix_tm_shaper_profile *profile;
39 	uint64_t reg[MAX_REGS_PER_MBOX_MSG];
40 	struct mbox *mbox = (&nix->dev)->mbox;
41 	struct nix_txschq_config *req;
42 	int rc = -EFAULT;
43 	uint32_t hw_lvl;
44 	uint8_t k = 0;
45 
46 	memset(regval, 0, sizeof(regval));
47 	memset(regval_mask, 0, sizeof(regval_mask));
48 
49 	profile = nix_tm_shaper_profile_search(nix, node->shaper_profile_id);
50 	hw_lvl = node->hw_lvl;
51 
52 	/* Need this trigger to configure TL1 */
53 	if (!nix_tm_have_tl1_access(nix) && hw_lvl == NIX_TXSCH_LVL_TL2) {
54 		/* Prepare default conf for TL1 */
55 		req = mbox_alloc_msg_nix_txschq_cfg(mbox_get(mbox));
56 		req->lvl = NIX_TXSCH_LVL_TL1;
57 
58 		k = nix_tm_tl1_default_prep(nix, node->parent_hw_id, req->reg,
59 					    req->regval);
60 		req->num_regs = k;
61 		rc = mbox_process(mbox);
62 		mbox_put(mbox);
63 		if (rc)
64 			goto error;
65 	}
66 
67 	/* Prepare topology config */
68 	k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);
69 
70 	/* Prepare schedule config */
71 	k += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);
72 
73 	/* Prepare shaping config */
74 	k += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);
75 
76 	if (!k)
77 		return 0;
78 
79 	/* Copy and send config mbox */
80 	req = mbox_alloc_msg_nix_txschq_cfg(mbox_get(mbox));
81 	req->lvl = hw_lvl;
82 	req->num_regs = k;
83 
84 	mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
85 	mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
86 	mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
87 
88 	rc = mbox_process(mbox);
89 	mbox_put(mbox);
90 	if (rc)
91 		goto error;
92 
93 	return 0;
94 error:
95 	plt_err("Txschq conf failed for node %p, rc=%d", node, rc);
96 	return rc;
97 }
98 
99 int
100 nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
101 {
102 	struct nix_tm_node_list *list;
103 	struct nix_tm_node *node;
104 	uint32_t hw_lvl;
105 	int rc = 0;
106 
107 	list = nix_tm_node_list(nix, tree);
108 
109 	for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
110 		TAILQ_FOREACH(node, list, node) {
111 			if (node->hw_lvl != hw_lvl)
112 				continue;
113 
114 			/* Only one TL3/TL2 link config should have BP enable
115 			 * set per channel, and only for a PF or LBK VF.
116 			 */
117 			node->bp_capa = 0;
118 			if (!nix->sdp_link && node->hw_lvl == nix->tm_link_cfg_lvl)
119 				node->bp_capa = 1;
120 
121 			rc = nix_tm_node_reg_conf(nix, node);
122 			if (rc)
123 				goto exit;
124 		}
125 	}
126 exit:
127 	return rc;
128 }
129 
130 int
131 nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
132 {
133 	struct nix_tm_node *child, *parent;
134 	struct nix_tm_node_list *list;
135 	uint32_t rr_prio, max_prio;
136 	uint32_t rr_num = 0;
137 
138 	list = nix_tm_node_list(nix, tree);
139 
140 	/* Release all the node HW resources locally
141 	 * if the parent is marked dirty and the resource exists.
142 	 */
143 	TAILQ_FOREACH(child, list, node) {
144 		/* Release resource only if parent direct hierarchy changed */
145 		if (child->flags & NIX_TM_NODE_HWRES && child->parent &&
146 		    child->parent->child_realloc) {
147 			nix_tm_free_node_resource(nix, child);
148 		}
149 		child->max_prio = UINT32_MAX;
150 	}
151 
152 	TAILQ_FOREACH(parent, list, node) {
153 		/* Count the group of children with the same priority, i.e. those in RR */
154 		rr_num = nix_tm_check_rr(nix, parent->id, tree, &rr_prio,
155 					 &max_prio);
156 
157 		/* Assume that multiple RR groups are
158 		 * not configured, based on HW capability.
159 		 */
160 		parent->rr_prio = rr_prio;
161 		parent->rr_num = rr_num;
162 		parent->max_prio = max_prio;
163 	}
164 
165 	return 0;
166 }
167 
168 static int
169 nix_tm_root_node_get(struct nix *nix, int tree)
170 {
171 	struct nix_tm_node_list *list = nix_tm_node_list(nix, tree);
172 	struct nix_tm_node *tm_node;
173 
174 	TAILQ_FOREACH(tm_node, list, node) {
175 		if (tm_node->hw_lvl == nix->tm_root_lvl)
176 			return 1;
177 	}
178 
179 	return 0;
180 }
181 
182 int
183 nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
184 {
185 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
186 	struct nix_tm_shaper_profile *profile;
187 	uint32_t node_id, parent_id, lvl;
188 	struct nix_tm_node *parent_node;
189 	uint32_t priority, profile_id;
190 	uint8_t hw_lvl, exp_next_lvl;
191 	enum roc_nix_tm_tree tree;
192 	int rc;
193 
194 	node_id = node->id;
195 	priority = node->priority;
196 	parent_id = node->parent_id;
197 	profile_id = node->shaper_profile_id;
198 	lvl = node->lvl;
199 	tree = node->tree;
200 
201 	plt_tm_dbg("Add node %s lvl %u id %u, prio 0x%x weight 0x%x "
202 		   "parent %u profile 0x%x tree %u",
203 		   nix_tm_hwlvl2str(nix_tm_lvl2nix(nix, lvl)), lvl, node_id,
204 		   priority, node->weight, parent_id, profile_id, tree);
205 
206 	if (tree >= ROC_NIX_TM_TREE_MAX)
207 		return NIX_ERR_PARAM;
208 
209 	/* Translate SW level IDs to NIX HW level IDs */
210 	hw_lvl = nix_tm_lvl2nix(nix, lvl);
211 	if (hw_lvl == NIX_TXSCH_LVL_CNT && !nix_tm_is_leaf(nix, lvl))
212 		return NIX_ERR_TM_INVALID_LVL;
213 
214 	/* Leaf nodes must all have the same priority */
215 	if (nix_tm_is_leaf(nix, lvl) && priority != 0)
216 		return NIX_ERR_TM_INVALID_PRIO;
217 
218 	parent_node = nix_tm_node_search(nix, parent_id, tree);
219 
220 	if (node_id < nix->nb_tx_queues)
221 		exp_next_lvl = NIX_TXSCH_LVL_SMQ;
222 	else
223 		exp_next_lvl = hw_lvl + 1;
224 
225 	/* Check that a valid parent exists at the expected level */
226 	if (hw_lvl != nix->tm_root_lvl &&
227 	    (!parent_node || parent_node->hw_lvl != exp_next_lvl))
228 		return NIX_ERR_TM_INVALID_PARENT;
229 
230 	/* Check if a node already exists */
231 	if (nix_tm_node_search(nix, node_id, tree))
232 		return NIX_ERR_TM_NODE_EXISTS;
233 
234 	/* Check if root node exists */
235 	if (hw_lvl == nix->tm_root_lvl && nix_tm_root_node_get(nix, tree))
236 		return NIX_ERR_TM_NODE_EXISTS;
237 
238 	profile = nix_tm_shaper_profile_search(nix, profile_id);
239 	if (!nix_tm_is_leaf(nix, lvl)) {
240 		/* Check if shaper profile exists for a non-leaf node */
241 		if (!profile && profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE)
242 			return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
243 
244 		/* Packet mode in the profile should match that of the TM node */
245 		if (profile && profile->pkt_mode != node->pkt_mode)
246 			return NIX_ERR_TM_PKT_MODE_MISMATCH;
247 	}
248 
249 	/* Check for a second DWRR group among siblings and for holes in priority */
250 	rc = nix_tm_validate_prio(nix, lvl, parent_id, priority, tree);
251 	if (rc)
252 		return rc;
253 
254 	if (node->weight > roc_nix_tm_max_sched_wt_get())
255 		return NIX_ERR_TM_WEIGHT_EXCEED;
256 
257 	node->hw_lvl = nix_tm_lvl2nix(nix, lvl);
258 	node->rr_prio = 0xF;
259 	node->max_prio = UINT32_MAX;
260 	node->hw_id = NIX_TM_HW_ID_INVALID;
261 	node->flags = 0;
262 
263 	if (profile)
264 		profile->ref_cnt++;
265 
266 	node->parent = parent_node;
267 	if (parent_node)
268 		parent_node->child_realloc = true;
269 	node->parent_hw_id = NIX_TM_HW_ID_INVALID;
270 
271 	TAILQ_INSERT_TAIL(&nix->trees[tree], node, node);
272 	plt_tm_dbg("Added node %s lvl %u id %u (%p)",
273 		   nix_tm_hwlvl2str(node->hw_lvl), lvl, node_id, node);
274 	return 0;
275 }
276 
277 int
278 nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
279 {
280 	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
281 	struct nix_txschq_config *req;
282 	struct nix_tm_node *p;
283 	int rc;
284 
285 	/* Enable nodes in path for flush to succeed */
286 	if (!nix_tm_is_leaf(nix, node->lvl))
287 		p = node;
288 	else
289 		p = node->parent;
290 	while (p) {
291 		if (!(p->flags & NIX_TM_NODE_ENABLED) &&
292 		    (p->flags & NIX_TM_NODE_HWRES)) {
293 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
294 			req->lvl = p->hw_lvl;
295 			req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
296 							    req->regval);
297 			rc = mbox_process(mbox);
298 			if (rc)
299 				goto exit;
300 
301 			p->flags |= NIX_TM_NODE_ENABLED;
302 		}
303 		p = p->parent;
304 	}
305 
306 	rc = 0;
307 exit:
308 	mbox_put(mbox);
309 	return rc;
310 }
311 
312 int
313 nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
314 		     bool enable)
315 {
316 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
317 	enum roc_nix_tm_tree tree = nix->tm_tree;
318 	struct mbox *mbox = (&nix->dev)->mbox;
319 	struct nix_txschq_config *req = NULL;
320 	struct nix_tm_node_list *list;
321 	uint16_t link = nix->tx_link;
322 	struct nix_tm_node *sq_node;
323 	struct nix_tm_node *parent;
324 	struct nix_tm_node *node;
325 	struct roc_nix_sq *sq_s;
326 	uint16_t rel_chan = 0;
327 	uint8_t parent_lvl;
328 	uint8_t k = 0;
329 	int rc = 0, i;
330 
331 	if (roc_nix_is_sdp(roc_nix))
332 		return 0;
333 
334 	sq_s = nix->sqs[sq];
335 	if (!sq_s)
336 		return -ENOENT;
337 
338 	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
339 	if (!sq_node)
340 		return -ENOENT;
341 
342 	parent_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH2 :
343 		      ROC_TM_LVL_SCH1);
344 
345 	parent = sq_node->parent;
346 	while (parent) {
347 		if (parent->lvl == parent_lvl)
348 			break;
349 
350 		parent = parent->parent;
351 	}
352 	if (!parent)
353 		return -ENOENT;
354 
355 	list = nix_tm_node_list(nix, tree);
356 
357 	/* Get relative channel if loopback */
358 	if (roc_nix_is_lbk(roc_nix))
359 		rel_chan = nix_tm_lbk_relchan_get(nix);
360 	else
361 		rel_chan = tc;
362 
363 	/* On enable, fail if the parent's rel chan is already configured differently */
364 	if (enable && parent->rel_chan != NIX_TM_CHAN_INVALID &&
365 	    parent->rel_chan != rel_chan) {
366 		plt_err("SQ %d: parent node TL3 id %d already has rel_chan %d set",
367 			sq, parent->hw_id, parent->rel_chan);
368 		rc = -EINVAL;
369 		goto err;
370 	}
371 
372 	/* No action on an enable request for a non-participating SQ. This case
373 	 * is required to handle post-flush, where TCs should be reconfigured
374 	 * after pre-flush.
375 	 */
376 	if (enable && sq_s->tc == ROC_NIX_PFC_CLASS_INVALID &&
377 	    tc == ROC_NIX_PFC_CLASS_INVALID)
378 		return 0;
379 
380 	/* Find the parent TL3 */
381 	TAILQ_FOREACH(node, list, node) {
382 		if (node->hw_lvl != nix->tm_link_cfg_lvl)
383 			continue;
384 
385 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
386 			continue;
387 
388 		/* Restrict sharing of TL3 across the queues */
389 		if (enable && node != parent && node->rel_chan == rel_chan) {
390 			plt_warn("SQ %d: sibling node TL3 %d already has tc %d(%d) set",
391 				 sq, node->hw_id, tc, rel_chan);
392 			return -EEXIST;
393 		}
394 	}
395 
396 	/* Allocate TL3 config request */
397 	req = mbox_alloc_msg_nix_txschq_cfg(mbox_get(mbox));
398 	req->lvl = nix->tm_link_cfg_lvl;
399 	k = 0;
400 
401 	/* Enable PFC/pause on the identified TL3: program rel chan and BP_ENA (bit 13) */
402 	req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(parent->hw_id, link);
403 	req->regval[k] = enable ? rel_chan : 0;
404 	req->regval[k] |= enable ? BIT_ULL(13) : 0;
405 	req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
406 	k++;
407 
408 	req->num_regs = k;
409 	rc = mbox_process(mbox);
410 	if (rc)
411 		goto err;
412 
413 	parent->rel_chan = enable ? rel_chan : NIX_TM_CHAN_INVALID;
414 	sq_s->tc = enable ? tc : ROC_NIX_PFC_CLASS_INVALID;
415 	/* Clear other SQs with the same TC, i.e. the same parent node */
416 	for (i = 0; !enable && i < nix->nb_tx_queues; i++) {
417 		if (nix->sqs[i] && nix->sqs[i]->tc == tc)
418 			nix->sqs[i]->tc = ROC_NIX_PFC_CLASS_INVALID;
419 	}
420 
421 	rc = 0;
422 	plt_tm_dbg("SQ %u: TL3 %d TC %u %s",
423 		   sq, parent->hw_id, tc, enable ? "enabled" : "disabled");
424 	goto exit;
425 err:
426 	plt_err("Failed to %s bp on link %u, rc=%d(%s)",
427 		enable ? "enable" : "disable", link, rc, roc_error_msg_get(rc));
428 exit:
429 	mbox_put(mbox);
430 	return rc;
431 }
432 
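/*
 * The read path below mirrors nix_tm_bp_config_set(): it reads
 * NIX_AF_TL3_TL2X_LINKX_CFG for every bp_capa TL3/TL2 node on this TX link,
 * batching at most MAX_REGS_PER_MBOX_MSG reads per mbox message, and reports
 * backpressure as enabled only when BIT_ULL(13) (BP_ENA) is set in every
 * returned register value.
 */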
433 int
434 nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled)
435 {
436 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
437 	struct nix_txschq_config *req = NULL, *rsp;
438 	enum roc_nix_tm_tree tree = nix->tm_tree;
439 	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
440 	struct nix_tm_node_list *list;
441 	struct nix_tm_node *node;
442 	bool found = false;
443 	uint8_t enable = 1;
444 	uint8_t k = 0, i;
445 	uint16_t link;
446 	int rc = 0;
447 
448 	list = nix_tm_node_list(nix, tree);
449 	link = nix->tx_link;
450 
451 	TAILQ_FOREACH(node, list, node) {
452 		if (node->hw_lvl != nix->tm_link_cfg_lvl)
453 			continue;
454 
455 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
456 			continue;
457 
458 		found = true;
459 		if (!req) {
460 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
461 			req->read = 1;
462 			req->lvl = nix->tm_link_cfg_lvl;
463 			k = 0;
464 		}
465 
466 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
467 		k++;
468 
469 		if (k >= MAX_REGS_PER_MBOX_MSG) {
470 			req->num_regs = k;
471 			rc = mbox_process_msg(mbox, (void **)&rsp);
472 			if (rc || rsp->num_regs != k)
473 				goto err;
474 			req = NULL;
475 
476 			/* Report as enabled only if all entries have it enabled */
477 			for (i = 0; i < k; i++)
478 				enable &= !!(rsp->regval[i] & BIT_ULL(13));
479 		}
480 	}
481 
482 	if (req) {
483 		req->num_regs = k;
484 		rc = mbox_process_msg(mbox, (void **)&rsp);
485 		if (rc)
486 			goto err;
487 		/* Report as enabled only if all entries have it enabled */
488 		for (i = 0; i < k; i++)
489 			enable &= !!(rsp->regval[i] & BIT_ULL(13));
490 	}
491 
492 	*is_enabled = found ? !!enable : false;
493 	rc = 0;
494 	goto exit;
495 err:
496 	plt_err("Failed to get bp status on link %u, rc=%d(%s)", link, rc,
497 		roc_error_msg_get(rc));
498 exit:
499 	mbox_put(mbox);
500 	return rc;
501 }
502 
503 int
504 nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
505 {
506 	struct mbox *mbox = (&nix->dev)->mbox;
507 	struct nix_txschq_config *req;
508 	uint16_t smq;
509 	int rc;
510 
511 	smq = node->hw_id;
512 	plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
513 		   enable ? "enable" : "disable");
514 
515 	rc = nix_tm_clear_path_xoff(nix, node);
516 	if (rc)
517 		return rc;
518 
519 	req = mbox_alloc_msg_nix_txschq_cfg(mbox_get(mbox));
520 	req->lvl = NIX_TXSCH_LVL_SMQ;
521 	req->num_regs = 1;
522 
523 	req->reg[0] = NIX_AF_SMQX_CFG(smq);
524 	req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
525 	req->regval_mask[0] =
526 		enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);
527 
528 	rc = mbox_process(mbox);
529 	mbox_put(mbox);
530 	return rc;
531 }
532 
533 int
534 nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
535 		     uint16_t *smq)
536 {
537 	struct nix_tm_node *node;
538 	int rc;
539 
540 	node = nix_tm_node_search(nix, sq, nix->tm_tree);
541 
542 	/* Check if we found a valid leaf node */
543 	if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
544 	    node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
545 		return -EIO;
546 	}
547 
548 	/* Get SMQ Id of leaf node's parent */
549 	*smq = node->parent->hw_id;
550 	*rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
551 
552 	rc = nix_tm_smq_xoff(nix, node->parent, false);
553 	if (rc)
554 		return rc;
555 	node->flags |= NIX_TM_NODE_ENABLED;
556 	return 0;
557 }
558 
559 int
560 roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
561 {
562 	struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
563 	uint16_t sqb_cnt, head_off, tail_off;
564 	uint64_t wdata, val, prev;
565 	uint16_t qid = sq->qid;
566 	int64_t *regaddr;
567 	uint64_t timeout; /* in units of 10 us */
568 
569 	/* Wait for enough time based on shaper min rate */
570 	timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
571 	/* Account for the worst case of this SQ being the lowest priority,
572 	 * so that all other SQs have to drain out on their own first.
573 	 */
574 	timeout = timeout * nix->nb_tx_queues;
575 	timeout = timeout / nix->tm_rate_min;
576 	if (!timeout)
577 		timeout = 10000;
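	/*
	 * Worked example with illustrative figures: nb_desc = 1024, max packet
	 * length = 1500B, 8 TX queues and the default tm_rate_min of 1 Gbps
	 * give timeout = 1024 * 1500 * 8 * 1E5 * 8 / 1E9 ~= 9830 iterations
	 * of 10 us, i.e. roughly 98 ms of worst-case spinning below.
	 */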
578 
579 	wdata = ((uint64_t)qid << 32);
580 	regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
581 	val = roc_atomic64_add_nosync(wdata, regaddr);
582 
583 	/* Spin for multiple iterations as "sq->fc_cache_pkts" can still
584 	 * have space to send packets even though fc_mem is disabled
585 	 */
586 
587 	while (true) {
588 		prev = val;
589 		plt_delay_us(10);
590 		val = roc_atomic64_add_nosync(wdata, regaddr);
591 		/* Continue on error */
592 		if (val & BIT_ULL(63))
593 			continue;
594 
595 		if (prev != val)
596 			continue;
597 
598 		sqb_cnt = val & 0xFFFF;
599 		head_off = (val >> 20) & 0x3F;
600 		tail_off = (val >> 28) & 0x3F;
601 
602 		/* SQ reached quiescent state */
603 		if (sqb_cnt <= 1 && head_off == tail_off &&
604 		    (*(volatile uint64_t *)sq->fc == sq->aura_sqb_bufs)) {
605 			break;
606 		}
607 
608 		/* Timeout */
609 		if (!timeout)
610 			goto exit;
611 		timeout--;
612 	}
613 
614 	return 0;
615 exit:
616 	return -EFAULT;
617 }
618 
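/*
 * The helper below walks the SQEs from head_off to end_off within one SQB and
 * frees the packet buffers they reference back to NPA. Sub-descriptors are
 * decoded by the code in bits [63:60] (SEND_HDR, EXT, AGE_AND_STATS, JUMP,
 * CRC, IMM, SG/SG2); for SG the aura comes from the send header, for SG2 from
 * the sub-descriptor itself. The stride "inc = 8 * (0x2 >> instr_sz)" works
 * out to 16 or 8 64-bit words per SQE, presumably matching the two supported
 * maximum SQE sizes.
 */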
619 void
620 nix_tm_sq_free_sqe_buffer(uint64_t *sqe, int head_off, int end_off, int instr_sz)
621 {
622 	int i, j, inc = (8 * (0x2 >> instr_sz)), segs;
623 	struct nix_send_hdr_s *send_hdr;
624 	uint64_t *ptr, aura_handle;
625 	struct idev_cfg *idev;
626 
627 	if (!sqe)
628 		return;
629 
630 	idev = idev_get_cfg();
631 	if (idev == NULL)
632 		return;
633 
634 	ptr = sqe + (head_off * inc);
635 	for (i = head_off; i < end_off; i++) {
636 		ptr = sqe + (i * inc);
637 		send_hdr = (struct nix_send_hdr_s *)(ptr);
638 		aura_handle = roc_npa_aura_handle_gen(send_hdr->w0.aura, idev->npa->base);
639 		ptr += 2;
640 		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_EXT)
641 			ptr += 2;
642 		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_AGE_AND_STATS)
643 			ptr += 2;
644 		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_JUMP) {
645 			ptr += 1;
646 			ptr = (uint64_t *)*ptr;
647 		}
648 		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_CRC)
649 			ptr += 2;
650 		/* Immediate-data send descriptors are not parsed */
651 		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_IMM)
652 			continue;
653 		while (1) {
654 			if (((*ptr >> 60) & 0xF) == NIX_SUBDC_SG) {
655 				segs = (*ptr >> 48) & 0x3;
656 				ptr += 1;
657 				for (j = 0; j < segs; j++) {
658 					roc_npa_aura_op_free(aura_handle, 0, *ptr);
659 					ptr += 1;
660 				}
661 				if (segs == 2)
662 					ptr += 1;
663 			} else if (((*ptr >> 60) & 0xF) == NIX_SUBDC_SG2) {
664 				uint64_t aura = (*ptr >> 16) & 0xFFFFF;
665 
666 				aura = roc_npa_aura_handle_gen(aura, idev->npa->base);
667 				ptr += 1;
668 				roc_npa_aura_op_free(aura, 0, *ptr);
669 				ptr += 1;
670 			} else
671 				break;
672 		}
673 	}
674 }
675 
676 int
677 roc_nix_tm_sq_free_pending_sqe(struct nix *nix, int q)
678 {
679 	int head_off, count, rc = 0, tail_off;
680 	struct roc_nix_sq *sq = nix->sqs[q];
681 	void *sqb_buf, *dat, *tail_sqb;
682 	struct dev *dev = &nix->dev;
683 	struct ndc_sync_op *ndc_req;
684 	uint16_t sqes_per_sqb;
685 	struct mbox *mbox;
686 
687 	mbox = dev->mbox;
688 	/* Sync NDC-NIX-TX for LF */
689 	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox_get(mbox));
690 	if (ndc_req == NULL) {
691 		mbox_put(mbox);
692 		return -EFAULT;
693 	}
694 
695 	ndc_req->nix_lf_tx_sync = 1;
696 	if (mbox_process(mbox))
697 		rc |= NIX_ERR_NDC_SYNC;
698 	mbox_put(mbox);
699 
700 	if (rc)
701 		plt_err("NDC_SYNC failed rc %d", rc);
702 
703 	rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_SQ, q, (void *)&dat);
704 
705 	if (roc_model_is_cn9k()) {
706 		volatile struct nix_sq_ctx_s *ctx = (struct nix_sq_ctx_s *)dat;
707 
708 		/* Clean up SQE buffers only once the MNQ interrupt has been received */
709 		if (!ctx->mnq_dis)
710 			return -EFAULT;
711 
712 		count = ctx->sqb_count;
713 		sqb_buf = (void *)ctx->head_sqb;
714 		tail_sqb = (void *)ctx->tail_sqb;
715 		head_off = ctx->head_offset;
716 		tail_off = ctx->tail_offset;
717 	} else {
718 		volatile struct nix_cn10k_sq_ctx_s *ctx = (struct nix_cn10k_sq_ctx_s *)dat;
719 
720 		/* Clean up SQE buffers only once the MNQ interrupt has been received */
721 		if (!ctx->mnq_dis)
722 			return -EFAULT;
723 
724 		count = ctx->sqb_count;
725 		/* Free SQB's that are used */
726 		sqb_buf = (void *)ctx->head_sqb;
727 		tail_sqb = (void *)ctx->tail_sqb;
728 		head_off = ctx->head_offset;
729 		tail_off = ctx->tail_offset;
730 	}
731 	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
732 	/* Free SQB's that are used */
733 	while (count) {
734 		void *next_sqb;
735 
736 		if (sqb_buf == tail_sqb)
737 			nix_tm_sq_free_sqe_buffer(sqb_buf, head_off, tail_off, sq->max_sqe_sz);
738 		else
739 			nix_tm_sq_free_sqe_buffer(sqb_buf, head_off, (sqes_per_sqb - 1),
740 						  sq->max_sqe_sz);
741 		next_sqb = *(void **)((uint64_t *)sqb_buf +
742 				      (uint32_t)((sqes_per_sqb - 1) * (0x2 >> sq->max_sqe_sz) * 8));
743 		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
744 		sqb_buf = next_sqb;
745 		head_off = 0;
746 		count--;
747 	}
748 
749 	return 0;
750 }
751 
752 static inline int
753 nix_tm_sdp_sq_drop_pkts(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
754 {
755 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
756 	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
757 	struct nix_txschq_config *req = NULL, *rsp;
758 	enum roc_nix_tm_tree tree = nix->tm_tree;
759 	int rc = 0, qid = sq->qid;
760 	struct nix_tm_node *node;
761 	uint64_t regval;
762 
763 	/* Find the node for this SQ */
764 	node = nix_tm_node_search(nix, qid, tree);
765 	while (node) {
766 		if (node->hw_lvl != NIX_TXSCH_LVL_TL4) {
767 			node = node->parent;
768 			continue;
769 		}
770 		break;
771 	}
772 	if (!node) {
773 		plt_err("Invalid node/state for sq %u", qid);
774 		return -EFAULT;
775 	}
776 
777 	/* Get present link config */
778 	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
779 	req->read = 1;
780 	req->lvl = NIX_TXSCH_LVL_TL4;
781 	req->reg[0] = NIX_AF_TL4X_SDP_LINK_CFG(node->hw_id);
782 	req->num_regs = 1;
783 	rc = mbox_process_msg(mbox, (void **)&rsp);
784 	if (rc || rsp->num_regs != 1)
785 		goto err;
786 	regval = rsp->regval[0];
787 	/* Disable BP_ENA in SDP link config */
788 	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
789 	req->lvl = NIX_TXSCH_LVL_TL4;
790 	req->reg[0] = NIX_AF_TL4X_SDP_LINK_CFG(node->hw_id);
791 	req->regval[0] = 0x0ull;
792 	req->regval_mask[0] = ~(BIT_ULL(13));
793 	req->num_regs = 1;
794 	rc = mbox_process(mbox);
795 	if (rc)
796 		goto err;
797 	mbox_put(mbox);
798 	/* Flush SQ to drop all packets */
799 	rc = roc_nix_tm_sq_flush_spin(sq);
800 	if (rc)
801 		plt_nix_dbg("SQ flush failed with link reset config rc %d", rc);
802 	mbox = mbox_get((&nix->dev)->mbox);
803 	/* Restore link config */
804 	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
805 	req->reg[0] = NIX_AF_TL4X_SDP_LINK_CFG(node->hw_id);
806 	req->lvl = NIX_TXSCH_LVL_TL4;
807 	req->regval[0] = regval;
808 	req->regval_mask[0] = ~(BIT_ULL(13) | BIT_ULL(12) | GENMASK_ULL(7, 0));
809 	req->num_regs = 1;
810 	rc = mbox_process(mbox);
811 err:
812 	mbox_put(mbox);
813 	return rc;
814 }
815 
816 /* Flush and disable tx queue and its parent SMQ */
817 int
818 nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
819 {
820 	struct roc_nix *roc_nix = sq->roc_nix;
821 	struct nix_tm_node *node, *sibling;
822 	struct nix_tm_node_list *list;
823 	enum roc_nix_tm_tree tree;
824 	struct msg_req *req;
825 	struct mbox *mbox;
826 	struct nix *nix;
827 	uint16_t qid;
828 	int rc;
829 
830 	nix = roc_nix_to_nix_priv(roc_nix);
831 
832 	/* Need not do anything if tree is in disabled state */
833 	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
834 		return 0;
835 
836 	mbox = (&nix->dev)->mbox;
837 	qid = sq->qid;
838 
839 	tree = nix->tm_tree;
840 	list = nix_tm_node_list(nix, tree);
841 
842 	/* Find the node for this SQ */
843 	node = nix_tm_node_search(nix, qid, tree);
844 	if (!node) {
845 		plt_err("Invalid node/state for sq %u", qid);
846 		return -EFAULT;
847 	}
848 
849 	/* Enable CGX RXTX to drain pkts */
850 	if (!roc_nix->io_enabled) {
851 		/* Though this enables both RX MCAM entries and the CGX link,
852 		 * we assume all the RX queues were already stopped long back.
853 		 */
854 		mbox_alloc_msg_nix_lf_start_rx(mbox_get(mbox));
855 		rc = mbox_process(mbox);
856 		if (rc) {
857 			mbox_put(mbox);
858 			plt_err("cgx start failed, rc=%d", rc);
859 			return rc;
860 		}
861 		mbox_put(mbox);
862 	}
863 
864 	/* Disable backpressure */
865 	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
866 	if (rc) {
867 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
868 		return rc;
869 	}
870 
871 	/* Disable SMQ xoff in case it was enabled earlier */
872 	rc = nix_tm_smq_xoff(nix, node->parent, false);
873 	if (rc) {
874 		plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
875 			rc);
876 		return rc;
877 	}
878 
879 	/* As per HRM, to disable an SQ, all other SQs
880 	 * that feed the same SMQ must be paused before the SMQ flush.
881 	 */
882 	TAILQ_FOREACH(sibling, list, node) {
883 		if (sibling->parent != node->parent)
884 			continue;
885 		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
886 			continue;
887 
888 		qid = sibling->id;
889 		sq = nix->sqs[qid];
890 		if (!sq)
891 			continue;
892 
893 		if (sq->enable) {
894 			rc = roc_nix_tm_sq_aura_fc(sq, false);
895 			if (rc) {
896 				plt_err("Failed to disable sqb aura fc, rc=%d", rc);
897 				goto cleanup;
898 			}
899 		}
900 
901 		/* Wait for sq entries to be flushed */
902 		rc = roc_nix_tm_sq_flush_spin(sq);
903 		if (rc) {
904 			if (nix->sdp_link)
905 				rc = nix_tm_sdp_sq_drop_pkts(roc_nix, sq);
906 			else
907 				rc = roc_nix_tm_sq_free_pending_sqe(nix, sq->qid);
908 			if (rc) {
909 				roc_nix_tm_dump(sq->roc_nix, NULL);
910 				roc_nix_queues_ctx_dump(sq->roc_nix, NULL);
911 				plt_err("Failed to drain sq %u, rc=%d", sq->qid, rc);
912 				return rc;
913 			}
914 			/* Freed all pending SQEs for this SQ, so disable this node */
915 			sibling->flags &= ~NIX_TM_NODE_ENABLED;
916 		}
917 	}
918 
919 	node->flags &= ~NIX_TM_NODE_ENABLED;
920 
921 	/* Disable and flush */
922 	rc = nix_tm_smq_xoff(nix, node->parent, true);
923 	if (rc) {
924 		plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
925 			rc);
926 		goto cleanup;
927 	}
928 
929 	req = mbox_alloc_msg_nix_rx_sw_sync(mbox_get(mbox));
930 	if (!req) {
931 		mbox_put(mbox);
932 		return -ENOSPC;
933 	}
934 
935 	rc = mbox_process(mbox);
936 	mbox_put(mbox);
937 cleanup:
938 	/* Restore cgx state */
939 	if (!roc_nix->io_enabled) {
940 		mbox_alloc_msg_nix_lf_stop_rx(mbox_get(mbox));
941 		rc |= mbox_process(mbox);
942 		mbox_put(mbox);
943 	}
944 
945 	return rc;
946 }
947 
948 int
949 nix_tm_sq_flush_post(struct roc_nix_sq *sq)
950 {
951 	struct roc_nix *roc_nix = sq->roc_nix;
952 	struct nix_tm_node *node, *sibling;
953 	struct nix_tm_node_list *list;
954 	enum roc_nix_tm_tree tree;
955 	struct roc_nix_sq *s_sq;
956 	bool once = false;
957 	uint16_t qid, s_qid;
958 	struct nix *nix;
959 	int rc;
960 
961 	nix = roc_nix_to_nix_priv(roc_nix);
962 
963 	/* Need not do anything if tree is in disabled state */
964 	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
965 		return 0;
966 
967 	qid = sq->qid;
968 	tree = nix->tm_tree;
969 	list = nix_tm_node_list(nix, tree);
970 
971 	/* Find the node for this SQ */
972 	node = nix_tm_node_search(nix, qid, tree);
973 	if (!node) {
974 		plt_err("Invalid node for sq %u", qid);
975 		return -EFAULT;
976 	}
977 
978 	/* Enable all the siblings back */
979 	TAILQ_FOREACH(sibling, list, node) {
980 		if (sibling->parent != node->parent)
981 			continue;
982 
983 		if (sibling->id == qid)
984 			continue;
985 
986 		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
987 			continue;
988 
989 		s_qid = sibling->id;
990 		s_sq = nix->sqs[s_qid];
991 		if (!s_sq)
992 			continue;
993 
994 		if (!once) {
995 			/* Enable back if any SQ is still present */
996 			rc = nix_tm_smq_xoff(nix, node->parent, false);
997 			if (rc) {
998 				plt_err("Failed to enable smq %u, rc=%d",
999 					node->parent->hw_id, rc);
1000 				return rc;
1001 			}
1002 			once = true;
1003 		}
1004 
1005 		if (s_sq->enable) {
1006 			rc = roc_nix_tm_sq_aura_fc(s_sq, true);
1007 			if (rc) {
1008 				plt_err("Failed to enable sqb aura fc, rc=%d", rc);
1009 				return rc;
1010 			}
1011 		}
1012 	}
1013 
1014 	return 0;
1015 }
1016 
1017 int
1018 nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
1019 		     bool rr_quantum_only)
1020 {
1021 	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
1022 	uint16_t qid = node->id, smq;
1023 	uint64_t rr_quantum;
1024 	int rc;
1025 
1026 	smq = node->parent->hw_id;
1027 	rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
1028 
1029 	if (rr_quantum_only)
1030 		plt_tm_dbg("Update sq(%u) rr_quantum 0x%" PRIx64, qid,
1031 			   rr_quantum);
1032 	else
1033 		plt_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%" PRIx64,
1034 			   qid, smq, rr_quantum);
1035 
1036 	if (qid > nix->nb_tx_queues) {
1037 		rc = -EFAULT;
1038 		goto exit;
1039 	}
1040 
1041 	if (roc_model_is_cn9k()) {
1042 		struct nix_aq_enq_req *aq;
1043 
1044 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
1045 		if (!aq) {
1046 			rc = -ENOSPC;
1047 			goto exit;
1048 		}
1049 
1050 		aq->qidx = qid;
1051 		aq->ctype = NIX_AQ_CTYPE_SQ;
1052 		aq->op = NIX_AQ_INSTOP_WRITE;
1053 
1054 		/* smq update only when needed */
1055 		if (!rr_quantum_only) {
1056 			aq->sq.smq = smq;
1057 			aq->sq_mask.smq = ~aq->sq_mask.smq;
1058 		}
1059 		aq->sq.smq_rr_quantum = rr_quantum;
1060 		aq->sq_mask.smq_rr_quantum = ~aq->sq_mask.smq_rr_quantum;
1061 	} else if (roc_model_is_cn10k()) {
1062 		struct nix_cn10k_aq_enq_req *aq;
1063 
1064 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
1065 		if (!aq) {
1066 			rc = -ENOSPC;
1067 			goto exit;
1068 		}
1069 
1070 		aq->qidx = qid;
1071 		aq->ctype = NIX_AQ_CTYPE_SQ;
1072 		aq->op = NIX_AQ_INSTOP_WRITE;
1073 
1074 		/* smq update only when needed */
1075 		if (!rr_quantum_only) {
1076 			aq->sq.smq = smq;
1077 			aq->sq_mask.smq = ~aq->sq_mask.smq;
1078 		}
1079 		aq->sq.smq_rr_weight = rr_quantum;
1080 		aq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;
1081 	} else {
1082 		struct nix_cn20k_aq_enq_req *aq;
1083 
1084 		aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox);
1085 		if (!aq) {
1086 			rc = -ENOSPC;
1087 			goto exit;
1088 		}
1089 
1090 		aq->qidx = qid;
1091 		aq->ctype = NIX_AQ_CTYPE_SQ;
1092 		aq->op = NIX_AQ_INSTOP_WRITE;
1093 
1094 		/* smq update only when needed */
1095 		if (!rr_quantum_only) {
1096 			aq->sq.smq = smq;
1097 			aq->sq_mask.smq = ~aq->sq_mask.smq;
1098 		}
1099 		aq->sq.smq_rr_weight = rr_quantum;
1100 		aq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;
1101 	}
1102 
1103 	rc = mbox_process(mbox);
1104 	if (rc)
1105 		plt_err("Failed to set smq, rc=%d", rc);
1106 exit:
1107 	mbox_put(mbox);
1108 	return rc;
1109 }
1110 
1111 int
1112 nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
1113 			 bool above_thresh)
1114 {
1115 	uint16_t avail, thresh, to_free = 0, schq;
1116 	struct mbox *mbox = (&nix->dev)->mbox;
1117 	struct nix_txsch_free_req *req;
1118 	struct plt_bitmap *bmp;
1119 	uint64_t slab = 0;
1120 	uint32_t pos = 0;
1121 	int rc = -ENOSPC;
1122 
1123 	bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
1124 	thresh =
1125 		contig ? nix->contig_rsvd[hw_lvl] : nix->discontig_rsvd[hw_lvl];
1126 	plt_bitmap_scan_init(bmp);
1127 
1128 	avail = nix_tm_resource_avail(nix, hw_lvl, contig);
1129 
1130 	if (above_thresh) {
1131 		/* Release only above threshold */
1132 		if (avail > thresh)
1133 			to_free = avail - thresh;
1134 	} else {
1135 		/* Release everything */
1136 		to_free = avail;
1137 	}
1138 
1139 	/* Now release resources to AF */
1140 	while (to_free) {
1141 		if (!slab && !plt_bitmap_scan(bmp, &pos, &slab))
1142 			break;
1143 
1144 		schq = bitmap_ctzll(slab);
1145 		slab &= ~(1ULL << schq);
1146 		schq += pos;
1147 
1148 		/* Free to AF */
1149 		req = mbox_alloc_msg_nix_txsch_free(mbox_get(mbox));
1150 		if (req == NULL) {
1151 			mbox_put(mbox);
1152 			return rc;
1153 		}
1154 		req->flags = 0;
1155 		req->schq_lvl = hw_lvl;
1156 		req->schq = schq;
1157 		rc = mbox_process(mbox);
1158 		if (rc) {
1159 			plt_err("failed to release hwres %s(%u) rc %d",
1160 				nix_tm_hwlvl2str(hw_lvl), schq, rc);
1161 			mbox_put(mbox);
1162 			return rc;
1163 		}
1164 		mbox_put(mbox);
1165 		plt_tm_dbg("Released hwres %s(%u)", nix_tm_hwlvl2str(hw_lvl),
1166 			   schq);
1167 		plt_bitmap_clear(bmp, schq);
1168 		to_free--;
1169 	}
1170 
1171 	if (to_free) {
1172 		plt_err("resource inconsistency for %s(%u)",
1173 			nix_tm_hwlvl2str(hw_lvl), contig);
1174 		return -EFAULT;
1175 	}
1176 	return 0;
1177 }
1178 
1179 int
1180 nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node)
1181 {
1182 	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
1183 	struct nix_txsch_free_req *req;
1184 	struct plt_bitmap *bmp;
1185 	uint16_t avail, hw_id;
1186 	uint8_t hw_lvl;
1187 	int rc = -ENOSPC;
1188 
1189 	hw_lvl = node->hw_lvl;
1190 	hw_id = node->hw_id;
1191 	bmp = nix->schq_bmp[hw_lvl];
1192 	/* Free specific HW resource */
1193 	plt_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
1194 		   nix_tm_hwlvl2str(node->hw_lvl), hw_id, node->lvl, node->id,
1195 		   node);
1196 
1197 	avail = nix_tm_resource_avail(nix, hw_lvl, false);
1198 	/* For now, always free back to the local discontiguous pool when
1199 	 * the available count is below the reserved threshold.
1200 	 */
1201 	if (nix->discontig_rsvd[hw_lvl] &&
1202 	    avail < nix->discontig_rsvd[hw_lvl]) {
1203 		PLT_ASSERT(hw_id < NIX_TM_MAX_HW_TXSCHQ);
1204 		PLT_ASSERT(plt_bitmap_get(bmp, hw_id) == 0);
1205 		plt_bitmap_set(bmp, hw_id);
1206 		node->hw_id = NIX_TM_HW_ID_INVALID;
1207 		node->flags &= ~NIX_TM_NODE_HWRES;
1208 		rc = 0;
1209 		goto exit;
1210 	}
1211 
1212 	/* Free to AF */
1213 	req = mbox_alloc_msg_nix_txsch_free(mbox);
1214 	if (req == NULL)
1215 		goto exit;
1216 	req->flags = 0;
1217 	req->schq_lvl = node->hw_lvl;
1218 	req->schq = hw_id;
1219 	rc = mbox_process(mbox);
1220 	if (rc) {
1221 		plt_err("failed to release hwres %s(%u) rc %d",
1222 			nix_tm_hwlvl2str(node->hw_lvl), hw_id, rc);
1223 		goto exit;
1224 	}
1225 
1226 	/* Mark parent as dirty for reallocating its children */
1227 	if (node->parent)
1228 		node->parent->child_realloc = true;
1229 
1230 	node->hw_id = NIX_TM_HW_ID_INVALID;
1231 	node->flags &= ~NIX_TM_NODE_HWRES;
1232 	plt_tm_dbg("Released hwres %s(%u) to af",
1233 		   nix_tm_hwlvl2str(node->hw_lvl), hw_id);
1234 	rc = 0;
1235 exit:
1236 	mbox_put(mbox);
1237 	return rc;
1238 }
1239 
1240 int
1241 nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
1242 		   enum roc_nix_tm_tree tree, bool free)
1243 {
1244 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1245 	struct nix_tm_shaper_profile *profile;
1246 	struct nix_tm_node *node, *child;
1247 	struct nix_tm_node_list *list;
1248 	uint32_t profile_id;
1249 	int rc;
1250 
1251 	plt_tm_dbg("Delete node id %u tree %u", node_id, tree);
1252 
1253 	node = nix_tm_node_search(nix, node_id, tree);
1254 	if (!node)
1255 		return NIX_ERR_TM_INVALID_NODE;
1256 
1257 	list = nix_tm_node_list(nix, tree);
1258 	/* Check for any existing children */
1259 	TAILQ_FOREACH(child, list, node) {
1260 		if (child->parent == node)
1261 			return NIX_ERR_TM_CHILD_EXISTS;
1262 	}
1263 
1264 	/* Remove shaper profile reference */
1265 	profile_id = node->shaper_profile_id;
1266 	profile = nix_tm_shaper_profile_search(nix, profile_id);
1267 
1268 	/* Free hw resource locally */
1269 	if (node->flags & NIX_TM_NODE_HWRES) {
1270 		rc = nix_tm_free_node_resource(nix, node);
1271 		if (rc)
1272 			return rc;
1273 	}
1274 
1275 	if (profile)
1276 		profile->ref_cnt--;
1277 
1278 	TAILQ_REMOVE(list, node, node);
1279 
1280 	plt_tm_dbg("Deleted node %s lvl %u id %u, prio 0x%x weight 0x%x "
1281 		   "parent %u profile 0x%x tree %u (%p)",
1282 		   nix_tm_hwlvl2str(node->hw_lvl), node->lvl, node->id,
1283 		   node->priority, node->weight,
1284 		   node->parent ? node->parent->id : UINT32_MAX,
1285 		   node->shaper_profile_id, tree, node);
1286 	/* Free only if requested */
1287 	if (free)
1288 		nix_tm_node_free(node);
1289 	return 0;
1290 }
1291 
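/*
 * Allocation strategy implemented below: children at a static priority take
 * the contiguous schq at (*contig_id + priority), while round-robin children
 * (priority == parent->rr_prio) take discontiguous schqs from the per-level
 * bitmap. The contiguous slot reserved at rr_prio is handed out once as a
 * spare to the first RR child so it is not wasted.
 */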
1292 static int
1293 nix_tm_assign_hw_id(struct nix *nix, struct nix_tm_node *parent,
1294 		    uint16_t *contig_id, int *contig_cnt,
1295 		    struct nix_tm_node_list *list)
1296 {
1297 	struct nix_tm_node *child;
1298 	struct plt_bitmap *bmp;
1299 	uint8_t child_hw_lvl;
1300 	int spare_schq = -1;
1301 	uint32_t pos = 0;
1302 	uint64_t slab;
1303 	uint16_t schq;
1304 
1305 	child_hw_lvl = parent->hw_lvl - 1;
1306 	bmp = nix->schq_bmp[child_hw_lvl];
1307 	plt_bitmap_scan_init(bmp);
1308 	slab = 0;
1309 
1310 	/* Save a spare schq for the RR + SP case */
1311 	if (parent->rr_prio != 0xf && *contig_cnt > 1)
1312 		spare_schq = *contig_id + parent->rr_prio;
1313 
1314 	TAILQ_FOREACH(child, list, node) {
1315 		if (!child->parent)
1316 			continue;
1317 		if (child->parent->id != parent->id)
1318 			continue;
1319 
1320 		/* Resource never expected to be present */
1321 		if (child->flags & NIX_TM_NODE_HWRES) {
1322 			plt_err("Resource exists for child (%s)%u, id %u (%p)",
1323 				nix_tm_hwlvl2str(child->hw_lvl), child->hw_id,
1324 				child->id, child);
1325 			return -EFAULT;
1326 		}
1327 
1328 		if (!slab)
1329 			plt_bitmap_scan(bmp, &pos, &slab);
1330 
1331 		if (child->priority == parent->rr_prio && spare_schq != -1) {
1332 			/* Use spare schq first if present */
1333 			schq = spare_schq;
1334 			spare_schq = -1;
1335 			*contig_cnt = *contig_cnt - 1;
1336 
1337 		} else if (child->priority == parent->rr_prio) {
1338 			/* Assign a discontiguous queue */
1339 			if (!slab) {
1340 				plt_err("Schq not found for Child %u "
1341 					"lvl %u (%p)",
1342 					child->id, child->lvl, child);
1343 				return -ENOENT;
1344 			}
1345 
1346 			schq = bitmap_ctzll(slab);
1347 			slab &= ~(1ULL << schq);
1348 			schq += pos;
1349 			plt_bitmap_clear(bmp, schq);
1350 		} else {
1351 			/* Assign a contiguous queue */
1352 			schq = *contig_id + child->priority;
1353 			*contig_cnt = *contig_cnt - 1;
1354 		}
1355 
1356 		plt_tm_dbg("Resource %s(%u), for lvl %u id %u(%p)",
1357 			   nix_tm_hwlvl2str(child->hw_lvl), schq, child->lvl,
1358 			   child->id, child);
1359 
1360 		child->hw_id = schq;
1361 		child->parent_hw_id = parent->hw_id;
1362 		child->flags |= NIX_TM_NODE_HWRES;
1363 	}
1364 
1365 	return 0;
1366 }
1367 
1368 int
1369 nix_tm_assign_resources(struct nix *nix, enum roc_nix_tm_tree tree)
1370 {
1371 	struct nix_tm_node *parent, *root = NULL;
1372 	struct plt_bitmap *bmp, *bmp_contig;
1373 	struct nix_tm_node_list *list;
1374 	uint8_t child_hw_lvl, hw_lvl;
1375 	uint16_t contig_id, j;
1376 	uint64_t slab = 0;
1377 	uint32_t pos = 0;
1378 	int cnt, rc;
1379 
1380 	list = nix_tm_node_list(nix, tree);
1381 	/* Walk from TL1 to TL4 parents */
1382 	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
1383 		TAILQ_FOREACH(parent, list, node) {
1384 			child_hw_lvl = parent->hw_lvl - 1;
1385 			if (parent->hw_lvl != hw_lvl)
1386 				continue;
1387 
1388 			/* Remember root for future */
1389 			if (parent->hw_lvl == nix->tm_root_lvl)
1390 				root = parent;
1391 
1392 			if (!parent->child_realloc) {
1393 				/* Skip when parent is not dirty */
1394 				if (nix_tm_child_res_valid(list, parent))
1395 					continue;
1396 				plt_err("Parent not dirty but invalid "
1397 					"child res parent id %u(lvl %u)",
1398 					parent->id, parent->lvl);
1399 				return -EFAULT;
1400 			}
1401 
1402 			bmp_contig = nix->schq_contig_bmp[child_hw_lvl];
1403 
1404 			/* Prealloc contiguous indices for a parent */
1405 			contig_id = NIX_TM_MAX_HW_TXSCHQ;
1406 			cnt = (int)parent->max_prio + 1;
1407 			if (cnt > 0) {
1408 				plt_bitmap_scan_init(bmp_contig);
1409 				if (!plt_bitmap_scan(bmp_contig, &pos, &slab)) {
1410 					plt_err("Contig schq not found");
1411 					return -ENOENT;
1412 				}
1413 				contig_id = pos + bitmap_ctzll(slab);
1414 
1415 				/* Check if we have enough */
1416 				for (j = contig_id; j < contig_id + cnt; j++) {
1417 					if (!plt_bitmap_get(bmp_contig, j))
1418 						break;
1419 				}
1420 
1421 				if (j != contig_id + cnt) {
1422 					plt_err("Contig schq not sufficient");
1423 					return -ENOENT;
1424 				}
1425 
1426 				for (j = contig_id; j < contig_id + cnt; j++)
1427 					plt_bitmap_clear(bmp_contig, j);
1428 			}
1429 
1430 			/* Assign hw id to all children */
1431 			rc = nix_tm_assign_hw_id(nix, parent, &contig_id, &cnt,
1432 						 list);
1433 			if (cnt || rc) {
1434 				plt_err("Unexpected err, contig res alloc, "
1435 					"parent %u, of %s, rc=%d, cnt=%d",
1436 					parent->id, nix_tm_hwlvl2str(hw_lvl),
1437 					rc, cnt);
1438 				return -EFAULT;
1439 			}
1440 
1441 			/* Clear the dirty bit as children's
1442 			 * resources are reallocated.
1443 			 */
1444 			parent->child_realloc = false;
1445 		}
1446 	}
1447 
1448 	/* Root is always expected to be there */
1449 	if (!root)
1450 		return -EFAULT;
1451 
1452 	if (root->flags & NIX_TM_NODE_HWRES)
1453 		return 0;
1454 
1455 	/* Process root node */
1456 	bmp = nix->schq_bmp[nix->tm_root_lvl];
1457 	plt_bitmap_scan_init(bmp);
1458 	if (!plt_bitmap_scan(bmp, &pos, &slab)) {
1459 		plt_err("Resource not allocated for root");
1460 		return -EIO;
1461 	}
1462 
1463 	root->hw_id = pos + bitmap_ctzll(slab);
1464 	root->flags |= NIX_TM_NODE_HWRES;
1465 	plt_bitmap_clear(bmp, root->hw_id);
1466 
1467 	/* Get TL1 id as well when root is not TL1 */
1468 	if (!nix_tm_have_tl1_access(nix)) {
1469 		bmp = nix->schq_bmp[NIX_TXSCH_LVL_TL1];
1470 
1471 		plt_bitmap_scan_init(bmp);
1472 		if (!plt_bitmap_scan(bmp, &pos, &slab)) {
1473 			plt_err("Resource not found for TL1");
1474 			return -EIO;
1475 		}
1476 		root->parent_hw_id = pos + bitmap_ctzll(slab);
1477 		plt_bitmap_clear(bmp, root->parent_hw_id);
1478 	}
1479 
1480 	plt_tm_dbg("Resource %s(%u) for root(id %u) (%p)",
1481 		   nix_tm_hwlvl2str(root->hw_lvl), root->hw_id, root->id, root);
1482 
1483 	return 0;
1484 }
1485 
1486 void
1487 nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp)
1488 {
1489 	uint8_t lvl;
1490 	uint16_t i;
1491 
1492 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1493 		for (i = 0; i < rsp->schq[lvl]; i++)
1494 			plt_bitmap_set(nix->schq_bmp[lvl],
1495 				       rsp->schq_list[lvl][i]);
1496 
1497 		for (i = 0; i < rsp->schq_contig[lvl]; i++)
1498 			plt_bitmap_set(nix->schq_contig_bmp[lvl],
1499 				       rsp->schq_contig_list[lvl][i]);
1500 	}
1501 }
1502 
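/*
 * Allocation flow below: estimate the per-level schq requirement for the
 * tree, release any existing contiguous resources (their contiguity cannot
 * be preserved across a realloc), then request schqs from AF in a loop,
 * bounded by MAX_TXSCHQ_PER_FUNC per level per mbox message, until the
 * full requirement is satisfied.
 */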
1503 int
1504 nix_tm_alloc_txschq(struct nix *nix, enum roc_nix_tm_tree tree)
1505 {
1506 	uint16_t schq_contig[NIX_TXSCH_LVL_CNT];
1507 	struct mbox *mbox = (&nix->dev)->mbox;
1508 	uint16_t schq[NIX_TXSCH_LVL_CNT];
1509 	struct nix_txsch_alloc_req *req;
1510 	struct nix_txsch_alloc_rsp *rsp;
1511 	uint8_t hw_lvl, i;
1512 	bool pend;
1513 	int rc;
1514 
1515 	memset(schq, 0, sizeof(schq));
1516 	memset(schq_contig, 0, sizeof(schq_contig));
1517 
1518 	/* Estimate requirement */
1519 	rc = nix_tm_resource_estimate(nix, schq_contig, schq, tree);
1520 	if (!rc)
1521 		return 0;
1522 
1523 	/* Release existing contiguous resources when a realloc is requested,
1524 	 * as there is no way to guarantee contiguity of the old with the new.
1525 	 */
1526 	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1527 		if (schq_contig[hw_lvl])
1528 			nix_tm_release_resources(nix, hw_lvl, true, false);
1529 	}
1530 
1531 	/* Alloc as needed */
1532 	do {
1533 		pend = false;
1534 		req = mbox_alloc_msg_nix_txsch_alloc(mbox_get(mbox));
1535 		if (!req) {
1536 			mbox_put(mbox);
1537 			rc = -ENOMEM;
1538 			goto alloc_err;
1539 		}
1540 		mbox_memcpy(req->schq, schq, sizeof(req->schq));
1541 		mbox_memcpy(req->schq_contig, schq_contig,
1542 			    sizeof(req->schq_contig));
1543 
1544 		/* Each alloc can request at most MAX_TXSCHQ_PER_FUNC per level,
1545 		 * so split the allocation into multiple requests.
1546 		 */
1547 		for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1548 			if (req->schq[i] > MAX_TXSCHQ_PER_FUNC)
1549 				req->schq[i] = MAX_TXSCHQ_PER_FUNC;
1550 			schq[i] -= req->schq[i];
1551 
1552 			if (req->schq_contig[i] > MAX_TXSCHQ_PER_FUNC)
1553 				req->schq_contig[i] = MAX_TXSCHQ_PER_FUNC;
1554 			schq_contig[i] -= req->schq_contig[i];
1555 
1556 			if (schq[i] || schq_contig[i])
1557 				pend = true;
1558 		}
1559 
1560 		rc = mbox_process_msg(mbox, (void *)&rsp);
1561 		if (rc) {
1562 			mbox_put(mbox);
1563 			goto alloc_err;
1564 		}
1565 
1566 		nix_tm_copy_rsp_to_nix(nix, rsp);
1567 		mbox_put(mbox);
1568 	} while (pend);
1569 
1570 	nix->tm_link_cfg_lvl = rsp->link_cfg_lvl;
1571 	nix->tm_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
1572 	return 0;
1573 alloc_err:
1574 	for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1575 		if (nix_tm_release_resources(nix, i, true, false))
1576 			plt_err("Failed to release contig resources of "
1577 				"lvl %d on error",
1578 				i);
1579 		if (nix_tm_release_resources(nix, i, false, false))
1580 			plt_err("Failed to release discontig resources of "
1581 				"lvl %d on error",
1582 				i);
1583 	}
1584 	return rc;
1585 }
1586 
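/*
 * Shape of the default tree built below (with TL1 access; without it the
 * chain stops one level earlier):
 *
 *	ROOT -> SCH1 -> SCH2 -> SCH3 -> SCH4 -> { QUEUE 0 .. QUEUE n-1 }
 *
 * i.e. a single chain of non-leaf nodes with every SQ attached as a leaf
 * under the last non-leaf node.
 */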
1587 int
1588 nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
1589 {
1590 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1591 	uint32_t nonleaf_id = nix->nb_tx_queues;
1592 	struct nix_tm_node *node = NULL;
1593 	uint8_t leaf_lvl, lvl, lvl_end;
1594 	uint32_t parent, i;
1595 	int rc = 0;
1596 
1597 	/* Add ROOT, SCH1, SCH2, SCH3, [SCH4] nodes */
1598 	parent = ROC_NIX_TM_NODE_ID_INVALID;
1599 	/* With TL1 access we have an extra level */
1600 	lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
1601 						       ROC_TM_LVL_SCH3);
1602 
1603 	for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
1604 		rc = -ENOMEM;
1605 		node = nix_tm_node_alloc();
1606 		if (!node)
1607 			goto error;
1608 
1609 		node->id = nonleaf_id;
1610 		node->parent_id = parent;
1611 		node->priority = 0;
1612 		/* Default VF root RR_QUANTUM is in sync with kernel */
1613 		if (lvl == ROC_TM_LVL_ROOT && !nix_tm_have_tl1_access(nix))
1614 			node->weight = roc_nix->root_sched_weight;
1615 		else
1616 			node->weight = NIX_TM_DFLT_RR_WT;
1617 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1618 		node->lvl = lvl;
1619 		node->tree = ROC_NIX_TM_DEFAULT;
1620 		node->rel_chan = NIX_TM_CHAN_INVALID;
1621 
1622 		rc = nix_tm_node_add(roc_nix, node);
1623 		if (rc)
1624 			goto error;
1625 		parent = nonleaf_id;
1626 		nonleaf_id++;
1627 	}
1628 
1629 	parent = nonleaf_id - 1;
1630 	leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1631 							ROC_TM_LVL_SCH4);
1632 
1633 	/* Add leaf nodes */
1634 	for (i = 0; i < nix->nb_tx_queues; i++) {
1635 		rc = -ENOMEM;
1636 		node = nix_tm_node_alloc();
1637 		if (!node)
1638 			goto error;
1639 
1640 		node->id = i;
1641 		node->parent_id = parent;
1642 		node->priority = 0;
1643 		node->weight = NIX_TM_DFLT_RR_WT;
1644 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1645 		node->lvl = leaf_lvl;
1646 		node->tree = ROC_NIX_TM_DEFAULT;
1647 		node->rel_chan = NIX_TM_CHAN_INVALID;
1648 
1649 		rc = nix_tm_node_add(roc_nix, node);
1650 		if (rc)
1651 			goto error;
1652 	}
1653 
1654 	return 0;
1655 error:
1656 	nix_tm_node_free(node);
1657 	return rc;
1658 }
1659 
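/*
 * The rate-limited tree below differs from the default tree in that every SQ
 * gets its own SMQ-level (SCH4/SCH3) parent, so shaping can be applied per
 * queue:
 *
 *	ROOT -> SCH1 -> SCH2 [-> SCH3] -> SMQ_i -> QUEUE_i  (one branch per SQ)
 */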
1660 int
1661 roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
1662 {
1663 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1664 	uint32_t nonleaf_id = nix->nb_tx_queues;
1665 	struct nix_tm_node *node = NULL;
1666 	uint8_t leaf_lvl, lvl, lvl_end;
1667 	uint32_t parent, i;
1668 	int rc = 0;
1669 
1670 	/* Add ROOT, SCH1, SCH2 nodes */
1671 	parent = ROC_NIX_TM_NODE_ID_INVALID;
1672 	lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
1673 						       ROC_TM_LVL_SCH2);
1674 
1675 	for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
1676 		rc = -ENOMEM;
1677 		node = nix_tm_node_alloc();
1678 		if (!node)
1679 			goto error;
1680 
1681 		node->id = nonleaf_id;
1682 		node->parent_id = parent;
1683 		node->priority = 0;
1684 		node->weight = NIX_TM_DFLT_RR_WT;
1685 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1686 		node->lvl = lvl;
1687 		node->tree = ROC_NIX_TM_RLIMIT;
1688 		node->rel_chan = NIX_TM_CHAN_INVALID;
1689 
1690 		rc = nix_tm_node_add(roc_nix, node);
1691 		if (rc)
1692 			goto error;
1693 		parent = nonleaf_id;
1694 		nonleaf_id++;
1695 	}
1696 
1697 	/* SMQ is mapped to SCH4 when we have TL1 access and SCH3 otherwise */
1698 	lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 : ROC_TM_LVL_SCH3);
1699 
1700 	/* Add per queue SMQ nodes i.e SCH4 / SCH3 */
1701 	for (i = 0; i < nix->nb_tx_queues; i++) {
1702 		rc = -ENOMEM;
1703 		node = nix_tm_node_alloc();
1704 		if (!node)
1705 			goto error;
1706 
1707 		node->id = nonleaf_id + i;
1708 		node->parent_id = parent;
1709 		node->priority = 0;
1710 		node->weight = NIX_TM_DFLT_RR_WT;
1711 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1712 		node->lvl = lvl;
1713 		node->tree = ROC_NIX_TM_RLIMIT;
1714 		node->rel_chan = NIX_TM_CHAN_INVALID;
1715 
1716 		rc = nix_tm_node_add(roc_nix, node);
1717 		if (rc)
1718 			goto error;
1719 	}
1720 
1721 	parent = nonleaf_id;
1722 	leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1723 							ROC_TM_LVL_SCH4);
1724 
1725 	/* Add leaf nodes */
1726 	for (i = 0; i < nix->nb_tx_queues; i++) {
1727 		rc = -ENOMEM;
1728 		node = nix_tm_node_alloc();
1729 		if (!node)
1730 			goto error;
1731 
1732 		node->id = i;
1733 		node->parent_id = parent + i;
1734 		node->priority = 0;
1735 		node->weight = NIX_TM_DFLT_RR_WT;
1736 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1737 		node->lvl = leaf_lvl;
1738 		node->tree = ROC_NIX_TM_RLIMIT;
1739 		node->rel_chan = NIX_TM_CHAN_INVALID;
1740 
1741 		rc = nix_tm_node_add(roc_nix, node);
1742 		if (rc)
1743 			goto error;
1744 	}
1745 
1746 	return 0;
1747 error:
1748 	nix_tm_node_free(node);
1749 	return rc;
1750 }
1751 
1752 int
1753 roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
1754 {
1755 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1756 	uint8_t leaf_lvl, lvl, lvl_start, lvl_end;
1757 	uint32_t nonleaf_id = nix->nb_tx_queues;
1758 	struct nix_tm_node *node = NULL;
1759 	uint32_t tl2_node_id;
1760 	uint32_t parent, i;
1761 	int rc = -ENOMEM;
1762 
1763 	parent = ROC_NIX_TM_NODE_ID_INVALID;
1764 	lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
1765 		   ROC_TM_LVL_SCH2);
1766 	leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1767 		    ROC_TM_LVL_SCH4);
1768 
1769 	/* TL1 node */
1770 	node = nix_tm_node_alloc();
1771 	if (!node)
1772 		goto error;
1773 
1774 	node->id = nonleaf_id;
1775 	node->parent_id = parent;
1776 	node->priority = 0;
1777 	node->weight = NIX_TM_DFLT_RR_WT;
1778 	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1779 	node->lvl = ROC_TM_LVL_ROOT;
1780 	node->tree = ROC_NIX_TM_PFC;
1781 	node->rel_chan = NIX_TM_CHAN_INVALID;
1782 
1783 	rc = nix_tm_node_add(roc_nix, node);
1784 	if (rc)
1785 		goto error;
1786 
1787 	parent = nonleaf_id;
1788 	nonleaf_id++;
1789 
1790 	lvl_start = ROC_TM_LVL_SCH1;
1791 	if (roc_nix_is_pf(roc_nix)) {
1792 		/* TL2 node */
1793 		rc = -ENOMEM;
1794 		node = nix_tm_node_alloc();
1795 		if (!node)
1796 			goto error;
1797 
1798 		node->id = nonleaf_id;
1799 		node->parent_id = parent;
1800 		node->priority = 0;
1801 		node->weight = NIX_TM_DFLT_RR_WT;
1802 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1803 		node->lvl = ROC_TM_LVL_SCH1;
1804 		node->tree = ROC_NIX_TM_PFC;
1805 		node->rel_chan = NIX_TM_CHAN_INVALID;
1806 
1807 		rc = nix_tm_node_add(roc_nix, node);
1808 		if (rc)
1809 			goto error;
1810 
1811 		lvl_start = ROC_TM_LVL_SCH2;
1812 		tl2_node_id = nonleaf_id;
1813 		nonleaf_id++;
1814 	} else {
1815 		tl2_node_id = parent;
1816 	}
1817 
1818 	for (i = 0; i < nix->nb_tx_queues; i++) {
1819 		parent = tl2_node_id;
1820 		for (lvl = lvl_start; lvl <= lvl_end; lvl++) {
1821 			rc = -ENOMEM;
1822 			node = nix_tm_node_alloc();
1823 			if (!node)
1824 				goto error;
1825 
1826 			node->id = nonleaf_id;
1827 			node->parent_id = parent;
1828 			node->priority = 0;
1829 			node->weight = NIX_TM_DFLT_RR_WT;
1830 			node->shaper_profile_id =
1831 				ROC_NIX_TM_SHAPER_PROFILE_NONE;
1832 			node->lvl = lvl;
1833 			node->tree = ROC_NIX_TM_PFC;
1834 			node->rel_chan = NIX_TM_CHAN_INVALID;
1835 
1836 			rc = nix_tm_node_add(roc_nix, node);
1837 			if (rc)
1838 				goto error;
1839 
1840 			parent = nonleaf_id;
1841 			nonleaf_id++;
1842 		}
1843 
1844 		lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
1845 		       ROC_TM_LVL_SCH3);
1846 
1847 		rc = -ENOMEM;
1848 		node = nix_tm_node_alloc();
1849 		if (!node)
1850 			goto error;
1851 
1852 		node->id = nonleaf_id;
1853 		node->parent_id = parent;
1854 		node->priority = 0;
1855 		node->weight = NIX_TM_DFLT_RR_WT;
1856 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1857 		node->lvl = lvl;
1858 		node->tree = ROC_NIX_TM_PFC;
1859 		node->rel_chan = NIX_TM_CHAN_INVALID;
1860 
1861 		rc = nix_tm_node_add(roc_nix, node);
1862 		if (rc)
1863 			goto error;
1864 
1865 		parent = nonleaf_id;
1866 		nonleaf_id++;
1867 
1868 		rc = -ENOMEM;
1869 		node = nix_tm_node_alloc();
1870 		if (!node)
1871 			goto error;
1872 
1873 		node->id = i;
1874 		node->parent_id = parent;
1875 		node->priority = 0;
1876 		node->weight = NIX_TM_DFLT_RR_WT;
1877 		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1878 		node->lvl = leaf_lvl;
1879 		node->tree = ROC_NIX_TM_PFC;
1880 		node->rel_chan = NIX_TM_CHAN_INVALID;
1881 
1882 		rc = nix_tm_node_add(roc_nix, node);
1883 		if (rc)
1884 			goto error;
1885 	}
1886 
1887 	return 0;
1888 error:
1889 	nix_tm_node_free(node);
1890 	return rc;
1891 }
1892 
1893 int
1894 nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
1895 {
1896 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1897 	struct nix_tm_shaper_profile *profile;
1898 	struct nix_tm_node *node, *next_node;
1899 	struct nix_tm_node_list *list;
1900 	enum roc_nix_tm_tree tree;
1901 	uint32_t profile_id;
1902 	int rc = 0;
1903 	int hw_lvl;
1904 
1905 	for (tree = 0; tree < ROC_NIX_TM_TREE_MAX; tree++) {
1906 		if (!(tree_mask & BIT(tree)))
1907 			continue;
1908 
1909 		plt_tm_dbg("Freeing resources of tree %u", tree);
1910 
1911 		list = nix_tm_node_list(nix, tree);
1912 		/* Flush and free resources from leaf */
1913 		for (hw_lvl = NIX_TXSCH_LVL_SMQ; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1914 			next_node = TAILQ_FIRST(list);
1915 			while (next_node) {
1916 				node = next_node;
1917 				next_node = TAILQ_NEXT(node, node);
1918 				if (node->hw_lvl != hw_lvl)
1919 					continue;
1920 
1921 				if (!nix_tm_is_leaf(nix, node->lvl) &&
1922 				    node->flags & NIX_TM_NODE_HWRES) {
1923 					/* Clear xoff in path for flush to succeed */
1924 					rc = nix_tm_clear_path_xoff(nix, node);
1925 					if (rc)
1926 						return rc;
1927 					rc = nix_tm_free_node_resource(nix, node);
1928 					if (rc)
1929 						return rc;
1930 				}
1931 			}
1932 		}
1933 
1934 		/* Leave software elements if needed */
1935 		if (hw_only)
1936 			continue;
1937 
1938 		next_node = TAILQ_FIRST(list);
1939 		while (next_node) {
1940 			node = next_node;
1941 			next_node = TAILQ_NEXT(node, node);
1942 
1943 			plt_tm_dbg("Free node lvl %u id %u (%p)", node->lvl,
1944 				   node->id, node);
1945 
1946 			profile_id = node->shaper_profile_id;
1947 			profile = nix_tm_shaper_profile_search(nix, profile_id);
1948 			if (profile)
1949 				profile->ref_cnt--;
1950 
1951 			TAILQ_REMOVE(list, node, node);
1952 			nix_tm_node_free(node);
1953 		}
1954 	}
1955 	return rc;
1956 }
1957 
1958 int
1959 nix_tm_conf_init(struct roc_nix *roc_nix)
1960 {
1961 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1962 	uint32_t bmp_sz, hw_lvl;
1963 	void *bmp_mem;
1964 	int rc, i;
1965 
1966 	PLT_STATIC_ASSERT(sizeof(struct nix_tm_node) <= ROC_NIX_TM_NODE_SZ);
1967 	PLT_STATIC_ASSERT(sizeof(struct nix_tm_shaper_profile) <=
1968 			  ROC_NIX_TM_SHAPER_PROFILE_SZ);
1969 
1970 	nix->tm_flags = 0;
1971 	for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
1972 		TAILQ_INIT(&nix->trees[i]);
1973 
1974 	TAILQ_INIT(&nix->shaper_profile_list);
1975 	nix->tm_rate_min = 1E9; /* 1Gbps */
1976 
1977 	rc = -ENOMEM;
1978 	bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
1979 	bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
1980 	if (!bmp_mem)
1981 		return rc;
1982 	nix->schq_bmp_mem = bmp_mem;
1983 
1984 	/* Init contiguous and discontiguous bitmap per lvl */
1985 	rc = -EIO;
1986 	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1987 		/* Bitmap for discontiguous resource */
1988 		nix->schq_bmp[hw_lvl] =
1989 			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
1990 		if (!nix->schq_bmp[hw_lvl])
1991 			goto exit;
1992 
1993 		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
1994 
1995 		/* Bitmap for contiguous resource */
1996 		nix->schq_contig_bmp[hw_lvl] =
1997 			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
1998 		if (!nix->schq_contig_bmp[hw_lvl])
1999 			goto exit;
2000 
2001 		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
2002 	}
2003 
2004 	rc = nix_tm_mark_init(nix);
2005 	if (rc)
2006 		goto exit;
2007 
2008 	/* Disable TL1 static priority when VFs are enabled,
2009 	 * as otherwise the VFs' TL2 would need to be reallocated
2010 	 * at runtime to support a specific PF topology.
2011 	 */
2012 	if (nix->pci_dev->max_vfs)
2013 		nix->tm_flags |= NIX_TM_TL1_NO_SP;
2014 
2015 	/* TL1 access is only for PFs */
2016 	if (roc_nix_is_pf(roc_nix)) {
2017 		nix->tm_flags |= NIX_TM_TL1_ACCESS;
2018 		nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
2019 	} else {
2020 		nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
2021 	}
2022 
2023 	return 0;
2024 exit:
2025 	nix_tm_conf_fini(roc_nix);
2026 	return rc;
2027 }
2028 
2029 void
2030 nix_tm_conf_fini(struct roc_nix *roc_nix)
2031 {
2032 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
2033 	uint16_t hw_lvl;
2034 
2035 	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
2036 		plt_bitmap_free(nix->schq_bmp[hw_lvl]);
2037 		plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
2038 	}
2039 	plt_free(nix->schq_bmp_mem);
2040 }
2041