/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */
#include <rte_ethdev.h>
#include <rte_tm_driver.h>

#include "ice_ethdev.h"
#include "ice_rxtx.h"

static int ice_hierarchy_commit(struct rte_eth_dev *dev,
				 int clear_on_fail,
				 struct rte_tm_error *error);
static int ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	      uint32_t parent_node_id, uint32_t priority,
	      uint32_t weight, uint32_t level_id,
	      const struct rte_tm_node_params *params,
	      struct rte_tm_error *error);
static int ice_node_query(const struct rte_eth_dev *dev, uint32_t node_id,
		uint32_t *parent_node_id, uint32_t *priority,
		uint32_t *weight, uint32_t *level_id,
		struct rte_tm_node_params *params,
		struct rte_tm_error *error);
static int ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			    struct rte_tm_error *error);
static int ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		   int *is_leaf, struct rte_tm_error *error);
static int ice_shaper_profile_add(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			const struct rte_tm_shaper_params *profile,
			struct rte_tm_error *error);
static int ice_shaper_profile_del(struct rte_eth_dev *dev,
				   uint32_t shaper_profile_id,
				   struct rte_tm_error *error);

const struct rte_tm_ops ice_tm_ops = {
	.shaper_profile_add = ice_shaper_profile_add,
	.shaper_profile_delete = ice_shaper_profile_del,
	.node_add = ice_tm_node_add,
	.node_delete = ice_tm_node_delete,
	.node_type_get = ice_node_type_get,
	.node_query = ice_node_query,
	.hierarchy_commit = ice_hierarchy_commit,
};
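
/*
 * These callbacks are not called directly: applications reach them through
 * the generic rte_tm API once ice_tm_ops is returned by the driver's
 * tm_ops_get hook. A minimal application-side sketch (hypothetical port_id
 * and node ids, error handling elided):
 *
 *	struct rte_tm_error err;
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *	};
 *
 *	rte_tm_node_add(port_id, 0, RTE_TM_NODE_ID_NULL, 0, 1, 0, &np, &err);
 *	rte_tm_hierarchy_commit(port_id, 1, &err);
 */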

void
ice_tm_conf_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* initialize node configuration */
	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
	pf->tm_conf.root = NULL;
	pf->tm_conf.committed = false;
	pf->tm_conf.clear_on_fail = false;
}

static void free_node(struct ice_tm_node *root)
{
	uint32_t i;

	if (root == NULL)
		return;

	for (i = 0; i < root->reference_count; i++)
		free_node(root->children[i]);

	rte_free(root);
}

void
ice_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_tm_shaper_profile *shaper_profile;

	/* remove all shaper profiles */
	while ((shaper_profile = TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
		TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
		rte_free(shaper_profile);
	}

	free_node(pf->tm_conf.root);
	pf->tm_conf.root = NULL;
}

static int
ice_node_param_check(uint32_t node_id,
		      uint32_t priority, uint32_t weight,
		      const struct rte_tm_node_params *params,
		      bool is_leaf,
		      struct rte_tm_error *error)
{
	/* check all the unsupported parameters */
	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (priority >= 8) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be less than 8";
		return -EINVAL;
	}

	if (weight > 200 || weight < 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be between 1 and 200";
		return -EINVAL;
	}

	/* shared shapers not supported */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for non-leaf nodes */
	if (!is_leaf) {
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
			return -EINVAL;
		}

		return 0;
	}

	/* for leaf node */
	if (node_id >= RTE_MAX_QUEUES_PER_PORT) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "Node ID out of range for a leaf node.";
		return -EINVAL;
	}
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}
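
/*
 * For reference, a minimal parameter set that passes the checks above for a
 * non-leaf node (a sketch; shared shapers, WFQ, congestion management and
 * WRED must all stay disabled):
 *
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.nonleaf = {
 *			.wfq_weight_mode = NULL,
 *			.n_sp_priorities = 1,
 *		},
 *	};
 *
 * together with node_id != RTE_TM_NODE_ID_NULL, priority < 8 and
 * 1 <= weight <= 200.
 */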

static struct ice_tm_node *
find_node(struct ice_tm_node *root, uint32_t id)
{
	uint32_t i;

	if (root == NULL || root->id == id)
		return root;

	for (i = 0; i < root->reference_count; i++) {
		struct ice_tm_node *node = find_node(root->children[i], id);

		if (node)
			return node;
	}

	return NULL;
}

static inline uint8_t
ice_get_leaf_level(const struct ice_pf *pf)
{
	const struct ice_hw *hw = ICE_PF_TO_HW(pf);
	return hw->num_tx_sched_layers - pf->tm_conf.hidden_layers - 1;
}
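
/*
 * Worked example: with hw->num_tx_sched_layers == 9 and two hidden layers,
 * the leaf (queue) level exposed by this driver is 9 - 2 - 1 = 6. The
 * numbers are purely illustrative; both inputs come from the firmware
 * topology.
 */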

static int
ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		   int *is_leaf, struct rte_tm_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_tm_node *tm_node;

	if (!is_leaf || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = find_node(pf->tm_conf.root, node_id);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (tm_node->level == ice_get_leaf_level(pf))
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}

static int
ice_node_query(const struct rte_eth_dev *dev, uint32_t node_id,
		uint32_t *parent_node_id, uint32_t *priority,
		uint32_t *weight, uint32_t *level_id,
		struct rte_tm_node_params *params,
		struct rte_tm_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_tm_node *tm_node;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = find_node(pf->tm_conf.root, node_id);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -ENOENT;
	}

	if (parent_node_id != NULL) {
		if (tm_node->parent != NULL)
			*parent_node_id = tm_node->parent->id;
		else
			*parent_node_id = RTE_TM_NODE_ID_NULL;
	}

	if (priority != NULL)
		*priority = tm_node->priority;

	if (weight != NULL)
		*weight = tm_node->weight;

	if (level_id != NULL)
		*level_id = tm_node->level;

	if (params != NULL)
		*params = tm_node->params;

	return 0;
}
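
/*
 * Reached via rte_tm_node_query(); any output pointer may be NULL when the
 * caller does not need that field. A sketch (hypothetical port and node ids):
 *
 *	uint32_t parent, level;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_node_query(port_id, node_id, &parent, NULL, NULL,
 *			      &level, NULL, &err) == 0)
 *		printf("node %u: parent %u, level %u\n",
 *		       node_id, parent, level);
 */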

static inline struct ice_tm_shaper_profile *
ice_shaper_profile_search(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_shaper_profile_list *shaper_profile_list =
		&pf->tm_conf.shaper_profile_list;
	struct ice_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}

static int
ice_shaper_profile_param_check(const struct rte_tm_shaper_params *profile,
				struct rte_tm_error *error)
{
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}

static int
ice_shaper_profile_add(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			const struct rte_tm_shaper_params *profile,
			struct rte_tm_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = ice_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);

	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("ice_tm_shaper_profile",
				     sizeof(struct ice_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	rte_memcpy(&shaper_profile->profile, profile,
			 sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
			  shaper_profile, node);

	return 0;
}
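
/*
 * rte_tm shaper rates are expressed in bytes per second. A sketch of a
 * profile that passes the checks above (no bucket sizes, no packet length
 * adjustment), capping a node at 1 Gbps (hypothetical port and profile ids):
 *
 *	struct rte_tm_shaper_params sp = {
 *		.peak = { .rate = 125000000 },	// 1 Gbps in bytes per second
 *	};
 *	struct rte_tm_error err;
 *
 *	rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
 */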

static int
ice_shaper_profile_del(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			struct rte_tm_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_tm_shaper_profile *shaper_profile;

	if (!error)
		return -EINVAL;

	shaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);

	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	/* don't delete a profile if it's used by one or several nodes */
	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);

	return 0;
}

static int
ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	      uint32_t parent_node_id, uint32_t priority,
	      uint32_t weight, uint32_t level_id,
	      const struct rte_tm_node_params *params,
	      struct rte_tm_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_tm_shaper_profile *shaper_profile = NULL;
	struct ice_tm_node *tm_node;
	struct ice_tm_node *parent_node = NULL;
	uint8_t layer_offset = pf->tm_conf.hidden_layers;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* check the shaper profile id */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = ice_shaper_profile_search(dev, params->shaper_profile_id);
		if (!shaper_profile) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile does not exist";
			return -EINVAL;
		}
	}

	/* a node without a parent is the root node */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != 0) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level, root node (NULL parent) must be at level 0";
			return -EINVAL;
		}

		/* there can be only one root node */
		if (pf->tm_conf.root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		ret = ice_node_param_check(node_id, priority, weight, params, false, error);
		if (ret)
			return ret;

		/* add the root node */
		tm_node = rte_zmalloc(NULL,
				sizeof(struct ice_tm_node) +
				sizeof(struct ice_tm_node *) * hw->max_children[layer_offset],
				0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->level = 0;
		tm_node->parent = NULL;
		tm_node->reference_count = 0;
		tm_node->shaper_profile = shaper_profile;
		tm_node->children = RTE_PTR_ADD(tm_node, sizeof(struct ice_tm_node));
		tm_node->params = *params;
		pf->tm_conf.root = tm_node;
		return 0;
	}

	parent_node = find_node(pf->tm_conf.root, parent_node_id);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}

	/* check level */
	if (level_id == RTE_TM_NODE_LEVEL_ID_ANY)
		level_id = parent_node->level + 1;
	else if (level_id != parent_node->level + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	ret = ice_node_param_check(node_id, priority, weight,
			params, level_id == ice_get_leaf_level(pf), error);
	if (ret)
		return ret;

	/* check that the node id is not already in use */
	if (find_node(pf->tm_conf.root, node_id)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the parent node */
	/* for an n-level hierarchy, level n-1 is leaf, so the last level with children is n-2 */
	if ((int)parent_node->level > hw->num_tx_sched_layers - 2) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not valid";
		return -EINVAL;
	}

	/* check the max children allowed at this level */
	if (parent_node->reference_count >= hw->max_children[parent_node->level]) {
		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
		error->message = "parent node has reached its max number of children";
		return -EINVAL;
	}

	tm_node = rte_zmalloc(NULL,
			sizeof(struct ice_tm_node) +
			sizeof(struct ice_tm_node *) * hw->max_children[level_id + layer_offset],
			0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->level = level_id;
	tm_node->shaper_profile = shaper_profile;
	tm_node->children = RTE_PTR_ADD(tm_node, sizeof(struct ice_tm_node));
	tm_node->parent->children[tm_node->parent->reference_count++] = tm_node;
	tm_node->params = *params;

	if (tm_node->priority != 0)
		PMD_DRV_LOG(WARNING, "priority != 0 not supported at level %d", level_id);

	if (tm_node->weight != 1 && level_id == 0)
		PMD_DRV_LOG(WARNING, "weight != 1 not supported at level %d", level_id);

	return 0;
}
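
/*
 * Application-side sketch of building a hierarchy through this callback
 * (hypothetical ids, priority 0 and weight 1 throughout, error handling
 * elided). The root sits at level 0 and each child is one level below its
 * parent; how many levels are needed before the queue (leaf) level depends
 * on the firmware scheduler topology, and leaf nodes must use the queue id
 * as node_id:
 *
 *	rte_tm_node_add(port_id, 100, RTE_TM_NODE_ID_NULL, 0, 1, 0, &np, &err);
 *	rte_tm_node_add(port_id, 200, 100, 0, 1, 1, &np, &err);
 *	rte_tm_node_add(port_id, 300, 200, 0, 1, 2, &np, &err);  // queue group
 *	rte_tm_node_add(port_id, 0, 300, 0, 1, 3, &np, &err);    // Tx queue 0
 */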

static int
ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		 struct rte_tm_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_tm_node *tm_node;
	uint32_t i, j;

	if (!error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = find_node(pf->tm_conf.root, node_id);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no child */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (tm_node->level == 0) {
		rte_free(tm_node);
		pf->tm_conf.root = NULL;
		return 0;
	}

	/* queue group or queue node */
	for (i = 0; i < tm_node->parent->reference_count; i++)
		if (tm_node->parent->children[i] == tm_node)
			break;

	for (j = i; j < tm_node->parent->reference_count - 1; j++)
		tm_node->parent->children[j] = tm_node->parent->children[j + 1];

	tm_node->parent->reference_count--;
	rte_free(tm_node);

	return 0;
}

static int ice_set_node_rate(struct ice_hw *hw,
			     struct ice_tm_node *tm_node,
			     struct ice_sched_node *sched_node)
{
	bool reset = false;
	uint32_t peak = 0;
	uint32_t committed = 0;
	uint32_t rate;
	int status;

	if (tm_node == NULL || tm_node->shaper_profile == NULL) {
		reset = true;
	} else {
		peak = (uint32_t)tm_node->shaper_profile->profile.peak.rate;
		committed = (uint32_t)tm_node->shaper_profile->profile.committed.rate;
	}

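	/* the rte_tm rates above are in bytes per second, while the hw
	 * scheduler takes its bandwidth limits in Kbps, hence the
	 * "/ 1000 * BITS_PER_BYTE" conversions below: e.g. a peak rate of
	 * 125000000 bytes/sec becomes 1000000 Kbps (1 Gbps).
	 */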
	if (reset || peak == 0)
		rate = ICE_SCHED_DFLT_BW;
	else
		rate = peak / 1000 * BITS_PER_BYTE;

	status = ice_sched_set_node_bw_lmt(hw->port_info,
					   sched_node,
					   ICE_MAX_BW,
					   rate);
	if (status)
		return -EINVAL;

	if (reset || committed == 0)
		rate = ICE_SCHED_DFLT_BW;
	else
		rate = committed / 1000 * BITS_PER_BYTE;

	status = ice_sched_set_node_bw_lmt(hw->port_info,
					   sched_node,
					   ICE_MIN_BW,
					   rate);
	if (status)
		return -EINVAL;

	return 0;
}

static int ice_cfg_hw_node(struct ice_hw *hw,
			   struct ice_tm_node *tm_node,
			   struct ice_sched_node *sched_node)
{
	uint8_t priority;
	uint16_t weight;
	int status, ret;

	ret = ice_set_node_rate(hw, tm_node, sched_node);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "configure queue group %u bandwidth failed",
			    sched_node->info.node_teid);
		return ret;
	}

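	/* rte_tm defines priority 0 as the highest; the 7 - x mapping below
	 * assumes the hw sibling-priority field uses the opposite ordering.
	 */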
	priority = tm_node ? (7 - tm_node->priority) : 0;
	status = ice_sched_cfg_sibl_node_prio(hw->port_info,
					      sched_node,
					      priority);
	if (status) {
		PMD_DRV_LOG(ERR, "configure node %u priority %u failed",
			    sched_node->info.node_teid,
			    priority);
		return -EINVAL;
	}

	weight = tm_node ? (uint16_t)tm_node->weight : 4;

	status = ice_sched_cfg_node_bw_alloc(hw, sched_node,
					     ICE_MAX_BW,
					     weight);
	if (status) {
		PMD_DRV_LOG(ERR, "configure node %u weight %u failed",
			    sched_node->info.node_teid,
			    weight);
		return -EINVAL;
	}

	return 0;
}

int
ice_tm_setup_txq_node(struct ice_pf *pf, struct ice_hw *hw, uint16_t qid, uint32_t teid)
{
	struct ice_sched_node *hw_node = ice_sched_find_node_by_teid(hw->port_info->root, teid);
	struct ice_tm_node *sw_node = find_node(pf->tm_conf.root, qid);

	/* bad node teid passed */
	if (hw_node == NULL)
		return -ENOENT;

	/* not configured in hierarchy */
	if (sw_node == NULL)
		return 0;

	sw_node->sched_node = hw_node;

	/* if the queue node has been put in the wrong place in the hierarchy */
	if (hw_node->parent != sw_node->parent->sched_node) {
		struct ice_aqc_move_txqs_data *buf;
		uint8_t txqs_moved = 0;
		uint16_t buf_size = ice_struct_size(buf, txqs, 1);

		buf = ice_malloc(hw, buf_size);
		if (buf == NULL)
			return -ENOMEM;

		struct ice_sched_node *parent = hw_node->parent;
		struct ice_sched_node *new_parent = sw_node->parent->sched_node;
		buf->src_teid = parent->info.node_teid;
		buf->dest_teid = new_parent->info.node_teid;
		buf->txqs[0].q_teid = hw_node->info.node_teid;
		buf->txqs[0].txq_id = qid;

		int ret = ice_aq_move_recfg_lan_txq(hw, 1, true, false, false, false, 50,
						NULL, buf, buf_size, &txqs_moved, NULL);
		if (ret || txqs_moved == 0) {
			PMD_DRV_LOG(ERR, "move lan queue %u failed", qid);
			ice_free(hw, buf);
			return ICE_ERR_PARAM;
		}
		ice_free(hw, buf);

		/* now update the ice_sched_nodes to match physical layout */
		new_parent->children[new_parent->num_children++] = hw_node;
		hw_node->parent = new_parent;
		ice_sched_query_elem(hw, hw_node->info.node_teid, &hw_node->info);
		for (uint16_t i = 0; i < parent->num_children; i++)
			if (parent->children[i] == hw_node) {
				/* to remove, just overwrite the old node slot with the last ptr */
				parent->children[i] = parent->children[--parent->num_children];
				break;
			}
	}

	return ice_cfg_hw_node(hw, sw_node, hw_node);
}

/* From a given node, recursively delete all the nodes that belong to that
 * VSI. Any node that cannot be deleted because it has children belonging to
 * a different VSI is reassigned to that other VSI.
 */
static int
free_sched_node_recursive(struct ice_port_info *pi, const struct ice_sched_node *root,
		struct ice_sched_node *node, uint8_t vsi_id)
{
	uint16_t i = 0;

	while (i < node->num_children) {
		if (node->children[i]->vsi_handle != vsi_id) {
			i++;
			continue;
		}
		free_sched_node_recursive(pi, root, node->children[i], vsi_id);
	}

	if (node != root) {
		if (node->num_children == 0)
			ice_free_sched_node(pi, node);
		else
			node->vsi_handle = node->children[0]->vsi_handle;
	}

	return 0;
}

static int
create_sched_node_recursive(struct ice_pf *pf, struct ice_port_info *pi,
		 struct ice_tm_node *sw_node, struct ice_sched_node *hw_root, uint16_t *created)
{
	struct ice_sched_node *parent = sw_node->sched_node;
	uint32_t teid;
	uint16_t added;

	/* first create all child nodes */
	for (uint16_t i = 0; i < sw_node->reference_count; i++) {
		struct ice_tm_node *tm_node = sw_node->children[i];
		int res = ice_sched_add_elems(pi, hw_root,
				parent, parent->tx_sched_layer + 1,
				1 /* num nodes */, &added, &teid,
				NULL /* no pre-alloc */);
		if (res != 0) {
			PMD_DRV_LOG(ERR, "Error with ice_sched_add_elems, adding child node to teid %u",
					parent->info.node_teid);
			return -1;
		}
		struct ice_sched_node *hw_node = ice_sched_find_node_by_teid(parent, teid);
		if (ice_cfg_hw_node(pi->hw, tm_node, hw_node) != 0) {
			PMD_DRV_LOG(ERR, "Error configuring node %u at layer %u",
					teid, parent->tx_sched_layer + 1);
			return -1;
		}
		tm_node->sched_node = hw_node;
		created[hw_node->tx_sched_layer]++;
	}

	/* if the nodes just created are at the queue-group level, i.e. the
	 * last non-leaf layer, stop recursing; the leaf (queue) nodes are
	 * created later, at queue start.
	 */
	if (sw_node->level + 2 == ice_get_leaf_level(pf))
		return 0;

	for (uint16_t i = 0; i < sw_node->reference_count; i++) {
		if (sw_node->children[i]->reference_count == 0)
			continue;

		if (create_sched_node_recursive(pf, pi, sw_node->children[i], hw_root, created) < 0)
			return -1;
	}
	return 0;
}

static int
commit_new_hierarchy(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_port_info *pi = hw->port_info;
	struct ice_tm_node *sw_root = pf->tm_conf.root;
	const uint16_t new_root_level = pf->tm_conf.hidden_layers;
	/* count nodes per hw level, not per logical level */
	uint16_t nodes_created_per_level[ICE_TM_MAX_LAYERS] = {0};
	uint8_t q_lvl = ice_get_leaf_level(pf);
	uint8_t qg_lvl = q_lvl - 1;

	struct ice_sched_node *new_vsi_root = hw->vsi_ctx[pf->main_vsi->idx]->sched.vsi_node[0];
	while (new_vsi_root->tx_sched_layer > new_root_level)
		new_vsi_root = new_vsi_root->parent;

	free_sched_node_recursive(pi, new_vsi_root, new_vsi_root, new_vsi_root->vsi_handle);

	sw_root->sched_node = new_vsi_root;
	if (create_sched_node_recursive(pf, pi, sw_root, new_vsi_root, nodes_created_per_level) < 0)
		return -1;
	for (uint16_t i = 0; i < RTE_DIM(nodes_created_per_level); i++)
		PMD_DRV_LOG(DEBUG, "Created %u nodes at level %u",
				nodes_created_per_level[i], i);
	hw->vsi_ctx[pf->main_vsi->idx]->sched.vsi_node[0] = new_vsi_root;

	pf->main_vsi->nb_qps =
			RTE_MIN(nodes_created_per_level[qg_lvl] * hw->max_children[qg_lvl],
				hw->layer_info[q_lvl].max_device_nodes);

	pf->tm_conf.committed = true; /* set flag to be checked on queue start */

	return ice_alloc_lan_q_ctx(hw, 0, 0, pf->main_vsi->nb_qps);
}

static int
ice_hierarchy_commit(struct rte_eth_dev *dev,
				 int clear_on_fail,
				 struct rte_tm_error *error)
{
	bool restart = false;

	/* the topology should only be committed before the port is started.
	 * If the port is already started, stop it, commit, and then restart
	 * it when done.
	 */
	if (dev->data->dev_started) {
		if (rte_eth_dev_stop(dev->data->port_id) != 0) {
			error->message = "Device failed to stop";
			return -1;
		}
		restart = true;
	}

	int ret = commit_new_hierarchy(dev);
	if (ret < 0 && clear_on_fail) {
		ice_tm_conf_uninit(dev);
		ice_tm_conf_init(dev);
	}

	if (restart) {
		if (rte_eth_dev_start(dev->data->port_id) != 0) {
			error->message = "Device failed to start";
			return -1;
		}
	}
	return ret;
}
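
/*
 * End-to-end sketch (hypothetical ids; np and sp as in the examples above):
 * stage the hierarchy, then commit. With clear_on_fail set, a failed commit
 * resets the staged configuration, as handled above.
 *
 *	struct rte_tm_error err;
 *
 *	rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
 *	rte_tm_node_add(port_id, 100, RTE_TM_NODE_ID_NULL, 0, 1, 0, &np, &err);
 *	// ... remaining levels down to the queue nodes ...
 *	if (rte_tm_hierarchy_commit(port_id, 1, &err) != 0)
 *		printf("commit failed: %s\n", err.message);
 */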
875