/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 NXP
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_tm_driver.h>

#include "dpaa2_ethdev.h"

#define DPAA2_BURST_MAX	(64 * 1024)

#define DPAA2_SHAPER_MIN_RATE 0
#define DPAA2_SHAPER_MAX_RATE 107374182400ull
#define DPAA2_WEIGHT_MAX 24701

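/*
 * rte_tm (traffic management) support for the DPAA2 PMD: a two-level
 * hierarchy with a single root (port) node, which may carry a dual-rate
 * shaper, and one leaf node per TX queue/TC. Leaves are scheduled by
 * strict priority, with equal-priority leaves grouped into one of the
 * two supported WFQ groups.
 */

/* Initialize the per-port lists holding shaper profiles and TM nodes. */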
int
dpaa2_tm_init(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	LIST_INIT(&priv->shaper_profiles);
	LIST_INIT(&priv->nodes);

	return 0;
}

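/*
 * Release all shaper profiles and nodes still held on the per-port lists;
 * also used to roll back the software state when a hierarchy commit fails
 * (see dpaa2_hierarchy_commit()).
 */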
void dpaa2_tm_deinit(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile =
		LIST_FIRST(&priv->shaper_profiles);
	struct dpaa2_tm_node *node = LIST_FIRST(&priv->nodes);

	while (profile) {
		struct dpaa2_tm_shaper_profile *next = LIST_NEXT(profile, next);

		LIST_REMOVE(profile, next);
		rte_free(profile);
		profile = next;
	}

	while (node) {
		struct dpaa2_tm_node *next = LIST_NEXT(node, next);

		LIST_REMOVE(node, next);
		rte_free(node);
		node = next;
	}
}

static struct dpaa2_tm_node *
dpaa2_node_from_id(struct dpaa2_dev_priv *priv, uint32_t node_id)
{
	struct dpaa2_tm_node *node;

	LIST_FOREACH(node, &priv->nodes, next)
		if (node->id == node_id)
			return node;

	return NULL;
}

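/*
 * Port-wide capabilities: one root node plus one node per TX queue, a
 * single private dual-rate shaper and at most two WFQ groups.
 */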
static int
dpaa2_capabilities_get(struct rte_eth_dev *dev,
		       struct rte_tm_capabilities *cap,
		       struct rte_tm_error *error)
{
	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Capabilities are NULL\n");

	memset(cap, 0, sizeof(*cap));

	/* root node (port) + number of TX queues, assuming each TX
	 * queue is mapped to a TC
	 */
	cap->n_nodes_max = 1 + dev->data->nb_tx_queues;
	cap->n_levels_max = 2; /* port level + txqs level */
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->shaper_n_max = 1;
	cap->shaper_private_n_max = 1;
	cap->shaper_private_dual_rate_n_max = 1;
	cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
	cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;

	cap->sched_n_children_max = dev->data->nb_tx_queues;
	cap->sched_sp_n_priorities_max = dev->data->nb_tx_queues;
	cap->sched_wfq_n_children_per_group_max = dev->data->nb_tx_queues;
	cap->sched_wfq_n_groups_max = 2;
	cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX;

	cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_STATS;
	cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;

	return 0;
}

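/* Per-level capabilities: level 0 is the root (port), level 1 the TX queues. */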
static int
dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
			    uint32_t level_id,
			    struct rte_tm_level_capabilities *cap,
			    struct rte_tm_error *error)
{
	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	memset(cap, 0, sizeof(*cap));

	if (level_id > 1)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_LEVEL_ID,
					 NULL, "Wrong level id\n");

	if (level_id == 0) { /* Root node */
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = 1;
		cap->nonleaf.shaper_private_dual_rate_supported = 1;
		cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
		cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;

		cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			dev->data->nb_tx_queues;
		cap->nonleaf.sched_wfq_n_groups_max = 2;
		cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
		cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS |
					  RTE_TM_STATS_N_BYTES;
	} else { /* leaf nodes */
		cap->n_nodes_max = dev->data->nb_tx_queues;
		cap->n_nodes_leaf_max = dev->data->nb_tx_queues;
		cap->leaf_nodes_identical = 1;

		cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS;
	}

	return 0;
}

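/*
 * Per-node capabilities: the root (port) node exposes shaping and
 * scheduling, leaf nodes only expose packet counters.
 */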
static int
dpaa2_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
			    struct rte_tm_node_capabilities *cap,
			    struct rte_tm_error *error)
{
	struct dpaa2_tm_node *node;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	memset(cap, 0, sizeof(*cap));

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	if (node->type == 0) {
		cap->shaper_private_supported = 1;

		cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			dev->data->nb_tx_queues;
		cap->nonleaf.sched_wfq_n_groups_max = 2;
		cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
		cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
	} else {
		cap->stats_mask = RTE_TM_STATS_N_PKTS;
	}

	return 0;
}

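/* Report whether the given node id refers to a leaf (TX queue) node. */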
static int
dpaa2_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf,
		    struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node;

	if (!is_leaf)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	*is_leaf = node->type == 1/*NODE_QUEUE*/ ? 1 : 0;

	return 0;
}

static struct dpaa2_tm_shaper_profile *
dpaa2_shaper_profile_from_id(struct dpaa2_dev_priv *priv,
				uint32_t shaper_profile_id)
{
	struct dpaa2_tm_shaper_profile *profile;

	LIST_FOREACH(profile, &priv->shaper_profiles, next)
		if (profile->id == shaper_profile_id)
			return profile;

	return NULL;
}

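/*
 * Validate and store a shaper profile. Rates and burst sizes are bounded
 * by DPAA2_SHAPER_MAX_RATE and DPAA2_BURST_MAX; the profile is applied to
 * hardware only at hierarchy commit time.
 */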
static int
dpaa2_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
			 struct rte_tm_shaper_params *params,
			 struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile;

	if (!params)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);
	if (params->committed.rate > DPAA2_SHAPER_MAX_RATE)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
				NULL, "Committed rate is out of range\n");

	if (params->committed.size > DPAA2_BURST_MAX)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE,
				NULL, "Committed size is out of range\n");

	if (params->peak.rate > DPAA2_SHAPER_MAX_RATE)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
				NULL, "Peak rate is out of range\n");

	if (params->peak.size > DPAA2_BURST_MAX)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
				NULL, "Peak size is out of range\n");

	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Wrong shaper profile id\n");

	profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
	if (profile)
		return -rte_tm_error_set(error, EEXIST,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Profile id already exists\n");

	profile = rte_zmalloc_socket(NULL, sizeof(*profile), 0,
				     rte_socket_id());
	if (!profile)
		return -rte_tm_error_set(error, ENOMEM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	profile->id = shaper_profile_id;
	rte_memcpy(&profile->params, params, sizeof(profile->params));

	LIST_INSERT_HEAD(&priv->shaper_profiles, profile, next);

	return 0;
}

static int
dpaa2_shaper_profile_delete(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
			    struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile;

	profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
	if (!profile)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Profile id does not exist\n");

	if (profile->refcnt)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Profile is used\n");

	LIST_REMOVE(profile, next);
	rte_free(profile);

	return 0;
}

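/*
 * Common parameter checks for node creation. Node ids below nb_tx_queues
 * are taken as leaf (queue) nodes, anything else as the root (port) node;
 * shared shapers, and private shapers on leaves, are rejected.
 */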
static int
dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
			__rte_unused uint32_t priority, uint32_t weight,
			uint32_t level_id,
			struct rte_tm_node_params *params,
			struct rte_tm_error *error)
{
	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id is invalid\n");

	if (weight > DPAA2_WEIGHT_MAX)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
					 NULL, "Weight is out of range\n");

	if (level_id != 0 && level_id != 1)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_LEVEL_ID,
					 NULL, "Wrong level id\n");

	if (!params)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	if (params->shared_shaper_id)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
				NULL, "Shared shaper is not supported\n");

	if (params->n_shared_shapers)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
				NULL, "Shared shaper is not supported\n");

	/* verify port (root node) settings */
	if (node_id >= dev->data->nb_tx_queues) {
		if (params->nonleaf.wfq_weight_mode)
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
				NULL, "WFQ weight mode is not supported\n");

		if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
					   RTE_TM_STATS_N_BYTES))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
				NULL,
				"Requested port stats are not supported\n");

		return 0;
	}
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL, "Private shaper not supported on leaf\n");

	if (params->stats_mask & ~RTE_TM_STATS_N_PKTS)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			"Requested stats are not supported\n");

	/* check leaf node */
	if (level_id == 1) {
		if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP)
			return -rte_tm_error_set(error, ENODEV,
					RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
					NULL, "Only taildrop is supported\n");
	}

	return 0;
}

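/*
 * Create a TM node. A node with parent RTE_TM_NODE_ID_NULL becomes the
 * single root (port) node; every other node is a leaf attached to it.
 * References on the parent and on the shaper profile are taken here and
 * dropped in dpaa2_node_delete().
 */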
static int
dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	       uint32_t parent_node_id, uint32_t priority, uint32_t weight,
	       uint32_t level_id, struct rte_tm_node_params *params,
	       struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile = NULL;
	struct dpaa2_tm_node *node, *parent = NULL;
	int ret;

	if (0/* If device is started*/)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Port is already started\n");

	ret = dpaa2_node_check_params(dev, node_id, priority, weight, level_id,
				      params, error);
	if (ret)
		return ret;

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		profile = dpaa2_shaper_profile_from_id(priv,
						     params->shaper_profile_id);
		if (!profile)
			return -rte_tm_error_set(error, ENODEV,
					RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					NULL, "Shaper id does not exist\n");
	}
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		LIST_FOREACH(node, &priv->nodes, next) {
			if (node->type != 0 /*root node*/)
				continue;

			return -rte_tm_error_set(error, EINVAL,
						 RTE_TM_ERROR_TYPE_UNSPECIFIED,
						 NULL, "Root node exists\n");
		}
	} else {
		parent = dpaa2_node_from_id(priv, parent_node_id);
		if (!parent)
			return -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
					NULL, "Parent node id does not exist\n");
	}

	node = dpaa2_node_from_id(priv, node_id);
	if (node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id already exists\n");

	node = rte_zmalloc_socket(NULL, sizeof(*node), 0, rte_socket_id());
	if (!node)
		return -rte_tm_error_set(error, ENOMEM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	node->id = node_id;
	node->type = parent_node_id == RTE_TM_NODE_ID_NULL ? 0/*NODE_PORT*/ :
							     1/*NODE_QUEUE*/;

	if (parent) {
		node->parent = parent;
		parent->refcnt++;
	}

	if (profile) {
		node->profile = profile;
		profile->refcnt++;
	}

	node->weight = weight;
	node->priority = priority;
	node->stats_mask = params->stats_mask;

	LIST_INSERT_HEAD(&priv->nodes, node, next);

	return 0;
}

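/* Delete an unreferenced node and drop its parent/profile references. */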
static int
dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		  struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node;

	if (0) {
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Port is already started\n");
	}

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	if (node->refcnt)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id is used\n");

	if (node->parent)
		node->parent->refcnt--;

	if (node->profile)
		node->profile->refcnt--;

	LIST_REMOVE(node, next);
	rte_free(node);

	return 0;
}

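/*
 * Apply the software hierarchy to the DPNI object: program the root
 * node's committed/excess rate shapers (profile rates are scaled down by
 * 1024 * 1024 before being passed to dpni_set_tx_shaping()) and build a
 * dpni_tx_priorities_cfg in which leaves of equal priority share one of
 * the two supported WFQ groups. On failure the software hierarchy is
 * optionally cleared and re-initialized.
 */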
static int
dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
		       struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node, *temp_node;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	int ret;
	int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
	struct dpni_tx_priorities_cfg prio_cfg;

	memset(&prio_cfg, 0, sizeof(prio_cfg));
	memset(conf, 0, sizeof(conf));

	LIST_FOREACH(node, &priv->nodes, next) {
		if (node->type == 0/*root node*/) {
			if (!node->profile)
				continue;

			struct dpni_tx_shaping_cfg tx_cr_shaper, tx_er_shaper;

			tx_cr_shaper.max_burst_size =
				node->profile->params.committed.size;
			tx_cr_shaper.rate_limit =
				node->profile->params.committed.rate / (1024 * 1024);
			tx_er_shaper.max_burst_size =
				node->profile->params.peak.size;
			tx_er_shaper.rate_limit =
				node->profile->params.peak.rate / (1024 * 1024);
			ret = dpni_set_tx_shaping(dpni, 0, priv->token,
					&tx_cr_shaper, &tx_er_shaper, 0);
			if (ret) {
				ret = -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_SHAPER_PROFILE, NULL,
					"Error in setting Shaping\n");
				goto out;
			}

			continue;
		} else { /* level 1, all leaf nodes */
			if (node->id >= dev->data->nb_tx_queues) {
				ret = -rte_tm_error_set(error, EINVAL,
						RTE_TM_ERROR_TYPE_NODE_ID, NULL,
						"Not enough txqs configured\n");
				goto out;
			}

			if (conf[node->id])
				continue;

			LIST_FOREACH(temp_node, &priv->nodes, next) {
				if (temp_node->id == node->id ||
					temp_node->type == 0)
					continue;
				if (conf[temp_node->id])
					continue;
				if (node->priority == temp_node->priority) {
					if (wfq_grp == 0) {
						prio_cfg.tc_sched[temp_node->id].mode =
								DPNI_TX_SCHED_WEIGHTED_A;
						/* DPDK's lowest supported weight is 1,
						 * the DPAA2 platform's is 100
						 */
						prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
								temp_node->weight + 99;
					} else if (wfq_grp == 1) {
						prio_cfg.tc_sched[temp_node->id].mode =
								DPNI_TX_SCHED_WEIGHTED_B;
						prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
								temp_node->weight + 99;
					} else {
						/*TODO: add one more check for
						 * number of nodes in a group
						 */
						ret = -rte_tm_error_set(error, EINVAL,
							RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
							"Only 2 WFQ Groups are supported\n");
						goto out;
					}
					conf[temp_node->id] = 1;
					is_wfq_grp = 1;
				}
			}
			if (is_wfq_grp) {
				if (wfq_grp == 0) {
					prio_cfg.tc_sched[node->id].mode =
							DPNI_TX_SCHED_WEIGHTED_A;
					prio_cfg.tc_sched[node->id].delta_bandwidth =
							node->weight + 99;
					prio_cfg.prio_group_A = node->priority;
				} else if (wfq_grp == 1) {
					prio_cfg.tc_sched[node->id].mode =
							DPNI_TX_SCHED_WEIGHTED_B;
					prio_cfg.tc_sched[node->id].delta_bandwidth =
							node->weight + 99;
					prio_cfg.prio_group_B = node->priority;
				}
				wfq_grp++;
				is_wfq_grp = 0;
			}
			conf[node->id] = 1;
		}
		if (wfq_grp)
			prio_cfg.separate_groups = 1;
	}
	ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
	if (ret) {
		ret = -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
					"Scheduling Failed\n");
		goto out;
	}

	return 0;

out:
	if (clear_on_fail) {
		dpaa2_tm_deinit(dev);
		dpaa2_tm_init(dev);
	}

	return ret;
}

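/* rte_tm driver ops table exported to the ethdev layer. */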
const struct rte_tm_ops dpaa2_tm_ops = {
	.node_type_get = dpaa2_node_type_get,
	.capabilities_get = dpaa2_capabilities_get,
	.level_capabilities_get = dpaa2_level_capabilities_get,
	.node_capabilities_get = dpaa2_node_capabilities_get,
	.shaper_profile_add = dpaa2_shaper_profile_add,
	.shaper_profile_delete = dpaa2_shaper_profile_delete,
	.node_add = dpaa2_node_add,
	.node_delete = dpaa2_node_delete,
	.hierarchy_commit = dpaa2_hierarchy_commit,
};
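
/*
 * Minimal usage sketch (illustrative only, not part of the driver). An
 * application would typically build the two-level hierarchy roughly as
 * below; PORT_ID, ROOT_ID (any id >= nb_txq), nb_txq, rate_bytes_per_sec,
 * prio[] and weight[] are placeholders chosen by the application.
 *
 *	struct rte_tm_error err;
 *	struct rte_tm_shaper_params sp = {
 *		.committed = { .rate = rate_bytes_per_sec, .size = 4096 },
 *		.peak = { .rate = rate_bytes_per_sec, .size = 4096 },
 *	};
 *	struct rte_tm_node_params root_params = {
 *		.shaper_profile_id = 1,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *	};
 *	struct rte_tm_node_params leaf_params = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.leaf = { .cman = RTE_TM_CMAN_TAIL_DROP },
 *	};
 *	uint32_t q;
 *
 *	rte_tm_shaper_profile_add(PORT_ID, 1, &sp, &err);
 *	rte_tm_node_add(PORT_ID, ROOT_ID, RTE_TM_NODE_ID_NULL, 0, 1,
 *			0, &root_params, &err);
 *	for (q = 0; q < nb_txq; q++)
 *		rte_tm_node_add(PORT_ID, q, ROOT_ID, prio[q], weight[q],
 *				1, &leaf_params, &err);
 *	rte_tm_hierarchy_commit(PORT_ID, 1, &err);
 */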
631