/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>

#include <rte_errno.h>
#include "ethdev_trace.h"
#include "rte_ethdev.h"
#include "rte_tm_driver.h"
#include "rte_tm.h"

/* Get generic traffic manager operations structure from a port. */
const struct rte_tm_ops *
rte_tm_ops_get(uint16_t port_id, struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_tm_ops *ops;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_tm_error_set(error,
			ENODEV,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENODEV));
		return NULL;
	}

	if ((dev->dev_ops->tm_ops_get == NULL) ||
		(dev->dev_ops->tm_ops_get(dev, &ops) != 0) ||
		(ops == NULL)) {
		rte_tm_error_set(error,
			ENOSYS,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOSYS));
		return NULL;
	}

	return ops;
}

#define RTE_TM_FUNC(port_id, func)			\
__extension__ ({					\
	const struct rte_tm_ops *ops =			\
		rte_tm_ops_get(port_id, error);		\
	if (ops == NULL)				\
		return -rte_errno;			\
							\
	if (ops->func == NULL)				\
		return -rte_tm_error_set(error,		\
			ENOSYS,				\
			RTE_TM_ERROR_TYPE_UNSPECIFIED,	\
			NULL,				\
			rte_strerror(ENOSYS));		\
							\
	ops->func;					\
})

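/*
 * Illustrative note (not part of the API): RTE_TM_FUNC() is a statement
 * expression used inside the wrappers below, e.g.
 *
 *	ret = RTE_TM_FUNC(port_id, node_delete)(dev, node_id, error);
 *
 * It resolves the driver callback for the port; if the port has no TM ops
 * or the callback is missing, it makes the *enclosing* wrapper return
 * -rte_errno / -ENOSYS immediately, otherwise it evaluates to the callback
 * pointer, which is then invoked with the ethdev handle and the wrapper's
 * own arguments.
 */
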
/* Get number of leaf nodes */
int
rte_tm_get_number_of_leaf_nodes(uint16_t port_id,
	uint32_t *n_leaf_nodes,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_tm_ops *ops =
		rte_tm_ops_get(port_id, error);

	if (ops == NULL)
		return -rte_errno;

	if (n_leaf_nodes == NULL) {
		rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
		return -rte_errno;
	}

	*n_leaf_nodes = dev->data->nb_tx_queues;

	rte_tm_trace_get_number_of_leaf_nodes(port_id, *n_leaf_nodes);

	return 0;
}

/* Check node type (leaf or non-leaf) */
int
rte_tm_node_type_get(uint16_t port_id,
	uint32_t node_id,
	int *is_leaf,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_type_get)(dev,
		node_id, is_leaf, error);

	rte_tm_trace_node_type_get(port_id, node_id, *is_leaf, ret);

	return ret;
}

/* Get capabilities */
int rte_tm_capabilities_get(uint16_t port_id,
	struct rte_tm_capabilities *cap,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, capabilities_get)(dev,
		cap, error);

	rte_tm_trace_capabilities_get(port_id, cap, ret);

	return ret;
}

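/*
 * Example (illustrative sketch, error handling omitted): an application
 * querying the port-level TM capabilities before building a hierarchy.
 * The fields shown are declared in rte_tm.h.
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error error;
 *
 *	memset(&cap, 0, sizeof(cap));
 *	if (rte_tm_capabilities_get(port_id, &cap, &error) == 0)
 *		printf("max nodes: %u, levels: %u\n",
 *			cap.n_nodes_max, cap.n_levels_max);
 */
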
/* Get level capabilities */
int rte_tm_level_capabilities_get(uint16_t port_id,
	uint32_t level_id,
	struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, level_capabilities_get)(dev,
		level_id, cap, error);

	rte_tm_trace_level_capabilities_get(port_id, level_id, cap, ret);

	return ret;
}

/* Get node capabilities */
int rte_tm_node_capabilities_get(uint16_t port_id,
	uint32_t node_id,
	struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_capabilities_get)(dev,
		node_id, cap, error);

	rte_tm_trace_node_capabilities_get(port_id, node_id, cap, ret);

	return ret;
}

/* Add WRED profile */
int rte_tm_wred_profile_add(uint16_t port_id,
	uint32_t wred_profile_id,
	const struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, wred_profile_add)(dev,
		wred_profile_id, profile, error);

	rte_tm_trace_wred_profile_add(port_id, wred_profile_id, profile, ret);

	return ret;
}

/* Delete WRED profile */
int rte_tm_wred_profile_delete(uint16_t port_id,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, wred_profile_delete)(dev,
		wred_profile_id, error);

	rte_tm_trace_wred_profile_delete(port_id, wred_profile_id, ret);

	return ret;
}

/* Add/update shared WRED context */
int rte_tm_shared_wred_context_add_update(uint16_t port_id,
	uint32_t shared_wred_context_id,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, shared_wred_context_add_update)(dev,
		shared_wred_context_id, wred_profile_id, error);

	rte_tm_trace_shared_wred_context_add_update(port_id,
						    shared_wred_context_id,
						    wred_profile_id, ret);

	return ret;
}

/* Delete shared WRED context */
int rte_tm_shared_wred_context_delete(uint16_t port_id,
	uint32_t shared_wred_context_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, shared_wred_context_delete)(dev,
		shared_wred_context_id, error);

	rte_tm_trace_shared_wred_context_delete(port_id,
						shared_wred_context_id, ret);

	return ret;
}

/* Add shaper profile */
int rte_tm_shaper_profile_add(uint16_t port_id,
	uint32_t shaper_profile_id,
	const struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, shaper_profile_add)(dev,
		shaper_profile_id, profile, error);

	rte_tm_trace_shaper_profile_add(port_id, shaper_profile_id, profile,
					ret);

	return ret;
}

/* Delete shaper profile */
int rte_tm_shaper_profile_delete(uint16_t port_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, shaper_profile_delete)(dev,
		shaper_profile_id, error);

	rte_tm_trace_shaper_profile_delete(port_id, shaper_profile_id, ret);

	return ret;
}

/* Add/update shared shaper */
int rte_tm_shared_shaper_add_update(uint16_t port_id,
	uint32_t shared_shaper_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, shared_shaper_add_update)(dev,
		shared_shaper_id, shaper_profile_id, error);

	rte_tm_trace_shared_shaper_add_update(port_id, shared_shaper_id,
					      shaper_profile_id, ret);

	return ret;
}

/* Delete shared shaper */
int rte_tm_shared_shaper_delete(uint16_t port_id,
	uint32_t shared_shaper_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, shared_shaper_delete)(dev,
		shared_shaper_id, error);

	rte_tm_trace_shared_shaper_delete(port_id, shared_shaper_id, ret);

	return ret;
}

/* Add node to port traffic manager hierarchy */
int rte_tm_node_add(uint16_t port_id,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	const struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_add)(dev,
		node_id, parent_node_id, priority, weight, level_id,
		params, error);

	rte_tm_trace_node_add(port_id, node_id, parent_node_id, priority,
			      weight, level_id, params, ret);

	return ret;
}

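/*
 * Example (illustrative sketch; 'np', 'root_node_id' and 'error' are
 * placeholder application variables): adding a root node, i.e. a node with
 * no parent. RTE_TM_NODE_ID_NULL, RTE_TM_NODE_LEVEL_ID_ANY and
 * RTE_TM_SHAPER_PROFILE_ID_NONE come from rte_tm.h.
 *
 *	struct rte_tm_node_params np = { 0 };
 *
 *	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
 *	np.nonleaf.n_sp_priorities = 1;
 *
 *	rte_tm_node_add(port_id, root_node_id, RTE_TM_NODE_ID_NULL,
 *		0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &error);
 */
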
/* Query node parameters */
int rte_tm_node_query(uint16_t port_id,
	uint32_t node_id,
	uint32_t *parent_node_id,
	uint32_t *priority,
	uint32_t *weight,
	uint32_t *level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	ret = RTE_TM_FUNC(port_id, node_query)(dev,
		node_id, parent_node_id, priority, weight, level_id,
		params, error);

	rte_tm_trace_node_query(port_id, node_id, parent_node_id, priority,
			      weight, level_id, params, ret);

	return ret;
}

/* Delete node from traffic manager hierarchy */
int rte_tm_node_delete(uint16_t port_id,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_delete)(dev,
		node_id, error);

	rte_tm_trace_node_delete(port_id, node_id, ret);

	return ret;
}

/* Suspend node */
int rte_tm_node_suspend(uint16_t port_id,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_suspend)(dev,
		node_id, error);

	rte_tm_trace_node_suspend(port_id, node_id, ret);

	return ret;
}

/* Resume node */
int rte_tm_node_resume(uint16_t port_id,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_resume)(dev,
		node_id, error);

	rte_tm_trace_node_resume(port_id, node_id, ret);

	return ret;
}

/* Commit the initial port traffic manager hierarchy */
int rte_tm_hierarchy_commit(uint16_t port_id,
	int clear_on_fail,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, hierarchy_commit)(dev,
		clear_on_fail, error);

	rte_tm_trace_hierarchy_commit(port_id, clear_on_fail, ret);

	return ret;
}

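/*
 * Typical call flow (illustrative): the application builds the hierarchy
 * with rte_tm_shaper_profile_add() / rte_tm_node_add(), typically before
 * the port is started, and then freezes it with a single commit:
 *
 *	rte_tm_hierarchy_commit(port_id, 1, &error);
 *
 * With clear_on_fail set to non-zero, the driver is asked to discard the
 * uncommitted hierarchy if the commit fails.
 */
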
/* Update node parent */
int rte_tm_node_parent_update(uint16_t port_id,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_parent_update)(dev,
		node_id, parent_node_id, priority, weight, error);

	rte_tm_trace_node_parent_update(port_id, node_id, parent_node_id,
					priority, weight, ret);

	return ret;
}

/* Update node private shaper */
int rte_tm_node_shaper_update(uint16_t port_id,
	uint32_t node_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_shaper_update)(dev,
		node_id, shaper_profile_id, error);

	rte_tm_trace_node_shaper_update(port_id, node_id, shaper_profile_id,
					ret);

	return ret;
}

/* Update node shared shapers */
int rte_tm_node_shared_shaper_update(uint16_t port_id,
	uint32_t node_id,
	uint32_t shared_shaper_id,
	int add,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_shared_shaper_update)(dev,
		node_id, shared_shaper_id, add, error);

	rte_tm_trace_node_shared_shaper_update(port_id, node_id,
					       shared_shaper_id, add, ret);

	return ret;
}

/* Update node stats */
int rte_tm_node_stats_update(uint16_t port_id,
	uint32_t node_id,
	uint64_t stats_mask,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_stats_update)(dev,
		node_id, stats_mask, error);

	rte_tm_trace_node_stats_update(port_id, node_id, stats_mask, ret);

	return ret;
}

/* Update WFQ weight mode */
int rte_tm_node_wfq_weight_mode_update(uint16_t port_id,
	uint32_t node_id,
	int *wfq_weight_mode,
	uint32_t n_sp_priorities,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_wfq_weight_mode_update)(dev,
		node_id, wfq_weight_mode, n_sp_priorities, error);

	rte_tm_trace_node_wfq_weight_mode_update(port_id, node_id,
						 wfq_weight_mode,
						 n_sp_priorities, ret);

	return ret;
}

/* Update node congestion management mode */
int rte_tm_node_cman_update(uint16_t port_id,
	uint32_t node_id,
	enum rte_tm_cman_mode cman,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_cman_update)(dev,
		node_id, cman, error);

	rte_tm_trace_node_cman_update(port_id, node_id, cman, ret);

	return ret;
}

/* Update node private WRED context */
int rte_tm_node_wred_context_update(uint16_t port_id,
	uint32_t node_id,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_wred_context_update)(dev,
		node_id, wred_profile_id, error);

	rte_tm_trace_node_wred_context_update(port_id, node_id, wred_profile_id,
					      ret);

	return ret;
}

/* Update node shared WRED context */
int rte_tm_node_shared_wred_context_update(uint16_t port_id,
	uint32_t node_id,
	uint32_t shared_wred_context_id,
	int add,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_shared_wred_context_update)(dev,
		node_id, shared_wred_context_id, add, error);

	rte_tm_trace_node_shared_wred_context_update(port_id, node_id,
						     shared_wred_context_id,
						     add, ret);

	return ret;
}

/* Read and/or clear stats counters for specific node */
int rte_tm_node_stats_read(uint16_t port_id,
	uint32_t node_id,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, node_stats_read)(dev,
		node_id, stats, stats_mask, clear, error);

	rte_tm_trace_node_stats_read(port_id, node_id, stats, *stats_mask,
				     clear, ret);

	return ret;
}

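/*
 * Example (illustrative sketch; 'leaf_node_id', 'total' and 'error' are
 * placeholder application variables): read and clear the counters of one
 * leaf node. The returned mask reports which fields of 'stats' the driver
 * actually filled in; RTE_TM_STATS_N_PKTS is defined in rte_tm.h.
 *
 *	struct rte_tm_node_stats stats;
 *	uint64_t mask = 0;
 *
 *	rte_tm_node_stats_read(port_id, leaf_node_id, &stats, &mask, 1, &error);
 *	if (mask & RTE_TM_STATS_N_PKTS)
 *		total += stats.n_pkts;
 */
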
/* Packet marking - VLAN DEI */
int rte_tm_mark_vlan_dei(uint16_t port_id,
	int mark_green,
	int mark_yellow,
	int mark_red,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, mark_vlan_dei)(dev,
		mark_green, mark_yellow, mark_red, error);

	rte_tm_trace_mark_vlan_dei(port_id, mark_green, mark_yellow, mark_red,
				   ret);

	return ret;
}

/* Packet marking - IPv4/IPv6 ECN */
int rte_tm_mark_ip_ecn(uint16_t port_id,
	int mark_green,
	int mark_yellow,
	int mark_red,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, mark_ip_ecn)(dev,
		mark_green, mark_yellow, mark_red, error);

	rte_tm_trace_mark_ip_ecn(port_id, mark_green, mark_yellow, mark_red,
				 ret);

	return ret;
}

/* Packet marking - IPv4/IPv6 DSCP */
int rte_tm_mark_ip_dscp(uint16_t port_id,
	int mark_green,
	int mark_yellow,
	int mark_red,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;
	ret = RTE_TM_FUNC(port_id, mark_ip_dscp)(dev,
		mark_green, mark_yellow, mark_red, error);

	rte_tm_trace_mark_ip_dscp(port_id, mark_green, mark_yellow, mark_red,
				  ret);

	return ret;
}