xref: /dpdk/lib/ethdev/rte_tm.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 
7 #include <rte_errno.h>
8 #include "ethdev_trace.h"
9 #include "rte_ethdev.h"
10 #include "rte_tm_driver.h"
11 #include "rte_tm.h"
12 
13 /* Get generic traffic manager operations structure from a port. */
14 const struct rte_tm_ops *
15 rte_tm_ops_get(uint16_t port_id, struct rte_tm_error *error)
16 {
17 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
18 	const struct rte_tm_ops *ops;
19 
20 	if (!rte_eth_dev_is_valid_port(port_id)) {
21 		rte_tm_error_set(error,
22 			ENODEV,
23 			RTE_TM_ERROR_TYPE_UNSPECIFIED,
24 			NULL,
25 			rte_strerror(ENODEV));
26 		return NULL;
27 	}
28 
29 	if ((dev->dev_ops->tm_ops_get == NULL) ||
30 		(dev->dev_ops->tm_ops_get(dev, &ops) != 0) ||
31 		(ops == NULL)) {
32 		rte_tm_error_set(error,
33 			ENOSYS,
34 			RTE_TM_ERROR_TYPE_UNSPECIFIED,
35 			NULL,
36 			rte_strerror(ENOSYS));
37 		return NULL;
38 	}
39 
40 	return ops;
41 }
42 
/* Resolve the TM ops table for @port_id and evaluate to callback @func.
 *
 * NOTE: this is a GNU statement-expression macro that contains early
 * "return" statements and implicitly uses a variable named "error" from
 * the caller's scope. It must therefore only be expanded directly inside
 * the body of an int-returning API function that declares
 * "struct rte_tm_error *error": it returns -rte_errno when the port has
 * no TM support, and -ENOSYS when the driver does not implement @func.
 */
#define RTE_TM_FUNC(port_id, func)			\
__extension__ ({					\
	const struct rte_tm_ops *ops =			\
		rte_tm_ops_get(port_id, error);		\
	if (ops == NULL)				\
		return -rte_errno;			\
							\
	if (ops->func == NULL)				\
		return -rte_tm_error_set(error,		\
			ENOSYS,				\
			RTE_TM_ERROR_TYPE_UNSPECIFIED,	\
			NULL,				\
			rte_strerror(ENOSYS));		\
							\
	ops->func;					\
})
59 
60 /* Get number of leaf nodes */
61 int
62 rte_tm_get_number_of_leaf_nodes(uint16_t port_id,
63 	uint32_t *n_leaf_nodes,
64 	struct rte_tm_error *error)
65 {
66 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
67 	const struct rte_tm_ops *ops =
68 		rte_tm_ops_get(port_id, error);
69 
70 	if (ops == NULL)
71 		return -rte_errno;
72 
73 	if (n_leaf_nodes == NULL) {
74 		rte_tm_error_set(error,
75 			EINVAL,
76 			RTE_TM_ERROR_TYPE_UNSPECIFIED,
77 			NULL,
78 			rte_strerror(EINVAL));
79 		return -rte_errno;
80 	}
81 
82 	*n_leaf_nodes = dev->data->nb_tx_queues;
83 
84 	rte_tm_trace_get_number_of_leaf_nodes(port_id, *n_leaf_nodes);
85 
86 	return 0;
87 }
88 
89 /* Check node type (leaf or non-leaf) */
90 int
91 rte_tm_node_type_get(uint16_t port_id,
92 	uint32_t node_id,
93 	int *is_leaf,
94 	struct rte_tm_error *error)
95 {
96 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
97 	int ret;
98 	ret = RTE_TM_FUNC(port_id, node_type_get)(dev,
99 		node_id, is_leaf, error);
100 
101 	rte_tm_trace_node_type_get(port_id, node_id, *is_leaf, ret);
102 
103 	return ret;
104 }
105 
106 /* Get capabilities */
107 int rte_tm_capabilities_get(uint16_t port_id,
108 	struct rte_tm_capabilities *cap,
109 	struct rte_tm_error *error)
110 {
111 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
112 	int ret;
113 	ret = RTE_TM_FUNC(port_id, capabilities_get)(dev,
114 		cap, error);
115 
116 	rte_tm_trace_capabilities_get(port_id, cap, ret);
117 
118 	return ret;
119 }
120 
121 /* Get level capabilities */
122 int rte_tm_level_capabilities_get(uint16_t port_id,
123 	uint32_t level_id,
124 	struct rte_tm_level_capabilities *cap,
125 	struct rte_tm_error *error)
126 {
127 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
128 	int ret;
129 	ret = RTE_TM_FUNC(port_id, level_capabilities_get)(dev,
130 		level_id, cap, error);
131 
132 	rte_tm_trace_level_capabilities_get(port_id, level_id, cap, ret);
133 
134 	return ret;
135 }
136 
137 /* Get node capabilities */
138 int rte_tm_node_capabilities_get(uint16_t port_id,
139 	uint32_t node_id,
140 	struct rte_tm_node_capabilities *cap,
141 	struct rte_tm_error *error)
142 {
143 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
144 	int ret;
145 	ret = RTE_TM_FUNC(port_id, node_capabilities_get)(dev,
146 		node_id, cap, error);
147 
148 	rte_tm_trace_node_capabilities_get(port_id, node_id, cap, ret);
149 
150 	return ret;
151 }
152 
153 /* Add WRED profile */
154 int rte_tm_wred_profile_add(uint16_t port_id,
155 	uint32_t wred_profile_id,
156 	struct rte_tm_wred_params *profile,
157 	struct rte_tm_error *error)
158 {
159 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
160 	int ret;
161 	ret = RTE_TM_FUNC(port_id, wred_profile_add)(dev,
162 		wred_profile_id, profile, error);
163 
164 	rte_tm_trace_wred_profile_add(port_id, wred_profile_id, profile, ret);
165 
166 	return ret;
167 }
168 
169 /* Delete WRED profile */
170 int rte_tm_wred_profile_delete(uint16_t port_id,
171 	uint32_t wred_profile_id,
172 	struct rte_tm_error *error)
173 {
174 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
175 	int ret;
176 	ret = RTE_TM_FUNC(port_id, wred_profile_delete)(dev,
177 		wred_profile_id, error);
178 
179 	rte_tm_trace_wred_profile_delete(port_id, wred_profile_id, ret);
180 
181 	return ret;
182 }
183 
184 /* Add/update shared WRED context */
185 int rte_tm_shared_wred_context_add_update(uint16_t port_id,
186 	uint32_t shared_wred_context_id,
187 	uint32_t wred_profile_id,
188 	struct rte_tm_error *error)
189 {
190 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
191 	int ret;
192 	ret = RTE_TM_FUNC(port_id, shared_wred_context_add_update)(dev,
193 		shared_wred_context_id, wred_profile_id, error);
194 
195 	rte_tm_trace_shared_wred_context_add_update(port_id,
196 						    shared_wred_context_id,
197 						    wred_profile_id, ret);
198 
199 	return ret;
200 }
201 
202 /* Delete shared WRED context */
203 int rte_tm_shared_wred_context_delete(uint16_t port_id,
204 	uint32_t shared_wred_context_id,
205 	struct rte_tm_error *error)
206 {
207 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
208 	int ret;
209 	ret = RTE_TM_FUNC(port_id, shared_wred_context_delete)(dev,
210 		shared_wred_context_id, error);
211 
212 	rte_tm_trace_shared_wred_context_delete(port_id,
213 						shared_wred_context_id, ret);
214 
215 	return ret;
216 }
217 
218 /* Add shaper profile */
219 int rte_tm_shaper_profile_add(uint16_t port_id,
220 	uint32_t shaper_profile_id,
221 	struct rte_tm_shaper_params *profile,
222 	struct rte_tm_error *error)
223 {
224 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
225 	int ret;
226 	ret = RTE_TM_FUNC(port_id, shaper_profile_add)(dev,
227 		shaper_profile_id, profile, error);
228 
229 	rte_tm_trace_shaper_profile_add(port_id, shaper_profile_id, profile,
230 					ret);
231 
232 	return ret;
233 }
234 
235 /* Delete WRED profile */
236 int rte_tm_shaper_profile_delete(uint16_t port_id,
237 	uint32_t shaper_profile_id,
238 	struct rte_tm_error *error)
239 {
240 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
241 	int ret;
242 	ret = RTE_TM_FUNC(port_id, shaper_profile_delete)(dev,
243 		shaper_profile_id, error);
244 
245 	rte_tm_trace_shaper_profile_delete(port_id, shaper_profile_id, ret);
246 
247 	return ret;
248 }
249 
250 /* Add shared shaper */
251 int rte_tm_shared_shaper_add_update(uint16_t port_id,
252 	uint32_t shared_shaper_id,
253 	uint32_t shaper_profile_id,
254 	struct rte_tm_error *error)
255 {
256 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
257 	int ret;
258 	ret = RTE_TM_FUNC(port_id, shared_shaper_add_update)(dev,
259 		shared_shaper_id, shaper_profile_id, error);
260 
261 	rte_tm_trace_shared_shaper_add_update(port_id, shared_shaper_id,
262 					      shaper_profile_id, ret);
263 
264 	return ret;
265 }
266 
267 /* Delete shared shaper */
268 int rte_tm_shared_shaper_delete(uint16_t port_id,
269 	uint32_t shared_shaper_id,
270 	struct rte_tm_error *error)
271 {
272 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
273 	int ret;
274 	ret = RTE_TM_FUNC(port_id, shared_shaper_delete)(dev,
275 		shared_shaper_id, error);
276 
277 	rte_tm_trace_shared_shaper_delete(port_id, shared_shaper_id, ret);
278 
279 	return ret;
280 }
281 
282 /* Add node to port traffic manager hierarchy */
283 int rte_tm_node_add(uint16_t port_id,
284 	uint32_t node_id,
285 	uint32_t parent_node_id,
286 	uint32_t priority,
287 	uint32_t weight,
288 	uint32_t level_id,
289 	struct rte_tm_node_params *params,
290 	struct rte_tm_error *error)
291 {
292 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
293 	int ret;
294 	ret = RTE_TM_FUNC(port_id, node_add)(dev,
295 		node_id, parent_node_id, priority, weight, level_id,
296 		params, error);
297 
298 	rte_tm_trace_node_add(port_id, node_id, parent_node_id, priority,
299 			      weight, level_id, params, ret);
300 
301 	return ret;
302 }
303 
304 /* Delete node from traffic manager hierarchy */
305 int rte_tm_node_delete(uint16_t port_id,
306 	uint32_t node_id,
307 	struct rte_tm_error *error)
308 {
309 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
310 	int ret;
311 	ret = RTE_TM_FUNC(port_id, node_delete)(dev,
312 		node_id, error);
313 
314 	rte_tm_trace_node_delete(port_id, node_id, ret);
315 
316 	return ret;
317 }
318 
319 /* Suspend node */
320 int rte_tm_node_suspend(uint16_t port_id,
321 	uint32_t node_id,
322 	struct rte_tm_error *error)
323 {
324 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
325 	int ret;
326 	ret = RTE_TM_FUNC(port_id, node_suspend)(dev,
327 		node_id, error);
328 
329 	rte_tm_trace_node_suspend(port_id, node_id, ret);
330 
331 	return ret;
332 }
333 
334 /* Resume node */
335 int rte_tm_node_resume(uint16_t port_id,
336 	uint32_t node_id,
337 	struct rte_tm_error *error)
338 {
339 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
340 	int ret;
341 	ret = RTE_TM_FUNC(port_id, node_resume)(dev,
342 		node_id, error);
343 
344 	rte_tm_trace_node_resume(port_id, node_id, ret);
345 
346 	return ret;
347 }
348 
349 /* Commit the initial port traffic manager hierarchy */
350 int rte_tm_hierarchy_commit(uint16_t port_id,
351 	int clear_on_fail,
352 	struct rte_tm_error *error)
353 {
354 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
355 	int ret;
356 	ret = RTE_TM_FUNC(port_id, hierarchy_commit)(dev,
357 		clear_on_fail, error);
358 
359 	rte_tm_trace_hierarchy_commit(port_id, clear_on_fail, ret);
360 
361 	return ret;
362 }
363 
364 /* Update node parent  */
365 int rte_tm_node_parent_update(uint16_t port_id,
366 	uint32_t node_id,
367 	uint32_t parent_node_id,
368 	uint32_t priority,
369 	uint32_t weight,
370 	struct rte_tm_error *error)
371 {
372 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
373 	int ret;
374 	ret = RTE_TM_FUNC(port_id, node_parent_update)(dev,
375 		node_id, parent_node_id, priority, weight, error);
376 
377 	rte_tm_trace_node_parent_update(port_id, node_id, parent_node_id,
378 					priority, weight, ret);
379 
380 	return ret;
381 }
382 
383 /* Update node private shaper */
384 int rte_tm_node_shaper_update(uint16_t port_id,
385 	uint32_t node_id,
386 	uint32_t shaper_profile_id,
387 	struct rte_tm_error *error)
388 {
389 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
390 	int ret;
391 	ret = RTE_TM_FUNC(port_id, node_shaper_update)(dev,
392 		node_id, shaper_profile_id, error);
393 
394 	rte_tm_trace_node_shaper_update(port_id, node_id, shaper_profile_id,
395 					ret);
396 
397 	return ret;
398 }
399 
400 /* Update node shared shapers */
401 int rte_tm_node_shared_shaper_update(uint16_t port_id,
402 	uint32_t node_id,
403 	uint32_t shared_shaper_id,
404 	int add,
405 	struct rte_tm_error *error)
406 {
407 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
408 	int ret;
409 	ret = RTE_TM_FUNC(port_id, node_shared_shaper_update)(dev,
410 		node_id, shared_shaper_id, add, error);
411 
412 	rte_tm_trace_node_shared_shaper_update(port_id, node_id,
413 					       shared_shaper_id, add, ret);
414 
415 	return ret;
416 }
417 
418 /* Update node stats */
419 int rte_tm_node_stats_update(uint16_t port_id,
420 	uint32_t node_id,
421 	uint64_t stats_mask,
422 	struct rte_tm_error *error)
423 {
424 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
425 	int ret;
426 	ret = RTE_TM_FUNC(port_id, node_stats_update)(dev,
427 		node_id, stats_mask, error);
428 
429 	rte_tm_trace_node_stats_update(port_id, node_id, stats_mask, ret);
430 
431 	return ret;
432 }
433 
434 /* Update WFQ weight mode */
435 int rte_tm_node_wfq_weight_mode_update(uint16_t port_id,
436 	uint32_t node_id,
437 	int *wfq_weight_mode,
438 	uint32_t n_sp_priorities,
439 	struct rte_tm_error *error)
440 {
441 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
442 	int ret;
443 	ret = RTE_TM_FUNC(port_id, node_wfq_weight_mode_update)(dev,
444 		node_id, wfq_weight_mode, n_sp_priorities, error);
445 
446 	rte_tm_trace_node_wfq_weight_mode_update(port_id, node_id,
447 						 wfq_weight_mode,
448 						 n_sp_priorities, ret);
449 
450 	return ret;
451 }
452 
453 /* Update node congestion management mode */
454 int rte_tm_node_cman_update(uint16_t port_id,
455 	uint32_t node_id,
456 	enum rte_tm_cman_mode cman,
457 	struct rte_tm_error *error)
458 {
459 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
460 	int ret;
461 	ret = RTE_TM_FUNC(port_id, node_cman_update)(dev,
462 		node_id, cman, error);
463 
464 	rte_tm_trace_node_cman_update(port_id, node_id, cman, ret);
465 
466 	return ret;
467 }
468 
469 /* Update node private WRED context */
470 int rte_tm_node_wred_context_update(uint16_t port_id,
471 	uint32_t node_id,
472 	uint32_t wred_profile_id,
473 	struct rte_tm_error *error)
474 {
475 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
476 	int ret;
477 	ret = RTE_TM_FUNC(port_id, node_wred_context_update)(dev,
478 		node_id, wred_profile_id, error);
479 
480 	rte_tm_trace_node_wred_context_update(port_id, node_id, wred_profile_id,
481 					      ret);
482 
483 	return ret;
484 }
485 
486 /* Update node shared WRED context */
487 int rte_tm_node_shared_wred_context_update(uint16_t port_id,
488 	uint32_t node_id,
489 	uint32_t shared_wred_context_id,
490 	int add,
491 	struct rte_tm_error *error)
492 {
493 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
494 	int ret;
495 	ret = RTE_TM_FUNC(port_id, node_shared_wred_context_update)(dev,
496 		node_id, shared_wred_context_id, add, error);
497 
498 	rte_tm_trace_node_shared_wred_context_update(port_id, node_id,
499 						     shared_wred_context_id,
500 						     add, ret);
501 
502 	return ret;
503 }
504 
505 /* Read and/or clear stats counters for specific node */
506 int rte_tm_node_stats_read(uint16_t port_id,
507 	uint32_t node_id,
508 	struct rte_tm_node_stats *stats,
509 	uint64_t *stats_mask,
510 	int clear,
511 	struct rte_tm_error *error)
512 {
513 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
514 	int ret;
515 	ret = RTE_TM_FUNC(port_id, node_stats_read)(dev,
516 		node_id, stats, stats_mask, clear, error);
517 
518 	rte_tm_trace_node_stats_read(port_id, node_id, stats, *stats_mask,
519 				     clear, ret);
520 
521 	return ret;
522 }
523 
524 /* Packet marking - VLAN DEI */
525 int rte_tm_mark_vlan_dei(uint16_t port_id,
526 	int mark_green,
527 	int mark_yellow,
528 	int mark_red,
529 	struct rte_tm_error *error)
530 {
531 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
532 	int ret;
533 	ret = RTE_TM_FUNC(port_id, mark_vlan_dei)(dev,
534 		mark_green, mark_yellow, mark_red, error);
535 
536 	rte_tm_trace_mark_vlan_dei(port_id, mark_green, mark_yellow, mark_red,
537 				   ret);
538 
539 	return ret;
540 }
541 
542 /* Packet marking - IPv4/IPv6 ECN */
543 int rte_tm_mark_ip_ecn(uint16_t port_id,
544 	int mark_green,
545 	int mark_yellow,
546 	int mark_red,
547 	struct rte_tm_error *error)
548 {
549 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
550 	int ret;
551 	ret = RTE_TM_FUNC(port_id, mark_ip_ecn)(dev,
552 		mark_green, mark_yellow, mark_red, error);
553 
554 	rte_tm_trace_mark_ip_ecn(port_id, mark_green, mark_yellow, mark_red,
555 				 ret);
556 
557 	return ret;
558 }
559 
560 /* Packet marking - IPv4/IPv6 DSCP */
561 int rte_tm_mark_ip_dscp(uint16_t port_id,
562 	int mark_green,
563 	int mark_yellow,
564 	int mark_red,
565 	struct rte_tm_error *error)
566 {
567 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
568 	int ret;
569 	ret = RTE_TM_FUNC(port_id, mark_ip_dscp)(dev,
570 		mark_green, mark_yellow, mark_red, error);
571 
572 	rte_tm_trace_mark_ip_dscp(port_id, mark_green, mark_yellow, mark_red,
573 				  ret);
574 
575 	return ret;
576 }
577