/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_log.h>
#include <dev_driver.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_telemetry.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"

static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = rte_event_devices;

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return eventdev_globals.nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
	int i;
	uint8_t cmp;

	if (!name)
		return -EINVAL;

	for (i = 0; i < eventdev_globals.nb_devs; i++) {
		cmp = (strncmp(rte_event_devices[i].data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
			(rte_event_devices[i].dev ? (strncmp(
				rte_event_devices[i].dev->driver->name, name,
					 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
		if (cmp && (rte_event_devices[i].attached ==
					RTE_EVENTDEV_ATTACHED)) {
			rte_eventdev_trace_get_dev_id(name, i);
			return i;
		}
	}
	return -ENODEV;
}
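
/*
 * Usage sketch (illustrative, not part of the library): look up a device by
 * name. The vdev name "event_sw0" and the RTE_EVENTDEV_DOC_EXAMPLES guard
 * are assumptions made for this example only.
 */
#ifdef RTE_EVENTDEV_DOC_EXAMPLES
static int
example_lookup_dev(void)
{
	int dev_id = rte_event_dev_get_dev_id("event_sw0");

	/* Negative values are -EINVAL (bad name) or -ENODEV (not found). */
	return dev_id;
}
#endif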

int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_eventdev_trace_socket_id(dev_id, dev, dev->data->socket_id);

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;

	rte_eventdev_trace_info_get(dev_id, dev_info, dev_info->dev);

	return 0;
}
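
/*
 * Usage sketch (illustrative): query device limits before configuring.
 * example_print_limits() and the RTE_EVENTDEV_DOC_EXAMPLES guard are
 * hypothetical names used only here.
 */
#ifdef RTE_EVENTDEV_DOC_EXAMPLES
static int
example_print_limits(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	printf("dev%u: max_queues=%u max_ports=%u max_num_events=%d\n",
	       dev_id, info.max_event_queues, info.max_event_ports,
	       info.max_num_events);
	return 0;
}
#endif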

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	rte_eventdev_trace_eth_rx_adapter_caps_get(dev_id, eth_port_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
						&rte_eth_devices[eth_port_id],
						caps)
				: 0;
}

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
{
	struct rte_eventdev *dev;
	const struct event_timer_adapter_ops *ops;

	rte_eventdev_trace_timer_adapter_caps_get(dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->timer_adapter_caps_get == NULL)
		*caps = RTE_EVENT_TIMER_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->timer_adapter_caps_get ?
				(*dev->dev_ops->timer_adapter_caps_get)(dev,
									0,
									caps,
									&ops)
				: 0;
}

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
				  uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_cryptodev *cdev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (!rte_cryptodev_is_valid_dev(cdev_id))
		return -EINVAL;

	dev = &rte_eventdevs[dev_id];
	cdev = rte_cryptodev_pmd_get_dev(cdev_id);

	rte_eventdev_trace_crypto_adapter_caps_get(dev_id, dev, cdev_id, cdev);

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->crypto_adapter_caps_get == NULL)
		*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->crypto_adapter_caps_get ?
		(*dev->dev_ops->crypto_adapter_caps_get)
		(dev, cdev, caps) : 0;
}

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_eth_dev *eth_dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];
	eth_dev = &rte_eth_devices[eth_port_id];

	rte_eventdev_trace_eth_tx_adapter_caps_get(dev_id, dev, eth_port_id, eth_dev);

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
	else
		*caps = 0;

	return dev->dev_ops->eth_tx_adapter_caps_get ?
			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
								eth_dev,
								caps)
			: 0;
}
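
/*
 * Usage sketch (illustrative): the four *_caps_get() helpers above share one
 * pattern, shown here for the Rx adapter. The guard macro and function name
 * are hypothetical.
 */
#ifdef RTE_EVENTDEV_DOC_EXAMPLES
static int
example_rx_adapter_has_internal_port(uint8_t dev_id, uint16_t eth_port)
{
	uint32_t caps;
	int ret;

	ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port, &caps);
	if (ret < 0)
		return ret;

	/* Nonzero when the PMD can feed events without a service core. */
	return !!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
}
#endif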

static inline int
event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	if (nb_queues != 0) {
		queues_cfg = dev->data->queues_cfg;
		if (*dev->dev_ops->queue_release == NULL)
			return -ENOTSUP;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else {
		if (*dev->dev_ops->queue_release == NULL)
			return -ENOTSUP;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}

#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

static inline int
event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	if (nb_ports != 0) { /* re-config */
		if (*dev->dev_ops->port_release == NULL)
			return -ENOTSUP;

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}
	} else {
		if (*dev->dev_ops->port_release == NULL)
			return -ENOTSUP;

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++) {
			(*dev->dev_ops->port_release)(ports[i]);
			ports[i] = NULL;
		}
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}

int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_event_dev_info info;
	struct rte_eventdev *dev;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check that dequeue_timeout_ns is within the supported range */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				 info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check that nb_events_limit is within the supported range */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check that nb_event_queues is within the supported range */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_queues,
				 info.max_event_queues,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues -
			dev_conf->nb_single_link_event_port_queues >
			info.max_event_queues) {
		RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
				 dev_id, dev_conf->nb_event_queues,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_queues);
		return -EINVAL;
	}
	if (dev_conf->nb_single_link_event_port_queues >
			dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_queues);
		return -EINVAL;
	}

	/* Check that nb_event_ports is within the supported range */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_ports,
				 info.max_event_ports,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports -
			dev_conf->nb_single_link_event_port_queues
			> info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
				 dev_id, dev_conf->nb_event_ports,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_ports);
		return -EINVAL;
	}

	if (dev_conf->nb_single_link_event_port_queues >
	    dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR(
				 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_ports);
		return -EINVAL;
	}

	/* Check that nb_event_queue_flows is within the supported range */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check that nb_event_port_dequeue_depth is within the supported range */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		 (dev_conf->nb_event_port_dequeue_depth >
			 info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check that nb_event_port_enqueue_depth is within the supported range */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			 info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
				 diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
				 diag);
		return diag;
	}

	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
		event_dev_queue_config(dev, 0);
		event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
	return diag;
}
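
/*
 * Usage sketch (illustrative): a minimal single-queue, single-port
 * configuration derived from the device limits queried above. The guard
 * macro and example_configure() are hypothetical.
 */
#ifdef RTE_EVENTDEV_DOC_EXAMPLES
static int
example_configure(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg = {0};
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	cfg.nb_event_queues = 1;
	cfg.nb_event_ports = 1;
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

	return rte_event_dev_configure(dev_id, &cfg);
}
#endif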

static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->queue_def_conf == NULL)
		return -ENOTSUP;
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);

	rte_eventdev_trace_queue_default_conf_get(dev_id, dev, queue_id, queue_conf);

	return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ATOMIC)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ORDERED)
		))
		return 1;
	else
		return 0;
}

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_setup == NULL)
		return -ENOTSUP;

	if (queue_conf == NULL) {
		if (*dev->dev_ops->queue_def_conf == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
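
/*
 * Usage sketch (illustrative): passing a NULL conf, as handled above, sets
 * the queue up with the driver's default configuration. Guard macro and
 * function name are hypothetical.
 */
#ifdef RTE_EVENTDEV_DOC_EXAMPLES
static int
example_setup_default_queue(uint8_t dev_id, uint8_t queue_id)
{
	/* NULL selects the per-driver default queue configuration. */
	return rte_event_queue_setup(dev_id, queue_id, NULL);
}
#endif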

static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->port_def_conf == NULL)
		return -ENOTSUP;
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);

	rte_eventdev_trace_port_default_conf_get(dev_id, dev, port_id, port_conf);

	return 0;
}

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				 dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (port_conf &&
	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
	    !(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Implicit release disable not supported",
			dev_id, port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->port_setup == NULL)
		return -ENOTSUP;

	if (port_conf == NULL) {
		if (*dev->dev_ops->port_def_conf == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port (default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
	if (diag < 0)
		return diag;

	return 0;
}
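
/*
 * Usage sketch (illustrative): set a port up from the driver defaults and
 * then override new_event_threshold. Guard macro and function name are
 * hypothetical.
 */
#ifdef RTE_EVENTDEV_DOC_EXAMPLES
static int
example_setup_port(uint8_t dev_id, uint8_t port_id, int32_t threshold)
{
	struct rte_event_port_conf conf;
	int ret;

	ret = rte_event_port_default_conf_get(dev_id, port_id, &conf);
	if (ret < 0)
		return ret;

	conf.new_event_threshold = threshold;
	return rte_event_port_setup(dev_id, port_id, &conf);
}
#endif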

void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		       rte_eventdev_port_flush_t release_cb, void *args)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];

	rte_eventdev_trace_port_quiesce(dev_id, dev, port_id, args);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return;
	}

	if (dev->dev_ops->port_quiesce)
		(*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
					      release_cb, args);
}

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	rte_eventdev_trace_attr_get(dev_id, dev, attr_id, *attr_value);

	return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
	{
		uint32_t config;

		config = dev->data->ports_cfg[port_id].event_port_cfg;
		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
		break;
	}
	default:
		return -EINVAL;
	}

	rte_eventdev_trace_port_attr_get(dev_id, dev, port_id, attr_id, *attr_value);

	return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	case RTE_EVENT_QUEUE_ATTR_WEIGHT:
		*attr_value = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->weight;
		break;
	case RTE_EVENT_QUEUE_ATTR_AFFINITY:
		*attr_value = RTE_EVENT_QUEUE_AFFINITY_LOWEST;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->affinity;
		break;
	default:
		return -EINVAL;
	}

	rte_eventdev_trace_queue_attr_get(dev_id, dev, queue_id, attr_id, *attr_value);

	return 0;
}

int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint64_t attr_value)
{
	struct rte_eventdev *dev;

	rte_eventdev_trace_queue_attr_set(dev_id, queue_id, attr_id, attr_value);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	if (!(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR)) {
		RTE_EDEV_LOG_ERR(
			"Device %" PRIu8 " does not support changing queue attributes at runtime",
			dev_id);
		return -ENOTSUP;
	}

	if (*dev->dev_ops->queue_attr_set == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->queue_attr_set)(dev, queue_id, attr_id,
					       attr_value);
}

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_link == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
	return diag;
}
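
/*
 * Usage sketch (illustrative): NULL queue and priority arrays, as handled
 * above, link every configured queue at normal priority. Guard macro
 * hypothetical.
 */
#ifdef RTE_EVENTDEV_DOC_EXAMPLES
static int
example_link_all_queues(uint8_t dev_id, uint8_t port_id)
{
	int nb_linked = rte_event_port_link(dev_id, port_id, NULL, NULL, 0);

	/* On failure the count is short and rte_errno holds the cause. */
	return nb_linked;
}
#endif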

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, diag, j;
	uint16_t *links_map;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_unlink == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

	if (queues == NULL) {
		j = 0;
		for (i = 0; i < dev->data->nb_queues; i++) {
			if (links_map[i] !=
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
				all_queues[j] = i;
				j++;
			}
		}
		queues = all_queues;
	} else {
		for (j = 0; j < nb_unlinks; j++) {
			if (links_map[queues[j]] ==
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
				break;
		}
	}

	nb_unlinks = j;
	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
	return diag;
}

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	rte_eventdev_trace_port_unlinks_in_progress(dev_id, port_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Return 0 if the PMD does not implement unlinks in progress.
	 * This allows PMDs which handle unlink synchronously to not implement
	 * this function at all.
	 */
	if (*dev->dev_ops->port_unlinks_in_progress == NULL)
		return 0;

	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
			dev->data->ports[port_id]);
}
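
/*
 * Usage sketch (illustrative): unlink everything from a port and wait for
 * the unlinks to complete. rte_pause() comes from <rte_pause.h>, which this
 * file does not include; guard macro hypothetical.
 */
#ifdef RTE_EVENTDEV_DOC_EXAMPLES
static void
example_unlink_and_wait(uint8_t dev_id, uint8_t port_id)
{
	/* NULL unlinks every queue currently linked to the port. */
	rte_event_port_unlink(dev_id, port_id, NULL, 0);

	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
		rte_pause();
}
#endif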

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}

	rte_eventdev_trace_port_links_get(dev_id, port_id, count);

	return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	rte_eventdev_trace_dequeue_timeout_ticks(dev_id, ns, timeout_ticks);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->timeout_ticks == NULL)
		return -ENOTSUP;

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
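
/*
 * Usage sketch (illustrative): convert a 100 us wait into device ticks for
 * use with rte_event_dequeue_burst(). Guard macro hypothetical.
 */
#ifdef RTE_EVENTDEV_DOC_EXAMPLES
static int
example_timeout_ticks(uint8_t dev_id, uint64_t *ticks)
{
	/* 100 us expressed in nanoseconds */
	return rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, ticks);
}
#endif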

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (!dev->data->service_inited)
		return -ESRCH;

	*service_id = dev->data->service_id;

	rte_eventdev_trace_service_id_get(dev_id, *service_id);

	return 0;
}

int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dump == NULL)
		return -ENOTSUP;
	if (f == NULL)
		return -EINVAL;

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}

static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		uint64_t *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const uint64_t ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
				ids, values, n);
	return -ENOTSUP;
}

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		uint64_t *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	uint64_t temp = -1;

	if (id != NULL)
		*id = (uint64_t)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}

int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint64_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}

int rte_event_pmd_selftest_seqn_dynfield_offset = -1;

int rte_event_dev_selftest(uint8_t dev_id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
		.name = "rte_event_pmd_selftest_seqn_dynfield",
		.size = sizeof(rte_event_pmd_selftest_seqn_t),
		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
	};
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->dev_selftest != NULL) {
		rte_event_pmd_selftest_seqn_dynfield_offset =
			rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
		if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
			return -ENOMEM;
		return (*dev->dev_ops->dev_selftest)();
	}
	return -ENOTSUP;
}

struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
			     unsigned int cache_size, uint16_t nb_elem,
			     int socket_id)
{
	const char *mp_ops_name;
	struct rte_mempool *mp;
	unsigned int elt_sz;
	int ret;

	if (!nb_elem) {
		RTE_LOG(ERR, EVENTDEV,
			"Invalid number of elements=%d requested\n", nb_elem);
		rte_errno = EINVAL;
		return NULL;
	}

	elt_sz =
		sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
	mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
				      0);
	if (mp == NULL)
		return NULL;

	mp_ops_name = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
		goto err;
	}

	ret = rte_mempool_populate_default(mp);
	if (ret < 0)
		goto err;

	rte_eventdev_trace_vector_pool_create(mp, mp->name, mp->socket_id,
		mp->size, mp->cache_size, mp->elt_size);

	return mp;
err:
	rte_mempool_free(mp);
	rte_errno = -ret;
	return NULL;
}
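
/*
 * Usage sketch (illustrative): create a pool of event vectors able to hold
 * 64 mbuf pointers each. The pool name and sizes are arbitrary example
 * values; guard macro hypothetical.
 */
#ifdef RTE_EVENTDEV_DOC_EXAMPLES
static struct rte_mempool *
example_create_vector_pool(void)
{
	return rte_event_vector_pool_create("example_vec_pool",
					    16 * 1024, /* pool size */
					    0,         /* no per-lcore cache */
					    64,        /* elements per vector */
					    SOCKET_ID_ANY);
}
#endif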

int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_eventdev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);

	return 0;
}

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
					   rte_eventdev_stop_flush_t callback,
					   void *userdata)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);

	rte_eventdev_trace_stop_flush_callback_register(dev_id, callback, userdata);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	dev->dev_ops->dev_stop_flush = callback;
	dev->data->dev_stop_flush_arg = userdata;

	return 0;
}

void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
	rte_eventdev_trace_stop(dev_id);
	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
}

int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
	rte_eventdev_trace_close(dev_id);
	return (*dev->dev_ops->dev_close)(dev);
}
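
/*
 * Usage sketch (illustrative): the start/stop/close lifecycle enforced by
 * the functions above. Guard macro and function name hypothetical.
 */
#ifdef RTE_EVENTDEV_DOC_EXAMPLES
static int
example_lifecycle(uint8_t dev_id)
{
	int ret = rte_event_dev_start(dev_id);

	if (ret < 0)
		return ret;

	/* ... enqueue/dequeue on the configured ports ... */

	rte_event_dev_stop(dev_id);        /* must precede close */
	return rte_event_dev_close(dev_id);
}
#endif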

static inline int
eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		    int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memset(*data, 0, sizeof(struct rte_eventdev_data));
		for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
					RTE_EVENT_MAX_QUEUES_PER_DEV;
		     n++)
			(*data)->links_map[n] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	}

	return 0;
}

static inline uint8_t
eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval =
			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(eventdev->data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN);

			eventdev->data->dev_id = dev_id;
			eventdev->data->socket_id = socket_id;
			eventdev->data->dev_started = 0;
		}

		eventdev->attached = RTE_EVENTDEV_ATTACHED;
		eventdev_globals.nb_devs++;
	}

	return eventdev;
}

int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}

void
event_dev_probing_finish(struct rte_eventdev *eventdev)
{
	if (eventdev == NULL)
		return;

	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
			     eventdev);
}

static int
handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	uint8_t dev_id;
	int ndev = rte_event_dev_count();

	if (ndev < 1)
		return -1;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_ATTACHED)
			rte_tel_data_add_array_int(d, dev_id);
	}

	return 0;
}

static int
handle_port_list(const char *cmd __rte_unused,
		 const char *params,
		 struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_ports; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

static int
handle_queue_list(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_queues; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

static int
handle_queue_links(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int i, ret, port_id = 0;
	char *end_param;
	uint8_t dev_id;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_id = strtoul(p_param, &end_param, 10);
	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	for (i = 0; i < ret; i++) {
		char qid_name[32];

		snprintf(qid_name, 31, "qid_%u", queues[i]);
		rte_tel_data_add_dict_uint(d, qid_name, priorities[i]);
	}

	return 0;
}

static int
eventdev_build_telemetry_data(int dev_id,
			      enum rte_event_dev_xstats_mode mode,
			      int port_queue_id,
			      struct rte_tel_data *d)
{
	struct rte_event_dev_xstats_name *xstat_names;
	uint64_t *ids;
	uint64_t *values;
	int i, ret, num_xstats;

	num_xstats = rte_event_dev_xstats_names_get(dev_id,
						    mode,
						    port_queue_id,
						    NULL,
						    NULL,
						    0);

	if (num_xstats < 0)
		return -1;

	/* use one malloc for names */
	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
			     * num_xstats);
	if (xstat_names == NULL)
		return -1;

	ids = malloc((sizeof(uint64_t)) * num_xstats);
	if (ids == NULL) {
		free(xstat_names);
		return -1;
	}

	values = malloc((sizeof(uint64_t)) * num_xstats);
	if (values == NULL) {
		free(xstat_names);
		free(ids);
		return -1;
	}

	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
					     xstat_names, ids, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
				       ids, values, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_uint(d, xstat_names[i].name, values[i]);

	free(xstat_names);
	free(ids);
	free(values);
	return 0;
}

static int
handle_dev_xstats(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int dev_id;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
}

static int
handle_port_xstats(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_PORT;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

static int
handle_queue_xstats(const char *cmd __rte_unused,
		    const char *params,
		    struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_QUEUE;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

static int
handle_dev_dump(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	char *buf, *end_param;
	int dev_id, ret;
	FILE *f;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
	if (buf == NULL)
		return -ENOMEM;

	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
	if (f == NULL) {
		free(buf);
		return -EINVAL;
	}

	ret = rte_event_dev_dump(dev_id, f);
	fclose(f);
	if (ret == 0) {
		rte_tel_data_start_dict(d);
		rte_tel_data_string(d, buf);
	}

	free(buf);
	return ret;
}

RTE_INIT(eventdev_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
			"Returns list of available eventdevs. Takes no parameters");
	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
			"Returns list of available ports. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
			"Returns list of available queues. Parameter: DevID");

	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
			"Returns stats for an eventdev. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
			"Returns stats for an eventdev port. Params: DevID,PortID");
	rte_telemetry_register_cmd("/eventdev/queue_xstats",
			handle_queue_xstats,
			"Returns stats for an eventdev queue. Params: DevID,QueueID");
	rte_telemetry_register_cmd("/eventdev/dev_dump", handle_dev_dump,
			"Returns dump information for an eventdev. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
			"Returns links for an eventdev port. Params: DevID,PortID");
}
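
/*
 * Example (illustrative): the endpoints registered above can be queried
 * with the usertools/dpdk-telemetry.py client, e.g.:
 *   --> /eventdev/dev_list
 *   --> /eventdev/port_xstats,0,1    (device 0, port 1)
 *   --> /eventdev/queue_links,0,0    (device 0, port 0)
 */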