/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_log.h>
#include <dev_driver.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_telemetry.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"

static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = rte_event_devices;

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];

/* Event dev northbound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return eventdev_globals.nb_devs;
}

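/*
 * Look up a device ID by name: the name is matched against the device's
 * data name and, when a bus device is present, its driver name; only
 * devices in the attached state are reported.
 */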
int
rte_event_dev_get_dev_id(const char *name)
{
	int i;
	uint8_t cmp;

	if (!name)
		return -EINVAL;

	for (i = 0; i < eventdev_globals.nb_devs; i++) {
		cmp = (strncmp(rte_event_devices[i].data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
			(rte_event_devices[i].dev ? (strncmp(
				rte_event_devices[i].dev->driver->name, name,
					 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
		if (cmp && (rte_event_devices[i].attached ==
					RTE_EVENTDEV_ATTACHED))
			return i;
	}
	return -ENODEV;
}

int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	return 0;
}

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
						&rte_eth_devices[eth_port_id],
						caps)
				: 0;
}

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
{
	struct rte_eventdev *dev;
	const struct event_timer_adapter_ops *ops;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->timer_adapter_caps_get == NULL)
		*caps = RTE_EVENT_TIMER_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->timer_adapter_caps_get ?
				(*dev->dev_ops->timer_adapter_caps_get)(dev,
									0,
									caps,
									&ops)
				: 0;
}

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
				  uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_cryptodev *cdev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (!rte_cryptodev_is_valid_dev(cdev_id))
		return -EINVAL;

	dev = &rte_eventdevs[dev_id];
	cdev = rte_cryptodev_pmd_get_dev(cdev_id);

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->crypto_adapter_caps_get == NULL)
		*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->crypto_adapter_caps_get ?
		(*dev->dev_ops->crypto_adapter_caps_get)
		(dev, cdev, caps) : 0;
}

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_eth_dev *eth_dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];
	eth_dev = &rte_eth_devices[eth_port_id];

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
	else
		*caps = 0;

	return dev->dev_ops->eth_tx_adapter_caps_get ?
			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
								eth_dev,
								caps)
			: 0;
}

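/*
 * Resize the per-device queue configuration for a (re)configure: queues
 * beyond the new count are released, newly added slots are zeroed, and a
 * count of zero releases every queue.
 */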
static inline int
event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	if (nb_queues != 0) {
		queues_cfg = dev->data->queues_cfg;
		if (*dev->dev_ops->queue_release == NULL)
			return -ENOTSUP;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else {
		if (*dev->dev_ops->queue_release == NULL)
			return -ENOTSUP;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}

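/* Sentinel stored in links_map[] for queue slots that are not linked. */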
#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

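/*
 * Resize the port, port-config and links-map state, mirroring
 * event_dev_queue_config(): excess ports are released, new slots are
 * zeroed and their links_map entries are marked invalid.
 */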
static inline int
event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	if (nb_ports != 0) { /* re-config */
		if (*dev->dev_ops->port_release == NULL)
			return -ENOTSUP;

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}
	} else {
		if (*dev->dev_ops->port_release == NULL)
			return -ENOTSUP;

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++) {
			(*dev->dev_ops->port_release)(ports[i]);
			ports[i] = NULL;
		}
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}

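/*
 * Validate dev_conf against the limits advertised by the PMD's
 * dev_infos_get(), resize queue and port state, then hand the
 * configuration to the PMD. The device must be stopped.
 *
 * Illustrative caller-side sketch (hedged: the values are arbitrary and
 * not taken from this file; only the field and function names are):
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &cfg);
 */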
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_event_dev_info info;
	struct rte_eventdev *dev;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				 info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_queues,
				 info.max_event_queues,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues -
			dev_conf->nb_single_link_event_port_queues >
			info.max_event_queues) {
		RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
				 dev_id, dev_conf->nb_event_queues,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_queues);
		return -EINVAL;
	}
	if (dev_conf->nb_single_link_event_port_queues >
			dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_ports,
				 info.max_event_ports,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports -
			dev_conf->nb_single_link_event_port_queues
			> info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
				 dev_id, dev_conf->nb_event_ports,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_ports);
		return -EINVAL;
	}

	if (dev_conf->nb_single_link_event_port_queues >
	    dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR(
				 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		 (dev_conf->nb_event_port_dequeue_depth >
			 info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			 info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
				 diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
				 diag);
		return diag;
	}

	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
		event_dev_queue_config(dev, 0);
		event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
	return diag;
}

static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->queue_def_conf == NULL)
		return -ENOTSUP;
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ATOMIC)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ORDERED)
		))
		return 1;
	else
		return 0;
}

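/*
 * Set up a queue: for atomic and ordered queues, validate
 * nb_atomic_flows / nb_atomic_order_sequences against the configured
 * flow limit; fall back to the PMD default configuration when
 * queue_conf is NULL; cache the applied configuration in queues_cfg[].
 */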
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_setup == NULL)
		return -ENOTSUP;

	if (queue_conf == NULL) {
		if (*dev->dev_ops->queue_def_conf == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}

static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->port_def_conf == NULL)
		return -ENOTSUP;
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}

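/*
 * Set up a port: bounds-check the new-event threshold and queue depths
 * against the device configuration, apply PMD defaults when port_conf
 * is NULL, and unlink all queues so a freshly set-up port starts with
 * no links.
 */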
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				 dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (port_conf &&
	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
	    !(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Implicit release disable not supported",
			dev_id, port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->port_setup == NULL)
		return -ENOTSUP;

	if (port_conf == NULL) {
		if (*dev->dev_ops->port_def_conf == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port (default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
	if (diag < 0)
		return diag;

	return 0;
}

void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		       rte_eventdev_port_flush_t release_cb, void *args)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return;
	}

	if (dev->dev_ops->port_quiesce)
		(*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
					      release_cb, args);
}

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
	{
		uint32_t config;

		config = dev->data->ports_cfg[port_id].event_port_cfg;
		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	case RTE_EVENT_QUEUE_ATTR_WEIGHT:
		*attr_value = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->weight;
		break;
	case RTE_EVENT_QUEUE_ATTR_AFFINITY:
		*attr_value = RTE_EVENT_QUEUE_AFFINITY_LOWEST;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->affinity;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint64_t attr_value)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	if (!(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR)) {
		RTE_EDEV_LOG_ERR(
			"Device %" PRIu8 " does not support changing queue attributes at runtime",
			dev_id);
		return -ENOTSUP;
	}

	if (*dev->dev_ops->queue_attr_set == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->queue_attr_set)(dev, queue_id, attr_id,
					       attr_value);
}

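/*
 * Link queues to a port. queues == NULL links every configured queue;
 * priorities == NULL applies RTE_EVENT_DEV_PRIORITY_NORMAL to each
 * link. Validation failures return 0 with rte_errno set, so callers
 * should compare the return value with the number of links requested.
 *
 * Illustrative sketch (hedged: dev_id/port_id and handle_error() are
 * hypothetical; only the API usage is taken from this file):
 *
 *	uint8_t q = 0, prio = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *
 *	if (rte_event_port_link(dev_id, port_id, &q, &prio, 1) != 1)
 *		handle_error(rte_errno);
 */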
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_link == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port's area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
	return diag;
}

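/*
 * Unlink queues from a port. queues == NULL unlinks every queue that is
 * currently linked; otherwise the request is truncated at the first
 * queue in the list that is not linked. links_map entries for the
 * queues actually unlinked are invalidated.
 */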
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, diag, j;
	uint16_t *links_map;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_unlink == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port's area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

	if (queues == NULL) {
		j = 0;
		for (i = 0; i < dev->data->nb_queues; i++) {
			if (links_map[i] !=
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
				all_queues[j] = i;
				j++;
			}
		}
		queues = all_queues;
	} else {
		for (j = 0; j < nb_unlinks; j++) {
			if (links_map[queues[j]] ==
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
				break;
		}
	}

	nb_unlinks = j;
	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
	return diag;
}

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Return 0 if the PMD does not implement unlinks in progress.
	 * This allows PMDs which handle unlink synchronously to not implement
	 * this function at all.
	 */
	if (*dev->dev_ops->port_unlinks_in_progress == NULL)
		return 0;

	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
			dev->data->ports[port_id]);
}

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port's area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->timeout_ticks == NULL)
		return -ENOTSUP;

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	return dev->data->service_inited ? 0 : -ESRCH;
}

int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dump == NULL)
		return -ENOTSUP;
	if (f == NULL)
		return -EINVAL;

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}

static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		uint64_t *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const uint64_t ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
				ids, values, n);
	return -ENOTSUP;
}

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		uint64_t *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	uint64_t temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}

int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint64_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}

int rte_event_pmd_selftest_seqn_dynfield_offset = -1;

int rte_event_dev_selftest(uint8_t dev_id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
		.name = "rte_event_pmd_selftest_seqn_dynfield",
		.size = sizeof(rte_event_pmd_selftest_seqn_t),
		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
	};
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->dev_selftest != NULL) {
		rte_event_pmd_selftest_seqn_dynfield_offset =
			rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
		if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
			return -ENOMEM;
		return (*dev->dev_ops->dev_selftest)();
	}
	return -ENOTSUP;
}

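/*
 * Create a mempool of rte_event_vector elements, each sized to carry
 * nb_elem pointers, using the platform's preferred mbuf mempool ops.
 */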
struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
			     unsigned int cache_size, uint16_t nb_elem,
			     int socket_id)
{
	const char *mp_ops_name;
	struct rte_mempool *mp;
	unsigned int elt_sz;
	int ret;

	if (!nb_elem) {
		RTE_LOG(ERR, EVENTDEV,
			"Invalid number of elements=%d requested\n", nb_elem);
		rte_errno = EINVAL;
		return NULL;
	}

	elt_sz =
		sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
	mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
				      0);
	if (mp == NULL)
		return NULL;

	mp_ops_name = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
		goto err;
	}

	ret = rte_mempool_populate_default(mp);
	if (ret < 0)
		goto err;

	return mp;
err:
	rte_mempool_free(mp);
	rte_errno = -ret;
	return NULL;
}

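/*
 * Start the device and, on success, point the public fast-path ops
 * table (rte_event_fp_ops) at this device's PMD implementations.
 */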
int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_eventdev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);

	return 0;
}

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
					   rte_eventdev_stop_flush_t callback,
					   void *userdata)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	dev->dev_ops->dev_stop_flush = callback;
	dev->data->dev_stop_flush_arg = userdata;

	return 0;
}

void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
	rte_eventdev_trace_stop(dev_id);
	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
}

int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
	rte_eventdev_trace_close(dev_id);
	return (*dev->dev_ops->dev_close)(dev);
}

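/*
 * Reserve (primary process) or look up (secondary process) the
 * per-device shared memzone. The primary zeroes the data and marks
 * every links_map slot invalid.
 */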
static inline int
eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		    int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memset(*data, 0, sizeof(struct rte_eventdev_data));
		for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
					RTE_EVENT_MAX_QUEUES_PER_DEV;
		     n++)
			(*data)->links_map[n] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	}

	return 0;
}

static inline uint8_t
eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval =
			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {

			strlcpy(eventdev->data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN);

			eventdev->data->dev_id = dev_id;
			eventdev->data->socket_id = socket_id;
			eventdev->data->dev_started = 0;
		}

		eventdev->attached = RTE_EVENTDEV_ATTACHED;
		eventdev_globals.nb_devs++;
	}

	return eventdev;
}

int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}

void
event_dev_probing_finish(struct rte_eventdev *eventdev)
{
	if (eventdev == NULL)
		return;

	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
			     eventdev);
}

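/*
 * Telemetry handlers. Parameters arrive as a comma-separated string,
 * e.g. "0" for a device ID alone or "0,1" for a device plus a port or
 * queue ID; extra trailing parameters are logged and ignored.
 */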
static int
handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	uint8_t dev_id;
	int ndev = rte_event_dev_count();

	if (ndev < 1)
		return -1;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_ATTACHED)
			rte_tel_data_add_array_int(d, dev_id);
	}

	return 0;
}

static int
handle_port_list(const char *cmd __rte_unused,
		 const char *params,
		 struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_ports; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

static int
handle_queue_list(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_queues; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

static int
handle_queue_links(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int i, ret, port_id = 0;
	char *end_param;
	uint8_t dev_id;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_id = strtoul(p_param, &end_param, 10);
	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	for (i = 0; i < ret; i++) {
		char qid_name[32];

		snprintf(qid_name, 31, "qid_%u", queues[i]);
		rte_tel_data_add_dict_uint(d, qid_name, priorities[i]);
	}

	return 0;
}

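/*
 * Gather xstats names, IDs and values for one device/port/queue and add
 * them to a telemetry dict. The first names_get call passes NULL
 * buffers so it only counts the entries, which sizes the three arrays.
 */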
static int
eventdev_build_telemetry_data(int dev_id,
			      enum rte_event_dev_xstats_mode mode,
			      int port_queue_id,
			      struct rte_tel_data *d)
{
	struct rte_event_dev_xstats_name *xstat_names;
	uint64_t *ids;
	uint64_t *values;
	int i, ret, num_xstats;

	num_xstats = rte_event_dev_xstats_names_get(dev_id,
						    mode,
						    port_queue_id,
						    NULL,
						    NULL,
						    0);

	if (num_xstats < 0)
		return -1;

	/* use one malloc for names */
	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
			     * num_xstats);
	if (xstat_names == NULL)
		return -1;

	ids = malloc((sizeof(uint64_t)) * num_xstats);
	if (ids == NULL) {
		free(xstat_names);
		return -1;
	}

	values = malloc((sizeof(uint64_t)) * num_xstats);
	if (values == NULL) {
		free(xstat_names);
		free(ids);
		return -1;
	}

	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
					     xstat_names, ids, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
				       ids, values, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_uint(d, xstat_names[i].name, values[i]);

	free(xstat_names);
	free(ids);
	free(values);
	return 0;
}

static int
handle_dev_xstats(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int dev_id;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
}

static int
handle_port_xstats(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_PORT;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

static int
handle_queue_xstats(const char *cmd __rte_unused,
		    const char *params,
		    struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_QUEUE;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

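/*
 * Capture the output of rte_event_dev_dump() in a memory-backed FILE
 * created with fmemopen() and return it as one telemetry string.
 */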
static int
handle_dev_dump(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	char *buf, *end_param;
	int dev_id, ret;
	FILE *f;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
	if (buf == NULL)
		return -ENOMEM;

	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
	if (f == NULL) {
		free(buf);
		return -EINVAL;
	}

	ret = rte_event_dev_dump(dev_id, f);
	fclose(f);
	if (ret == 0) {
		rte_tel_data_start_dict(d);
		rte_tel_data_string(d, buf);
	}

	free(buf);
	return ret;
}

RTE_INIT(eventdev_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
			"Returns list of available eventdevs. Takes no parameters");
	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
			"Returns list of available ports. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
			"Returns list of available queues. Parameter: DevID");

	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
			"Returns stats for an eventdev. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
			"Returns stats for an eventdev port. Params: DevID,PortID");
	rte_telemetry_register_cmd("/eventdev/queue_xstats",
			handle_queue_xstats,
			"Returns stats for an eventdev queue. Params: DevID,QueueID");
	rte_telemetry_register_cmd("/eventdev/dev_dump", handle_dev_dump,
			"Returns dump information for an eventdev. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
			"Returns links for an eventdev port. Params: DevID,PortID");
}