xref: /dpdk/lib/eventdev/rte_eventdev.c (revision c2c4f87b12590d96f549c4ef04a04d29d3b8fb97)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4 
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 #include <sys/types.h>
14 #include <sys/queue.h>
15 
16 #include <rte_string_fns.h>
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_eal.h>
25 #include <rte_per_lcore.h>
26 #include <rte_lcore.h>
27 #include <rte_atomic.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_common.h>
30 #include <rte_malloc.h>
31 #include <rte_errno.h>
32 #include <rte_ethdev.h>
33 #include <rte_cryptodev.h>
34 #include <cryptodev_pmd.h>
35 #include <rte_telemetry.h>
36 
37 #include "rte_eventdev.h"
38 #include "eventdev_pmd.h"
39 #include "rte_eventdev_trace.h"
40 
41 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
42 
43 struct rte_eventdev *rte_eventdevs = rte_event_devices;
44 
45 static struct rte_eventdev_global eventdev_globals = {
46 	.nb_devs		= 0
47 };
48 
49 /* Event dev northbound API implementation */
50 
51 uint8_t
52 rte_event_dev_count(void)
53 {
54 	return eventdev_globals.nb_devs;
55 }
56 
57 int
58 rte_event_dev_get_dev_id(const char *name)
59 {
60 	int i;
61 	uint8_t cmp;
62 
63 	if (!name)
64 		return -EINVAL;
65 
66 	for (i = 0; i < eventdev_globals.nb_devs; i++) {
67 		cmp = (strncmp(rte_event_devices[i].data->name, name,
68 				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
69 			(rte_event_devices[i].dev ? (strncmp(
70 				rte_event_devices[i].dev->driver->name, name,
71 					 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
72 		if (cmp && (rte_event_devices[i].attached ==
73 					RTE_EVENTDEV_ATTACHED))
74 			return i;
75 	}
76 	return -ENODEV;
77 }
78 
79 int
80 rte_event_dev_socket_id(uint8_t dev_id)
81 {
82 	struct rte_eventdev *dev;
83 
84 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
85 	dev = &rte_eventdevs[dev_id];
86 
87 	return dev->data->socket_id;
88 }
89 
90 int
91 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
92 {
93 	struct rte_eventdev *dev;
94 
95 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
96 	dev = &rte_eventdevs[dev_id];
97 
98 	if (dev_info == NULL)
99 		return -EINVAL;
100 
101 	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
102 
103 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
104 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
105 
106 	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
107 
108 	dev_info->dev = dev->dev;
109 	return 0;
110 }
111 
112 int
113 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
114 				uint32_t *caps)
115 {
116 	struct rte_eventdev *dev;
117 
118 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
119 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
120 
121 	dev = &rte_eventdevs[dev_id];
122 
123 	if (caps == NULL)
124 		return -EINVAL;
125 
126 	if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
127 		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
128 	else
129 		*caps = 0;
130 
131 	return dev->dev_ops->eth_rx_adapter_caps_get ?
132 				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
133 						&rte_eth_devices[eth_port_id],
134 						caps)
135 				: 0;
136 }
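
/*
 * Note: when a PMD does not implement eth_rx_adapter_caps_get, the function
 * above reports RTE_EVENT_ETH_RX_ADAPTER_SW_CAP (the software Rx adapter
 * path) and returns 0, so *caps is always populated on success.
 */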
137 
138 int
139 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
140 {
141 	struct rte_eventdev *dev;
142 	const struct rte_event_timer_adapter_ops *ops;
143 
144 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
145 
146 	dev = &rte_eventdevs[dev_id];
147 
148 	if (caps == NULL)
149 		return -EINVAL;
150 	*caps = 0;
151 
152 	return dev->dev_ops->timer_adapter_caps_get ?
153 				(*dev->dev_ops->timer_adapter_caps_get)(dev,
154 									0,
155 									caps,
156 									&ops)
157 				: 0;
158 }
159 
160 int
161 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
162 				  uint32_t *caps)
163 {
164 	struct rte_eventdev *dev;
165 	struct rte_cryptodev *cdev;
166 
167 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
168 	if (!rte_cryptodev_is_valid_dev(cdev_id))
169 		return -EINVAL;
170 
171 	dev = &rte_eventdevs[dev_id];
172 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
173 
174 	if (caps == NULL)
175 		return -EINVAL;
176 	*caps = 0;
177 
178 	return dev->dev_ops->crypto_adapter_caps_get ?
179 		(*dev->dev_ops->crypto_adapter_caps_get)
180 		(dev, cdev, caps) : -ENOTSUP;
181 }
182 
183 int
184 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
185 				uint32_t *caps)
186 {
187 	struct rte_eventdev *dev;
188 	struct rte_eth_dev *eth_dev;
189 
190 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
191 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
192 
193 	dev = &rte_eventdevs[dev_id];
194 	eth_dev = &rte_eth_devices[eth_port_id];
195 
196 	if (caps == NULL)
197 		return -EINVAL;
198 
199 	if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
200 		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
201 	else
202 		*caps = 0;
203 
204 	return dev->dev_ops->eth_tx_adapter_caps_get ?
205 			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
206 								eth_dev,
207 								caps)
208 			: 0;
209 }
210 
211 static inline int
212 rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
213 {
214 	uint8_t old_nb_queues = dev->data->nb_queues;
215 	struct rte_event_queue_conf *queues_cfg;
216 	unsigned int i;
217 
218 	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
219 			 dev->data->dev_id);
220 
221 	/* First time configuration */
222 	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
223 		/* Allocate memory to store queue configuration */
224 		dev->data->queues_cfg = rte_zmalloc_socket(
225 				"eventdev->data->queues_cfg",
226 				sizeof(dev->data->queues_cfg[0]) * nb_queues,
227 				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
228 		if (dev->data->queues_cfg == NULL) {
229 			dev->data->nb_queues = 0;
230 			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
231 					" nb_queues %u", nb_queues);
232 			return -(ENOMEM);
233 		}
234 	/* Re-configure */
235 	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
236 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
237 
238 		for (i = nb_queues; i < old_nb_queues; i++)
239 			(*dev->dev_ops->queue_release)(dev, i);
240 
241 		/* Reallocate memory to store queue configuration */
242 		queues_cfg = dev->data->queues_cfg;
243 		queues_cfg = rte_realloc(queues_cfg,
244 				sizeof(queues_cfg[0]) * nb_queues,
245 				RTE_CACHE_LINE_SIZE);
246 		if (queues_cfg == NULL) {
247 			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
248 						" nb_queues %u", nb_queues);
249 			return -(ENOMEM);
250 		}
251 		dev->data->queues_cfg = queues_cfg;
252 
253 		if (nb_queues > old_nb_queues) {
254 			uint8_t new_qs = nb_queues - old_nb_queues;
255 
256 			memset(queues_cfg + old_nb_queues, 0,
257 				sizeof(queues_cfg[0]) * new_qs);
258 		}
259 	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
260 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
261 
262 		for (i = nb_queues; i < old_nb_queues; i++)
263 			(*dev->dev_ops->queue_release)(dev, i);
264 	}
265 
266 	dev->data->nb_queues = nb_queues;
267 	return 0;
268 }
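
/*
 * rte_event_dev_queue_config() above covers three cases: first-time
 * allocation of the per-queue config array, reconfiguration (queues beyond
 * the new count are released and newly added slots zeroed), and teardown
 * when nb_queues is 0. rte_event_dev_port_config() below follows the same
 * pattern for ports and additionally manages the per-port links_map used by
 * rte_event_port_link()/rte_event_port_unlink().
 */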
269 
270 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
271 
272 static inline int
273 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
274 {
275 	uint8_t old_nb_ports = dev->data->nb_ports;
276 	void **ports;
277 	uint16_t *links_map;
278 	struct rte_event_port_conf *ports_cfg;
279 	unsigned int i;
280 
281 	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
282 			 dev->data->dev_id);
283 
284 	/* First time configuration */
285 	if (dev->data->ports == NULL && nb_ports != 0) {
286 		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
287 				sizeof(dev->data->ports[0]) * nb_ports,
288 				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
289 		if (dev->data->ports == NULL) {
290 			dev->data->nb_ports = 0;
291 			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
292 					" nb_ports %u", nb_ports);
293 			return -(ENOMEM);
294 		}
295 
296 		/* Allocate memory to store port configurations */
297 		dev->data->ports_cfg =
298 			rte_zmalloc_socket("eventdev->ports_cfg",
299 			sizeof(dev->data->ports_cfg[0]) * nb_ports,
300 			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
301 		if (dev->data->ports_cfg == NULL) {
302 			dev->data->nb_ports = 0;
303 			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
304 					" nb_ports %u", nb_ports);
305 			return -(ENOMEM);
306 		}
307 
308 		/* Allocate memory to store queue to port link connection */
309 		dev->data->links_map =
310 			rte_zmalloc_socket("eventdev->links_map",
311 			sizeof(dev->data->links_map[0]) * nb_ports *
312 			RTE_EVENT_MAX_QUEUES_PER_DEV,
313 			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
314 		if (dev->data->links_map == NULL) {
315 			dev->data->nb_ports = 0;
316 			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
317 					" nb_ports %u", nb_ports);
318 			return -(ENOMEM);
319 		}
320 		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
321 			dev->data->links_map[i] =
322 				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
323 	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
324 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
325 
326 		ports = dev->data->ports;
327 		ports_cfg = dev->data->ports_cfg;
328 		links_map = dev->data->links_map;
329 
330 		for (i = nb_ports; i < old_nb_ports; i++)
331 			(*dev->dev_ops->port_release)(ports[i]);
332 
333 		/* Realloc memory for ports */
334 		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
335 				RTE_CACHE_LINE_SIZE);
336 		if (ports == NULL) {
337 			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
338 						" nb_ports %u", nb_ports);
339 			return -(ENOMEM);
340 		}
341 
342 		/* Realloc memory for ports_cfg */
343 		ports_cfg = rte_realloc(ports_cfg,
344 			sizeof(ports_cfg[0]) * nb_ports,
345 			RTE_CACHE_LINE_SIZE);
346 		if (ports_cfg == NULL) {
347 			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
348 						" nb_ports %u", nb_ports);
349 			return -(ENOMEM);
350 		}
351 
352 		/* Realloc memory to store queue to port link connection */
353 		links_map = rte_realloc(links_map,
354 			sizeof(dev->data->links_map[0]) * nb_ports *
355 			RTE_EVENT_MAX_QUEUES_PER_DEV,
356 			RTE_CACHE_LINE_SIZE);
357 		if (links_map == NULL) {
358 			dev->data->nb_ports = 0;
359 			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
360 					" nb_ports %u", nb_ports);
361 			return -(ENOMEM);
362 		}
363 
364 		if (nb_ports > old_nb_ports) {
365 			uint8_t new_ps = nb_ports - old_nb_ports;
366 			unsigned int old_links_map_end =
367 				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
368 			unsigned int links_map_end =
369 				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
370 
371 			memset(ports + old_nb_ports, 0,
372 				sizeof(ports[0]) * new_ps);
373 			memset(ports_cfg + old_nb_ports, 0,
374 				sizeof(ports_cfg[0]) * new_ps);
375 			for (i = old_links_map_end; i < links_map_end; i++)
376 				links_map[i] =
377 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
378 		}
379 
380 		dev->data->ports = ports;
381 		dev->data->ports_cfg = ports_cfg;
382 		dev->data->links_map = links_map;
383 	} else if (dev->data->ports != NULL && nb_ports == 0) {
384 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
385 
386 		ports = dev->data->ports;
387 		for (i = nb_ports; i < old_nb_ports; i++)
388 			(*dev->dev_ops->port_release)(ports[i]);
389 	}
390 
391 	dev->data->nb_ports = nb_ports;
392 	return 0;
393 }
394 
395 int
396 rte_event_dev_configure(uint8_t dev_id,
397 			const struct rte_event_dev_config *dev_conf)
398 {
399 	struct rte_eventdev *dev;
400 	struct rte_event_dev_info info;
401 	int diag;
402 
403 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
404 	dev = &rte_eventdevs[dev_id];
405 
406 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
407 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
408 
409 	if (dev->data->dev_started) {
410 		RTE_EDEV_LOG_ERR(
411 		    "device %d must be stopped to allow configuration", dev_id);
412 		return -EBUSY;
413 	}
414 
415 	if (dev_conf == NULL)
416 		return -EINVAL;
417 
418 	(*dev->dev_ops->dev_infos_get)(dev, &info);
419 
420 	/* Check that dequeue_timeout_ns is within the supported range */
421 	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
422 		if (dev_conf->dequeue_timeout_ns &&
423 		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
424 			|| dev_conf->dequeue_timeout_ns >
425 				 info.max_dequeue_timeout_ns)) {
426 			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
427 			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
428 			dev_id, dev_conf->dequeue_timeout_ns,
429 			info.min_dequeue_timeout_ns,
430 			info.max_dequeue_timeout_ns);
431 			return -EINVAL;
432 		}
433 	}
434 
435 	/* Check that nb_events_limit is within the supported range */
436 	if (dev_conf->nb_events_limit > info.max_num_events) {
437 		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
438 		dev_id, dev_conf->nb_events_limit, info.max_num_events);
439 		return -EINVAL;
440 	}
441 
442 	/* Check that nb_event_queues is within the supported range */
443 	if (!dev_conf->nb_event_queues) {
444 		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
445 					dev_id);
446 		return -EINVAL;
447 	}
448 	if (dev_conf->nb_event_queues > info.max_event_queues +
449 			info.max_single_link_event_port_queue_pairs) {
450 		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
451 				 dev_id, dev_conf->nb_event_queues,
452 				 info.max_event_queues,
453 				 info.max_single_link_event_port_queue_pairs);
454 		return -EINVAL;
455 	}
456 	if (dev_conf->nb_event_queues -
457 			dev_conf->nb_single_link_event_port_queues >
458 			info.max_event_queues) {
459 		RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
460 				 dev_id, dev_conf->nb_event_queues,
461 				 dev_conf->nb_single_link_event_port_queues,
462 				 info.max_event_queues);
463 		return -EINVAL;
464 	}
465 	if (dev_conf->nb_single_link_event_port_queues >
466 			dev_conf->nb_event_queues) {
467 		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
468 				 dev_id,
469 				 dev_conf->nb_single_link_event_port_queues,
470 				 dev_conf->nb_event_queues);
471 		return -EINVAL;
472 	}
473 
474 	/* Check that nb_event_ports is within the supported range */
475 	if (!dev_conf->nb_event_ports) {
476 		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
477 		return -EINVAL;
478 	}
479 	if (dev_conf->nb_event_ports > info.max_event_ports +
480 			info.max_single_link_event_port_queue_pairs) {
481 		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
482 				 dev_id, dev_conf->nb_event_ports,
483 				 info.max_event_ports,
484 				 info.max_single_link_event_port_queue_pairs);
485 		return -EINVAL;
486 	}
487 	if (dev_conf->nb_event_ports -
488 			dev_conf->nb_single_link_event_port_queues
489 			> info.max_event_ports) {
490 		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
491 				 dev_id, dev_conf->nb_event_ports,
492 				 dev_conf->nb_single_link_event_port_queues,
493 				 info.max_event_ports);
494 		return -EINVAL;
495 	}
496 
497 	if (dev_conf->nb_single_link_event_port_queues >
498 	    dev_conf->nb_event_ports) {
499 		RTE_EDEV_LOG_ERR(
500 				 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
501 				 dev_id,
502 				 dev_conf->nb_single_link_event_port_queues,
503 				 dev_conf->nb_event_ports);
504 		return -EINVAL;
505 	}
506 
507 	/* Check that nb_event_queue_flows is within the supported range */
508 	if (!dev_conf->nb_event_queue_flows) {
509 		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
510 		return -EINVAL;
511 	}
512 	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
513 		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
514 		dev_id, dev_conf->nb_event_queue_flows,
515 		info.max_event_queue_flows);
516 		return -EINVAL;
517 	}
518 
519 	/* Check that nb_event_port_dequeue_depth is within the supported range */
520 	if (!dev_conf->nb_event_port_dequeue_depth) {
521 		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
522 					dev_id);
523 		return -EINVAL;
524 	}
525 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
526 		 (dev_conf->nb_event_port_dequeue_depth >
527 			 info.max_event_port_dequeue_depth)) {
528 		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
529 		dev_id, dev_conf->nb_event_port_dequeue_depth,
530 		info.max_event_port_dequeue_depth);
531 		return -EINVAL;
532 	}
533 
534 	/* Check that nb_event_port_enqueue_depth is within the supported range */
535 	if (!dev_conf->nb_event_port_enqueue_depth) {
536 		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
537 					dev_id);
538 		return -EINVAL;
539 	}
540 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
541 		(dev_conf->nb_event_port_enqueue_depth >
542 			 info.max_event_port_enqueue_depth)) {
543 		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
544 		dev_id, dev_conf->nb_event_port_enqueue_depth,
545 		info.max_event_port_enqueue_depth);
546 		return -EINVAL;
547 	}
548 
549 	/* Copy the dev_conf parameter into the dev structure */
550 	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
551 
552 	/* Setup new number of queues and reconfigure device. */
553 	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
554 	if (diag != 0) {
555 		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
556 				dev_id, diag);
557 		return diag;
558 	}
559 
560 	/* Setup new number of ports and reconfigure device. */
561 	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
562 	if (diag != 0) {
563 		rte_event_dev_queue_config(dev, 0);
564 		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
565 				dev_id, diag);
566 		return diag;
567 	}
568 
569 	/* Configure the device */
570 	diag = (*dev->dev_ops->dev_configure)(dev);
571 	if (diag != 0) {
572 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
573 		rte_event_dev_queue_config(dev, 0);
574 		rte_event_dev_port_config(dev, 0);
575 	}
576 
577 	dev->data->event_dev_cap = info.event_dev_cap;
578 	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
579 	return diag;
580 }
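
/*
 * Illustrative usage of the configure path implemented above. This is a
 * minimal sketch, not part of the library: the RTE_EVENTDEV_USAGE_EXAMPLE
 * guard is never defined, and the queue/port counts are placeholders. It
 * shows the typical flow of deriving a configuration from
 * rte_event_dev_info_get() before calling rte_event_dev_configure().
 */
#ifdef RTE_EVENTDEV_USAGE_EXAMPLE
static int
example_eventdev_configure(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg = {0};

	if (rte_event_dev_info_get(dev_id, &info) < 0)
		return -1;

	cfg.nb_event_queues = 2;
	cfg.nb_event_ports = 2;
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

	return rte_event_dev_configure(dev_id, &cfg);
}
#endif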
581 
582 static inline int
583 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
584 {
585 	if (queue_id < dev->data->nb_queues && queue_id <
586 				RTE_EVENT_MAX_QUEUES_PER_DEV)
587 		return 1;
588 	else
589 		return 0;
590 }
591 
592 int
593 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
594 				 struct rte_event_queue_conf *queue_conf)
595 {
596 	struct rte_eventdev *dev;
597 
598 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
599 	dev = &rte_eventdevs[dev_id];
600 
601 	if (queue_conf == NULL)
602 		return -EINVAL;
603 
604 	if (!is_valid_queue(dev, queue_id)) {
605 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
606 		return -EINVAL;
607 	}
608 
609 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
610 	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
611 	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
612 	return 0;
613 }
614 
615 static inline int
616 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
617 {
618 	if (queue_conf &&
619 		!(queue_conf->event_queue_cfg &
620 		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
621 		((queue_conf->event_queue_cfg &
622 			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
623 		(queue_conf->schedule_type
624 			== RTE_SCHED_TYPE_ATOMIC)
625 		))
626 		return 1;
627 	else
628 		return 0;
629 }
630 
631 static inline int
632 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
633 {
634 	if (queue_conf &&
635 		!(queue_conf->event_queue_cfg &
636 		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
637 		((queue_conf->event_queue_cfg &
638 			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
639 		(queue_conf->schedule_type
640 			== RTE_SCHED_TYPE_ORDERED)
641 		))
642 		return 1;
643 	else
644 		return 0;
645 }
646 
647 
648 int
649 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
650 		      const struct rte_event_queue_conf *queue_conf)
651 {
652 	struct rte_eventdev *dev;
653 	struct rte_event_queue_conf def_conf;
654 
655 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
656 	dev = &rte_eventdevs[dev_id];
657 
658 	if (!is_valid_queue(dev, queue_id)) {
659 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
660 		return -EINVAL;
661 	}
662 
663 	/* Check nb_atomic_flows limit */
664 	if (is_valid_atomic_queue_conf(queue_conf)) {
665 		if (queue_conf->nb_atomic_flows == 0 ||
666 		    queue_conf->nb_atomic_flows >
667 			dev->data->dev_conf.nb_event_queue_flows) {
668 			RTE_EDEV_LOG_ERR(
669 		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
670 			dev_id, queue_id, queue_conf->nb_atomic_flows,
671 			dev->data->dev_conf.nb_event_queue_flows);
672 			return -EINVAL;
673 		}
674 	}
675 
676 	/* Check nb_atomic_order_sequences limit */
677 	if (is_valid_ordered_queue_conf(queue_conf)) {
678 		if (queue_conf->nb_atomic_order_sequences == 0 ||
679 		    queue_conf->nb_atomic_order_sequences >
680 			dev->data->dev_conf.nb_event_queue_flows) {
681 			RTE_EDEV_LOG_ERR(
682 		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
683 			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
684 			dev->data->dev_conf.nb_event_queue_flows);
685 			return -EINVAL;
686 		}
687 	}
688 
689 	if (dev->data->dev_started) {
690 		RTE_EDEV_LOG_ERR(
691 		    "device %d must be stopped to allow queue setup", dev_id);
692 		return -EBUSY;
693 	}
694 
695 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
696 
697 	if (queue_conf == NULL) {
698 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
699 					-ENOTSUP);
700 		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
701 		queue_conf = &def_conf;
702 	}
703 
704 	dev->data->queues_cfg[queue_id] = *queue_conf;
705 	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
706 	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
707 }
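
/*
 * Note: passing a NULL queue_conf to rte_event_queue_setup() is valid; the
 * queue is then configured with the PMD defaults obtained from
 * queue_def_conf, i.e. the same values rte_event_queue_default_conf_get()
 * reports.
 */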
708 
709 static inline int
710 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
711 {
712 	if (port_id < dev->data->nb_ports)
713 		return 1;
714 	else
715 		return 0;
716 }
717 
718 int
719 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
720 				 struct rte_event_port_conf *port_conf)
721 {
722 	struct rte_eventdev *dev;
723 
724 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
725 	dev = &rte_eventdevs[dev_id];
726 
727 	if (port_conf == NULL)
728 		return -EINVAL;
729 
730 	if (!is_valid_port(dev, port_id)) {
731 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
732 		return -EINVAL;
733 	}
734 
735 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
736 	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
737 	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
738 	return 0;
739 }
740 
741 int
742 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
743 		     const struct rte_event_port_conf *port_conf)
744 {
745 	struct rte_eventdev *dev;
746 	struct rte_event_port_conf def_conf;
747 	int diag;
748 
749 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
750 	dev = &rte_eventdevs[dev_id];
751 
752 	if (!is_valid_port(dev, port_id)) {
753 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
754 		return -EINVAL;
755 	}
756 
757 	/* Check new_event_threshold limit */
758 	if ((port_conf && !port_conf->new_event_threshold) ||
759 			(port_conf && port_conf->new_event_threshold >
760 				 dev->data->dev_conf.nb_events_limit)) {
761 		RTE_EDEV_LOG_ERR(
762 		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
763 			dev_id, port_id, port_conf->new_event_threshold,
764 			dev->data->dev_conf.nb_events_limit);
765 		return -EINVAL;
766 	}
767 
768 	/* Check dequeue_depth limit */
769 	if ((port_conf && !port_conf->dequeue_depth) ||
770 			(port_conf && port_conf->dequeue_depth >
771 		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
772 		RTE_EDEV_LOG_ERR(
773 		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
774 			dev_id, port_id, port_conf->dequeue_depth,
775 			dev->data->dev_conf.nb_event_port_dequeue_depth);
776 		return -EINVAL;
777 	}
778 
779 	/* Check enqueue_depth limit */
780 	if ((port_conf && !port_conf->enqueue_depth) ||
781 			(port_conf && port_conf->enqueue_depth >
782 		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
783 		RTE_EDEV_LOG_ERR(
784 		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
785 			dev_id, port_id, port_conf->enqueue_depth,
786 			dev->data->dev_conf.nb_event_port_enqueue_depth);
787 		return -EINVAL;
788 	}
789 
790 	if (port_conf &&
791 	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
792 	    !(dev->data->event_dev_cap &
793 	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
794 		RTE_EDEV_LOG_ERR(
795 		   "dev%d port%d Implicit release disable not supported",
796 			dev_id, port_id);
797 		return -EINVAL;
798 	}
799 
800 	if (dev->data->dev_started) {
801 		RTE_EDEV_LOG_ERR(
802 		    "device %d must be stopped to allow port setup", dev_id);
803 		return -EBUSY;
804 	}
805 
806 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
807 
808 	if (port_conf == NULL) {
809 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
810 					-ENOTSUP);
811 		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
812 		port_conf = &def_conf;
813 	}
814 
815 	dev->data->ports_cfg[port_id] = *port_conf;
816 
817 	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
818 
819 	/* Unlink all the queues from this port (default state after setup) */
820 	if (!diag)
821 		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
822 
823 	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
824 	if (diag < 0)
825 		return diag;
826 
827 	return 0;
828 }
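
/*
 * Note: a freshly set up port has no queue links (see the explicit
 * rte_event_port_unlink() call above); the application is expected to
 * establish links with rte_event_port_link() before the port can dequeue
 * scheduled events.
 */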
829 
830 int
831 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
832 		       uint32_t *attr_value)
833 {
834 	struct rte_eventdev *dev;
835 
836 	if (!attr_value)
837 		return -EINVAL;
838 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
839 	dev = &rte_eventdevs[dev_id];
840 
841 	switch (attr_id) {
842 	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
843 		*attr_value = dev->data->nb_ports;
844 		break;
845 	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
846 		*attr_value = dev->data->nb_queues;
847 		break;
848 	case RTE_EVENT_DEV_ATTR_STARTED:
849 		*attr_value = dev->data->dev_started;
850 		break;
851 	default:
852 		return -EINVAL;
853 	}
854 
855 	return 0;
856 }
857 
858 int
859 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
860 			uint32_t *attr_value)
861 {
862 	struct rte_eventdev *dev;
863 
864 	if (!attr_value)
865 		return -EINVAL;
866 
867 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
868 	dev = &rte_eventdevs[dev_id];
869 	if (!is_valid_port(dev, port_id)) {
870 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
871 		return -EINVAL;
872 	}
873 
874 	switch (attr_id) {
875 	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
876 		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
877 		break;
878 	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
879 		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
880 		break;
881 	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
882 		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
883 		break;
884 	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
885 	{
886 		uint32_t config;
887 
888 		config = dev->data->ports_cfg[port_id].event_port_cfg;
889 		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
890 		break;
891 	}
892 	default:
893 		return -EINVAL;
894 	}
895 	return 0;
896 }
897 
898 int
899 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
900 			uint32_t *attr_value)
901 {
902 	struct rte_event_queue_conf *conf;
903 	struct rte_eventdev *dev;
904 
905 	if (!attr_value)
906 		return -EINVAL;
907 
908 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
909 	dev = &rte_eventdevs[dev_id];
910 	if (!is_valid_queue(dev, queue_id)) {
911 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
912 		return -EINVAL;
913 	}
914 
915 	conf = &dev->data->queues_cfg[queue_id];
916 
917 	switch (attr_id) {
918 	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
919 		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
920 		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
921 			*attr_value = conf->priority;
922 		break;
923 	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
924 		*attr_value = conf->nb_atomic_flows;
925 		break;
926 	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
927 		*attr_value = conf->nb_atomic_order_sequences;
928 		break;
929 	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
930 		*attr_value = conf->event_queue_cfg;
931 		break;
932 	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
933 		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
934 			return -EOVERFLOW;
935 
936 		*attr_value = conf->schedule_type;
937 		break;
938 	default:
939 		return -EINVAL;
940 	}
941 	return 0;
942 }
943 
944 int
945 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
946 		    const uint8_t queues[], const uint8_t priorities[],
947 		    uint16_t nb_links)
948 {
949 	struct rte_eventdev *dev;
950 	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
951 	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
952 	uint16_t *links_map;
953 	int i, diag;
954 
955 	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
956 	dev = &rte_eventdevs[dev_id];
957 
958 	if (*dev->dev_ops->port_link == NULL) {
959 		RTE_EDEV_LOG_ERR("Function not supported");
960 		rte_errno = ENOTSUP;
961 		return 0;
962 	}
963 
964 	if (!is_valid_port(dev, port_id)) {
965 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
966 		rte_errno = EINVAL;
967 		return 0;
968 	}
969 
970 	if (queues == NULL) {
971 		for (i = 0; i < dev->data->nb_queues; i++)
972 			queues_list[i] = i;
973 
974 		queues = queues_list;
975 		nb_links = dev->data->nb_queues;
976 	}
977 
978 	if (priorities == NULL) {
979 		for (i = 0; i < nb_links; i++)
980 			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
981 
982 		priorities = priorities_list;
983 	}
984 
985 	for (i = 0; i < nb_links; i++)
986 		if (queues[i] >= dev->data->nb_queues) {
987 			rte_errno = EINVAL;
988 			return 0;
989 		}
990 
991 	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
992 						queues, priorities, nb_links);
993 	if (diag < 0)
994 		return diag;
995 
996 	links_map = dev->data->links_map;
997 	/* Point links_map to this port specific area */
998 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
999 	for (i = 0; i < diag; i++)
1000 		links_map[queues[i]] = (uint8_t)priorities[i];
1001 
1002 	rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
1003 	return diag;
1004 }
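
/*
 * Illustrative call, a sketch rather than part of this file: as handled
 * above, passing NULL for both the queues and priorities arrays links the
 * port to every configured queue at RTE_EVENT_DEV_PRIORITY_NORMAL:
 *
 *	int nb = rte_event_port_link(dev_id, port_id, NULL, NULL, 0);
 *	if (nb < 0)
 *		... the PMD rejected the links ...
 *
 * A non-negative return value is the number of links actually established;
 * if it is lower than requested, rte_errno indicates the reason.
 */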
1005 
1006 int
1007 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1008 		      uint8_t queues[], uint16_t nb_unlinks)
1009 {
1010 	struct rte_eventdev *dev;
1011 	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1012 	int i, diag, j;
1013 	uint16_t *links_map;
1014 
1015 	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
1016 	dev = &rte_eventdevs[dev_id];
1017 
1018 	if (*dev->dev_ops->port_unlink == NULL) {
1019 		RTE_EDEV_LOG_ERR("Function not supported");
1020 		rte_errno = ENOTSUP;
1021 		return 0;
1022 	}
1023 
1024 	if (!is_valid_port(dev, port_id)) {
1025 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1026 		rte_errno = EINVAL;
1027 		return 0;
1028 	}
1029 
1030 	links_map = dev->data->links_map;
1031 	/* Point links_map to this port specific area */
1032 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1033 
1034 	if (queues == NULL) {
1035 		j = 0;
1036 		for (i = 0; i < dev->data->nb_queues; i++) {
1037 			if (links_map[i] !=
1038 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1039 				all_queues[j] = i;
1040 				j++;
1041 			}
1042 		}
1043 		queues = all_queues;
1044 	} else {
1045 		for (j = 0; j < nb_unlinks; j++) {
1046 			if (links_map[queues[j]] ==
1047 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
1048 				break;
1049 		}
1050 	}
1051 
1052 	nb_unlinks = j;
1053 	for (i = 0; i < nb_unlinks; i++)
1054 		if (queues[i] >= dev->data->nb_queues) {
1055 			rte_errno = EINVAL;
1056 			return 0;
1057 		}
1058 
1059 	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
1060 					queues, nb_unlinks);
1061 
1062 	if (diag < 0)
1063 		return diag;
1064 
1065 	for (i = 0; i < diag; i++)
1066 		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1067 
1068 	rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
1069 	return diag;
1070 }
1071 
1072 int
1073 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
1074 {
1075 	struct rte_eventdev *dev;
1076 
1077 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1078 	dev = &rte_eventdevs[dev_id];
1079 	if (!is_valid_port(dev, port_id)) {
1080 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1081 		return -EINVAL;
1082 	}
1083 
1084 	/* Return 0 if the PMD does not implement unlinks in progress.
1085 	 * This allows PMDs which handle unlink synchronously to not implement
1086 	 * this function at all.
1087 	 */
1088 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
1089 
1090 	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1091 			dev->data->ports[port_id]);
1092 }
1093 
1094 int
1095 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1096 			 uint8_t queues[], uint8_t priorities[])
1097 {
1098 	struct rte_eventdev *dev;
1099 	uint16_t *links_map;
1100 	int i, count = 0;
1101 
1102 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1103 	dev = &rte_eventdevs[dev_id];
1104 	if (!is_valid_port(dev, port_id)) {
1105 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1106 		return -EINVAL;
1107 	}
1108 
1109 	links_map = dev->data->links_map;
1110 	/* Point links_map to this port specific area */
1111 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1112 	for (i = 0; i < dev->data->nb_queues; i++) {
1113 		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1114 			queues[count] = i;
1115 			priorities[count] = (uint8_t)links_map[i];
1116 			++count;
1117 		}
1118 	}
1119 	return count;
1120 }
1121 
1122 int
1123 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1124 				 uint64_t *timeout_ticks)
1125 {
1126 	struct rte_eventdev *dev;
1127 
1128 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1129 	dev = &rte_eventdevs[dev_id];
1130 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1131 
1132 	if (timeout_ticks == NULL)
1133 		return -EINVAL;
1134 
1135 	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1136 }
1137 
1138 int
1139 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1140 {
1141 	struct rte_eventdev *dev;
1142 
1143 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1144 	dev = &rte_eventdevs[dev_id];
1145 
1146 	if (service_id == NULL)
1147 		return -EINVAL;
1148 
1149 	if (dev->data->service_inited)
1150 		*service_id = dev->data->service_id;
1151 
1152 	return dev->data->service_inited ? 0 : -ESRCH;
1153 }
1154 
1155 int
1156 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1157 {
1158 	struct rte_eventdev *dev;
1159 
1160 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1161 	dev = &rte_eventdevs[dev_id];
1162 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1163 	if (f == NULL)
1164 		return -EINVAL;
1165 
1166 	(*dev->dev_ops->dump)(dev, f);
1167 	return 0;
1168 
1169 }
1170 
1171 static int
1172 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1173 		uint8_t queue_port_id)
1174 {
1175 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1176 	if (dev->dev_ops->xstats_get_names != NULL)
1177 		return (*dev->dev_ops->xstats_get_names)(dev, mode,
1178 							queue_port_id,
1179 							NULL, NULL, 0);
1180 	return 0;
1181 }
1182 
1183 int
1184 rte_event_dev_xstats_names_get(uint8_t dev_id,
1185 		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1186 		struct rte_event_dev_xstats_name *xstats_names,
1187 		unsigned int *ids, unsigned int size)
1188 {
1189 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1190 	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1191 							  queue_port_id);
1192 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
1193 			(int)size < cnt_expected_entries)
1194 		return cnt_expected_entries;
1195 
1196 	/* dev_id checked above */
1197 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1198 
1199 	if (dev->dev_ops->xstats_get_names != NULL)
1200 		return (*dev->dev_ops->xstats_get_names)(dev, mode,
1201 				queue_port_id, xstats_names, ids, size);
1202 
1203 	return -ENOTSUP;
1204 }
1205 
1206 /* retrieve eventdev extended statistics */
1207 int
1208 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1209 		uint8_t queue_port_id, const unsigned int ids[],
1210 		uint64_t values[], unsigned int n)
1211 {
1212 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1213 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1214 
1215 	/* implemented by the driver */
1216 	if (dev->dev_ops->xstats_get != NULL)
1217 		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1218 				ids, values, n);
1219 	return -ENOTSUP;
1220 }
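
/*
 * The xstats API above is normally used in two passes: call
 * rte_event_dev_xstats_names_get() with a NULL or undersized buffer to learn
 * the number of statistics, allocate the name/id/value arrays, then fetch
 * the names and values. eventdev_build_telemetry_data() further below in
 * this file follows exactly that pattern.
 */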
1221 
1222 uint64_t
1223 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1224 		unsigned int *id)
1225 {
1226 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1227 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1228 	unsigned int temp = -1;
1229 
1230 	if (id != NULL)
1231 		*id = (unsigned int)-1;
1232 	else
1233 		id = &temp; /* ensure driver never gets a NULL value */
1234 
1235 	/* implemented by driver */
1236 	if (dev->dev_ops->xstats_get_by_name != NULL)
1237 		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1238 	return -ENOTSUP;
1239 }
1240 
1241 int rte_event_dev_xstats_reset(uint8_t dev_id,
1242 		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1243 		const uint32_t ids[], uint32_t nb_ids)
1244 {
1245 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1246 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1247 
1248 	if (dev->dev_ops->xstats_reset != NULL)
1249 		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1250 							ids, nb_ids);
1251 	return -ENOTSUP;
1252 }
1253 
1254 int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
1255 
1256 int rte_event_dev_selftest(uint8_t dev_id)
1257 {
1258 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1259 	static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
1260 		.name = "rte_event_pmd_selftest_seqn_dynfield",
1261 		.size = sizeof(rte_event_pmd_selftest_seqn_t),
1262 		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
1263 	};
1264 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1265 
1266 	if (dev->dev_ops->dev_selftest != NULL) {
1267 		rte_event_pmd_selftest_seqn_dynfield_offset =
1268 			rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
1269 		if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
1270 			return -ENOMEM;
1271 		return (*dev->dev_ops->dev_selftest)();
1272 	}
1273 	return -ENOTSUP;
1274 }
1275 
1276 struct rte_mempool *
1277 rte_event_vector_pool_create(const char *name, unsigned int n,
1278 			     unsigned int cache_size, uint16_t nb_elem,
1279 			     int socket_id)
1280 {
1281 	const char *mp_ops_name;
1282 	struct rte_mempool *mp;
1283 	unsigned int elt_sz;
1284 	int ret;
1285 
1286 	if (!nb_elem) {
1287 		RTE_LOG(ERR, EVENTDEV,
1288 			"Invalid number of elements=%d requested\n", nb_elem);
1289 		rte_errno = EINVAL;
1290 		return NULL;
1291 	}
1292 
1293 	elt_sz =
1294 		sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
1295 	mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
1296 				      0);
1297 	if (mp == NULL)
1298 		return NULL;
1299 
1300 	mp_ops_name = rte_mbuf_best_mempool_ops();
1301 	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
1302 	if (ret != 0) {
1303 		RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
1304 		goto err;
1305 	}
1306 
1307 	ret = rte_mempool_populate_default(mp);
1308 	if (ret < 0)
1309 		goto err;
1310 
1311 	return mp;
1312 err:
1313 	rte_mempool_free(mp);
1314 	rte_errno = -ret;
1315 	return NULL;
1316 }
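
/*
 * Illustrative usage, a sketch rather than part of this file: creating a
 * pool of 4096 event vectors, each able to carry up to 64 object pointers,
 * on the caller's socket (the name and sizing below are placeholders):
 *
 *	struct rte_mempool *vp;
 *
 *	vp = rte_event_vector_pool_create("ev_vector_pool", 4096, 64, 64,
 *					  rte_socket_id());
 *	if (vp == NULL)
 *		... rte_errno holds the failure reason ...
 */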
1317 
1318 int
1319 rte_event_dev_start(uint8_t dev_id)
1320 {
1321 	struct rte_eventdev *dev;
1322 	int diag;
1323 
1324 	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1325 
1326 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1327 	dev = &rte_eventdevs[dev_id];
1328 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1329 
1330 	if (dev->data->dev_started != 0) {
1331 		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1332 			dev_id);
1333 		return 0;
1334 	}
1335 
1336 	diag = (*dev->dev_ops->dev_start)(dev);
1337 	rte_eventdev_trace_start(dev_id, diag);
1338 	if (diag == 0)
1339 		dev->data->dev_started = 1;
1340 	else
1341 		return diag;
1342 
1343 	return 0;
1344 }
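
/*
 * Note: starting a device that is already started is not treated as an
 * error above; the condition is logged and 0 is returned, so callers do not
 * need to track the start state themselves.
 */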
1345 
1346 int
1347 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1348 		eventdev_stop_flush_t callback, void *userdata)
1349 {
1350 	struct rte_eventdev *dev;
1351 
1352 	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1353 
1354 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1355 	dev = &rte_eventdevs[dev_id];
1356 
1357 	dev->dev_ops->dev_stop_flush = callback;
1358 	dev->data->dev_stop_flush_arg = userdata;
1359 
1360 	return 0;
1361 }
1362 
1363 void
1364 rte_event_dev_stop(uint8_t dev_id)
1365 {
1366 	struct rte_eventdev *dev;
1367 
1368 	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1369 
1370 	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1371 	dev = &rte_eventdevs[dev_id];
1372 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1373 
1374 	if (dev->data->dev_started == 0) {
1375 		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1376 			dev_id);
1377 		return;
1378 	}
1379 
1380 	dev->data->dev_started = 0;
1381 	(*dev->dev_ops->dev_stop)(dev);
1382 	rte_eventdev_trace_stop(dev_id);
1383 }
1384 
1385 int
1386 rte_event_dev_close(uint8_t dev_id)
1387 {
1388 	struct rte_eventdev *dev;
1389 
1390 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1391 	dev = &rte_eventdevs[dev_id];
1392 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1393 
1394 	/* Device must be stopped before it can be closed */
1395 	if (dev->data->dev_started == 1) {
1396 		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1397 				dev_id);
1398 		return -EBUSY;
1399 	}
1400 
1401 	rte_eventdev_trace_close(dev_id);
1402 	return (*dev->dev_ops->dev_close)(dev);
1403 }
1404 
1405 static inline int
1406 rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1407 		int socket_id)
1408 {
1409 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1410 	const struct rte_memzone *mz;
1411 	int n;
1412 
1413 	/* Generate memzone name */
1414 	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1415 	if (n >= (int)sizeof(mz_name))
1416 		return -EINVAL;
1417 
1418 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1419 		mz = rte_memzone_reserve(mz_name,
1420 				sizeof(struct rte_eventdev_data),
1421 				socket_id, 0);
1422 	} else
1423 		mz = rte_memzone_lookup(mz_name);
1424 
1425 	if (mz == NULL)
1426 		return -ENOMEM;
1427 
1428 	*data = mz->addr;
1429 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1430 		memset(*data, 0, sizeof(struct rte_eventdev_data));
1431 
1432 	return 0;
1433 }
1434 
1435 static inline uint8_t
1436 rte_eventdev_find_free_device_index(void)
1437 {
1438 	uint8_t dev_id;
1439 
1440 	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1441 		if (rte_eventdevs[dev_id].attached ==
1442 				RTE_EVENTDEV_DETACHED)
1443 			return dev_id;
1444 	}
1445 	return RTE_EVENT_MAX_DEVS;
1446 }
1447 
1448 static uint16_t
1449 rte_event_tx_adapter_enqueue(__rte_unused void *port,
1450 			__rte_unused struct rte_event ev[],
1451 			__rte_unused uint16_t nb_events)
1452 {
1453 	rte_errno = ENOTSUP;
1454 	return 0;
1455 }
1456 
1457 static uint16_t
1458 rte_event_crypto_adapter_enqueue(__rte_unused void *port,
1459 			__rte_unused struct rte_event ev[],
1460 			__rte_unused uint16_t nb_events)
1461 {
1462 	rte_errno = ENOTSUP;
1463 	return 0;
1464 }
1465 
1466 struct rte_eventdev *
1467 rte_event_pmd_allocate(const char *name, int socket_id)
1468 {
1469 	struct rte_eventdev *eventdev;
1470 	uint8_t dev_id;
1471 
1472 	if (rte_event_pmd_get_named_dev(name) != NULL) {
1473 		RTE_EDEV_LOG_ERR("Event device with name %s already "
1474 				"allocated!", name);
1475 		return NULL;
1476 	}
1477 
1478 	dev_id = rte_eventdev_find_free_device_index();
1479 	if (dev_id == RTE_EVENT_MAX_DEVS) {
1480 		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1481 		return NULL;
1482 	}
1483 
1484 	eventdev = &rte_eventdevs[dev_id];
1485 
1486 	eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
1487 	eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
1488 	eventdev->ca_enqueue = rte_event_crypto_adapter_enqueue;
1489 
1490 	if (eventdev->data == NULL) {
1491 		struct rte_eventdev_data *eventdev_data = NULL;
1492 
1493 		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
1494 				socket_id);
1495 
1496 		if (retval < 0 || eventdev_data == NULL)
1497 			return NULL;
1498 
1499 		eventdev->data = eventdev_data;
1500 
1501 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1502 
1503 			strlcpy(eventdev->data->name, name,
1504 				RTE_EVENTDEV_NAME_MAX_LEN);
1505 
1506 			eventdev->data->dev_id = dev_id;
1507 			eventdev->data->socket_id = socket_id;
1508 			eventdev->data->dev_started = 0;
1509 		}
1510 
1511 		eventdev->attached = RTE_EVENTDEV_ATTACHED;
1512 		eventdev_globals.nb_devs++;
1513 	}
1514 
1515 	return eventdev;
1516 }
1517 
1518 int
1519 rte_event_pmd_release(struct rte_eventdev *eventdev)
1520 {
1521 	int ret;
1522 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1523 	const struct rte_memzone *mz;
1524 
1525 	if (eventdev == NULL)
1526 		return -EINVAL;
1527 
1528 	eventdev->attached = RTE_EVENTDEV_DETACHED;
1529 	eventdev_globals.nb_devs--;
1530 
1531 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1532 		rte_free(eventdev->data->dev_private);
1533 
1534 		/* Generate memzone name */
1535 		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1536 				eventdev->data->dev_id);
1537 		if (ret >= (int)sizeof(mz_name))
1538 			return -EINVAL;
1539 
1540 		mz = rte_memzone_lookup(mz_name);
1541 		if (mz == NULL)
1542 			return -ENOMEM;
1543 
1544 		ret = rte_memzone_free(mz);
1545 		if (ret)
1546 			return ret;
1547 	}
1548 
1549 	eventdev->data = NULL;
1550 	return 0;
1551 }
1552 
1553 
1554 static int
1555 handle_dev_list(const char *cmd __rte_unused,
1556 		const char *params __rte_unused,
1557 		struct rte_tel_data *d)
1558 {
1559 	uint8_t dev_id;
1560 	int ndev = rte_event_dev_count();
1561 
1562 	if (ndev < 1)
1563 		return -1;
1564 
1565 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1566 	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1567 		if (rte_eventdevs[dev_id].attached ==
1568 				RTE_EVENTDEV_ATTACHED)
1569 			rte_tel_data_add_array_int(d, dev_id);
1570 	}
1571 
1572 	return 0;
1573 }
1574 
1575 static int
1576 handle_port_list(const char *cmd __rte_unused,
1577 		 const char *params,
1578 		 struct rte_tel_data *d)
1579 {
1580 	int i;
1581 	uint8_t dev_id;
1582 	struct rte_eventdev *dev;
1583 	char *end_param;
1584 
1585 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1586 		return -1;
1587 
1588 	dev_id = strtoul(params, &end_param, 10);
1589 	if (*end_param != '\0')
1590 		RTE_EDEV_LOG_DEBUG(
1591 			"Extra parameters passed to eventdev telemetry command, ignoring");
1592 
1593 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1594 	dev = &rte_eventdevs[dev_id];
1595 
1596 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1597 	for (i = 0; i < dev->data->nb_ports; i++)
1598 		rte_tel_data_add_array_int(d, i);
1599 
1600 	return 0;
1601 }
1602 
1603 static int
1604 handle_queue_list(const char *cmd __rte_unused,
1605 		  const char *params,
1606 		  struct rte_tel_data *d)
1607 {
1608 	int i;
1609 	uint8_t dev_id;
1610 	struct rte_eventdev *dev;
1611 	char *end_param;
1612 
1613 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1614 		return -1;
1615 
1616 	dev_id = strtoul(params, &end_param, 10);
1617 	if (*end_param != '\0')
1618 		RTE_EDEV_LOG_DEBUG(
1619 			"Extra parameters passed to eventdev telemetry command, ignoring");
1620 
1621 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1622 	dev = &rte_eventdevs[dev_id];
1623 
1624 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1625 	for (i = 0; i < dev->data->nb_queues; i++)
1626 		rte_tel_data_add_array_int(d, i);
1627 
1628 	return 0;
1629 }
1630 
1631 static int
1632 handle_queue_links(const char *cmd __rte_unused,
1633 		   const char *params,
1634 		   struct rte_tel_data *d)
1635 {
1636 	int i, ret, port_id = 0;
1637 	char *end_param;
1638 	uint8_t dev_id;
1639 	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1640 	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1641 	const char *p_param;
1642 
1643 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1644 		return -1;
1645 
1646 	/* Get dev ID from parameter string */
1647 	dev_id = strtoul(params, &end_param, 10);
1648 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1649 
1650 	p_param = strtok(end_param, ",");
1651 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1652 		return -1;
1653 
1654 	port_id = strtoul(p_param, &end_param, 10);
1655 	p_param = strtok(NULL, "\0");
1656 	if (p_param != NULL)
1657 		RTE_EDEV_LOG_DEBUG(
1658 			"Extra parameters passed to eventdev telemetry command, ignoring");
1659 
1660 	ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
1661 	if (ret < 0)
1662 		return -1;
1663 
1664 	rte_tel_data_start_dict(d);
1665 	for (i = 0; i < ret; i++) {
1666 		char qid_name[32];
1667 
1668 		snprintf(qid_name, 31, "qid_%u", queues[i]);
1669 		rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
1670 	}
1671 
1672 	return 0;
1673 }
1674 
1675 static int
1676 eventdev_build_telemetry_data(int dev_id,
1677 			      enum rte_event_dev_xstats_mode mode,
1678 			      int port_queue_id,
1679 			      struct rte_tel_data *d)
1680 {
1681 	struct rte_event_dev_xstats_name *xstat_names;
1682 	unsigned int *ids;
1683 	uint64_t *values;
1684 	int i, ret, num_xstats;
1685 
1686 	num_xstats = rte_event_dev_xstats_names_get(dev_id,
1687 						    mode,
1688 						    port_queue_id,
1689 						    NULL,
1690 						    NULL,
1691 						    0);
1692 
1693 	if (num_xstats < 0)
1694 		return -1;
1695 
1696 	/* use one malloc for names */
1697 	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
1698 			     * num_xstats);
1699 	if (xstat_names == NULL)
1700 		return -1;
1701 
1702 	ids = malloc((sizeof(unsigned int)) * num_xstats);
1703 	if (ids == NULL) {
1704 		free(xstat_names);
1705 		return -1;
1706 	}
1707 
1708 	values = malloc((sizeof(uint64_t)) * num_xstats);
1709 	if (values == NULL) {
1710 		free(xstat_names);
1711 		free(ids);
1712 		return -1;
1713 	}
1714 
1715 	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
1716 					     xstat_names, ids, num_xstats);
1717 	if (ret < 0 || ret > num_xstats) {
1718 		free(xstat_names);
1719 		free(ids);
1720 		free(values);
1721 		return -1;
1722 	}
1723 
1724 	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
1725 				       ids, values, num_xstats);
1726 	if (ret < 0 || ret > num_xstats) {
1727 		free(xstat_names);
1728 		free(ids);
1729 		free(values);
1730 		return -1;
1731 	}
1732 
1733 	rte_tel_data_start_dict(d);
1734 	for (i = 0; i < num_xstats; i++)
1735 		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
1736 					  values[i]);
1737 
1738 	free(xstat_names);
1739 	free(ids);
1740 	free(values);
1741 	return 0;
1742 }
1743 
1744 static int
1745 handle_dev_xstats(const char *cmd __rte_unused,
1746 		  const char *params,
1747 		  struct rte_tel_data *d)
1748 {
1749 	int dev_id;
1750 	enum rte_event_dev_xstats_mode mode;
1751 	char *end_param;
1752 
1753 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1754 		return -1;
1755 
1756 	/* Get dev ID from parameter string */
1757 	dev_id = strtoul(params, &end_param, 10);
1758 	if (*end_param != '\0')
1759 		RTE_EDEV_LOG_DEBUG(
1760 			"Extra parameters passed to eventdev telemetry command, ignoring");
1761 
1762 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1763 
1764 	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
1765 	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
1766 }
1767 
1768 static int
1769 handle_port_xstats(const char *cmd __rte_unused,
1770 		   const char *params,
1771 		   struct rte_tel_data *d)
1772 {
1773 	int dev_id;
1774 	int port_queue_id = 0;
1775 	enum rte_event_dev_xstats_mode mode;
1776 	char *end_param;
1777 	const char *p_param;
1778 
1779 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1780 		return -1;
1781 
1782 	/* Get dev ID from parameter string */
1783 	dev_id = strtoul(params, &end_param, 10);
1784 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1785 
1786 	p_param = strtok(end_param, ",");
1787 	mode = RTE_EVENT_DEV_XSTATS_PORT;
1788 
1789 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1790 		return -1;
1791 
1792 	port_queue_id = strtoul(p_param, &end_param, 10);
1793 
1794 	p_param = strtok(NULL, "\0");
1795 	if (p_param != NULL)
1796 		RTE_EDEV_LOG_DEBUG(
1797 			"Extra parameters passed to eventdev telemetry command, ignoring");
1798 
1799 	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1800 }
1801 
1802 static int
1803 handle_queue_xstats(const char *cmd __rte_unused,
1804 		    const char *params,
1805 		    struct rte_tel_data *d)
1806 {
1807 	int dev_id;
1808 	int port_queue_id = 0;
1809 	enum rte_event_dev_xstats_mode mode;
1810 	char *end_param;
1811 	const char *p_param;
1812 
1813 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1814 		return -1;
1815 
1816 	/* Get dev ID from parameter string */
1817 	dev_id = strtoul(params, &end_param, 10);
1818 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1819 
1820 	p_param = strtok(end_param, ",");
1821 	mode = RTE_EVENT_DEV_XSTATS_QUEUE;
1822 
1823 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1824 		return -1;
1825 
1826 	port_queue_id = strtoul(p_param, &end_param, 10);
1827 
1828 	p_param = strtok(NULL, "\0");
1829 	if (p_param != NULL)
1830 		RTE_EDEV_LOG_DEBUG(
1831 			"Extra parameters passed to eventdev telemetry command, ignoring");
1832 
1833 	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1834 }
1835 
1836 RTE_INIT(eventdev_init_telemetry)
1837 {
1838 	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
1839 			"Returns list of available eventdevs. Takes no parameters");
1840 	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
1841 			"Returns list of available ports. Parameter: DevID");
1842 	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
1843 			"Returns list of available queues. Parameter: DevID");
1844 
1845 	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
1846 			"Returns stats for an eventdev. Parameter: DevID");
1847 	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
1848 			"Returns stats for an eventdev port. Params: DevID,PortID");
1849 	rte_telemetry_register_cmd("/eventdev/queue_xstats",
1850 			handle_queue_xstats,
1851 			"Returns stats for an eventdev queue. Params: DevID,QueueID");
1852 	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
1853 			"Returns links for an eventdev port. Params: DevID,QueueID");
1854 }
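
/*
 * These endpoints are served over the standard DPDK telemetry socket; with
 * the usual usertools/dpdk-telemetry.py client one would typically query,
 * for example, "/eventdev/dev_list" or "/eventdev/port_xstats,0,1"
 * (device 0, port 1), matching the parameter formats parsed by the handlers
 * above.
 */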
1855