xref: /dpdk/lib/eventdev/rte_eventdev.c (revision ffe18b05b48b96b0ba1ea8fec03f7a197144b494)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4 
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 #include <stdint.h>
11 #include <inttypes.h>
12 
13 #include <rte_string_fns.h>
14 #include <rte_log.h>
15 #include <dev_driver.h>
16 #include <rte_memzone.h>
17 #include <rte_eal.h>
18 #include <rte_common.h>
19 #include <rte_malloc.h>
20 #include <rte_errno.h>
21 #include <ethdev_driver.h>
22 #include <rte_cryptodev.h>
23 #include <rte_dmadev.h>
24 #include <cryptodev_pmd.h>
25 #include <rte_telemetry.h>
26 
27 #include "rte_eventdev.h"
28 #include "eventdev_pmd.h"
29 #include "eventdev_trace.h"
30 
31 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
32 
33 struct rte_eventdev *rte_eventdevs = rte_event_devices;
34 
35 static struct rte_eventdev_global eventdev_globals = {
36 	.nb_devs		= 0
37 };
38 
39 /* Public fastpath APIs. */
40 struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
41 
42 /* Event dev north bound API implementation */
43 
44 uint8_t
45 rte_event_dev_count(void)
46 {
47 	return eventdev_globals.nb_devs;
48 }
49 
50 int
51 rte_event_dev_get_dev_id(const char *name)
52 {
53 	int i;
54 	uint8_t cmp;
55 
56 	if (!name)
57 		return -EINVAL;
58 
59 	for (i = 0; i < eventdev_globals.nb_devs; i++) {
60 		cmp = (strncmp(rte_event_devices[i].data->name, name,
61 				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
62 			(rte_event_devices[i].dev ? (strncmp(
63 				rte_event_devices[i].dev->driver->name, name,
64 					 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
65 		if (cmp && (rte_event_devices[i].attached ==
66 					RTE_EVENTDEV_ATTACHED)) {
67 			rte_eventdev_trace_get_dev_id(name, i);
68 			return i;
69 		}
70 	}
71 	return -ENODEV;
72 }
73 
74 int
75 rte_event_dev_socket_id(uint8_t dev_id)
76 {
77 	struct rte_eventdev *dev;
78 
79 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
80 	dev = &rte_eventdevs[dev_id];
81 
82 	rte_eventdev_trace_socket_id(dev_id, dev, dev->data->socket_id);
83 
84 	return dev->data->socket_id;
85 }
86 
87 int
88 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
89 {
90 	struct rte_eventdev *dev;
91 
92 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
93 	dev = &rte_eventdevs[dev_id];
94 
95 	if (dev_info == NULL)
96 		return -EINVAL;
97 
98 	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
99 
100 	if (*dev->dev_ops->dev_infos_get == NULL)
101 		return -ENOTSUP;
102 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
103 
104 	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
105 
106 	dev_info->dev = dev->dev;
107 
108 	rte_eventdev_trace_info_get(dev_id, dev_info, dev_info->dev);
109 
110 	return 0;
111 }
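
/*
 * Application-side usage sketch for rte_event_dev_info_get() above; the
 * dev_id value and the fields printed are illustrative only.
 *
 *	struct rte_event_dev_info info;
 *	uint8_t dev_id = 0;
 *
 *	if (rte_event_dev_info_get(dev_id, &info) == 0)
 *		printf("max queues=%u max ports=%u\n",
 *		       info.max_event_queues, info.max_event_ports);
 */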
112 
113 int
114 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
115 				uint32_t *caps)
116 {
117 	struct rte_eventdev *dev;
118 
119 	rte_eventdev_trace_eth_rx_adapter_caps_get(dev_id, eth_port_id);
120 
121 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
122 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
123 
124 	dev = &rte_eventdevs[dev_id];
125 
126 	if (caps == NULL)
127 		return -EINVAL;
128 
129 	if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
130 		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
131 	else
132 		*caps = 0;
133 
134 	return dev->dev_ops->eth_rx_adapter_caps_get ?
135 				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
136 						&rte_eth_devices[eth_port_id],
137 						caps)
138 				: 0;
139 }
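
/*
 * Application-side usage sketch: checking whether a software Rx adapter
 * (and hence a service core) is needed for an eventdev/ethdev pair. The
 * dev_id/eth_port_id values and the need_service_core flag are
 * hypothetical.
 *
 *	uint32_t caps = 0;
 *	int ret;
 *
 *	ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps);
 *	if (ret == 0 && !(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 *		need_service_core = true;
 */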
140 
141 int
142 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
143 {
144 	struct rte_eventdev *dev;
145 	const struct event_timer_adapter_ops *ops;
146 
147 	rte_eventdev_trace_timer_adapter_caps_get(dev_id);
148 
149 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
150 
151 	dev = &rte_eventdevs[dev_id];
152 
153 	if (caps == NULL)
154 		return -EINVAL;
155 
156 	if (dev->dev_ops->timer_adapter_caps_get == NULL)
157 		*caps = RTE_EVENT_TIMER_ADAPTER_SW_CAP;
158 	else
159 		*caps = 0;
160 
161 	return dev->dev_ops->timer_adapter_caps_get ?
162 				(*dev->dev_ops->timer_adapter_caps_get)(dev,
163 									0,
164 									caps,
165 									&ops)
166 				: 0;
167 }
168 
169 int
170 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
171 				  uint32_t *caps)
172 {
173 	struct rte_eventdev *dev;
174 	struct rte_cryptodev *cdev;
175 
176 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
177 	if (!rte_cryptodev_is_valid_dev(cdev_id))
178 		return -EINVAL;
179 
180 	dev = &rte_eventdevs[dev_id];
181 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
182 
183 	rte_eventdev_trace_crypto_adapter_caps_get(dev_id, dev, cdev_id, cdev);
184 
185 	if (caps == NULL)
186 		return -EINVAL;
187 
188 	if (dev->dev_ops->crypto_adapter_caps_get == NULL)
189 		*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
190 	else
191 		*caps = 0;
192 
193 	return dev->dev_ops->crypto_adapter_caps_get ?
194 		(*dev->dev_ops->crypto_adapter_caps_get)
195 		(dev, cdev, caps) : 0;
196 }
197 
198 int
199 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
200 				uint32_t *caps)
201 {
202 	struct rte_eventdev *dev;
203 	struct rte_eth_dev *eth_dev;
204 
205 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
206 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
207 
208 	dev = &rte_eventdevs[dev_id];
209 	eth_dev = &rte_eth_devices[eth_port_id];
210 
211 	rte_eventdev_trace_eth_tx_adapter_caps_get(dev_id, dev, eth_port_id, eth_dev);
212 
213 	if (caps == NULL)
214 		return -EINVAL;
215 
216 	if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
217 		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
218 	else
219 		*caps = 0;
220 
221 	return dev->dev_ops->eth_tx_adapter_caps_get ?
222 			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
223 								eth_dev,
224 								caps)
225 			: 0;
226 }
227 
228 int
229 rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dma_dev_id, uint32_t *caps)
230 {
231 	struct rte_eventdev *dev;
232 
233 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
234 	if (!rte_dma_is_valid(dma_dev_id))
235 		return -EINVAL;
236 
237 	dev = &rte_eventdevs[dev_id];
238 
239 	if (caps == NULL)
240 		return -EINVAL;
241 
242 	*caps = 0;
243 
244 	if (dev->dev_ops->dma_adapter_caps_get)
245 		return (*dev->dev_ops->dma_adapter_caps_get)(dev, dma_dev_id, caps);
246 
247 	return 0;
248 }
249 
250 static inline int
251 event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
252 {
253 	uint8_t old_nb_queues = dev->data->nb_queues;
254 	struct rte_event_queue_conf *queues_cfg;
255 	unsigned int i;
256 
257 	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
258 			 dev->data->dev_id);
259 
260 	if (nb_queues != 0) {
261 		queues_cfg = dev->data->queues_cfg;
262 		if (*dev->dev_ops->queue_release == NULL)
263 			return -ENOTSUP;
264 
265 		for (i = nb_queues; i < old_nb_queues; i++)
266 			(*dev->dev_ops->queue_release)(dev, i);
267 
268 
269 		if (nb_queues > old_nb_queues) {
270 			uint8_t new_qs = nb_queues - old_nb_queues;
271 
272 			memset(queues_cfg + old_nb_queues, 0,
273 				sizeof(queues_cfg[0]) * new_qs);
274 		}
275 	} else {
276 		if (*dev->dev_ops->queue_release == NULL)
277 			return -ENOTSUP;
278 
279 		for (i = nb_queues; i < old_nb_queues; i++)
280 			(*dev->dev_ops->queue_release)(dev, i);
281 	}
282 
283 	dev->data->nb_queues = nb_queues;
284 	return 0;
285 }
286 
287 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
288 
289 static inline int
290 event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
291 {
292 	uint8_t old_nb_ports = dev->data->nb_ports;
293 	void **ports;
294 	uint16_t *links_map;
295 	struct rte_event_port_conf *ports_cfg;
296 	unsigned int i, j;
297 
298 	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
299 			 dev->data->dev_id);
300 
301 	if (nb_ports != 0) { /* re-config */
302 		if (*dev->dev_ops->port_release == NULL)
303 			return -ENOTSUP;
304 
305 		ports = dev->data->ports;
306 		ports_cfg = dev->data->ports_cfg;
307 
308 		for (i = nb_ports; i < old_nb_ports; i++)
309 			(*dev->dev_ops->port_release)(ports[i]);
310 
311 		if (nb_ports > old_nb_ports) {
312 			uint8_t new_ps = nb_ports - old_nb_ports;
313 			unsigned int old_links_map_end =
314 				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
315 			unsigned int links_map_end =
316 				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
317 
318 			memset(ports + old_nb_ports, 0,
319 				sizeof(ports[0]) * new_ps);
320 			memset(ports_cfg + old_nb_ports, 0,
321 				sizeof(ports_cfg[0]) * new_ps);
322 			for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++) {
323 				links_map = dev->data->links_map[i];
324 				for (j = old_links_map_end; j < links_map_end; j++)
325 					links_map[j] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
326 			}
327 		}
328 	} else {
329 		if (*dev->dev_ops->port_release == NULL)
330 			return -ENOTSUP;
331 
332 		ports = dev->data->ports;
333 		for (i = nb_ports; i < old_nb_ports; i++) {
334 			(*dev->dev_ops->port_release)(ports[i]);
335 			ports[i] = NULL;
336 		}
337 	}
338 
339 	dev->data->nb_ports = nb_ports;
340 	return 0;
341 }
342 
343 int
344 rte_event_dev_configure(uint8_t dev_id,
345 			const struct rte_event_dev_config *dev_conf)
346 {
347 	struct rte_event_dev_info info;
348 	struct rte_eventdev *dev;
349 	int diag;
350 
351 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
352 	dev = &rte_eventdevs[dev_id];
353 
354 	if (*dev->dev_ops->dev_infos_get == NULL)
355 		return -ENOTSUP;
356 	if (*dev->dev_ops->dev_configure == NULL)
357 		return -ENOTSUP;
358 
359 	if (dev->data->dev_started) {
360 		RTE_EDEV_LOG_ERR(
361 		    "device %d must be stopped to allow configuration", dev_id);
362 		return -EBUSY;
363 	}
364 
365 	if (dev_conf == NULL)
366 		return -EINVAL;
367 
368 	(*dev->dev_ops->dev_infos_get)(dev, &info);
369 
370 	/* Check dequeue_timeout_ns value is in limit */
371 	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
372 		if (dev_conf->dequeue_timeout_ns &&
373 		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
374 			|| dev_conf->dequeue_timeout_ns >
375 				 info.max_dequeue_timeout_ns)) {
376 			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
377 			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
378 			dev_id, dev_conf->dequeue_timeout_ns,
379 			info.min_dequeue_timeout_ns,
380 			info.max_dequeue_timeout_ns);
381 			return -EINVAL;
382 		}
383 	}
384 
385 	/* Check nb_events_limit is in limit */
386 	if (dev_conf->nb_events_limit > info.max_num_events) {
387 		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
388 		dev_id, dev_conf->nb_events_limit, info.max_num_events);
389 		return -EINVAL;
390 	}
391 
392 	/* Check nb_event_queues is in limit */
393 	if (!dev_conf->nb_event_queues) {
394 		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
395 					dev_id);
396 		return -EINVAL;
397 	}
398 	if (dev_conf->nb_event_queues > info.max_event_queues +
399 			info.max_single_link_event_port_queue_pairs) {
400 		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
401 				 dev_id, dev_conf->nb_event_queues,
402 				 info.max_event_queues,
403 				 info.max_single_link_event_port_queue_pairs);
404 		return -EINVAL;
405 	}
406 	if (dev_conf->nb_event_queues -
407 			dev_conf->nb_single_link_event_port_queues >
408 			info.max_event_queues) {
409 		RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
410 				 dev_id, dev_conf->nb_event_queues,
411 				 dev_conf->nb_single_link_event_port_queues,
412 				 info.max_event_queues);
413 		return -EINVAL;
414 	}
415 	if (dev_conf->nb_single_link_event_port_queues >
416 			dev_conf->nb_event_queues) {
417 		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
418 				 dev_id,
419 				 dev_conf->nb_single_link_event_port_queues,
420 				 dev_conf->nb_event_queues);
421 		return -EINVAL;
422 	}
423 
424 	/* Check nb_event_ports is in limit */
425 	if (!dev_conf->nb_event_ports) {
426 		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
427 		return -EINVAL;
428 	}
429 	if (dev_conf->nb_event_ports > info.max_event_ports +
430 			info.max_single_link_event_port_queue_pairs) {
431 		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
432 				 dev_id, dev_conf->nb_event_ports,
433 				 info.max_event_ports,
434 				 info.max_single_link_event_port_queue_pairs);
435 		return -EINVAL;
436 	}
437 	if (dev_conf->nb_event_ports -
438 			dev_conf->nb_single_link_event_port_queues
439 			> info.max_event_ports) {
440 		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
441 				 dev_id, dev_conf->nb_event_ports,
442 				 dev_conf->nb_single_link_event_port_queues,
443 				 info.max_event_ports);
444 		return -EINVAL;
445 	}
446 
447 	if (dev_conf->nb_single_link_event_port_queues >
448 	    dev_conf->nb_event_ports) {
449 		RTE_EDEV_LOG_ERR(
450 				 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
451 				 dev_id,
452 				 dev_conf->nb_single_link_event_port_queues,
453 				 dev_conf->nb_event_ports);
454 		return -EINVAL;
455 	}
456 
457 	/* Check nb_event_queue_flows is in limit */
458 	if (!dev_conf->nb_event_queue_flows) {
459 		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
460 		return -EINVAL;
461 	}
462 	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
463 		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
464 		dev_id, dev_conf->nb_event_queue_flows,
465 		info.max_event_queue_flows);
466 		return -EINVAL;
467 	}
468 
469 	/* Check nb_event_port_dequeue_depth is in limit */
470 	if (!dev_conf->nb_event_port_dequeue_depth) {
471 		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
472 					dev_id);
473 		return -EINVAL;
474 	}
475 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
476 		 (dev_conf->nb_event_port_dequeue_depth >
477 			 info.max_event_port_dequeue_depth)) {
478 		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
479 		dev_id, dev_conf->nb_event_port_dequeue_depth,
480 		info.max_event_port_dequeue_depth);
481 		return -EINVAL;
482 	}
483 
484 	/* Check nb_event_port_enqueue_depth is in limit */
485 	if (!dev_conf->nb_event_port_enqueue_depth) {
486 		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
487 					dev_id);
488 		return -EINVAL;
489 	}
490 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
491 		(dev_conf->nb_event_port_enqueue_depth >
492 			 info.max_event_port_enqueue_depth)) {
493 		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
494 		dev_id, dev_conf->nb_event_port_enqueue_depth,
495 		info.max_event_port_enqueue_depth);
496 		return -EINVAL;
497 	}
498 
499 	/* Copy the dev_conf parameter into the dev structure */
500 	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
501 
502 	/* Setup new number of queues and reconfigure device. */
503 	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
504 	if (diag != 0) {
505 		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
506 				 diag);
507 		return diag;
508 	}
509 
510 	/* Setup new number of ports and reconfigure device. */
511 	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
512 	if (diag != 0) {
513 		event_dev_queue_config(dev, 0);
514 		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
515 				 diag);
516 		return diag;
517 	}
518 
519 	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
520 
521 	/* Configure the device */
522 	diag = (*dev->dev_ops->dev_configure)(dev);
523 	if (diag != 0) {
524 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
525 		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
526 		event_dev_queue_config(dev, 0);
527 		event_dev_port_config(dev, 0);
528 	}
529 
530 	dev->data->event_dev_cap = info.event_dev_cap;
531 	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
532 	return diag;
533 }
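
/*
 * Application-side configuration sketch: the values below are
 * illustrative and must stay within the limits reported by
 * rte_event_dev_info_get() (info), as enforced above.
 *
 *	struct rte_event_dev_config cfg = {
 *		.nb_event_queues = 2,
 *		.nb_event_ports = 2,
 *		.nb_events_limit = info.max_num_events,
 *		.nb_event_queue_flows = 1024,
 *		.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth,
 *		.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth,
 *		.dequeue_timeout_ns = info.min_dequeue_timeout_ns,
 *	};
 *
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		rte_exit(EXIT_FAILURE, "eventdev %u configure failed\n", dev_id);
 */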
534 
535 static inline int
536 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
537 {
538 	if (queue_id < dev->data->nb_queues && queue_id <
539 				RTE_EVENT_MAX_QUEUES_PER_DEV)
540 		return 1;
541 	else
542 		return 0;
543 }
544 
545 int
546 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
547 				 struct rte_event_queue_conf *queue_conf)
548 {
549 	struct rte_eventdev *dev;
550 
551 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
552 	dev = &rte_eventdevs[dev_id];
553 
554 	if (queue_conf == NULL)
555 		return -EINVAL;
556 
557 	if (!is_valid_queue(dev, queue_id)) {
558 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
559 		return -EINVAL;
560 	}
561 
562 	if (*dev->dev_ops->queue_def_conf == NULL)
563 		return -ENOTSUP;
564 	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
565 	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
566 
567 	rte_eventdev_trace_queue_default_conf_get(dev_id, dev, queue_id, queue_conf);
568 
569 	return 0;
570 }
571 
572 static inline int
573 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
574 {
575 	if (queue_conf &&
576 		!(queue_conf->event_queue_cfg &
577 		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
578 		((queue_conf->event_queue_cfg &
579 			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
580 		(queue_conf->schedule_type
581 			== RTE_SCHED_TYPE_ATOMIC)
582 		))
583 		return 1;
584 	else
585 		return 0;
586 }
587 
588 static inline int
589 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
590 {
591 	if (queue_conf &&
592 		!(queue_conf->event_queue_cfg &
593 		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
594 		((queue_conf->event_queue_cfg &
595 			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
596 		(queue_conf->schedule_type
597 			== RTE_SCHED_TYPE_ORDERED)
598 		))
599 		return 1;
600 	else
601 		return 0;
602 }
603 
604 
605 int
606 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
607 		      const struct rte_event_queue_conf *queue_conf)
608 {
609 	struct rte_eventdev *dev;
610 	struct rte_event_queue_conf def_conf;
611 
612 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
613 	dev = &rte_eventdevs[dev_id];
614 
615 	if (!is_valid_queue(dev, queue_id)) {
616 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
617 		return -EINVAL;
618 	}
619 
620 	/* Check nb_atomic_flows limit */
621 	if (is_valid_atomic_queue_conf(queue_conf)) {
622 		if (queue_conf->nb_atomic_flows == 0 ||
623 		    queue_conf->nb_atomic_flows >
624 			dev->data->dev_conf.nb_event_queue_flows) {
625 			RTE_EDEV_LOG_ERR(
626 		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
627 			dev_id, queue_id, queue_conf->nb_atomic_flows,
628 			dev->data->dev_conf.nb_event_queue_flows);
629 			return -EINVAL;
630 		}
631 	}
632 
633 	/* Check nb_atomic_order_sequences limit */
634 	if (is_valid_ordered_queue_conf(queue_conf)) {
635 		if (queue_conf->nb_atomic_order_sequences == 0 ||
636 		    queue_conf->nb_atomic_order_sequences >
637 			dev->data->dev_conf.nb_event_queue_flows) {
638 			RTE_EDEV_LOG_ERR(
639 		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
640 			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
641 			dev->data->dev_conf.nb_event_queue_flows);
642 			return -EINVAL;
643 		}
644 	}
645 
646 	if (dev->data->dev_started) {
647 		RTE_EDEV_LOG_ERR(
648 		    "device %d must be stopped to allow queue setup", dev_id);
649 		return -EBUSY;
650 	}
651 
652 	if (*dev->dev_ops->queue_setup == NULL)
653 		return -ENOTSUP;
654 
655 	if (queue_conf == NULL) {
656 		if (*dev->dev_ops->queue_def_conf == NULL)
657 			return -ENOTSUP;
658 		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
659 		queue_conf = &def_conf;
660 	}
661 
662 	dev->data->queues_cfg[queue_id] = *queue_conf;
663 	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
664 	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
665 }
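
/*
 * Application-side usage sketch: configuring one queue for atomic
 * scheduling, starting from the driver defaults. dev_id and queue_id
 * are hypothetical.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	if (rte_event_queue_setup(dev_id, queue_id, &qconf) < 0)
 *		rte_exit(EXIT_FAILURE, "queue %u setup failed\n", queue_id);
 */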
666 
667 static inline int
668 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
669 {
670 	if (port_id < dev->data->nb_ports)
671 		return 1;
672 	else
673 		return 0;
674 }
675 
676 int
677 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
678 				 struct rte_event_port_conf *port_conf)
679 {
680 	struct rte_eventdev *dev;
681 
682 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
683 	dev = &rte_eventdevs[dev_id];
684 
685 	if (port_conf == NULL)
686 		return -EINVAL;
687 
688 	if (!is_valid_port(dev, port_id)) {
689 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
690 		return -EINVAL;
691 	}
692 
693 	if (*dev->dev_ops->port_def_conf == NULL)
694 		return -ENOTSUP;
695 	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
696 	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
697 
698 	rte_eventdev_trace_port_default_conf_get(dev_id, dev, port_id, port_conf);
699 
700 	return 0;
701 }
702 
703 int
704 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
705 		     const struct rte_event_port_conf *port_conf)
706 {
707 	struct rte_eventdev *dev;
708 	struct rte_event_port_conf def_conf;
709 	int diag;
710 
711 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
712 	dev = &rte_eventdevs[dev_id];
713 
714 	if (!is_valid_port(dev, port_id)) {
715 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
716 		return -EINVAL;
717 	}
718 
719 	/* Check new_event_threshold limit */
720 	if ((port_conf && !port_conf->new_event_threshold) ||
721 			(port_conf && port_conf->new_event_threshold >
722 				 dev->data->dev_conf.nb_events_limit)) {
723 		RTE_EDEV_LOG_ERR(
724 		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
725 			dev_id, port_id, port_conf->new_event_threshold,
726 			dev->data->dev_conf.nb_events_limit);
727 		return -EINVAL;
728 	}
729 
730 	/* Check dequeue_depth limit */
731 	if ((port_conf && !port_conf->dequeue_depth) ||
732 			(port_conf && port_conf->dequeue_depth >
733 		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
734 		RTE_EDEV_LOG_ERR(
735 		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
736 			dev_id, port_id, port_conf->dequeue_depth,
737 			dev->data->dev_conf.nb_event_port_dequeue_depth);
738 		return -EINVAL;
739 	}
740 
741 	/* Check enqueue_depth limit */
742 	if ((port_conf && !port_conf->enqueue_depth) ||
743 			(port_conf && port_conf->enqueue_depth >
744 		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
745 		RTE_EDEV_LOG_ERR(
746 		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
747 			dev_id, port_id, port_conf->enqueue_depth,
748 			dev->data->dev_conf.nb_event_port_enqueue_depth);
749 		return -EINVAL;
750 	}
751 
752 	if (port_conf &&
753 	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
754 	    !(dev->data->event_dev_cap &
755 	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
756 		RTE_EDEV_LOG_ERR(
757 		   "dev%d port%d Implicit release disable not supported",
758 			dev_id, port_id);
759 		return -EINVAL;
760 	}
761 
762 	if (dev->data->dev_started) {
763 		RTE_EDEV_LOG_ERR(
764 		    "device %d must be stopped to allow port setup", dev_id);
765 		return -EBUSY;
766 	}
767 
768 	if (*dev->dev_ops->port_setup == NULL)
769 		return -ENOTSUP;
770 
771 	if (port_conf == NULL) {
772 		if (*dev->dev_ops->port_def_conf == NULL)
773 			return -ENOTSUP;
774 		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
775 		port_conf = &def_conf;
776 	}
777 
778 	dev->data->ports_cfg[port_id] = *port_conf;
779 
780 	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
781 
782 	/* Unlink all the queues from this port (default state after setup) */
783 	if (!diag)
784 		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
785 
786 	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
787 	if (diag < 0)
788 		return diag;
789 
790 	return 0;
791 }
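
/*
 * Application-side usage sketch: setting up a port from the driver
 * defaults; passing a NULL configuration selects the defaults as well.
 * dev_id and port_id are hypothetical.
 *
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_port_default_conf_get(dev_id, port_id, &pconf);
 *	if (rte_event_port_setup(dev_id, port_id, &pconf) < 0)
 *		rte_exit(EXIT_FAILURE, "port %u setup failed\n", port_id);
 */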
792 
793 void
794 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
795 		       rte_eventdev_port_flush_t release_cb, void *args)
796 {
797 	struct rte_eventdev *dev;
798 
799 	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
800 	dev = &rte_eventdevs[dev_id];
801 
802 	rte_eventdev_trace_port_quiesce(dev_id, dev, port_id, args);
803 
804 	if (!is_valid_port(dev, port_id)) {
805 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
806 		return;
807 	}
808 
809 	if (dev->dev_ops->port_quiesce)
810 		(*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
811 					      release_cb, args);
812 }
813 
814 int
815 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
816 		       uint32_t *attr_value)
817 {
818 	struct rte_eventdev *dev;
819 
820 	if (!attr_value)
821 		return -EINVAL;
822 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
823 	dev = &rte_eventdevs[dev_id];
824 
825 	switch (attr_id) {
826 	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
827 		*attr_value = dev->data->nb_ports;
828 		break;
829 	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
830 		*attr_value = dev->data->nb_queues;
831 		break;
832 	case RTE_EVENT_DEV_ATTR_STARTED:
833 		*attr_value = dev->data->dev_started;
834 		break;
835 	default:
836 		return -EINVAL;
837 	}
838 
839 	rte_eventdev_trace_attr_get(dev_id, dev, attr_id, *attr_value);
840 
841 	return 0;
842 }
843 
844 int
845 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
846 			uint32_t *attr_value)
847 {
848 	struct rte_eventdev *dev;
849 
850 	if (!attr_value)
851 		return -EINVAL;
852 
853 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
854 	dev = &rte_eventdevs[dev_id];
855 	if (!is_valid_port(dev, port_id)) {
856 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
857 		return -EINVAL;
858 	}
859 
860 	switch (attr_id) {
861 	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
862 		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
863 		break;
864 	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
865 		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
866 		break;
867 	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
868 		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
869 		break;
870 	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
871 	{
872 		uint32_t config;
873 
874 		config = dev->data->ports_cfg[port_id].event_port_cfg;
875 		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
876 		break;
877 	}
878 	default:
879 		return -EINVAL;
880 	}
881 
882 	rte_eventdev_trace_port_attr_get(dev_id, dev, port_id, attr_id, *attr_value);
883 
884 	return 0;
885 }
886 
887 int
888 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
889 			uint32_t *attr_value)
890 {
891 	struct rte_event_queue_conf *conf;
892 	struct rte_eventdev *dev;
893 
894 	if (!attr_value)
895 		return -EINVAL;
896 
897 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
898 	dev = &rte_eventdevs[dev_id];
899 	if (!is_valid_queue(dev, queue_id)) {
900 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
901 		return -EINVAL;
902 	}
903 
904 	conf = &dev->data->queues_cfg[queue_id];
905 
906 	switch (attr_id) {
907 	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
908 		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
909 		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
910 			*attr_value = conf->priority;
911 		break;
912 	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
913 		*attr_value = conf->nb_atomic_flows;
914 		break;
915 	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
916 		*attr_value = conf->nb_atomic_order_sequences;
917 		break;
918 	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
919 		*attr_value = conf->event_queue_cfg;
920 		break;
921 	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
922 		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
923 			return -EOVERFLOW;
924 
925 		*attr_value = conf->schedule_type;
926 		break;
927 	case RTE_EVENT_QUEUE_ATTR_WEIGHT:
928 		*attr_value = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
929 		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
930 			*attr_value = conf->weight;
931 		break;
932 	case RTE_EVENT_QUEUE_ATTR_AFFINITY:
933 		*attr_value = RTE_EVENT_QUEUE_AFFINITY_LOWEST;
934 		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
935 			*attr_value = conf->affinity;
936 		break;
937 	default:
938 		return -EINVAL;
939 	}
940 
941 	rte_eventdev_trace_queue_attr_get(dev_id, dev, queue_id, attr_id, *attr_value);
942 
943 	return 0;
944 }
945 
946 int
947 rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
948 			 uint64_t attr_value)
949 {
950 	struct rte_eventdev *dev;
951 
952 	rte_eventdev_trace_queue_attr_set(dev_id, queue_id, attr_id, attr_value);
953 
954 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
955 	dev = &rte_eventdevs[dev_id];
956 	if (!is_valid_queue(dev, queue_id)) {
957 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
958 		return -EINVAL;
959 	}
960 
961 	if (!(dev->data->event_dev_cap &
962 	      RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR)) {
963 		RTE_EDEV_LOG_ERR(
964 			"Device %" PRIu8 "does not support changing queue attributes at runtime",
965 			dev_id);
966 		return -ENOTSUP;
967 	}
968 
969 	if (*dev->dev_ops->queue_attr_set == NULL)
970 		return -ENOTSUP;
971 	return (*dev->dev_ops->queue_attr_set)(dev, queue_id, attr_id,
972 					       attr_value);
973 }
974 
975 int
976 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
977 		    const uint8_t queues[], const uint8_t priorities[],
978 		    uint16_t nb_links)
979 {
980 	return rte_event_port_profile_links_set(dev_id, port_id, queues, priorities, nb_links, 0);
981 }
982 
983 int
984 rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
985 				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id)
986 {
987 	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
988 	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
989 	struct rte_event_dev_info info;
990 	struct rte_eventdev *dev;
991 	uint16_t *links_map;
992 	int i, diag;
993 
994 	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
995 	dev = &rte_eventdevs[dev_id];
996 
997 	if (*dev->dev_ops->dev_infos_get == NULL)
998 		return -ENOTSUP;
999 
1000 	(*dev->dev_ops->dev_infos_get)(dev, &info);
1001 	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
1002 	    profile_id >= info.max_profiles_per_port) {
1003 		RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
1004 		return -EINVAL;
1005 	}
1006 
1007 	if (*dev->dev_ops->port_link == NULL) {
1008 		RTE_EDEV_LOG_ERR("Function not supported");
1009 		rte_errno = ENOTSUP;
1010 		return 0;
1011 	}
1012 
1013 	if (profile_id && *dev->dev_ops->port_link_profile == NULL) {
1014 		RTE_EDEV_LOG_ERR("Function not supported");
1015 		rte_errno = ENOTSUP;
1016 		return 0;
1017 	}
1018 
1019 	if (!is_valid_port(dev, port_id)) {
1020 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1021 		rte_errno = EINVAL;
1022 		return 0;
1023 	}
1024 
1025 	if (queues == NULL) {
1026 		for (i = 0; i < dev->data->nb_queues; i++)
1027 			queues_list[i] = i;
1028 
1029 		queues = queues_list;
1030 		nb_links = dev->data->nb_queues;
1031 	}
1032 
1033 	if (priorities == NULL) {
1034 		for (i = 0; i < nb_links; i++)
1035 			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
1036 
1037 		priorities = priorities_list;
1038 	}
1039 
1040 	for (i = 0; i < nb_links; i++)
1041 		if (queues[i] >= dev->data->nb_queues) {
1042 			rte_errno = EINVAL;
1043 			return 0;
1044 		}
1045 
1046 	if (profile_id)
1047 		diag = (*dev->dev_ops->port_link_profile)(dev, dev->data->ports[port_id], queues,
1048 							  priorities, nb_links, profile_id);
1049 	else
1050 		diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id], queues,
1051 						  priorities, nb_links);
1052 	if (diag < 0)
1053 		return diag;
1054 
1055 	links_map = dev->data->links_map[profile_id];
1056 	/* Point links_map to this port specific area */
1057 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1058 	for (i = 0; i < diag; i++)
1059 		links_map[queues[i]] = (uint8_t)priorities[i];
1060 
1061 	rte_eventdev_trace_port_profile_links_set(dev_id, port_id, nb_links, profile_id, diag);
1062 	return diag;
1063 }
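
/*
 * Application-side usage sketch: linking a single queue to a port at
 * normal priority on the default profile; passing NULL for the queue
 * and priority arrays links every configured queue instead. dev_id,
 * port_id and queue_id are hypothetical.
 *
 *	uint8_t q = queue_id;
 *	uint8_t prio = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *
 *	if (rte_event_port_link(dev_id, port_id, &q, &prio, 1) != 1)
 *		rte_exit(EXIT_FAILURE, "link failed, rte_errno=%d\n", rte_errno);
 */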
1064 
1065 int
1066 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1067 		      uint8_t queues[], uint16_t nb_unlinks)
1068 {
1069 	return rte_event_port_profile_unlink(dev_id, port_id, queues, nb_unlinks, 0);
1070 }
1071 
1072 int
1073 rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1074 			      uint16_t nb_unlinks, uint8_t profile_id)
1075 {
1076 	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1077 	struct rte_event_dev_info info;
1078 	struct rte_eventdev *dev;
1079 	uint16_t *links_map;
1080 	int i, diag, j;
1081 
1082 	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
1083 	dev = &rte_eventdevs[dev_id];
1084 
1085 	if (*dev->dev_ops->dev_infos_get == NULL)
1086 		return -ENOTSUP;
1087 
1088 	(*dev->dev_ops->dev_infos_get)(dev, &info);
1089 	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
1090 	    profile_id >= info.max_profiles_per_port) {
1091 		RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
1092 		return -EINVAL;
1093 	}
1094 
1095 	if (*dev->dev_ops->port_unlink == NULL) {
1096 		RTE_EDEV_LOG_ERR("Function not supported");
1097 		rte_errno = ENOTSUP;
1098 		return 0;
1099 	}
1100 
1101 	if (profile_id && *dev->dev_ops->port_unlink_profile == NULL) {
1102 		RTE_EDEV_LOG_ERR("Function not supported");
1103 		rte_errno = ENOTSUP;
1104 		return 0;
1105 	}
1106 
1107 	if (!is_valid_port(dev, port_id)) {
1108 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1109 		rte_errno = EINVAL;
1110 		return 0;
1111 	}
1112 
1113 	links_map = dev->data->links_map[profile_id];
1114 	/* Point links_map to this port specific area */
1115 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1116 
1117 	if (queues == NULL) {
1118 		j = 0;
1119 		for (i = 0; i < dev->data->nb_queues; i++) {
1120 			if (links_map[i] !=
1121 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1122 				all_queues[j] = i;
1123 				j++;
1124 			}
1125 		}
1126 		queues = all_queues;
1127 	} else {
1128 		for (j = 0; j < nb_unlinks; j++) {
1129 			if (links_map[queues[j]] ==
1130 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
1131 				break;
1132 		}
1133 	}
1134 
1135 	nb_unlinks = j;
1136 	for (i = 0; i < nb_unlinks; i++)
1137 		if (queues[i] >= dev->data->nb_queues) {
1138 			rte_errno = EINVAL;
1139 			return 0;
1140 		}
1141 
1142 	if (profile_id)
1143 		diag = (*dev->dev_ops->port_unlink_profile)(dev, dev->data->ports[port_id], queues,
1144 							    nb_unlinks, profile_id);
1145 	else
1146 		diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id], queues,
1147 						    nb_unlinks);
1148 	if (diag < 0)
1149 		return diag;
1150 
1151 	for (i = 0; i < diag; i++)
1152 		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1153 
1154 	rte_eventdev_trace_port_profile_unlink(dev_id, port_id, nb_unlinks, profile_id, diag);
1155 	return diag;
1156 }
1157 
1158 int
1159 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
1160 {
1161 	struct rte_eventdev *dev;
1162 
1163 	rte_eventdev_trace_port_unlinks_in_progress(dev_id, port_id);
1164 
1165 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1166 	dev = &rte_eventdevs[dev_id];
1167 	if (!is_valid_port(dev, port_id)) {
1168 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1169 		return -EINVAL;
1170 	}
1171 
1172 	/* Return 0 if the PMD does not implement unlinks in progress.
1173 	 * This allows PMDs which handle unlink synchronously to not implement
1174 	 * this function at all.
1175 	 */
1176 	if (*dev->dev_ops->port_unlinks_in_progress == NULL)
1177 		return 0;
1178 
1179 	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1180 			dev->data->ports[port_id]);
1181 }
1182 
1183 int
1184 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1185 			 uint8_t queues[], uint8_t priorities[])
1186 {
1187 	struct rte_eventdev *dev;
1188 	uint16_t *links_map;
1189 	int i, count = 0;
1190 
1191 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1192 	dev = &rte_eventdevs[dev_id];
1193 	if (!is_valid_port(dev, port_id)) {
1194 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1195 		return -EINVAL;
1196 	}
1197 
1198 	/* Use the default profile_id. */
1199 	links_map = dev->data->links_map[0];
1200 	/* Point links_map to this port specific area */
1201 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1202 	for (i = 0; i < dev->data->nb_queues; i++) {
1203 		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1204 			queues[count] = i;
1205 			priorities[count] = (uint8_t)links_map[i];
1206 			++count;
1207 		}
1208 	}
1209 
1210 	rte_eventdev_trace_port_links_get(dev_id, port_id, count);
1211 
1212 	return count;
1213 }
1214 
1215 int
1216 rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1217 				 uint8_t priorities[], uint8_t profile_id)
1218 {
1219 	struct rte_event_dev_info info;
1220 	struct rte_eventdev *dev;
1221 	uint16_t *links_map;
1222 	int i, count = 0;
1223 
1224 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1225 
1226 	dev = &rte_eventdevs[dev_id];
1227 	if (*dev->dev_ops->dev_infos_get == NULL)
1228 		return -ENOTSUP;
1229 
1230 	(*dev->dev_ops->dev_infos_get)(dev, &info);
1231 	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
1232 	    profile_id >= info.max_profiles_per_port) {
1233 		RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
1234 		return -EINVAL;
1235 	}
1236 
1237 	if (!is_valid_port(dev, port_id)) {
1238 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1239 		return -EINVAL;
1240 	}
1241 
1242 	links_map = dev->data->links_map[profile_id];
1243 	/* Point links_map to this port specific area */
1244 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1245 	for (i = 0; i < dev->data->nb_queues; i++) {
1246 		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1247 			queues[count] = i;
1248 			priorities[count] = (uint8_t)links_map[i];
1249 			++count;
1250 		}
1251 	}
1252 
1253 	rte_eventdev_trace_port_profile_links_get(dev_id, port_id, profile_id, count);
1254 
1255 	return count;
1256 }
1257 
1258 int
1259 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1260 				 uint64_t *timeout_ticks)
1261 {
1262 	struct rte_eventdev *dev;
1263 
1264 	rte_eventdev_trace_dequeue_timeout_ticks(dev_id, ns, timeout_ticks);
1265 
1266 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1267 	dev = &rte_eventdevs[dev_id];
1268 	if (*dev->dev_ops->timeout_ticks == NULL)
1269 		return -ENOTSUP;
1270 
1271 	if (timeout_ticks == NULL)
1272 		return -EINVAL;
1273 
1274 	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1275 }
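
/*
 * Application-side usage sketch: converting a 100 us timeout into the
 * device-specific tick value taken by rte_event_dequeue_burst(),
 * falling back to a non-blocking dequeue on error. dev_id is
 * hypothetical.
 *
 *	uint64_t ticks = 0;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
 *		ticks = 0;
 */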
1276 
1277 int
1278 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1279 {
1280 	struct rte_eventdev *dev;
1281 
1282 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1283 	dev = &rte_eventdevs[dev_id];
1284 
1285 	if (service_id == NULL)
1286 		return -EINVAL;
1287 
1288 	if (dev->data->service_inited)
1289 		*service_id = dev->data->service_id;
1290 
1291 	rte_eventdev_trace_service_id_get(dev_id, *service_id);
1292 
1293 	return dev->data->service_inited ? 0 : -ESRCH;
1294 }
1295 
1296 int
1297 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1298 {
1299 	struct rte_eventdev *dev;
1300 
1301 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1302 	dev = &rte_eventdevs[dev_id];
1303 	if (*dev->dev_ops->dump == NULL)
1304 		return -ENOTSUP;
1305 	if (f == NULL)
1306 		return -EINVAL;
1307 
1308 	(*dev->dev_ops->dump)(dev, f);
1309 	return 0;
1310 
1311 }
1312 
1313 static int
1314 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1315 		uint8_t queue_port_id)
1316 {
1317 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1318 	if (dev->dev_ops->xstats_get_names != NULL)
1319 		return (*dev->dev_ops->xstats_get_names)(dev, mode,
1320 							queue_port_id,
1321 							NULL, NULL, 0);
1322 	return 0;
1323 }
1324 
1325 int
1326 rte_event_dev_xstats_names_get(uint8_t dev_id,
1327 		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1328 		struct rte_event_dev_xstats_name *xstats_names,
1329 		uint64_t *ids, unsigned int size)
1330 {
1331 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1332 	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1333 							  queue_port_id);
1334 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
1335 			(int)size < cnt_expected_entries)
1336 		return cnt_expected_entries;
1337 
1338 	/* dev_id checked above */
1339 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1340 
1341 	if (dev->dev_ops->xstats_get_names != NULL)
1342 		return (*dev->dev_ops->xstats_get_names)(dev, mode,
1343 				queue_port_id, xstats_names, ids, size);
1344 
1345 	return -ENOTSUP;
1346 }
1347 
1348 /* retrieve eventdev extended statistics */
1349 int
1350 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1351 		uint8_t queue_port_id, const uint64_t ids[],
1352 		uint64_t values[], unsigned int n)
1353 {
1354 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1355 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1356 
1357 	/* implemented by the driver */
1358 	if (dev->dev_ops->xstats_get != NULL)
1359 		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1360 				ids, values, n);
1361 	return -ENOTSUP;
1362 }
1363 
1364 uint64_t
1365 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1366 		uint64_t *id)
1367 {
1368 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1369 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1370 	uint64_t temp = -1;
1371 
1372 	if (id != NULL)
1373 		*id = (unsigned int)-1;
1374 	else
1375 		id = &temp; /* ensure driver never gets a NULL value */
1376 
1377 	/* implemented by driver */
1378 	if (dev->dev_ops->xstats_get_by_name != NULL)
1379 		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1380 	return -ENOTSUP;
1381 }
1382 
1383 int rte_event_dev_xstats_reset(uint8_t dev_id,
1384 		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1385 		const uint64_t ids[], uint32_t nb_ids)
1386 {
1387 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1388 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1389 
1390 	if (dev->dev_ops->xstats_reset != NULL)
1391 		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1392 							ids, nb_ids);
1393 	return -ENOTSUP;
1394 }
1395 
1396 int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
1397 
1398 int rte_event_dev_selftest(uint8_t dev_id)
1399 {
1400 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1401 	static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
1402 		.name = "rte_event_pmd_selftest_seqn_dynfield",
1403 		.size = sizeof(rte_event_pmd_selftest_seqn_t),
1404 		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
1405 	};
1406 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1407 
1408 	if (dev->dev_ops->dev_selftest != NULL) {
1409 		rte_event_pmd_selftest_seqn_dynfield_offset =
1410 			rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
1411 		if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
1412 			return -ENOMEM;
1413 		return (*dev->dev_ops->dev_selftest)();
1414 	}
1415 	return -ENOTSUP;
1416 }
1417 
1418 struct rte_mempool *
1419 rte_event_vector_pool_create(const char *name, unsigned int n,
1420 			     unsigned int cache_size, uint16_t nb_elem,
1421 			     int socket_id)
1422 {
1423 	const char *mp_ops_name;
1424 	struct rte_mempool *mp;
1425 	unsigned int elt_sz;
1426 	int ret;
1427 
1428 	if (!nb_elem) {
1429 		RTE_LOG(ERR, EVENTDEV,
1430 			"Invalid number of elements=%d requested\n", nb_elem);
1431 		rte_errno = EINVAL;
1432 		return NULL;
1433 	}
1434 
1435 	elt_sz =
1436 		sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
1437 	mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
1438 				      0);
1439 	if (mp == NULL)
1440 		return NULL;
1441 
1442 	mp_ops_name = rte_mbuf_best_mempool_ops();
1443 	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
1444 	if (ret != 0) {
1445 		RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
1446 		goto err;
1447 	}
1448 
1449 	ret = rte_mempool_populate_default(mp);
1450 	if (ret < 0)
1451 		goto err;
1452 
1453 	rte_eventdev_trace_vector_pool_create(mp, mp->name, mp->socket_id,
1454 		mp->size, mp->cache_size, mp->elt_size);
1455 
1456 	return mp;
1457 err:
1458 	rte_mempool_free(mp);
1459 	rte_errno = -ret;
1460 	return NULL;
1461 }
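
/*
 * Application-side usage sketch: creating a pool of event vectors able
 * to carry up to 64 pointers each. The pool name and sizes are
 * hypothetical.
 *
 *	struct rte_mempool *vec_pool;
 *
 *	vec_pool = rte_event_vector_pool_create("evt_vec_pool", 16 * 1024,
 *						128, 64, rte_socket_id());
 *	if (vec_pool == NULL)
 *		rte_exit(EXIT_FAILURE, "event vector pool creation failed\n");
 */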
1462 
1463 int
1464 rte_event_dev_start(uint8_t dev_id)
1465 {
1466 	struct rte_eventdev *dev;
1467 	int diag;
1468 
1469 	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1470 
1471 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1472 	dev = &rte_eventdevs[dev_id];
1473 	if (*dev->dev_ops->dev_start == NULL)
1474 		return -ENOTSUP;
1475 
1476 	if (dev->data->dev_started != 0) {
1477 		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1478 			dev_id);
1479 		return 0;
1480 	}
1481 
1482 	diag = (*dev->dev_ops->dev_start)(dev);
1483 	rte_eventdev_trace_start(dev_id, diag);
1484 	if (diag == 0)
1485 		dev->data->dev_started = 1;
1486 	else
1487 		return diag;
1488 
1489 	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
1490 
1491 	return 0;
1492 }
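
/*
 * Application-side usage sketch of the start/stop lifecycle: queues and
 * ports must be set up before starting, and the device must be stopped
 * again before reconfiguration or close. dev_id is hypothetical.
 *
 *	if (rte_event_dev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "eventdev %u start failed\n", dev_id);
 *	... enqueue/dequeue on the configured ports ...
 *	rte_event_dev_stop(dev_id);
 *	rte_event_dev_close(dev_id);
 */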
1493 
1494 int
1495 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1496 					   rte_eventdev_stop_flush_t callback,
1497 					   void *userdata)
1498 {
1499 	struct rte_eventdev *dev;
1500 
1501 	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1502 
1503 	rte_eventdev_trace_stop_flush_callback_register(dev_id, callback, userdata);
1504 
1505 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1506 	dev = &rte_eventdevs[dev_id];
1507 
1508 	dev->dev_ops->dev_stop_flush = callback;
1509 	dev->data->dev_stop_flush_arg = userdata;
1510 
1511 	return 0;
1512 }
1513 
1514 void
1515 rte_event_dev_stop(uint8_t dev_id)
1516 {
1517 	struct rte_eventdev *dev;
1518 
1519 	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1520 
1521 	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1522 	dev = &rte_eventdevs[dev_id];
1523 	if (*dev->dev_ops->dev_stop == NULL)
1524 		return;
1525 
1526 	if (dev->data->dev_started == 0) {
1527 		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1528 			dev_id);
1529 		return;
1530 	}
1531 
1532 	dev->data->dev_started = 0;
1533 	(*dev->dev_ops->dev_stop)(dev);
1534 	rte_eventdev_trace_stop(dev_id);
1535 	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1536 }
1537 
1538 int
1539 rte_event_dev_close(uint8_t dev_id)
1540 {
1541 	struct rte_eventdev *dev;
1542 
1543 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1544 	dev = &rte_eventdevs[dev_id];
1545 	if (*dev->dev_ops->dev_close == NULL)
1546 		return -ENOTSUP;
1547 
1548 	/* Device must be stopped before it can be closed */
1549 	if (dev->data->dev_started == 1) {
1550 		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1551 				dev_id);
1552 		return -EBUSY;
1553 	}
1554 
1555 	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1556 	rte_eventdev_trace_close(dev_id);
1557 	return (*dev->dev_ops->dev_close)(dev);
1558 }
1559 
1560 static inline int
1561 eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1562 		    int socket_id)
1563 {
1564 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1565 	const struct rte_memzone *mz;
1566 	int i, n;
1567 
1568 	/* Generate memzone name */
1569 	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1570 	if (n >= (int)sizeof(mz_name))
1571 		return -EINVAL;
1572 
1573 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1574 		mz = rte_memzone_reserve(mz_name,
1575 				sizeof(struct rte_eventdev_data),
1576 				socket_id, 0);
1577 	} else
1578 		mz = rte_memzone_lookup(mz_name);
1579 
1580 	if (mz == NULL)
1581 		return -ENOMEM;
1582 
1583 	*data = mz->addr;
1584 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1585 		memset(*data, 0, sizeof(struct rte_eventdev_data));
1586 		for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++)
1587 			for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV * RTE_EVENT_MAX_QUEUES_PER_DEV;
1588 			     n++)
1589 				(*data)->links_map[i][n] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1590 	}
1591 
1592 	return 0;
1593 }
1594 
1595 static inline uint8_t
1596 eventdev_find_free_device_index(void)
1597 {
1598 	uint8_t dev_id;
1599 
1600 	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1601 		if (rte_eventdevs[dev_id].attached ==
1602 				RTE_EVENTDEV_DETACHED)
1603 			return dev_id;
1604 	}
1605 	return RTE_EVENT_MAX_DEVS;
1606 }
1607 
1608 struct rte_eventdev *
1609 rte_event_pmd_allocate(const char *name, int socket_id)
1610 {
1611 	struct rte_eventdev *eventdev;
1612 	uint8_t dev_id;
1613 
1614 	if (rte_event_pmd_get_named_dev(name) != NULL) {
1615 		RTE_EDEV_LOG_ERR("Event device with name %s already "
1616 				"allocated!", name);
1617 		return NULL;
1618 	}
1619 
1620 	dev_id = eventdev_find_free_device_index();
1621 	if (dev_id == RTE_EVENT_MAX_DEVS) {
1622 		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1623 		return NULL;
1624 	}
1625 
1626 	eventdev = &rte_eventdevs[dev_id];
1627 
1628 	if (eventdev->data == NULL) {
1629 		struct rte_eventdev_data *eventdev_data = NULL;
1630 
1631 		int retval =
1632 			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
1633 
1634 		if (retval < 0 || eventdev_data == NULL)
1635 			return NULL;
1636 
1637 		eventdev->data = eventdev_data;
1638 
1639 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1640 
1641 			strlcpy(eventdev->data->name, name,
1642 				RTE_EVENTDEV_NAME_MAX_LEN);
1643 
1644 			eventdev->data->dev_id = dev_id;
1645 			eventdev->data->socket_id = socket_id;
1646 			eventdev->data->dev_started = 0;
1647 		}
1648 
1649 		eventdev->attached = RTE_EVENTDEV_ATTACHED;
1650 		eventdev_globals.nb_devs++;
1651 	}
1652 
1653 	return eventdev;
1654 }
1655 
1656 int
1657 rte_event_pmd_release(struct rte_eventdev *eventdev)
1658 {
1659 	int ret;
1660 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1661 	const struct rte_memzone *mz;
1662 
1663 	if (eventdev == NULL)
1664 		return -EINVAL;
1665 
1666 	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
1667 	eventdev->attached = RTE_EVENTDEV_DETACHED;
1668 	eventdev_globals.nb_devs--;
1669 
1670 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1671 		rte_free(eventdev->data->dev_private);
1672 
1673 		/* Generate memzone name */
1674 		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1675 				eventdev->data->dev_id);
1676 		if (ret >= (int)sizeof(mz_name))
1677 			return -EINVAL;
1678 
1679 		mz = rte_memzone_lookup(mz_name);
1680 		if (mz == NULL)
1681 			return -ENOMEM;
1682 
1683 		ret = rte_memzone_free(mz);
1684 		if (ret)
1685 			return ret;
1686 	}
1687 
1688 	eventdev->data = NULL;
1689 	return 0;
1690 }
1691 
1692 void
1693 event_dev_probing_finish(struct rte_eventdev *eventdev)
1694 {
1695 	if (eventdev == NULL)
1696 		return;
1697 
1698 	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
1699 			     eventdev);
1700 }
1701 
1702 static int
1703 handle_dev_list(const char *cmd __rte_unused,
1704 		const char *params __rte_unused,
1705 		struct rte_tel_data *d)
1706 {
1707 	uint8_t dev_id;
1708 	int ndev = rte_event_dev_count();
1709 
1710 	if (ndev < 1)
1711 		return -1;
1712 
1713 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1714 	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1715 		if (rte_eventdevs[dev_id].attached ==
1716 				RTE_EVENTDEV_ATTACHED)
1717 			rte_tel_data_add_array_int(d, dev_id);
1718 	}
1719 
1720 	return 0;
1721 }
1722 
1723 static int
1724 handle_port_list(const char *cmd __rte_unused,
1725 		 const char *params,
1726 		 struct rte_tel_data *d)
1727 {
1728 	int i;
1729 	uint8_t dev_id;
1730 	struct rte_eventdev *dev;
1731 	char *end_param;
1732 
1733 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1734 		return -1;
1735 
1736 	dev_id = strtoul(params, &end_param, 10);
1737 	if (*end_param != '\0')
1738 		RTE_EDEV_LOG_DEBUG(
1739 			"Extra parameters passed to eventdev telemetry command, ignoring");
1740 
1741 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1742 	dev = &rte_eventdevs[dev_id];
1743 
1744 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1745 	for (i = 0; i < dev->data->nb_ports; i++)
1746 		rte_tel_data_add_array_int(d, i);
1747 
1748 	return 0;
1749 }
1750 
1751 static int
1752 handle_queue_list(const char *cmd __rte_unused,
1753 		  const char *params,
1754 		  struct rte_tel_data *d)
1755 {
1756 	int i;
1757 	uint8_t dev_id;
1758 	struct rte_eventdev *dev;
1759 	char *end_param;
1760 
1761 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1762 		return -1;
1763 
1764 	dev_id = strtoul(params, &end_param, 10);
1765 	if (*end_param != '\0')
1766 		RTE_EDEV_LOG_DEBUG(
1767 			"Extra parameters passed to eventdev telemetry command, ignoring");
1768 
1769 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1770 	dev = &rte_eventdevs[dev_id];
1771 
1772 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1773 	for (i = 0; i < dev->data->nb_queues; i++)
1774 		rte_tel_data_add_array_int(d, i);
1775 
1776 	return 0;
1777 }
1778 
1779 static int
1780 handle_queue_links(const char *cmd __rte_unused,
1781 		   const char *params,
1782 		   struct rte_tel_data *d)
1783 {
1784 	int i, ret, port_id = 0;
1785 	char *end_param;
1786 	uint8_t dev_id;
1787 	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1788 	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1789 	const char *p_param;
1790 
1791 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1792 		return -1;
1793 
1794 	/* Get dev ID from parameter string */
1795 	dev_id = strtoul(params, &end_param, 10);
1796 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1797 
1798 	p_param = strtok(end_param, ",");
1799 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1800 		return -1;
1801 
1802 	port_id = strtoul(p_param, &end_param, 10);
1803 	p_param = strtok(NULL, "\0");
1804 	if (p_param != NULL)
1805 		RTE_EDEV_LOG_DEBUG(
1806 			"Extra parameters passed to eventdev telemetry command, ignoring");
1807 
1808 	ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
1809 	if (ret < 0)
1810 		return -1;
1811 
1812 	rte_tel_data_start_dict(d);
1813 	for (i = 0; i < ret; i++) {
1814 		char qid_name[32];
1815 
1816 		snprintf(qid_name, 31, "qid_%u", queues[i]);
1817 		rte_tel_data_add_dict_uint(d, qid_name, priorities[i]);
1818 	}
1819 
1820 	return 0;
1821 }
1822 
1823 static int
1824 eventdev_build_telemetry_data(int dev_id,
1825 			      enum rte_event_dev_xstats_mode mode,
1826 			      int port_queue_id,
1827 			      struct rte_tel_data *d)
1828 {
1829 	struct rte_event_dev_xstats_name *xstat_names;
1830 	uint64_t *ids;
1831 	uint64_t *values;
1832 	int i, ret, num_xstats;
1833 
1834 	num_xstats = rte_event_dev_xstats_names_get(dev_id,
1835 						    mode,
1836 						    port_queue_id,
1837 						    NULL,
1838 						    NULL,
1839 						    0);
1840 
1841 	if (num_xstats < 0)
1842 		return -1;
1843 
1844 	/* use one malloc for names */
1845 	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
1846 			     * num_xstats);
1847 	if (xstat_names == NULL)
1848 		return -1;
1849 
1850 	ids = malloc((sizeof(uint64_t)) * num_xstats);
1851 	if (ids == NULL) {
1852 		free(xstat_names);
1853 		return -1;
1854 	}
1855 
1856 	values = malloc((sizeof(uint64_t)) * num_xstats);
1857 	if (values == NULL) {
1858 		free(xstat_names);
1859 		free(ids);
1860 		return -1;
1861 	}
1862 
1863 	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
1864 					     xstat_names, ids, num_xstats);
1865 	if (ret < 0 || ret > num_xstats) {
1866 		free(xstat_names);
1867 		free(ids);
1868 		free(values);
1869 		return -1;
1870 	}
1871 
1872 	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
1873 				       ids, values, num_xstats);
1874 	if (ret < 0 || ret > num_xstats) {
1875 		free(xstat_names);
1876 		free(ids);
1877 		free(values);
1878 		return -1;
1879 	}
1880 
1881 	rte_tel_data_start_dict(d);
1882 	for (i = 0; i < num_xstats; i++)
1883 		rte_tel_data_add_dict_uint(d, xstat_names[i].name, values[i]);
1884 
1885 	free(xstat_names);
1886 	free(ids);
1887 	free(values);
1888 	return 0;
1889 }
1890 
1891 static int
1892 handle_dev_xstats(const char *cmd __rte_unused,
1893 		  const char *params,
1894 		  struct rte_tel_data *d)
1895 {
1896 	int dev_id;
1897 	enum rte_event_dev_xstats_mode mode;
1898 	char *end_param;
1899 
1900 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1901 		return -1;
1902 
1903 	/* Get dev ID from parameter string */
1904 	dev_id = strtoul(params, &end_param, 10);
1905 	if (*end_param != '\0')
1906 		RTE_EDEV_LOG_DEBUG(
1907 			"Extra parameters passed to eventdev telemetry command, ignoring");
1908 
1909 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1910 
1911 	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
1912 	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
1913 }
1914 
1915 static int
1916 handle_port_xstats(const char *cmd __rte_unused,
1917 		   const char *params,
1918 		   struct rte_tel_data *d)
1919 {
1920 	int dev_id;
1921 	int port_queue_id = 0;
1922 	enum rte_event_dev_xstats_mode mode;
1923 	char *end_param;
1924 	const char *p_param;
1925 
1926 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1927 		return -1;
1928 
1929 	/* Get dev ID from parameter string */
1930 	dev_id = strtoul(params, &end_param, 10);
1931 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1932 
1933 	p_param = strtok(end_param, ",");
1934 	mode = RTE_EVENT_DEV_XSTATS_PORT;
1935 
1936 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1937 		return -1;
1938 
1939 	port_queue_id = strtoul(p_param, &end_param, 10);
1940 
1941 	p_param = strtok(NULL, "\0");
1942 	if (p_param != NULL)
1943 		RTE_EDEV_LOG_DEBUG(
1944 			"Extra parameters passed to eventdev telemetry command, ignoring");
1945 
1946 	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1947 }
1948 
1949 static int
1950 handle_queue_xstats(const char *cmd __rte_unused,
1951 		    const char *params,
1952 		    struct rte_tel_data *d)
1953 {
1954 	int dev_id;
1955 	int port_queue_id = 0;
1956 	enum rte_event_dev_xstats_mode mode;
1957 	char *end_param;
1958 	const char *p_param;
1959 
1960 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1961 		return -1;
1962 
1963 	/* Get dev ID from parameter string */
1964 	dev_id = strtoul(params, &end_param, 10);
1965 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1966 
1967 	p_param = strtok(end_param, ",");
1968 	mode = RTE_EVENT_DEV_XSTATS_QUEUE;
1969 
1970 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1971 		return -1;
1972 
1973 	port_queue_id = strtoul(p_param, &end_param, 10);
1974 
1975 	p_param = strtok(NULL, "\0");
1976 	if (p_param != NULL)
1977 		RTE_EDEV_LOG_DEBUG(
1978 			"Extra parameters passed to eventdev telemetry command, ignoring");
1979 
1980 	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1981 }
1982 
1983 static int
1984 handle_dev_dump(const char *cmd __rte_unused,
1985 		const char *params,
1986 		struct rte_tel_data *d)
1987 {
1988 	char *buf, *end_param;
1989 	int dev_id, ret;
1990 	FILE *f;
1991 
1992 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1993 		return -1;
1994 
1995 	/* Get dev ID from parameter string */
1996 	dev_id = strtoul(params, &end_param, 10);
1997 	if (*end_param != '\0')
1998 		RTE_EDEV_LOG_DEBUG(
1999 			"Extra parameters passed to eventdev telemetry command, ignoring");
2000 
2001 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2002 
2003 	buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN);
2004 	if (buf == NULL)
2005 		return -ENOMEM;
2006 
2007 	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
2008 	if (f == NULL) {
2009 		free(buf);
2010 		return -EINVAL;
2011 	}
2012 
2013 	ret = rte_event_dev_dump(dev_id, f);
2014 	fclose(f);
2015 	if (ret == 0) {
2016 		rte_tel_data_start_dict(d);
2017 		rte_tel_data_string(d, buf);
2018 	}
2019 
2020 	free(buf);
2021 	return ret;
2022 }
2023 
2024 RTE_INIT(eventdev_init_telemetry)
2025 {
2026 	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
2027 			"Returns list of available eventdevs. Takes no parameters");
2028 	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
2029 			"Returns list of available ports. Parameter: DevID");
2030 	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
2031 			"Returns list of available queues. Parameter: DevID");
2032 
2033 	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
2034 			"Returns stats for an eventdev. Parameter: DevID");
2035 	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
2036 			"Returns stats for an eventdev port. Params: DevID,PortID");
2037 	rte_telemetry_register_cmd("/eventdev/queue_xstats",
2038 			handle_queue_xstats,
2039 			"Returns stats for an eventdev queue. Params: DevID,QueueID");
2040 	rte_telemetry_register_cmd("/eventdev/dev_dump", handle_dev_dump,
2041 			"Returns dump information for an eventdev. Parameter: DevID");
2042 	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
2043 			"Returns links for an eventdev port. Params: DevID,QueueID");
2044 }
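
/*
 * Usage sketch: the commands registered above can be queried with the
 * dpdk-telemetry.py client shipped in usertools/, for example (device 0
 * and port 0 are hypothetical):
 *
 *	--> /eventdev/dev_list
 *	--> /eventdev/port_xstats,0,0
 */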
2045