xref: /dpdk/lib/eventdev/rte_eventdev.c (revision 3178e37c65a676366f33f0bc56f49d9b26a06448)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4 
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 #include <stdint.h>
11 #include <inttypes.h>
12 
13 #include <rte_string_fns.h>
14 #include <rte_log.h>
15 #include <dev_driver.h>
16 #include <rte_memzone.h>
17 #include <rte_eal.h>
18 #include <rte_common.h>
19 #include <rte_malloc.h>
20 #include <rte_errno.h>
21 #include <ethdev_driver.h>
22 #include <rte_cryptodev.h>
23 #include <rte_dmadev.h>
24 #include <cryptodev_pmd.h>
25 #include <rte_telemetry.h>
26 
27 #include "rte_eventdev.h"
28 #include "eventdev_pmd.h"
29 #include "eventdev_trace.h"
30 
31 RTE_LOG_REGISTER_DEFAULT(rte_event_logtype, INFO);
32 
33 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
34 
35 struct rte_eventdev *rte_eventdevs = rte_event_devices;
36 
37 static struct rte_eventdev_global eventdev_globals = {
38 	.nb_devs		= 0
39 };
40 
41 /* Public fastpath APIs. */
42 struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
43 
44 /* Event dev north bound API implementation */
45 
46 uint8_t
47 rte_event_dev_count(void)
48 {
49 	return eventdev_globals.nb_devs;
50 }
51 
52 int
53 rte_event_dev_get_dev_id(const char *name)
54 {
55 	int i;
56 	uint8_t cmp;
57 
58 	if (!name)
59 		return -EINVAL;
60 
61 	for (i = 0; i < eventdev_globals.nb_devs; i++) {
62 		cmp = (strncmp(rte_event_devices[i].data->name, name,
63 				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
64 			(rte_event_devices[i].dev ? (strncmp(
65 				rte_event_devices[i].dev->driver->name, name,
66 					 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
67 		if (cmp && (rte_event_devices[i].attached ==
68 					RTE_EVENTDEV_ATTACHED)) {
69 			rte_eventdev_trace_get_dev_id(name, i);
70 			return i;
71 		}
72 	}
73 	return -ENODEV;
74 }
75 
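/*
 * Illustrative usage sketch (not part of this file): look up a device by
 * name and fall back to device 0. The "event_sw0" vdev name is only an
 * example and is not assumed to exist.
 *
 *	int dev_id = rte_event_dev_get_dev_id("event_sw0");
 *	if (dev_id < 0)
 *		dev_id = 0; // -EINVAL or -ENODEV: no such device
 */
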
76 int
77 rte_event_dev_socket_id(uint8_t dev_id)
78 {
79 	struct rte_eventdev *dev;
80 
81 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
82 	dev = &rte_eventdevs[dev_id];
83 
84 	rte_eventdev_trace_socket_id(dev_id, dev, dev->data->socket_id);
85 
86 	return dev->data->socket_id;
87 }
88 
89 int
90 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
91 {
92 	struct rte_eventdev *dev;
93 
94 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
95 	dev = &rte_eventdevs[dev_id];
96 
97 	if (dev_info == NULL)
98 		return -EINVAL;
99 
100 	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
101 
102 	if (*dev->dev_ops->dev_infos_get == NULL)
103 		return -ENOTSUP;
104 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
105 
106 	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
107 
108 	dev_info->dev = dev->dev;
109 	if (dev->dev != NULL && dev->dev->driver != NULL)
110 		dev_info->driver_name = dev->dev->driver->name;
111 
112 	rte_eventdev_trace_info_get(dev_id, dev_info, dev_info->dev);
113 
114 	return 0;
115 }
116 
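/*
 * Sketch: query device limits before configuring (dev_id is a hypothetical
 * valid device id).
 *
 *	struct rte_event_dev_info info;
 *	if (rte_event_dev_info_get(dev_id, &info) == 0)
 *		printf("max queues %u, max ports %u\n",
 *		       info.max_event_queues, info.max_event_ports);
 */
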
117 int
118 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
119 				uint32_t *caps)
120 {
121 	struct rte_eventdev *dev;
122 
123 	rte_eventdev_trace_eth_rx_adapter_caps_get(dev_id, eth_port_id);
124 
125 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
126 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
127 
128 	dev = &rte_eventdevs[dev_id];
129 
130 	if (caps == NULL)
131 		return -EINVAL;
132 
133 	if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
134 		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
135 	else
136 		*caps = 0;
137 
138 	return dev->dev_ops->eth_rx_adapter_caps_get ?
139 				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
140 						&rte_eth_devices[eth_port_id],
141 						caps)
142 				: 0;
143 }
144 
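/*
 * Sketch: the returned caps tell an application whether the Rx adapter
 * needs a software service core. A minimal check, assuming dev_id and
 * eth_port are valid:
 *
 *	uint32_t caps;
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port, &caps) == 0 &&
 *	    !(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 *		; // SW adapter path: a service core will move packets
 */
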
145 int
146 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
147 {
148 	struct rte_eventdev *dev;
149 	const struct event_timer_adapter_ops *ops;
150 
151 	rte_eventdev_trace_timer_adapter_caps_get(dev_id);
152 
153 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
154 
155 	dev = &rte_eventdevs[dev_id];
156 
157 	if (caps == NULL)
158 		return -EINVAL;
159 
160 	if (dev->dev_ops->timer_adapter_caps_get == NULL)
161 		*caps = RTE_EVENT_TIMER_ADAPTER_SW_CAP;
162 	else
163 		*caps = 0;
164 
165 	return dev->dev_ops->timer_adapter_caps_get ?
166 				(*dev->dev_ops->timer_adapter_caps_get)(dev,
167 									0,
168 									caps,
169 									&ops)
170 				: 0;
171 }
172 
173 int
174 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
175 				  uint32_t *caps)
176 {
177 	struct rte_eventdev *dev;
178 	struct rte_cryptodev *cdev;
179 
180 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
181 	if (!rte_cryptodev_is_valid_dev(cdev_id))
182 		return -EINVAL;
183 
184 	dev = &rte_eventdevs[dev_id];
185 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
186 
187 	rte_eventdev_trace_crypto_adapter_caps_get(dev_id, dev, cdev_id, cdev);
188 
189 	if (caps == NULL)
190 		return -EINVAL;
191 
192 	if (dev->dev_ops->crypto_adapter_caps_get == NULL)
193 		*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
194 	else
195 		*caps = 0;
196 
197 	return dev->dev_ops->crypto_adapter_caps_get ?
198 		(*dev->dev_ops->crypto_adapter_caps_get)
199 		(dev, cdev, caps) : 0;
200 }
201 
202 int
203 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
204 				uint32_t *caps)
205 {
206 	struct rte_eventdev *dev;
207 	struct rte_eth_dev *eth_dev;
208 
209 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
210 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
211 
212 	dev = &rte_eventdevs[dev_id];
213 	eth_dev = &rte_eth_devices[eth_port_id];
214 
215 	rte_eventdev_trace_eth_tx_adapter_caps_get(dev_id, dev, eth_port_id, eth_dev);
216 
217 	if (caps == NULL)
218 		return -EINVAL;
219 
220 	if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
221 		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
222 	else
223 		*caps = 0;
224 
225 	return dev->dev_ops->eth_tx_adapter_caps_get ?
226 			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
227 								eth_dev,
228 								caps)
229 			: 0;
230 }
231 
232 int
233 rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dma_dev_id, uint32_t *caps)
234 {
235 	struct rte_eventdev *dev;
236 
237 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
238 	if (!rte_dma_is_valid(dma_dev_id))
239 		return -EINVAL;
240 
241 	dev = &rte_eventdevs[dev_id];
242 
243 	if (caps == NULL)
244 		return -EINVAL;
245 
246 	*caps = 0;
247 
248 	if (dev->dev_ops->dma_adapter_caps_get)
249 		return (*dev->dev_ops->dma_adapter_caps_get)(dev, dma_dev_id, caps);
250 
251 	return 0;
252 }
253 
254 static inline int
255 event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
256 {
257 	uint8_t old_nb_queues = dev->data->nb_queues;
258 	struct rte_event_queue_conf *queues_cfg;
259 	unsigned int i;
260 
261 	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
262 			 dev->data->dev_id);
263 
264 	if (nb_queues != 0) {
265 		queues_cfg = dev->data->queues_cfg;
266 		if (*dev->dev_ops->queue_release == NULL)
267 			return -ENOTSUP;
268 
269 		for (i = nb_queues; i < old_nb_queues; i++)
270 			(*dev->dev_ops->queue_release)(dev, i);
271 
272 
273 		if (nb_queues > old_nb_queues) {
274 			uint8_t new_qs = nb_queues - old_nb_queues;
275 
276 			memset(queues_cfg + old_nb_queues, 0,
277 				sizeof(queues_cfg[0]) * new_qs);
278 		}
279 	} else {
280 		if (*dev->dev_ops->queue_release == NULL)
281 			return -ENOTSUP;
282 
283 		for (i = nb_queues; i < old_nb_queues; i++)
284 			(*dev->dev_ops->queue_release)(dev, i);
285 	}
286 
287 	dev->data->nb_queues = nb_queues;
288 	return 0;
289 }
290 
291 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
292 
293 static inline int
294 event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
295 {
296 	uint8_t old_nb_ports = dev->data->nb_ports;
297 	void **ports;
298 	uint16_t *links_map;
299 	struct rte_event_port_conf *ports_cfg;
300 	unsigned int i, j;
301 
302 	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
303 			 dev->data->dev_id);
304 
305 	if (nb_ports != 0) { /* re-config */
306 		if (*dev->dev_ops->port_release == NULL)
307 			return -ENOTSUP;
308 
309 		ports = dev->data->ports;
310 		ports_cfg = dev->data->ports_cfg;
311 
312 		for (i = nb_ports; i < old_nb_ports; i++)
313 			(*dev->dev_ops->port_release)(ports[i]);
314 
315 		if (nb_ports > old_nb_ports) {
316 			uint8_t new_ps = nb_ports - old_nb_ports;
317 			unsigned int old_links_map_end =
318 				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
319 			unsigned int links_map_end =
320 				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
321 
322 			memset(ports + old_nb_ports, 0,
323 				sizeof(ports[0]) * new_ps);
324 			memset(ports_cfg + old_nb_ports, 0,
325 				sizeof(ports_cfg[0]) * new_ps);
326 			for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++) {
327 				links_map = dev->data->links_map[i];
328 				for (j = old_links_map_end; j < links_map_end; j++)
329 					links_map[j] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
330 			}
331 		}
332 	} else {
333 		if (*dev->dev_ops->port_release == NULL)
334 			return -ENOTSUP;
335 
336 		ports = dev->data->ports;
337 		for (i = nb_ports; i < old_nb_ports; i++) {
338 			(*dev->dev_ops->port_release)(ports[i]);
339 			ports[i] = NULL;
340 		}
341 	}
342 
343 	dev->data->nb_ports = nb_ports;
344 	return 0;
345 }
346 
347 int
348 rte_event_dev_configure(uint8_t dev_id,
349 			const struct rte_event_dev_config *dev_conf)
350 {
351 	struct rte_event_dev_info info;
352 	struct rte_eventdev *dev;
353 	int diag;
354 
355 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
356 	dev = &rte_eventdevs[dev_id];
357 
358 	if (*dev->dev_ops->dev_infos_get == NULL)
359 		return -ENOTSUP;
360 	if (*dev->dev_ops->dev_configure == NULL)
361 		return -ENOTSUP;
362 
363 	if (dev->data->dev_started) {
364 		RTE_EDEV_LOG_ERR(
365 		    "device %d must be stopped to allow configuration", dev_id);
366 		return -EBUSY;
367 	}
368 
369 	if (dev_conf == NULL)
370 		return -EINVAL;
371 
372 	(*dev->dev_ops->dev_infos_get)(dev, &info);
373 
374 	/* Check that dequeue_timeout_ns is within the supported range */
375 	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
376 		if (dev_conf->dequeue_timeout_ns &&
377 		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
378 			|| dev_conf->dequeue_timeout_ns >
379 				 info.max_dequeue_timeout_ns)) {
380 			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
381 			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
382 			dev_id, dev_conf->dequeue_timeout_ns,
383 			info.min_dequeue_timeout_ns,
384 			info.max_dequeue_timeout_ns);
385 			return -EINVAL;
386 		}
387 	}
388 
389 	/* Check that nb_events_limit is within the device limit */
390 	if (dev_conf->nb_events_limit > info.max_num_events) {
391 		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
392 		dev_id, dev_conf->nb_events_limit, info.max_num_events);
393 		return -EINVAL;
394 	}
395 
396 	/* Check that nb_event_queues is within limits */
397 	if (!dev_conf->nb_event_queues) {
398 		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
399 					dev_id);
400 		return -EINVAL;
401 	}
402 	if (dev_conf->nb_event_queues > info.max_event_queues +
403 			info.max_single_link_event_port_queue_pairs) {
404 		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
405 				 dev_id, dev_conf->nb_event_queues,
406 				 info.max_event_queues,
407 				 info.max_single_link_event_port_queue_pairs);
408 		return -EINVAL;
409 	}
410 	if (dev_conf->nb_event_queues -
411 			dev_conf->nb_single_link_event_port_queues >
412 			info.max_event_queues) {
413 		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
414 				 dev_id, dev_conf->nb_event_queues,
415 				 dev_conf->nb_single_link_event_port_queues,
416 				 info.max_event_queues);
417 		return -EINVAL;
418 	}
419 	if (dev_conf->nb_single_link_event_port_queues >
420 			dev_conf->nb_event_queues) {
421 		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
422 				 dev_id,
423 				 dev_conf->nb_single_link_event_port_queues,
424 				 dev_conf->nb_event_queues);
425 		return -EINVAL;
426 	}
427 
428 	/* Check that nb_event_ports is within limits */
429 	if (!dev_conf->nb_event_ports) {
430 		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
431 		return -EINVAL;
432 	}
433 	if (dev_conf->nb_event_ports > info.max_event_ports +
434 			info.max_single_link_event_port_queue_pairs) {
435 		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
436 				 dev_id, dev_conf->nb_event_ports,
437 				 info.max_event_ports,
438 				 info.max_single_link_event_port_queue_pairs);
439 		return -EINVAL;
440 	}
441 	if (dev_conf->nb_event_ports -
442 			dev_conf->nb_single_link_event_port_queues
443 			> info.max_event_ports) {
444 		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
445 				 dev_id, dev_conf->nb_event_ports,
446 				 dev_conf->nb_single_link_event_port_queues,
447 				 info.max_event_ports);
448 		return -EINVAL;
449 	}
450 
451 	if (dev_conf->nb_single_link_event_port_queues >
452 	    dev_conf->nb_event_ports) {
453 		RTE_EDEV_LOG_ERR(
454 				 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
455 				 dev_id,
456 				 dev_conf->nb_single_link_event_port_queues,
457 				 dev_conf->nb_event_ports);
458 		return -EINVAL;
459 	}
460 
461 	/* Check that nb_event_queue_flows is within limits */
462 	if (!dev_conf->nb_event_queue_flows) {
463 		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
464 		return -EINVAL;
465 	}
466 	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
467 		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
468 		dev_id, dev_conf->nb_event_queue_flows,
469 		info.max_event_queue_flows);
470 		return -EINVAL;
471 	}
472 
473 	/* Check that nb_event_port_dequeue_depth is within limits */
474 	if (!dev_conf->nb_event_port_dequeue_depth) {
475 		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
476 					dev_id);
477 		return -EINVAL;
478 	}
479 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
480 		 (dev_conf->nb_event_port_dequeue_depth >
481 			 info.max_event_port_dequeue_depth)) {
482 		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
483 		dev_id, dev_conf->nb_event_port_dequeue_depth,
484 		info.max_event_port_dequeue_depth);
485 		return -EINVAL;
486 	}
487 
488 	/* Check that nb_event_port_enqueue_depth is within limits */
489 	if (!dev_conf->nb_event_port_enqueue_depth) {
490 		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
491 					dev_id);
492 		return -EINVAL;
493 	}
494 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
495 		(dev_conf->nb_event_port_enqueue_depth >
496 			 info.max_event_port_enqueue_depth)) {
497 		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
498 		dev_id, dev_conf->nb_event_port_enqueue_depth,
499 		info.max_event_port_enqueue_depth);
500 		return -EINVAL;
501 	}
502 
503 	/* Copy the dev_conf parameter into the dev structure */
504 	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
505 
506 	/* Setup new number of queues and reconfigure device. */
507 	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
508 	if (diag != 0) {
509 		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
510 				 diag);
511 		return diag;
512 	}
513 
514 	/* Setup new number of ports and reconfigure device. */
515 	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
516 	if (diag != 0) {
517 		event_dev_queue_config(dev, 0);
518 		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
519 				 diag);
520 		return diag;
521 	}
522 
523 	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
524 
525 	/* Configure the device */
526 	diag = (*dev->dev_ops->dev_configure)(dev);
527 	if (diag != 0) {
528 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
529 		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
530 		event_dev_queue_config(dev, 0);
531 		event_dev_port_config(dev, 0);
532 	}
533 
534 	dev->data->event_dev_cap = info.event_dev_cap;
535 	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
536 	return diag;
537 }
538 
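/*
 * Sketch of a typical configure call seeded from rte_event_dev_info_get(),
 * staying inside the limits validated above (values illustrative; the
 * device must be stopped):
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 2;
 *	cfg.nb_event_ports = 2;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	int ret = rte_event_dev_configure(dev_id, &cfg);
 */
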
539 static inline int
540 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
541 {
542 	if (queue_id < dev->data->nb_queues && queue_id <
543 				RTE_EVENT_MAX_QUEUES_PER_DEV)
544 		return 1;
545 	else
546 		return 0;
547 }
548 
549 int
550 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
551 				 struct rte_event_queue_conf *queue_conf)
552 {
553 	struct rte_eventdev *dev;
554 
555 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
556 	dev = &rte_eventdevs[dev_id];
557 
558 	if (queue_conf == NULL)
559 		return -EINVAL;
560 
561 	if (!is_valid_queue(dev, queue_id)) {
562 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
563 		return -EINVAL;
564 	}
565 
566 	if (*dev->dev_ops->queue_def_conf == NULL)
567 		return -ENOTSUP;
568 	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
569 	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
570 
571 	rte_eventdev_trace_queue_default_conf_get(dev_id, dev, queue_id, queue_conf);
572 
573 	return 0;
574 }
575 
576 static inline int
577 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
578 {
579 	if (queue_conf &&
580 		!(queue_conf->event_queue_cfg &
581 		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
582 		((queue_conf->event_queue_cfg &
583 			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
584 		(queue_conf->schedule_type
585 			== RTE_SCHED_TYPE_ATOMIC)
586 		))
587 		return 1;
588 	else
589 		return 0;
590 }
591 
592 static inline int
593 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
594 {
595 	if (queue_conf &&
596 		!(queue_conf->event_queue_cfg &
597 		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
598 		((queue_conf->event_queue_cfg &
599 			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
600 		(queue_conf->schedule_type
601 			== RTE_SCHED_TYPE_ORDERED)
602 		))
603 		return 1;
604 	else
605 		return 0;
606 }
607 
608 
609 int
610 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
611 		      const struct rte_event_queue_conf *queue_conf)
612 {
613 	struct rte_eventdev *dev;
614 	struct rte_event_queue_conf def_conf;
615 
616 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
617 	dev = &rte_eventdevs[dev_id];
618 
619 	if (!is_valid_queue(dev, queue_id)) {
620 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
621 		return -EINVAL;
622 	}
623 
624 	/* Check nb_atomic_flows limit */
625 	if (is_valid_atomic_queue_conf(queue_conf)) {
626 		if (queue_conf->nb_atomic_flows == 0 ||
627 		    queue_conf->nb_atomic_flows >
628 			dev->data->dev_conf.nb_event_queue_flows) {
629 			RTE_EDEV_LOG_ERR(
630 		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
631 			dev_id, queue_id, queue_conf->nb_atomic_flows,
632 			dev->data->dev_conf.nb_event_queue_flows);
633 			return -EINVAL;
634 		}
635 	}
636 
637 	/* Check nb_atomic_order_sequences limit */
638 	if (is_valid_ordered_queue_conf(queue_conf)) {
639 		if (queue_conf->nb_atomic_order_sequences == 0 ||
640 		    queue_conf->nb_atomic_order_sequences >
641 			dev->data->dev_conf.nb_event_queue_flows) {
642 			RTE_EDEV_LOG_ERR(
643 		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
644 			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
645 			dev->data->dev_conf.nb_event_queue_flows);
646 			return -EINVAL;
647 		}
648 	}
649 
650 	if (dev->data->dev_started) {
651 		RTE_EDEV_LOG_ERR(
652 		    "device %d must be stopped to allow queue setup", dev_id);
653 		return -EBUSY;
654 	}
655 
656 	if (*dev->dev_ops->queue_setup == NULL)
657 		return -ENOTSUP;
658 
659 	if (queue_conf == NULL) {
660 		if (*dev->dev_ops->queue_def_conf == NULL)
661 			return -ENOTSUP;
662 		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
663 		queue_conf = &def_conf;
664 	}
665 
666 	dev->data->queues_cfg[queue_id] = *queue_conf;
667 	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
668 	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
669 }
670 
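/*
 * Sketch: set up one atomic queue; passing a NULL queue_conf selects the
 * driver defaults, as implemented above. Values are illustrative and must
 * respect nb_event_queue_flows from the device configuration.
 *
 *	struct rte_event_queue_conf qconf = {
 *		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.nb_atomic_flows = 1024,
 *	};
 *	rte_event_queue_setup(dev_id, 0, &qconf); // or (dev_id, 0, NULL)
 */
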
671 static inline int
672 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
673 {
674 	if (port_id < dev->data->nb_ports)
675 		return 1;
676 	else
677 		return 0;
678 }
679 
680 int
681 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
682 				 struct rte_event_port_conf *port_conf)
683 {
684 	struct rte_eventdev *dev;
685 
686 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
687 	dev = &rte_eventdevs[dev_id];
688 
689 	if (port_conf == NULL)
690 		return -EINVAL;
691 
692 	if (!is_valid_port(dev, port_id)) {
693 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
694 		return -EINVAL;
695 	}
696 
697 	if (*dev->dev_ops->port_def_conf == NULL)
698 		return -ENOTSUP;
699 	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
700 	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
701 
702 	rte_eventdev_trace_port_default_conf_get(dev_id, dev, port_id, port_conf);
703 
704 	return 0;
705 }
706 
707 int
708 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
709 		     const struct rte_event_port_conf *port_conf)
710 {
711 	struct rte_eventdev *dev;
712 	struct rte_event_port_conf def_conf;
713 	int diag;
714 
715 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
716 	dev = &rte_eventdevs[dev_id];
717 
718 	if (!is_valid_port(dev, port_id)) {
719 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
720 		return -EINVAL;
721 	}
722 
723 	/* Check new_event_threshold limit */
724 	if ((port_conf && !port_conf->new_event_threshold) ||
725 			(port_conf && port_conf->new_event_threshold >
726 				 dev->data->dev_conf.nb_events_limit)) {
727 		RTE_EDEV_LOG_ERR(
728 		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
729 			dev_id, port_id, port_conf->new_event_threshold,
730 			dev->data->dev_conf.nb_events_limit);
731 		return -EINVAL;
732 	}
733 
734 	/* Check dequeue_depth limit */
735 	if ((port_conf && !port_conf->dequeue_depth) ||
736 			(port_conf && port_conf->dequeue_depth >
737 		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
738 		RTE_EDEV_LOG_ERR(
739 		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
740 			dev_id, port_id, port_conf->dequeue_depth,
741 			dev->data->dev_conf.nb_event_port_dequeue_depth);
742 		return -EINVAL;
743 	}
744 
745 	/* Check enqueue_depth limit */
746 	if ((port_conf && !port_conf->enqueue_depth) ||
747 			(port_conf && port_conf->enqueue_depth >
748 		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
749 		RTE_EDEV_LOG_ERR(
750 		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
751 			dev_id, port_id, port_conf->enqueue_depth,
752 			dev->data->dev_conf.nb_event_port_enqueue_depth);
753 		return -EINVAL;
754 	}
755 
756 	if (port_conf &&
757 	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
758 	    !(dev->data->event_dev_cap &
759 	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
760 		RTE_EDEV_LOG_ERR(
761 		   "dev%d port%d Implicit release disable not supported",
762 			dev_id, port_id);
763 		return -EINVAL;
764 	}
765 
766 	if (dev->data->dev_started) {
767 		RTE_EDEV_LOG_ERR(
768 		    "device %d must be stopped to allow port setup", dev_id);
769 		return -EBUSY;
770 	}
771 
772 	if (*dev->dev_ops->port_setup == NULL)
773 		return -ENOTSUP;
774 
775 	if (port_conf == NULL) {
776 		if (*dev->dev_ops->port_def_conf == NULL)
777 			return -ENOTSUP;
778 		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
779 		port_conf = &def_conf;
780 	}
781 
782 	dev->data->ports_cfg[port_id] = *port_conf;
783 
784 	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
785 
786 	/* Unlink all the queues from this port (default state after setup) */
787 	if (!diag)
788 		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
789 
790 	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
791 	if (diag < 0)
792 		return diag;
793 
794 	return 0;
795 }
796 
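/*
 * Sketch: a NULL port_conf selects the driver's default configuration;
 * note that a freshly set up port starts with no queue links (see the
 * unlink call above).
 *
 *	if (rte_event_port_setup(dev_id, port_id, NULL) < 0)
 *		rte_exit(EXIT_FAILURE, "port setup failed\n"); // illustrative
 */
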
797 void
798 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
799 		       rte_eventdev_port_flush_t release_cb, void *args)
800 {
801 	struct rte_eventdev *dev;
802 
803 	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
804 	dev = &rte_eventdevs[dev_id];
805 
806 	rte_eventdev_trace_port_quiesce(dev_id, dev, port_id, args);
807 
808 	if (!is_valid_port(dev, port_id)) {
809 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
810 		return;
811 	}
812 
813 	if (dev->dev_ops->port_quiesce)
814 		(*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
815 					      release_cb, args);
816 }
817 
818 int
819 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
820 		       uint32_t *attr_value)
821 {
822 	struct rte_eventdev *dev;
823 
824 	if (!attr_value)
825 		return -EINVAL;
826 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
827 	dev = &rte_eventdevs[dev_id];
828 
829 	switch (attr_id) {
830 	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
831 		*attr_value = dev->data->nb_ports;
832 		break;
833 	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
834 		*attr_value = dev->data->nb_queues;
835 		break;
836 	case RTE_EVENT_DEV_ATTR_STARTED:
837 		*attr_value = dev->data->dev_started;
838 		break;
839 	default:
840 		return -EINVAL;
841 	}
842 
843 	rte_eventdev_trace_attr_get(dev_id, dev, attr_id, *attr_value);
844 
845 	return 0;
846 }
847 
848 int
849 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
850 			uint32_t *attr_value)
851 {
852 	struct rte_eventdev *dev;
853 
854 	if (!attr_value)
855 		return -EINVAL;
856 
857 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
858 	dev = &rte_eventdevs[dev_id];
859 	if (!is_valid_port(dev, port_id)) {
860 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
861 		return -EINVAL;
862 	}
863 
864 	switch (attr_id) {
865 	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
866 		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
867 		break;
868 	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
869 		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
870 		break;
871 	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
872 		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
873 		break;
874 	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
875 	{
876 		uint32_t config;
877 
878 		config = dev->data->ports_cfg[port_id].event_port_cfg;
879 		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
880 		break;
881 	}
882 	default:
883 		return -EINVAL;
884 	}
885 
886 	rte_eventdev_trace_port_attr_get(dev_id, dev, port_id, attr_id, *attr_value);
887 
888 	return 0;
889 }
890 
891 int
892 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
893 			uint32_t *attr_value)
894 {
895 	struct rte_event_queue_conf *conf;
896 	struct rte_eventdev *dev;
897 
898 	if (!attr_value)
899 		return -EINVAL;
900 
901 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
902 	dev = &rte_eventdevs[dev_id];
903 	if (!is_valid_queue(dev, queue_id)) {
904 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
905 		return -EINVAL;
906 	}
907 
908 	conf = &dev->data->queues_cfg[queue_id];
909 
910 	switch (attr_id) {
911 	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
912 		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
913 		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
914 			*attr_value = conf->priority;
915 		break;
916 	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
917 		*attr_value = conf->nb_atomic_flows;
918 		break;
919 	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
920 		*attr_value = conf->nb_atomic_order_sequences;
921 		break;
922 	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
923 		*attr_value = conf->event_queue_cfg;
924 		break;
925 	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
926 		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
927 			return -EOVERFLOW;
928 
929 		*attr_value = conf->schedule_type;
930 		break;
931 	case RTE_EVENT_QUEUE_ATTR_WEIGHT:
932 		*attr_value = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
933 		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
934 			*attr_value = conf->weight;
935 		break;
936 	case RTE_EVENT_QUEUE_ATTR_AFFINITY:
937 		*attr_value = RTE_EVENT_QUEUE_AFFINITY_LOWEST;
938 		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
939 			*attr_value = conf->affinity;
940 		break;
941 	default:
942 		return -EINVAL;
943 	}
944 
945 	rte_eventdev_trace_queue_attr_get(dev_id, dev, queue_id, attr_id, *attr_value);
946 
947 	return 0;
948 }
949 
950 int
951 rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
952 			 uint64_t attr_value)
953 {
954 	struct rte_eventdev *dev;
955 
956 	rte_eventdev_trace_queue_attr_set(dev_id, queue_id, attr_id, attr_value);
957 
958 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
959 	dev = &rte_eventdevs[dev_id];
960 	if (!is_valid_queue(dev, queue_id)) {
961 		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
962 		return -EINVAL;
963 	}
964 
965 	if (!(dev->data->event_dev_cap &
966 	      RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR)) {
967 		RTE_EDEV_LOG_ERR(
968 			"Device %" PRIu8 " does not support changing queue attributes at runtime",
969 			dev_id);
970 		return -ENOTSUP;
971 	}
972 
973 	if (*dev->dev_ops->queue_attr_set == NULL)
974 		return -ENOTSUP;
975 	return (*dev->dev_ops->queue_attr_set)(dev, queue_id, attr_id,
976 					       attr_value);
977 }
978 
979 int
980 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
981 		    const uint8_t queues[], const uint8_t priorities[],
982 		    uint16_t nb_links)
983 {
984 	return rte_event_port_profile_links_set(dev_id, port_id, queues, priorities, nb_links, 0);
985 }
986 
987 int
988 rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
989 				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id)
990 {
991 	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
992 	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
993 	struct rte_event_dev_info info;
994 	struct rte_eventdev *dev;
995 	uint16_t *links_map;
996 	int i, diag;
997 
998 	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
999 	dev = &rte_eventdevs[dev_id];
1000 
1001 	if (*dev->dev_ops->dev_infos_get == NULL)
1002 		return -ENOTSUP;
1003 
1004 	(*dev->dev_ops->dev_infos_get)(dev, &info);
1005 	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
1006 	    profile_id >= info.max_profiles_per_port) {
1007 		RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
1008 		return -EINVAL;
1009 	}
1010 
1011 	if (*dev->dev_ops->port_link == NULL) {
1012 		RTE_EDEV_LOG_ERR("Function not supported");
1013 		rte_errno = ENOTSUP;
1014 		return 0;
1015 	}
1016 
1017 	if (profile_id && *dev->dev_ops->port_link_profile == NULL) {
1018 		RTE_EDEV_LOG_ERR("Function not supported");
1019 		rte_errno = ENOTSUP;
1020 		return 0;
1021 	}
1022 
1023 	if (!is_valid_port(dev, port_id)) {
1024 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1025 		rte_errno = EINVAL;
1026 		return 0;
1027 	}
1028 
1029 	if (queues == NULL) {
1030 		for (i = 0; i < dev->data->nb_queues; i++)
1031 			queues_list[i] = i;
1032 
1033 		queues = queues_list;
1034 		nb_links = dev->data->nb_queues;
1035 	}
1036 
1037 	if (priorities == NULL) {
1038 		for (i = 0; i < nb_links; i++)
1039 			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
1040 
1041 		priorities = priorities_list;
1042 	}
1043 
1044 	for (i = 0; i < nb_links; i++)
1045 		if (queues[i] >= dev->data->nb_queues) {
1046 			rte_errno = EINVAL;
1047 			return 0;
1048 		}
1049 
1050 	if (profile_id)
1051 		diag = (*dev->dev_ops->port_link_profile)(dev, dev->data->ports[port_id], queues,
1052 							  priorities, nb_links, profile_id);
1053 	else
1054 		diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id], queues,
1055 						  priorities, nb_links);
1056 	if (diag < 0)
1057 		return diag;
1058 
1059 	links_map = dev->data->links_map[profile_id];
1060 	/* Point links_map to this port-specific area */
1061 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1062 	for (i = 0; i < diag; i++)
1063 		links_map[queues[i]] = (uint8_t)priorities[i];
1064 
1065 	rte_eventdev_trace_port_profile_links_set(dev_id, port_id, nb_links, profile_id, diag);
1066 	return diag;
1067 }
1068 
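/*
 * Sketch: link every configured queue to a port at normal priority by
 * passing NULL arrays, mirroring the fallback logic above.
 *
 *	int n = rte_event_port_link(dev_id, port_id, NULL, NULL, 0);
 *	// n is the number of queues linked; on failure it is 0 and
 *	// rte_errno holds EINVAL or ENOTSUP
 */
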
1069 int
1070 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1071 		      uint8_t queues[], uint16_t nb_unlinks)
1072 {
1073 	return rte_event_port_profile_unlink(dev_id, port_id, queues, nb_unlinks, 0);
1074 }
1075 
1076 int
1077 rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1078 			      uint16_t nb_unlinks, uint8_t profile_id)
1079 {
1080 	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1081 	struct rte_event_dev_info info;
1082 	struct rte_eventdev *dev;
1083 	uint16_t *links_map;
1084 	int i, diag, j;
1085 
1086 	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
1087 	dev = &rte_eventdevs[dev_id];
1088 
1089 	if (*dev->dev_ops->dev_infos_get == NULL)
1090 		return -ENOTSUP;
1091 
1092 	(*dev->dev_ops->dev_infos_get)(dev, &info);
1093 	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
1094 	    profile_id >= info.max_profiles_per_port) {
1095 		RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
1096 		return -EINVAL;
1097 	}
1098 
1099 	if (*dev->dev_ops->port_unlink == NULL) {
1100 		RTE_EDEV_LOG_ERR("Function not supported");
1101 		rte_errno = ENOTSUP;
1102 		return 0;
1103 	}
1104 
1105 	if (profile_id && *dev->dev_ops->port_unlink_profile == NULL) {
1106 		RTE_EDEV_LOG_ERR("Function not supported");
1107 		rte_errno = ENOTSUP;
1108 		return 0;
1109 	}
1110 
1111 	if (!is_valid_port(dev, port_id)) {
1112 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1113 		rte_errno = EINVAL;
1114 		return 0;
1115 	}
1116 
1117 	links_map = dev->data->links_map[profile_id];
1118 	/* Point links_map to this port-specific area */
1119 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1120 
1121 	if (queues == NULL) {
1122 		j = 0;
1123 		for (i = 0; i < dev->data->nb_queues; i++) {
1124 			if (links_map[i] !=
1125 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1126 				all_queues[j] = i;
1127 				j++;
1128 			}
1129 		}
1130 		queues = all_queues;
1131 	} else {
1132 		for (j = 0; j < nb_unlinks; j++) {
1133 			if (links_map[queues[j]] ==
1134 					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
1135 				break;
1136 		}
1137 	}
1138 
1139 	nb_unlinks = j;
1140 	for (i = 0; i < nb_unlinks; i++)
1141 		if (queues[i] >= dev->data->nb_queues) {
1142 			rte_errno = EINVAL;
1143 			return 0;
1144 		}
1145 
1146 	if (profile_id)
1147 		diag = (*dev->dev_ops->port_unlink_profile)(dev, dev->data->ports[port_id], queues,
1148 							    nb_unlinks, profile_id);
1149 	else
1150 		diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id], queues,
1151 						    nb_unlinks);
1152 	if (diag < 0)
1153 		return diag;
1154 
1155 	for (i = 0; i < diag; i++)
1156 		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1157 
1158 	rte_eventdev_trace_port_profile_unlink(dev_id, port_id, nb_unlinks, profile_id, diag);
1159 	return diag;
1160 }
1161 
1162 int
1163 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
1164 {
1165 	struct rte_eventdev *dev;
1166 
1167 	rte_eventdev_trace_port_unlinks_in_progress(dev_id, port_id);
1168 
1169 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1170 	dev = &rte_eventdevs[dev_id];
1171 	if (!is_valid_port(dev, port_id)) {
1172 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1173 		return -EINVAL;
1174 	}
1175 
1176 	/* Return 0 if the PMD does not implement unlinks in progress.
1177 	 * This allows PMDs which handle unlink synchronously to not implement
1178 	 * this function at all.
1179 	 */
1180 	if (*dev->dev_ops->port_unlinks_in_progress == NULL)
1181 		return 0;
1182 
1183 	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1184 			dev->data->ports[port_id]);
1185 }
1186 
1187 int
1188 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1189 			 uint8_t queues[], uint8_t priorities[])
1190 {
1191 	struct rte_eventdev *dev;
1192 	uint16_t *links_map;
1193 	int i, count = 0;
1194 
1195 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1196 	dev = &rte_eventdevs[dev_id];
1197 	if (!is_valid_port(dev, port_id)) {
1198 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1199 		return -EINVAL;
1200 	}
1201 
1202 	/* Use the default profile_id. */
1203 	links_map = dev->data->links_map[0];
1204 	/* Point links_map to this port-specific area */
1205 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1206 	for (i = 0; i < dev->data->nb_queues; i++) {
1207 		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1208 			queues[count] = i;
1209 			priorities[count] = (uint8_t)links_map[i];
1210 			++count;
1211 		}
1212 	}
1213 
1214 	rte_eventdev_trace_port_links_get(dev_id, port_id, count);
1215 
1216 	return count;
1217 }
1218 
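/*
 * Sketch: retrieve the current links of a port; the output arrays must
 * hold up to RTE_EVENT_MAX_QUEUES_PER_DEV entries.
 *
 *	uint8_t q[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	uint8_t prio[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	int n = rte_event_port_links_get(dev_id, port_id, q, prio);
 *	for (int i = 0; i < n; i++)
 *		printf("queue %u prio %u\n", q[i], prio[i]);
 */
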
1219 int
1220 rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1221 				 uint8_t priorities[], uint8_t profile_id)
1222 {
1223 	struct rte_event_dev_info info;
1224 	struct rte_eventdev *dev;
1225 	uint16_t *links_map;
1226 	int i, count = 0;
1227 
1228 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1229 
1230 	dev = &rte_eventdevs[dev_id];
1231 	if (*dev->dev_ops->dev_infos_get == NULL)
1232 		return -ENOTSUP;
1233 
1234 	(*dev->dev_ops->dev_infos_get)(dev, &info);
1235 	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
1236 	    profile_id >= info.max_profiles_per_port) {
1237 		RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
1238 		return -EINVAL;
1239 	}
1240 
1241 	if (!is_valid_port(dev, port_id)) {
1242 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1243 		return -EINVAL;
1244 	}
1245 
1246 	links_map = dev->data->links_map[profile_id];
1247 	/* Point links_map to this port-specific area */
1248 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1249 	for (i = 0; i < dev->data->nb_queues; i++) {
1250 		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1251 			queues[count] = i;
1252 			priorities[count] = (uint8_t)links_map[i];
1253 			++count;
1254 		}
1255 	}
1256 
1257 	rte_eventdev_trace_port_profile_links_get(dev_id, port_id, profile_id, count);
1258 
1259 	return count;
1260 }
1261 
1262 int
1263 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1264 				 uint64_t *timeout_ticks)
1265 {
1266 	struct rte_eventdev *dev;
1267 
1268 	rte_eventdev_trace_dequeue_timeout_ticks(dev_id, ns, timeout_ticks);
1269 
1270 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1271 	dev = &rte_eventdevs[dev_id];
1272 	if (*dev->dev_ops->timeout_ticks == NULL)
1273 		return -ENOTSUP;
1274 
1275 	if (timeout_ticks == NULL)
1276 		return -EINVAL;
1277 
1278 	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1279 }
1280 
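/*
 * Sketch: convert a 100 microsecond wait into the device's tick unit for
 * use as the timeout_ticks argument of rte_event_dequeue_burst().
 *
 *	uint64_t ticks = 0;
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) != 0)
 *		ticks = 0; // driver cannot convert; poll instead
 */
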
1281 int
1282 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1283 {
1284 	struct rte_eventdev *dev;
1285 
1286 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1287 	dev = &rte_eventdevs[dev_id];
1288 
1289 	if (service_id == NULL)
1290 		return -EINVAL;
1291 
1292 	if (!dev->data->service_inited)
1293 		return -ESRCH;
1294 
1295 	*service_id = dev->data->service_id;
1296 	rte_eventdev_trace_service_id_get(dev_id, *service_id);
1297 	return 0;
1298 }
1299 
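/*
 * Sketch: software PMDs expose scheduling as a service; one way to run it,
 * assuming lcore_id is a spare core (rte_service APIs from rte_service.h):
 *
 *	uint32_t sid;
 *	if (rte_event_dev_service_id_get(dev_id, &sid) == 0) {
 *		rte_service_lcore_add(lcore_id);
 *		rte_service_map_lcore_set(sid, lcore_id, 1);
 *		rte_service_runstate_set(sid, 1);
 *		rte_service_lcore_start(lcore_id);
 *	}
 */
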
1300 int
1301 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1302 {
1303 	struct rte_eventdev *dev;
1304 
1305 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1306 	dev = &rte_eventdevs[dev_id];
1307 	if (*dev->dev_ops->dump == NULL)
1308 		return -ENOTSUP;
1309 	if (f == NULL)
1310 		return -EINVAL;
1311 
1312 	(*dev->dev_ops->dump)(dev, f);
1313 	return 0;
1314 
1315 }
1316 
1317 static int
1318 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1319 		uint8_t queue_port_id)
1320 {
1321 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1322 	if (dev->dev_ops->xstats_get_names != NULL)
1323 		return (*dev->dev_ops->xstats_get_names)(dev, mode,
1324 							queue_port_id,
1325 							NULL, NULL, 0);
1326 	return 0;
1327 }
1328 
1329 int
1330 rte_event_dev_xstats_names_get(uint8_t dev_id,
1331 		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1332 		struct rte_event_dev_xstats_name *xstats_names,
1333 		uint64_t *ids, unsigned int size)
1334 {
1335 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1336 	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1337 							  queue_port_id);
1338 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
1339 			(int)size < cnt_expected_entries)
1340 		return cnt_expected_entries;
1341 
1342 	/* dev_id checked above */
1343 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1344 
1345 	if (dev->dev_ops->xstats_get_names != NULL)
1346 		return (*dev->dev_ops->xstats_get_names)(dev, mode,
1347 				queue_port_id, xstats_names, ids, size);
1348 
1349 	return -ENOTSUP;
1350 }
1351 
1352 /* retrieve eventdev extended statistics */
1353 int
1354 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1355 		uint8_t queue_port_id, const uint64_t ids[],
1356 		uint64_t values[], unsigned int n)
1357 {
1358 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1359 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1360 
1361 	/* implemented by the driver */
1362 	if (dev->dev_ops->xstats_get != NULL)
1363 		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1364 				ids, values, n);
1365 	return -ENOTSUP;
1366 }
1367 
1368 uint64_t
1369 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1370 		uint64_t *id)
1371 {
1372 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1373 	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1374 	uint64_t temp = -1;
1375 
1376 	if (id != NULL)
1377 		*id = (uint64_t)-1;
1378 	else
1379 		id = &temp; /* ensure driver never gets a NULL value */
1380 
1381 	/* implemented by driver */
1382 	if (dev->dev_ops->xstats_get_by_name != NULL)
1383 		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1384 	return -ENOTSUP;
1385 }
1386 
1387 int rte_event_dev_xstats_reset(uint8_t dev_id,
1388 		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1389 		const uint64_t ids[], uint32_t nb_ids)
1390 {
1391 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1392 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1393 
1394 	if (dev->dev_ops->xstats_reset != NULL)
1395 		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1396 							ids, nb_ids);
1397 	return -ENOTSUP;
1398 }
1399 
1400 int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
1401 
1402 int rte_event_dev_selftest(uint8_t dev_id)
1403 {
1404 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1405 	static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
1406 		.name = "rte_event_pmd_selftest_seqn_dynfield",
1407 		.size = sizeof(rte_event_pmd_selftest_seqn_t),
1408 		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
1409 	};
1410 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1411 
1412 	if (dev->dev_ops->dev_selftest != NULL) {
1413 		rte_event_pmd_selftest_seqn_dynfield_offset =
1414 			rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
1415 		if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
1416 			return -ENOMEM;
1417 		return (*dev->dev_ops->dev_selftest)();
1418 	}
1419 	return -ENOTSUP;
1420 }
1421 
1422 struct rte_mempool *
1423 rte_event_vector_pool_create(const char *name, unsigned int n,
1424 			     unsigned int cache_size, uint16_t nb_elem,
1425 			     int socket_id)
1426 {
1427 	const char *mp_ops_name;
1428 	struct rte_mempool *mp;
1429 	unsigned int elt_sz;
1430 	int ret;
1431 
1432 	if (!nb_elem) {
1433 		RTE_EDEV_LOG_ERR("Invalid number of elements=%d requested",
1434 			nb_elem);
1435 		rte_errno = EINVAL;
1436 		return NULL;
1437 	}
1438 
1439 	elt_sz =
1440 		sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
1441 	mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
1442 				      0);
1443 	if (mp == NULL)
1444 		return NULL;
1445 
1446 	mp_ops_name = rte_mbuf_best_mempool_ops();
1447 	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
1448 	if (ret != 0) {
1449 		RTE_EDEV_LOG_ERR("error setting mempool handler");
1450 		goto err;
1451 	}
1452 
1453 	ret = rte_mempool_populate_default(mp);
1454 	if (ret < 0)
1455 		goto err;
1456 
1457 	rte_eventdev_trace_vector_pool_create(mp, mp->name, mp->socket_id,
1458 		mp->size, mp->cache_size, mp->elt_size);
1459 
1460 	return mp;
1461 err:
1462 	rte_mempool_free(mp);
1463 	rte_errno = -ret;
1464 	return NULL;
1465 }
1466 
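/*
 * Sketch: create a pool of event vectors able to carry 32 pointers each
 * (name and sizes illustrative).
 *
 *	struct rte_mempool *vp = rte_event_vector_pool_create(
 *		"evt_vec_pool", 16384, 128, 32, rte_socket_id());
 *	if (vp == NULL)
 *		; // rte_errno holds the failure reason
 */
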
1467 int
1468 rte_event_dev_start(uint8_t dev_id)
1469 {
1470 	struct rte_eventdev *dev;
1471 	int diag;
1472 
1473 	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1474 
1475 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1476 	dev = &rte_eventdevs[dev_id];
1477 	if (*dev->dev_ops->dev_start == NULL)
1478 		return -ENOTSUP;
1479 
1480 	if (dev->data->dev_started != 0) {
1481 		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1482 			dev_id);
1483 		return 0;
1484 	}
1485 
1486 	diag = (*dev->dev_ops->dev_start)(dev);
1487 	rte_eventdev_trace_start(dev_id, diag);
1488 	if (diag == 0)
1489 		dev->data->dev_started = 1;
1490 	else
1491 		return diag;
1492 
1493 	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
1494 
1495 	return 0;
1496 }
1497 
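/*
 * Sketch of the lifecycle handled here and by rte_event_dev_stop() and
 * rte_event_dev_close() below: configure -> queue/port setup -> link ->
 * start -> ... -> stop -> close.
 *
 *	if (rte_event_dev_start(dev_id) == 0) {
 *		// enqueue/dequeue on ports ...
 *		rte_event_dev_stop(dev_id);
 *		rte_event_dev_close(dev_id);
 *	}
 */
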
1498 int
1499 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1500 					   rte_eventdev_stop_flush_t callback,
1501 					   void *userdata)
1502 {
1503 	struct rte_eventdev *dev;
1504 
1505 	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1506 
1507 	rte_eventdev_trace_stop_flush_callback_register(dev_id, callback, userdata);
1508 
1509 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1510 	dev = &rte_eventdevs[dev_id];
1511 
1512 	dev->dev_ops->dev_stop_flush = callback;
1513 	dev->data->dev_stop_flush_arg = userdata;
1514 
1515 	return 0;
1516 }
1517 
1518 void
1519 rte_event_dev_stop(uint8_t dev_id)
1520 {
1521 	struct rte_eventdev *dev;
1522 
1523 	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1524 
1525 	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1526 	dev = &rte_eventdevs[dev_id];
1527 	if (*dev->dev_ops->dev_stop == NULL)
1528 		return;
1529 
1530 	if (dev->data->dev_started == 0) {
1531 		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1532 			dev_id);
1533 		return;
1534 	}
1535 
1536 	dev->data->dev_started = 0;
1537 	(*dev->dev_ops->dev_stop)(dev);
1538 	rte_eventdev_trace_stop(dev_id);
1539 	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1540 }
1541 
1542 int
1543 rte_event_dev_close(uint8_t dev_id)
1544 {
1545 	struct rte_eventdev *dev;
1546 
1547 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1548 	dev = &rte_eventdevs[dev_id];
1549 	if (*dev->dev_ops->dev_close == NULL)
1550 		return -ENOTSUP;
1551 
1552 	/* Device must be stopped before it can be closed */
1553 	if (dev->data->dev_started == 1) {
1554 		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1555 				dev_id);
1556 		return -EBUSY;
1557 	}
1558 
1559 	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1560 	rte_eventdev_trace_close(dev_id);
1561 	return (*dev->dev_ops->dev_close)(dev);
1562 }
1563 
1564 static inline int
1565 eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1566 		    int socket_id)
1567 {
1568 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1569 	const struct rte_memzone *mz;
1570 	int i, n;
1571 
1572 	/* Generate memzone name */
1573 	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1574 	if (n >= (int)sizeof(mz_name))
1575 		return -EINVAL;
1576 
1577 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1578 		mz = rte_memzone_reserve(mz_name,
1579 				sizeof(struct rte_eventdev_data),
1580 				socket_id, 0);
1581 	} else
1582 		mz = rte_memzone_lookup(mz_name);
1583 
1584 	if (mz == NULL)
1585 		return -ENOMEM;
1586 
1587 	*data = mz->addr;
1588 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1589 		memset(*data, 0, sizeof(struct rte_eventdev_data));
1590 		for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++)
1591 			for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV * RTE_EVENT_MAX_QUEUES_PER_DEV;
1592 			     n++)
1593 				(*data)->links_map[i][n] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1594 	}
1595 
1596 	return 0;
1597 }
1598 
1599 static inline uint8_t
1600 eventdev_find_free_device_index(void)
1601 {
1602 	uint8_t dev_id;
1603 
1604 	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1605 		if (rte_eventdevs[dev_id].attached ==
1606 				RTE_EVENTDEV_DETACHED)
1607 			return dev_id;
1608 	}
1609 	return RTE_EVENT_MAX_DEVS;
1610 }
1611 
1612 struct rte_eventdev *
1613 rte_event_pmd_allocate(const char *name, int socket_id)
1614 {
1615 	struct rte_eventdev *eventdev;
1616 	uint8_t dev_id;
1617 
1618 	if (rte_event_pmd_get_named_dev(name) != NULL) {
1619 		RTE_EDEV_LOG_ERR("Event device with name %s already "
1620 				"allocated!", name);
1621 		return NULL;
1622 	}
1623 
1624 	dev_id = eventdev_find_free_device_index();
1625 	if (dev_id == RTE_EVENT_MAX_DEVS) {
1626 		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1627 		return NULL;
1628 	}
1629 
1630 	eventdev = &rte_eventdevs[dev_id];
1631 
1632 	if (eventdev->data == NULL) {
1633 		struct rte_eventdev_data *eventdev_data = NULL;
1634 
1635 		int retval =
1636 			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
1637 
1638 		if (retval < 0 || eventdev_data == NULL)
1639 			return NULL;
1640 
1641 		eventdev->data = eventdev_data;
1642 
1643 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1644 
1645 			strlcpy(eventdev->data->name, name,
1646 				RTE_EVENTDEV_NAME_MAX_LEN);
1647 
1648 			eventdev->data->dev_id = dev_id;
1649 			eventdev->data->socket_id = socket_id;
1650 			eventdev->data->dev_started = 0;
1651 		}
1652 
1653 		eventdev->attached = RTE_EVENTDEV_ATTACHED;
1654 		eventdev_globals.nb_devs++;
1655 	}
1656 
1657 	return eventdev;
1658 }
1659 
1660 int
1661 rte_event_pmd_release(struct rte_eventdev *eventdev)
1662 {
1663 	int ret;
1664 	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1665 	const struct rte_memzone *mz;
1666 
1667 	if (eventdev == NULL)
1668 		return -EINVAL;
1669 
1670 	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
1671 	eventdev->attached = RTE_EVENTDEV_DETACHED;
1672 	eventdev_globals.nb_devs--;
1673 
1674 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1675 		rte_free(eventdev->data->dev_private);
1676 
1677 		/* Generate memzone name */
1678 		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1679 				eventdev->data->dev_id);
1680 		if (ret >= (int)sizeof(mz_name))
1681 			return -EINVAL;
1682 
1683 		mz = rte_memzone_lookup(mz_name);
1684 		if (mz == NULL)
1685 			return -ENOMEM;
1686 
1687 		ret = rte_memzone_free(mz);
1688 		if (ret)
1689 			return ret;
1690 	}
1691 
1692 	eventdev->data = NULL;
1693 	return 0;
1694 }
1695 
1696 void
1697 event_dev_probing_finish(struct rte_eventdev *eventdev)
1698 {
1699 	if (eventdev == NULL)
1700 		return;
1701 
1702 	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
1703 			     eventdev);
1704 }
1705 
1706 static int
1707 handle_dev_list(const char *cmd __rte_unused,
1708 		const char *params __rte_unused,
1709 		struct rte_tel_data *d)
1710 {
1711 	uint8_t dev_id;
1712 	int ndev = rte_event_dev_count();
1713 
1714 	if (ndev < 1)
1715 		return -1;
1716 
1717 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1718 	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1719 		if (rte_eventdevs[dev_id].attached ==
1720 				RTE_EVENTDEV_ATTACHED)
1721 			rte_tel_data_add_array_int(d, dev_id);
1722 	}
1723 
1724 	return 0;
1725 }
1726 
1727 static int
1728 handle_port_list(const char *cmd __rte_unused,
1729 		 const char *params,
1730 		 struct rte_tel_data *d)
1731 {
1732 	int i;
1733 	uint8_t dev_id;
1734 	struct rte_eventdev *dev;
1735 	char *end_param;
1736 
1737 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1738 		return -1;
1739 
1740 	dev_id = strtoul(params, &end_param, 10);
1741 	if (*end_param != '\0')
1742 		RTE_EDEV_LOG_DEBUG(
1743 			"Extra parameters passed to eventdev telemetry command, ignoring");
1744 
1745 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1746 	dev = &rte_eventdevs[dev_id];
1747 
1748 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1749 	for (i = 0; i < dev->data->nb_ports; i++)
1750 		rte_tel_data_add_array_int(d, i);
1751 
1752 	return 0;
1753 }
1754 
1755 static int
1756 handle_queue_list(const char *cmd __rte_unused,
1757 		  const char *params,
1758 		  struct rte_tel_data *d)
1759 {
1760 	int i;
1761 	uint8_t dev_id;
1762 	struct rte_eventdev *dev;
1763 	char *end_param;
1764 
1765 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1766 		return -1;
1767 
1768 	dev_id = strtoul(params, &end_param, 10);
1769 	if (*end_param != '\0')
1770 		RTE_EDEV_LOG_DEBUG(
1771 			"Extra parameters passed to eventdev telemetry command, ignoring");
1772 
1773 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1774 	dev = &rte_eventdevs[dev_id];
1775 
1776 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1777 	for (i = 0; i < dev->data->nb_queues; i++)
1778 		rte_tel_data_add_array_int(d, i);
1779 
1780 	return 0;
1781 }
1782 
1783 static int
1784 handle_queue_links(const char *cmd __rte_unused,
1785 		   const char *params,
1786 		   struct rte_tel_data *d)
1787 {
1788 	int i, ret, port_id = 0;
1789 	char *end_param;
1790 	uint8_t dev_id;
1791 	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1792 	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1793 	const char *p_param;
1794 
1795 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1796 		return -1;
1797 
1798 	/* Get dev ID from parameter string */
1799 	dev_id = strtoul(params, &end_param, 10);
1800 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1801 
1802 	p_param = strtok(end_param, ",");
1803 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1804 		return -1;
1805 
1806 	port_id = strtoul(p_param, &end_param, 10);
1807 	p_param = strtok(NULL, "\0");
1808 	if (p_param != NULL)
1809 		RTE_EDEV_LOG_DEBUG(
1810 			"Extra parameters passed to eventdev telemetry command, ignoring");
1811 
1812 	ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
1813 	if (ret < 0)
1814 		return -1;
1815 
1816 	rte_tel_data_start_dict(d);
1817 	for (i = 0; i < ret; i++) {
1818 		char qid_name[32];
1819 
1820 		snprintf(qid_name, sizeof(qid_name), "qid_%u", queues[i]);
1821 		rte_tel_data_add_dict_uint(d, qid_name, priorities[i]);
1822 	}
1823 
1824 	return 0;
1825 }
1826 
1827 static int
1828 eventdev_build_telemetry_data(int dev_id,
1829 			      enum rte_event_dev_xstats_mode mode,
1830 			      int port_queue_id,
1831 			      struct rte_tel_data *d)
1832 {
1833 	struct rte_event_dev_xstats_name *xstat_names;
1834 	uint64_t *ids;
1835 	uint64_t *values;
1836 	int i, ret, num_xstats;
1837 
1838 	num_xstats = rte_event_dev_xstats_names_get(dev_id,
1839 						    mode,
1840 						    port_queue_id,
1841 						    NULL,
1842 						    NULL,
1843 						    0);
1844 
1845 	if (num_xstats < 0)
1846 		return -1;
1847 
1848 	/* use one malloc for names */
1849 	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
1850 			     * num_xstats);
1851 	if (xstat_names == NULL)
1852 		return -1;
1853 
1854 	ids = malloc((sizeof(uint64_t)) * num_xstats);
1855 	if (ids == NULL) {
1856 		free(xstat_names);
1857 		return -1;
1858 	}
1859 
1860 	values = malloc((sizeof(uint64_t)) * num_xstats);
1861 	if (values == NULL) {
1862 		free(xstat_names);
1863 		free(ids);
1864 		return -1;
1865 	}
1866 
1867 	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
1868 					     xstat_names, ids, num_xstats);
1869 	if (ret < 0 || ret > num_xstats) {
1870 		free(xstat_names);
1871 		free(ids);
1872 		free(values);
1873 		return -1;
1874 	}
1875 
1876 	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
1877 				       ids, values, num_xstats);
1878 	if (ret < 0 || ret > num_xstats) {
1879 		free(xstat_names);
1880 		free(ids);
1881 		free(values);
1882 		return -1;
1883 	}
1884 
1885 	rte_tel_data_start_dict(d);
1886 	for (i = 0; i < num_xstats; i++)
1887 		rte_tel_data_add_dict_uint(d, xstat_names[i].name, values[i]);
1888 
1889 	free(xstat_names);
1890 	free(ids);
1891 	free(values);
1892 	return 0;
1893 }
1894 
1895 static int
1896 handle_dev_xstats(const char *cmd __rte_unused,
1897 		  const char *params,
1898 		  struct rte_tel_data *d)
1899 {
1900 	int dev_id;
1901 	enum rte_event_dev_xstats_mode mode;
1902 	char *end_param;
1903 
1904 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1905 		return -1;
1906 
1907 	/* Get dev ID from parameter string */
1908 	dev_id = strtoul(params, &end_param, 10);
1909 	if (*end_param != '\0')
1910 		RTE_EDEV_LOG_DEBUG(
1911 			"Extra parameters passed to eventdev telemetry command, ignoring");
1912 
1913 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1914 
1915 	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
1916 	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
1917 }
1918 
1919 static int
1920 handle_port_xstats(const char *cmd __rte_unused,
1921 		   const char *params,
1922 		   struct rte_tel_data *d)
1923 {
1924 	int dev_id;
1925 	int port_queue_id = 0;
1926 	enum rte_event_dev_xstats_mode mode;
1927 	char *end_param;
1928 	const char *p_param;
1929 
1930 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1931 		return -1;
1932 
1933 	/* Get dev ID from parameter string */
1934 	dev_id = strtoul(params, &end_param, 10);
1935 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1936 
1937 	p_param = strtok(end_param, ",");
1938 	mode = RTE_EVENT_DEV_XSTATS_PORT;
1939 
1940 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1941 		return -1;
1942 
1943 	port_queue_id = strtoul(p_param, &end_param, 10);
1944 
1945 	p_param = strtok(NULL, "\0");
1946 	if (p_param != NULL)
1947 		RTE_EDEV_LOG_DEBUG(
1948 			"Extra parameters passed to eventdev telemetry command, ignoring");
1949 
1950 	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1951 }
1952 
1953 static int
1954 handle_queue_xstats(const char *cmd __rte_unused,
1955 		    const char *params,
1956 		    struct rte_tel_data *d)
1957 {
1958 	int dev_id;
1959 	int port_queue_id = 0;
1960 	enum rte_event_dev_xstats_mode mode;
1961 	char *end_param;
1962 	const char *p_param;
1963 
1964 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1965 		return -1;
1966 
1967 	/* Get dev ID from parameter string */
1968 	dev_id = strtoul(params, &end_param, 10);
1969 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1970 
1971 	p_param = strtok(end_param, ",");
1972 	mode = RTE_EVENT_DEV_XSTATS_QUEUE;
1973 
1974 	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1975 		return -1;
1976 
1977 	port_queue_id = strtoul(p_param, &end_param, 10);
1978 
1979 	p_param = strtok(NULL, "\0");
1980 	if (p_param != NULL)
1981 		RTE_EDEV_LOG_DEBUG(
1982 			"Extra parameters passed to eventdev telemetry command, ignoring");
1983 
1984 	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1985 }
1986 
1987 static int
1988 handle_dev_dump(const char *cmd __rte_unused,
1989 		const char *params,
1990 		struct rte_tel_data *d)
1991 {
1992 	char *buf, *end_param;
1993 	int dev_id, ret;
1994 	FILE *f;
1995 
1996 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1997 		return -1;
1998 
1999 	/* Get dev ID from parameter string */
2000 	dev_id = strtoul(params, &end_param, 10);
2001 	if (*end_param != '\0')
2002 		RTE_EDEV_LOG_DEBUG(
2003 			"Extra parameters passed to eventdev telemetry command, ignoring");
2004 
2005 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2006 
2007 	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
2008 	if (buf == NULL)
2009 		return -ENOMEM;
2010 
2011 	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
2012 	if (f == NULL) {
2013 		free(buf);
2014 		return -EINVAL;
2015 	}
2016 
2017 	ret = rte_event_dev_dump(dev_id, f);
2018 	fclose(f);
2019 	if (ret == 0) {
2020 		rte_tel_data_start_dict(d);
2021 		rte_tel_data_string(d, buf);
2022 	}
2023 
2024 	free(buf);
2025 	return ret;
2026 }
2027 
2028 RTE_INIT(eventdev_init_telemetry)
2029 {
2030 	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
2031 			"Returns list of available eventdevs. Takes no parameters");
2032 	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
2033 			"Returns list of available ports. Parameter: DevID");
2034 	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
2035 			"Returns list of available queues. Parameter: DevID");
2036 
2037 	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
2038 			"Returns stats for an eventdev. Parameter: DevID");
2039 	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
2040 			"Returns stats for an eventdev port. Params: DevID,PortID");
2041 	rte_telemetry_register_cmd("/eventdev/queue_xstats",
2042 			handle_queue_xstats,
2043 			"Returns stats for an eventdev queue. Params: DevID,QueueID");
2044 	rte_telemetry_register_cmd("/eventdev/dev_dump", handle_dev_dump,
2045 			"Returns dump information for an eventdev. Parameter: DevID");
2046 	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
2047 			"Returns links for an eventdev port. Params: DevID,QueueID");
2048 }
2049
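/*
 * These commands are served over the telemetry socket; for example, with
 * the usertools/dpdk-telemetry.py client one could issue (dev 0, port 1):
 *
 *	--> /eventdev/dev_list
 *	--> /eventdev/port_xstats,0,1
 */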