1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
3 */
4
5 #include <stdalign.h>
6 #include <ctype.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13
14 #include <rte_string_fns.h>
15 #include <rte_log.h>
16 #include <dev_driver.h>
17 #include <rte_memzone.h>
18 #include <rte_eal.h>
19 #include <rte_common.h>
20 #include <rte_malloc.h>
21 #include <rte_errno.h>
22 #include <ethdev_driver.h>
23 #include <rte_cryptodev.h>
24 #include <rte_dmadev.h>
25 #include <cryptodev_pmd.h>
26 #include <rte_telemetry.h>
27
28 #include "rte_eventdev.h"
29 #include "eventdev_pmd.h"
30 #include "eventdev_trace.h"
31
32 RTE_LOG_REGISTER_DEFAULT(rte_event_logtype, INFO);
33
34 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
35
36 struct rte_eventdev *rte_eventdevs = rte_event_devices;
37
38 static struct rte_eventdev_global eventdev_globals = {
39 .nb_devs = 0
40 };
41
42 /* Public fastpath APIs. */
43 struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
44
45 /* Event dev north bound API implementation */
46
47 uint8_t
rte_event_dev_count(void)
49 {
50 return eventdev_globals.nb_devs;
51 }
52
53 int
rte_event_dev_get_dev_id(const char *name)
55 {
56 int i;
57 uint8_t cmp;
58
59 if (!name)
60 return -EINVAL;
61
62 for (i = 0; i < eventdev_globals.nb_devs; i++) {
63 cmp = (strncmp(rte_event_devices[i].data->name, name,
64 RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
65 (rte_event_devices[i].dev ? (strncmp(
66 rte_event_devices[i].dev->driver->name, name,
67 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
68 if (cmp && (rte_event_devices[i].attached ==
69 RTE_EVENTDEV_ATTACHED)) {
70 rte_eventdev_trace_get_dev_id(name, i);
71 return i;
72 }
73 }
74 return -ENODEV;
75 }
76
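/*
 * Illustrative usage sketch (not part of the library): an application can
 * enumerate the attached event devices and look one up by device or driver
 * name. The name "event_sw0" below is only an assumption for the example.
 *
 *	uint8_t nb_devs = rte_event_dev_count();
 *	int dev_id = rte_event_dev_get_dev_id("event_sw0");
 *
 *	if (dev_id < 0)
 *		printf("no such eventdev, %u device(s) available\n", nb_devs);
 */
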
77 int
rte_event_dev_socket_id(uint8_t dev_id)
79 {
80 struct rte_eventdev *dev;
81
82 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
83 dev = &rte_eventdevs[dev_id];
84
85 rte_eventdev_trace_socket_id(dev_id, dev, dev->data->socket_id);
86
87 return dev->data->socket_id;
88 }
89
90 int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
92 {
93 struct rte_eventdev *dev;
94
95 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
96 dev = &rte_eventdevs[dev_id];
97
98 if (dev_info == NULL)
99 return -EINVAL;
100
101 memset(dev_info, 0, sizeof(struct rte_event_dev_info));
102
103 if (*dev->dev_ops->dev_infos_get == NULL)
104 return -ENOTSUP;
105 (*dev->dev_ops->dev_infos_get)(dev, dev_info);
106
107 dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
108
109 dev_info->dev = dev->dev;
110 if (dev->dev != NULL && dev->dev->driver != NULL)
111 dev_info->driver_name = dev->dev->driver->name;
112
113 rte_eventdev_trace_info_get(dev_id, dev_info, dev_info->dev);
114
115 return 0;
116 }
117
118 int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				  uint32_t *caps)
121 {
122 struct rte_eventdev *dev;
123
124 rte_eventdev_trace_eth_rx_adapter_caps_get(dev_id, eth_port_id);
125
126 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
127 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
128
129 dev = &rte_eventdevs[dev_id];
130
131 if (caps == NULL)
132 return -EINVAL;
133
134 if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
135 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
136 else
137 *caps = 0;
138
139 return dev->dev_ops->eth_rx_adapter_caps_get ?
140 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
141 &rte_eth_devices[eth_port_id],
142 caps)
143 : 0;
144 }
145
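/*
 * Illustrative sketch (not part of the library): the adapter capability
 * queries all follow the same pattern - fetch a capability bitmask and branch
 * on it. Ethernet port 0 is an assumption, and the capability flag name is
 * taken from the Rx adapter header.
 *
 *	uint32_t caps = 0;
 *	bool has_internal_port = false;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, 0, &caps) == 0)
 *		has_internal_port =
 *			!!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
 *
 * When the flag is not set, the transfer falls back to a software service
 * core based implementation.
 */
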
146 int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
148 {
149 struct rte_eventdev *dev;
150 const struct event_timer_adapter_ops *ops;
151
152 rte_eventdev_trace_timer_adapter_caps_get(dev_id);
153
154 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
155
156 dev = &rte_eventdevs[dev_id];
157
158 if (caps == NULL)
159 return -EINVAL;
160
161 if (dev->dev_ops->timer_adapter_caps_get == NULL)
162 *caps = RTE_EVENT_TIMER_ADAPTER_SW_CAP;
163 else
164 *caps = 0;
165
166 return dev->dev_ops->timer_adapter_caps_get ?
167 (*dev->dev_ops->timer_adapter_caps_get)(dev,
168 0,
169 caps,
170 &ops)
171 : 0;
172 }
173
174 int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
				  uint32_t *caps)
177 {
178 struct rte_eventdev *dev;
179 struct rte_cryptodev *cdev;
180
181 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
182 if (!rte_cryptodev_is_valid_dev(cdev_id))
183 return -EINVAL;
184
185 dev = &rte_eventdevs[dev_id];
186 cdev = rte_cryptodev_pmd_get_dev(cdev_id);
187
188 rte_eventdev_trace_crypto_adapter_caps_get(dev_id, dev, cdev_id, cdev);
189
190 if (caps == NULL)
191 return -EINVAL;
192
193 if (dev->dev_ops->crypto_adapter_caps_get == NULL)
194 *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
195 else
196 *caps = 0;
197
198 return dev->dev_ops->crypto_adapter_caps_get ?
199 (*dev->dev_ops->crypto_adapter_caps_get)
200 (dev, cdev, caps) : 0;
201 }
202
203 int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				  uint32_t *caps)
206 {
207 struct rte_eventdev *dev;
208 struct rte_eth_dev *eth_dev;
209
210 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
211 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
212
213 dev = &rte_eventdevs[dev_id];
214 eth_dev = &rte_eth_devices[eth_port_id];
215
216 rte_eventdev_trace_eth_tx_adapter_caps_get(dev_id, dev, eth_port_id, eth_dev);
217
218 if (caps == NULL)
219 return -EINVAL;
220
221 if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
222 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
223 else
224 *caps = 0;
225
226 return dev->dev_ops->eth_tx_adapter_caps_get ?
227 (*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
228 eth_dev,
229 caps)
230 : 0;
231 }
232
233 int
rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dma_dev_id, uint32_t *caps)
235 {
236 struct rte_eventdev *dev;
237
238 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
239 if (!rte_dma_is_valid(dma_dev_id))
240 return -EINVAL;
241
242 dev = &rte_eventdevs[dev_id];
243
244 if (caps == NULL)
245 return -EINVAL;
246
247 *caps = 0;
248
249 if (dev->dev_ops->dma_adapter_caps_get)
250 return (*dev->dev_ops->dma_adapter_caps_get)(dev, dma_dev_id, caps);
251
252 return 0;
253 }
254
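/*
 * Resize the per-device queue bookkeeping: release queues that fall beyond
 * the new count and zero the configuration slots of newly added queues.
 * Called from rte_event_dev_configure(); nb_queues == 0 releases all queues.
 */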
255 static inline int
event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
257 {
258 uint8_t old_nb_queues = dev->data->nb_queues;
259 struct rte_event_queue_conf *queues_cfg;
260 unsigned int i;
261
262 RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
263 dev->data->dev_id);
264
265 if (nb_queues != 0) {
266 queues_cfg = dev->data->queues_cfg;
267 if (*dev->dev_ops->queue_release == NULL)
268 return -ENOTSUP;
269
270 for (i = nb_queues; i < old_nb_queues; i++)
271 (*dev->dev_ops->queue_release)(dev, i);
272
274 if (nb_queues > old_nb_queues) {
275 uint8_t new_qs = nb_queues - old_nb_queues;
276
277 memset(queues_cfg + old_nb_queues, 0,
278 sizeof(queues_cfg[0]) * new_qs);
279 }
280 } else {
281 if (*dev->dev_ops->queue_release == NULL)
282 return -ENOTSUP;
283
284 for (i = nb_queues; i < old_nb_queues; i++)
285 (*dev->dev_ops->queue_release)(dev, i);
286 }
287
288 dev->data->nb_queues = nb_queues;
289 return 0;
290 }
291
292 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
293
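/*
 * Resize the per-device port bookkeeping: release ports beyond the new count,
 * zero the slots of newly added ports and invalidate their queue links.
 * Called from rte_event_dev_configure(); nb_ports == 0 releases all ports.
 */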
294 static inline int
event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
296 {
297 uint8_t old_nb_ports = dev->data->nb_ports;
298 void **ports;
299 uint16_t *links_map;
300 struct rte_event_port_conf *ports_cfg;
301 unsigned int i, j;
302
303 RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
304 dev->data->dev_id);
305
306 if (nb_ports != 0) { /* re-config */
307 if (*dev->dev_ops->port_release == NULL)
308 return -ENOTSUP;
309
310 ports = dev->data->ports;
311 ports_cfg = dev->data->ports_cfg;
312
313 for (i = nb_ports; i < old_nb_ports; i++)
314 (*dev->dev_ops->port_release)(ports[i]);
315
316 if (nb_ports > old_nb_ports) {
317 uint8_t new_ps = nb_ports - old_nb_ports;
318 unsigned int old_links_map_end =
319 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
320 unsigned int links_map_end =
321 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
322
323 memset(ports + old_nb_ports, 0,
324 sizeof(ports[0]) * new_ps);
325 memset(ports_cfg + old_nb_ports, 0,
326 sizeof(ports_cfg[0]) * new_ps);
327 for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++) {
328 links_map = dev->data->links_map[i];
329 for (j = old_links_map_end; j < links_map_end; j++)
330 links_map[j] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
331 }
332 }
333 } else {
334 if (*dev->dev_ops->port_release == NULL)
335 return -ENOTSUP;
336
337 ports = dev->data->ports;
338 for (i = nb_ports; i < old_nb_ports; i++) {
339 (*dev->dev_ops->port_release)(ports[i]);
340 ports[i] = NULL;
341 }
342 }
343
344 dev->data->nb_ports = nb_ports;
345 return 0;
346 }
347
348 int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
351 {
352 struct rte_event_dev_info info;
353 struct rte_eventdev *dev;
354 int diag;
355
356 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
357 dev = &rte_eventdevs[dev_id];
358
359 if (*dev->dev_ops->dev_infos_get == NULL)
360 return -ENOTSUP;
361 if (*dev->dev_ops->dev_configure == NULL)
362 return -ENOTSUP;
363
364 if (dev->data->dev_started) {
365 RTE_EDEV_LOG_ERR(
366 "device %d must be stopped to allow configuration", dev_id);
367 return -EBUSY;
368 }
369
370 if (dev_conf == NULL)
371 return -EINVAL;
372
373 (*dev->dev_ops->dev_infos_get)(dev, &info);
374
375 /* Check dequeue_timeout_ns value is in limit */
376 if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
377 if (dev_conf->dequeue_timeout_ns &&
378 (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
379 || dev_conf->dequeue_timeout_ns >
380 info.max_dequeue_timeout_ns)) {
381 RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
382 " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
383 dev_id, dev_conf->dequeue_timeout_ns,
384 info.min_dequeue_timeout_ns,
385 info.max_dequeue_timeout_ns);
386 return -EINVAL;
387 }
388 }
389
390 /* Check nb_events_limit is in limit */
391 if (dev_conf->nb_events_limit > info.max_num_events) {
392 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
393 dev_id, dev_conf->nb_events_limit, info.max_num_events);
394 return -EINVAL;
395 }
396
397 /* Check nb_event_queues is in limit */
398 if (!dev_conf->nb_event_queues) {
399 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
400 dev_id);
401 return -EINVAL;
402 }
403 if (dev_conf->nb_event_queues > info.max_event_queues +
404 info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
406 dev_id, dev_conf->nb_event_queues,
407 info.max_event_queues,
408 info.max_single_link_event_port_queue_pairs);
409 return -EINVAL;
410 }
411 if (dev_conf->nb_event_queues -
412 dev_conf->nb_single_link_event_port_queues >
413 info.max_event_queues) {
414 RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
415 dev_id, dev_conf->nb_event_queues,
416 dev_conf->nb_single_link_event_port_queues,
417 info.max_event_queues);
418 return -EINVAL;
419 }
420 if (dev_conf->nb_single_link_event_port_queues >
421 dev_conf->nb_event_queues) {
422 RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
423 dev_id,
424 dev_conf->nb_single_link_event_port_queues,
425 dev_conf->nb_event_queues);
426 return -EINVAL;
427 }
428
429 /* Check nb_event_ports is in limit */
430 if (!dev_conf->nb_event_ports) {
431 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
432 return -EINVAL;
433 }
434 if (dev_conf->nb_event_ports > info.max_event_ports +
435 info.max_single_link_event_port_queue_pairs) {
436 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
437 dev_id, dev_conf->nb_event_ports,
438 info.max_event_ports,
439 info.max_single_link_event_port_queue_pairs);
440 return -EINVAL;
441 }
442 if (dev_conf->nb_event_ports -
443 dev_conf->nb_single_link_event_port_queues
444 > info.max_event_ports) {
445 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
446 dev_id, dev_conf->nb_event_ports,
447 dev_conf->nb_single_link_event_port_queues,
448 info.max_event_ports);
449 return -EINVAL;
450 }
451
452 if (dev_conf->nb_single_link_event_port_queues >
453 dev_conf->nb_event_ports) {
454 RTE_EDEV_LOG_ERR(
455 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
456 dev_id,
457 dev_conf->nb_single_link_event_port_queues,
458 dev_conf->nb_event_ports);
459 return -EINVAL;
460 }
461
462 /* Check nb_event_queue_flows is in limit */
463 if (!dev_conf->nb_event_queue_flows) {
464 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
465 return -EINVAL;
466 }
467 if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
468 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
469 dev_id, dev_conf->nb_event_queue_flows,
470 info.max_event_queue_flows);
471 return -EINVAL;
472 }
473
474 /* Check nb_event_port_dequeue_depth is in limit */
475 if (!dev_conf->nb_event_port_dequeue_depth) {
476 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
477 dev_id);
478 return -EINVAL;
479 }
480 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
481 (dev_conf->nb_event_port_dequeue_depth >
482 info.max_event_port_dequeue_depth)) {
483 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
484 dev_id, dev_conf->nb_event_port_dequeue_depth,
485 info.max_event_port_dequeue_depth);
486 return -EINVAL;
487 }
488
489 /* Check nb_event_port_enqueue_depth is in limit */
490 if (!dev_conf->nb_event_port_enqueue_depth) {
491 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
492 dev_id);
493 return -EINVAL;
494 }
495 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
496 (dev_conf->nb_event_port_enqueue_depth >
497 info.max_event_port_enqueue_depth)) {
498 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
499 dev_id, dev_conf->nb_event_port_enqueue_depth,
500 info.max_event_port_enqueue_depth);
501 return -EINVAL;
502 }
503
504 /* Copy the dev_conf parameter into the dev structure */
505 memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
506
507 /* Setup new number of queues and reconfigure device. */
508 diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
509 if (diag != 0) {
510 RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
511 diag);
512 return diag;
513 }
514
515 /* Setup new number of ports and reconfigure device. */
516 diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
517 if (diag != 0) {
518 event_dev_queue_config(dev, 0);
519 RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
520 diag);
521 return diag;
522 }
523
524 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
525
526 /* Configure the device */
527 diag = (*dev->dev_ops->dev_configure)(dev);
528 if (diag != 0) {
529 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
530 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
531 event_dev_queue_config(dev, 0);
532 event_dev_port_config(dev, 0);
533 }
534
535 dev->data->event_dev_cap = info.event_dev_cap;
536 rte_eventdev_trace_configure(dev_id, dev_conf, diag);
537 return diag;
538 }
539
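/*
 * Illustrative configuration sketch (not part of the library): a minimal
 * application typically sizes the device from rte_event_dev_info_get() and
 * then calls rte_event_dev_configure(). The queue/port counts below are
 * assumptions for the example.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 2;
 *	cfg.nb_event_ports = 2;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		rte_exit(EXIT_FAILURE, "eventdev config failed\n");
 */
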
540 static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
542 {
543 if (queue_id < dev->data->nb_queues && queue_id <
544 RTE_EVENT_MAX_QUEUES_PER_DEV)
545 return 1;
546 else
547 return 0;
548 }
549
550 int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
553 {
554 struct rte_eventdev *dev;
555
556 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
557 dev = &rte_eventdevs[dev_id];
558
559 if (queue_conf == NULL)
560 return -EINVAL;
561
562 if (!is_valid_queue(dev, queue_id)) {
563 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
564 return -EINVAL;
565 }
566
567 if (*dev->dev_ops->queue_def_conf == NULL)
568 return -ENOTSUP;
569 memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
570 (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
571
572 rte_eventdev_trace_queue_default_conf_get(dev_id, dev, queue_id, queue_conf);
573
574 return 0;
575 }
576
577 static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
579 {
580 if (queue_conf &&
581 !(queue_conf->event_queue_cfg &
582 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
583 ((queue_conf->event_queue_cfg &
584 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
585 (queue_conf->schedule_type
586 == RTE_SCHED_TYPE_ATOMIC)
587 ))
588 return 1;
589 else
590 return 0;
591 }
592
593 static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
595 {
596 if (queue_conf &&
597 !(queue_conf->event_queue_cfg &
598 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
599 ((queue_conf->event_queue_cfg &
600 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
601 (queue_conf->schedule_type
602 == RTE_SCHED_TYPE_ORDERED)
603 ))
604 return 1;
605 else
606 return 0;
607 }
608
609
610 int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
613 {
614 struct rte_eventdev *dev;
615 struct rte_event_queue_conf def_conf;
616
617 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
618 dev = &rte_eventdevs[dev_id];
619
620 if (!is_valid_queue(dev, queue_id)) {
621 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
622 return -EINVAL;
623 }
624
625 /* Check nb_atomic_flows limit */
626 if (is_valid_atomic_queue_conf(queue_conf)) {
627 if (queue_conf->nb_atomic_flows == 0 ||
628 queue_conf->nb_atomic_flows >
629 dev->data->dev_conf.nb_event_queue_flows) {
630 RTE_EDEV_LOG_ERR(
631 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
632 dev_id, queue_id, queue_conf->nb_atomic_flows,
633 dev->data->dev_conf.nb_event_queue_flows);
634 return -EINVAL;
635 }
636 }
637
638 /* Check nb_atomic_order_sequences limit */
639 if (is_valid_ordered_queue_conf(queue_conf)) {
640 if (queue_conf->nb_atomic_order_sequences == 0 ||
641 queue_conf->nb_atomic_order_sequences >
642 dev->data->dev_conf.nb_event_queue_flows) {
643 RTE_EDEV_LOG_ERR(
644 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
645 dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
646 dev->data->dev_conf.nb_event_queue_flows);
647 return -EINVAL;
648 }
649 }
650
651 if (dev->data->dev_started) {
652 RTE_EDEV_LOG_ERR(
653 "device %d must be stopped to allow queue setup", dev_id);
654 return -EBUSY;
655 }
656
657 if (*dev->dev_ops->queue_setup == NULL)
658 return -ENOTSUP;
659
660 if (queue_conf == NULL) {
661 if (*dev->dev_ops->queue_def_conf == NULL)
662 return -ENOTSUP;
663 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
664 queue_conf = &def_conf;
665 }
666
667 dev->data->queues_cfg[queue_id] = *queue_conf;
668 rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
669 return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
670 }
671
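/*
 * Illustrative sketch (not part of the library): queues are usually set up
 * right after rte_event_dev_configure(), either with a tweaked default
 * configuration or with queue_conf == NULL to accept the PMD defaults.
 * The queue id qid is a placeholder.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, qid, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	if (rte_event_queue_setup(dev_id, qid, &qconf) < 0)
 *		rte_exit(EXIT_FAILURE, "queue %u setup failed\n", qid);
 */
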
672 static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
674 {
675 if (port_id < dev->data->nb_ports)
676 return 1;
677 else
678 return 0;
679 }
680
681 int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
684 {
685 struct rte_eventdev *dev;
686
687 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
688 dev = &rte_eventdevs[dev_id];
689
690 if (port_conf == NULL)
691 return -EINVAL;
692
693 if (!is_valid_port(dev, port_id)) {
694 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
695 return -EINVAL;
696 }
697
698 if (*dev->dev_ops->port_def_conf == NULL)
699 return -ENOTSUP;
700 memset(port_conf, 0, sizeof(struct rte_event_port_conf));
701 (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
702
703 rte_eventdev_trace_port_default_conf_get(dev_id, dev, port_id, port_conf);
704
705 return 0;
706 }
707
708 int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
711 {
712 struct rte_eventdev *dev;
713 struct rte_event_port_conf def_conf;
714 int diag;
715
716 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
717 dev = &rte_eventdevs[dev_id];
718
719 if (!is_valid_port(dev, port_id)) {
720 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
721 return -EINVAL;
722 }
723
724 /* Check new_event_threshold limit */
725 if ((port_conf && !port_conf->new_event_threshold) ||
726 (port_conf && port_conf->new_event_threshold >
727 dev->data->dev_conf.nb_events_limit)) {
728 RTE_EDEV_LOG_ERR(
729 "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
730 dev_id, port_id, port_conf->new_event_threshold,
731 dev->data->dev_conf.nb_events_limit);
732 return -EINVAL;
733 }
734
735 /* Check dequeue_depth limit */
736 if ((port_conf && !port_conf->dequeue_depth) ||
737 (port_conf && port_conf->dequeue_depth >
738 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
739 RTE_EDEV_LOG_ERR(
740 "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
741 dev_id, port_id, port_conf->dequeue_depth,
742 dev->data->dev_conf.nb_event_port_dequeue_depth);
743 return -EINVAL;
744 }
745
746 /* Check enqueue_depth limit */
747 if ((port_conf && !port_conf->enqueue_depth) ||
748 (port_conf && port_conf->enqueue_depth >
749 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
750 RTE_EDEV_LOG_ERR(
751 "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
752 dev_id, port_id, port_conf->enqueue_depth,
753 dev->data->dev_conf.nb_event_port_enqueue_depth);
754 return -EINVAL;
755 }
756
757 if (port_conf &&
758 (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
759 !(dev->data->event_dev_cap &
760 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
761 RTE_EDEV_LOG_ERR(
762 "dev%d port%d Implicit release disable not supported",
763 dev_id, port_id);
764 return -EINVAL;
765 }
766
767 if (dev->data->dev_started) {
768 RTE_EDEV_LOG_ERR(
769 "device %d must be stopped to allow port setup", dev_id);
770 return -EBUSY;
771 }
772
773 if (*dev->dev_ops->port_setup == NULL)
774 return -ENOTSUP;
775
776 if (port_conf == NULL) {
777 if (*dev->dev_ops->port_def_conf == NULL)
778 return -ENOTSUP;
779 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
780 port_conf = &def_conf;
781 }
782
783 dev->data->ports_cfg[port_id] = *port_conf;
784
785 diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
786
	/* Unlink all the queues from this port (default state after setup) */
788 if (!diag)
789 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
790
791 rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
792 if (diag < 0)
793 return diag;
794
795 return 0;
796 }
797
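/*
 * Illustrative sketch (not part of the library): ports follow the same
 * pattern as queues; passing NULL for port_conf picks up the PMD default
 * configuration reported by rte_event_port_default_conf_get().
 *
 *	if (rte_event_port_setup(dev_id, port_id, NULL) < 0)
 *		rte_exit(EXIT_FAILURE, "port %u setup failed\n", port_id);
 */
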
798 void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		       rte_eventdev_port_flush_t release_cb, void *args)
801 {
802 struct rte_eventdev *dev;
803
804 RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
805 dev = &rte_eventdevs[dev_id];
806
807 rte_eventdev_trace_port_quiesce(dev_id, dev, port_id, args);
808
809 if (!is_valid_port(dev, port_id)) {
810 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
811 return;
812 }
813
814 if (dev->dev_ops->port_quiesce)
815 (*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
816 release_cb, args);
817 }
818
819 int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
822 {
823 struct rte_eventdev *dev;
824
825 if (!attr_value)
826 return -EINVAL;
827 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
828 dev = &rte_eventdevs[dev_id];
829
830 switch (attr_id) {
831 case RTE_EVENT_DEV_ATTR_PORT_COUNT:
832 *attr_value = dev->data->nb_ports;
833 break;
834 case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
835 *attr_value = dev->data->nb_queues;
836 break;
837 case RTE_EVENT_DEV_ATTR_STARTED:
838 *attr_value = dev->data->dev_started;
839 break;
840 default:
841 return -EINVAL;
842 }
843
844 rte_eventdev_trace_attr_get(dev_id, dev, attr_id, *attr_value);
845
846 return 0;
847 }
848
849 int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
852 {
853 struct rte_eventdev *dev;
854
855 if (!attr_value)
856 return -EINVAL;
857
858 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
859 dev = &rte_eventdevs[dev_id];
860 if (!is_valid_port(dev, port_id)) {
861 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
862 return -EINVAL;
863 }
864
865 switch (attr_id) {
866 case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
867 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
868 break;
869 case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
870 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
871 break;
872 case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
873 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
874 break;
875 case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
876 {
877 uint32_t config;
878
879 config = dev->data->ports_cfg[port_id].event_port_cfg;
880 *attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
881 break;
882 }
883 default:
884 return -EINVAL;
	}
886
887 rte_eventdev_trace_port_attr_get(dev_id, dev, port_id, attr_id, *attr_value);
888
889 return 0;
890 }
891
892 int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint32_t *attr_value)
895 {
896 struct rte_event_queue_conf *conf;
897 struct rte_eventdev *dev;
898
899 if (!attr_value)
900 return -EINVAL;
901
902 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
903 dev = &rte_eventdevs[dev_id];
904 if (!is_valid_queue(dev, queue_id)) {
905 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
906 return -EINVAL;
907 }
908
909 conf = &dev->data->queues_cfg[queue_id];
910
911 switch (attr_id) {
912 case RTE_EVENT_QUEUE_ATTR_PRIORITY:
913 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
914 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
915 *attr_value = conf->priority;
916 break;
917 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
918 *attr_value = conf->nb_atomic_flows;
919 break;
920 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
921 *attr_value = conf->nb_atomic_order_sequences;
922 break;
923 case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
924 *attr_value = conf->event_queue_cfg;
925 break;
926 case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
927 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
928 return -EOVERFLOW;
929
930 *attr_value = conf->schedule_type;
931 break;
932 case RTE_EVENT_QUEUE_ATTR_WEIGHT:
933 *attr_value = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
934 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
935 *attr_value = conf->weight;
936 break;
937 case RTE_EVENT_QUEUE_ATTR_AFFINITY:
938 *attr_value = RTE_EVENT_QUEUE_AFFINITY_LOWEST;
939 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
940 *attr_value = conf->affinity;
941 break;
942 default:
943 return -EINVAL;
	}
945
946 rte_eventdev_trace_queue_attr_get(dev_id, dev, queue_id, attr_id, *attr_value);
947
948 return 0;
949 }
950
951 int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint64_t attr_value)
954 {
955 struct rte_eventdev *dev;
956
957 rte_eventdev_trace_queue_attr_set(dev_id, queue_id, attr_id, attr_value);
958
959 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
960 dev = &rte_eventdevs[dev_id];
961 if (!is_valid_queue(dev, queue_id)) {
962 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
963 return -EINVAL;
964 }
965
966 if (!(dev->data->event_dev_cap &
967 RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR)) {
968 RTE_EDEV_LOG_ERR(
			"Device %" PRIu8 " does not support changing queue attributes at runtime",
970 dev_id);
971 return -ENOTSUP;
972 }
973
974 if (*dev->dev_ops->queue_attr_set == NULL)
975 return -ENOTSUP;
976 return (*dev->dev_ops->queue_attr_set)(dev, queue_id, attr_id,
977 attr_value);
978 }
979
980 int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
984 {
985 return rte_event_port_profile_links_set(dev_id, port_id, queues, priorities, nb_links, 0);
986 }
987
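/*
 * Illustrative sketch (not part of the library): passing NULL for both the
 * queue and priority arrays links the port to every configured queue at
 * normal priority. The return value is the number of links established;
 * expected_nb_queues below is a placeholder for the application's own count.
 *
 *	int nb_links = rte_event_port_link(dev_id, port_id, NULL, NULL, 0);
 *
 *	if (nb_links != (int)expected_nb_queues)
 *		rte_exit(EXIT_FAILURE, "link failed: %s\n",
 *			 rte_strerror(rte_errno));
 */
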
988 int
rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id)
991 {
992 uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
993 uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
994 struct rte_event_dev_info info;
995 struct rte_eventdev *dev;
996 uint16_t *links_map;
997 int i, diag;
998
999 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
1000 dev = &rte_eventdevs[dev_id];
1001
1002 if (*dev->dev_ops->dev_infos_get == NULL)
1003 return -ENOTSUP;
1004
1005 (*dev->dev_ops->dev_infos_get)(dev, &info);
1006 if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
1007 profile_id >= info.max_profiles_per_port) {
1008 RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
1009 return -EINVAL;
1010 }
1011
1012 if (*dev->dev_ops->port_link == NULL) {
1013 RTE_EDEV_LOG_ERR("Function not supported");
1014 rte_errno = ENOTSUP;
1015 return 0;
1016 }
1017
1018 if (profile_id && *dev->dev_ops->port_link_profile == NULL) {
1019 RTE_EDEV_LOG_ERR("Function not supported");
1020 rte_errno = ENOTSUP;
1021 return 0;
1022 }
1023
1024 if (!is_valid_port(dev, port_id)) {
1025 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1026 rte_errno = EINVAL;
1027 return 0;
1028 }
1029
1030 if (queues == NULL) {
1031 for (i = 0; i < dev->data->nb_queues; i++)
1032 queues_list[i] = i;
1033
1034 queues = queues_list;
1035 nb_links = dev->data->nb_queues;
1036 }
1037
1038 if (priorities == NULL) {
1039 for (i = 0; i < nb_links; i++)
1040 priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
1041
1042 priorities = priorities_list;
1043 }
1044
1045 for (i = 0; i < nb_links; i++)
1046 if (queues[i] >= dev->data->nb_queues) {
1047 rte_errno = EINVAL;
1048 return 0;
1049 }
1050
1051 if (profile_id)
1052 diag = (*dev->dev_ops->port_link_profile)(dev, dev->data->ports[port_id], queues,
1053 priorities, nb_links, profile_id);
1054 else
1055 diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id], queues,
1056 priorities, nb_links);
1057 if (diag < 0)
1058 return diag;
1059
1060 links_map = dev->data->links_map[profile_id];
	/* Point links_map to this port's specific area */
1062 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1063 for (i = 0; i < diag; i++)
1064 links_map[queues[i]] = (uint8_t)priorities[i];
1065
1066 rte_eventdev_trace_port_profile_links_set(dev_id, port_id, nb_links, profile_id, diag);
1067 return diag;
1068 }
1069
1070 int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
1073 {
1074 return rte_event_port_profile_unlink(dev_id, port_id, queues, nb_unlinks, 0);
1075 }
1076
1077 int
rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
			      uint16_t nb_unlinks, uint8_t profile_id)
1080 {
1081 uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1082 struct rte_event_dev_info info;
1083 struct rte_eventdev *dev;
1084 uint16_t *links_map;
1085 int i, diag, j;
1086
1087 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
1088 dev = &rte_eventdevs[dev_id];
1089
1090 if (*dev->dev_ops->dev_infos_get == NULL)
1091 return -ENOTSUP;
1092
1093 (*dev->dev_ops->dev_infos_get)(dev, &info);
1094 if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
1095 profile_id >= info.max_profiles_per_port) {
1096 RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
1097 return -EINVAL;
1098 }
1099
1100 if (*dev->dev_ops->port_unlink == NULL) {
1101 RTE_EDEV_LOG_ERR("Function not supported");
1102 rte_errno = ENOTSUP;
1103 return 0;
1104 }
1105
1106 if (profile_id && *dev->dev_ops->port_unlink_profile == NULL) {
1107 RTE_EDEV_LOG_ERR("Function not supported");
1108 rte_errno = ENOTSUP;
1109 return 0;
1110 }
1111
1112 if (!is_valid_port(dev, port_id)) {
1113 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1114 rte_errno = EINVAL;
1115 return 0;
1116 }
1117
1118 links_map = dev->data->links_map[profile_id];
	/* Point links_map to this port's specific area */
1120 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1121
1122 if (queues == NULL) {
1123 j = 0;
1124 for (i = 0; i < dev->data->nb_queues; i++) {
1125 if (links_map[i] !=
1126 EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1127 all_queues[j] = i;
1128 j++;
1129 }
1130 }
1131 queues = all_queues;
1132 } else {
1133 for (j = 0; j < nb_unlinks; j++) {
1134 if (links_map[queues[j]] ==
1135 EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
1136 break;
1137 }
1138 }
1139
1140 nb_unlinks = j;
1141 for (i = 0; i < nb_unlinks; i++)
1142 if (queues[i] >= dev->data->nb_queues) {
1143 rte_errno = EINVAL;
1144 return 0;
1145 }
1146
1147 if (profile_id)
1148 diag = (*dev->dev_ops->port_unlink_profile)(dev, dev->data->ports[port_id], queues,
1149 nb_unlinks, profile_id);
1150 else
1151 diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id], queues,
1152 nb_unlinks);
1153 if (diag < 0)
1154 return diag;
1155
1156 for (i = 0; i < diag; i++)
1157 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1158
1159 rte_eventdev_trace_port_profile_unlink(dev_id, port_id, nb_unlinks, profile_id, diag);
1160 return diag;
1161 }
1162
1163 int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
1165 {
1166 struct rte_eventdev *dev;
1167
1168 rte_eventdev_trace_port_unlinks_in_progress(dev_id, port_id);
1169
1170 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1171 dev = &rte_eventdevs[dev_id];
1172 if (!is_valid_port(dev, port_id)) {
1173 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1174 return -EINVAL;
1175 }
1176
1177 /* Return 0 if the PMD does not implement unlinks in progress.
1178 * This allows PMDs which handle unlink synchronously to not implement
1179 * this function at all.
1180 */
1181 if (*dev->dev_ops->port_unlinks_in_progress == NULL)
1182 return 0;
1183
1184 return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1185 dev->data->ports[port_id]);
1186 }
1187
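/*
 * Illustrative sketch (not part of the library): after an unlink request,
 * events for the unlinked queues may still be in flight. A teardown path can
 * poll rte_event_port_unlinks_in_progress() until the PMD reports completion.
 *
 *	rte_event_port_unlink(dev_id, port_id, NULL, 0);
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 */
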
1188 int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
1191 {
1192 struct rte_eventdev *dev;
1193 uint16_t *links_map;
1194 int i, count = 0;
1195
1196 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1197 dev = &rte_eventdevs[dev_id];
1198 if (!is_valid_port(dev, port_id)) {
1199 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1200 return -EINVAL;
1201 }
1202
1203 /* Use the default profile_id. */
1204 links_map = dev->data->links_map[0];
	/* Point links_map to this port's specific area */
1206 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1207 for (i = 0; i < dev->data->nb_queues; i++) {
1208 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1209 queues[count] = i;
1210 priorities[count] = (uint8_t)links_map[i];
1211 ++count;
1212 }
1213 }
1214
1215 rte_eventdev_trace_port_links_get(dev_id, port_id, count);
1216
1217 return count;
1218 }
1219
1220 int
rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
				 uint8_t priorities[], uint8_t profile_id)
1223 {
1224 struct rte_event_dev_info info;
1225 struct rte_eventdev *dev;
1226 uint16_t *links_map;
1227 int i, count = 0;
1228
1229 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1230
1231 dev = &rte_eventdevs[dev_id];
1232 if (*dev->dev_ops->dev_infos_get == NULL)
1233 return -ENOTSUP;
1234
1235 (*dev->dev_ops->dev_infos_get)(dev, &info);
1236 if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
1237 profile_id >= info.max_profiles_per_port) {
1238 RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
1239 return -EINVAL;
1240 }
1241
1242 if (!is_valid_port(dev, port_id)) {
1243 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1244 return -EINVAL;
1245 }
1246
1247 links_map = dev->data->links_map[profile_id];
	/* Point links_map to this port's specific area */
1249 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1250 for (i = 0; i < dev->data->nb_queues; i++) {
1251 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1252 queues[count] = i;
1253 priorities[count] = (uint8_t)links_map[i];
1254 ++count;
1255 }
1256 }
1257
1258 rte_eventdev_trace_port_profile_links_get(dev_id, port_id, profile_id, count);
1259
1260 return count;
1261 }
1262
1263 int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				uint64_t *timeout_ticks)
1266 {
1267 struct rte_eventdev *dev;
1268
1269 rte_eventdev_trace_dequeue_timeout_ticks(dev_id, ns, timeout_ticks);
1270
1271 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1272 dev = &rte_eventdevs[dev_id];
1273 if (*dev->dev_ops->timeout_ticks == NULL)
1274 return -ENOTSUP;
1275
1276 if (timeout_ticks == NULL)
1277 return -EINVAL;
1278
1279 return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1280 }
1281
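/*
 * Illustrative sketch (not part of the library): rte_event_dequeue_burst()
 * takes its timeout in device-specific ticks, so applications convert from
 * nanoseconds first. The evs[] array and the 100 us value are assumptions.
 *
 *	uint64_t ticks;
 *	struct rte_event evs[32];
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) == 0)
 *		(void)rte_event_dequeue_burst(dev_id, port_id, evs,
 *					      RTE_DIM(evs), ticks);
 */
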
1282 int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1284 {
1285 struct rte_eventdev *dev;
1286
1287 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1288 dev = &rte_eventdevs[dev_id];
1289
1290 if (service_id == NULL)
1291 return -EINVAL;
1292
1293 if (dev->data->service_inited)
1294 *service_id = dev->data->service_id;
1295
1296 rte_eventdev_trace_service_id_get(dev_id, *service_id);
1297
1298 return dev->data->service_inited ? 0 : -ESRCH;
1299 }
1300
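/*
 * Illustrative sketch (not part of the library): when the PMD schedules via a
 * service (e.g. the software eventdev), that service must be mapped to a
 * service lcore before dequeues make progress. Lcore 1 is an assumption.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_lcore_add(1);
 *		rte_service_map_lcore_set(service_id, 1, 1);
 *		rte_service_lcore_start(1);
 *	}
 */
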
1301 int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
1303 {
1304 struct rte_eventdev *dev;
1305
1306 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1307 dev = &rte_eventdevs[dev_id];
1308 if (*dev->dev_ops->dump == NULL)
1309 return -ENOTSUP;
1310 if (f == NULL)
1311 return -EINVAL;
1312
1313 (*dev->dev_ops->dump)(dev, f);
1314 return 0;
1316 }
1317
1318 static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		 uint8_t queue_port_id)
1321 {
1322 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1323 if (dev->dev_ops->xstats_get_names != NULL)
1324 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1325 queue_port_id,
1326 NULL, NULL, 0);
1327 return 0;
1328 }
1329
1330 int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		uint64_t *ids, unsigned int size)
1335 {
1336 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1337 const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1338 queue_port_id);
1339 if (xstats_names == NULL || cnt_expected_entries < 0 ||
1340 (int)size < cnt_expected_entries)
1341 return cnt_expected_entries;
1342
1343 /* dev_id checked above */
1344 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1345
1346 if (dev->dev_ops->xstats_get_names != NULL)
1347 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1348 queue_port_id, xstats_names, ids, size);
1349
1350 return -ENOTSUP;
1351 }
1352
1353 /* retrieve eventdev extended statistics */
1354 int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const uint64_t ids[],
		uint64_t values[], unsigned int n)
1358 {
1359 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1360 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1361
1362 /* implemented by the driver */
1363 if (dev->dev_ops->xstats_get != NULL)
1364 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1365 ids, values, n);
1366 return -ENOTSUP;
1367 }
1368
1369 uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		uint64_t *id)
1372 {
1373 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1374 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1375 uint64_t temp = -1;
1376
1377 if (id != NULL)
1378 *id = (unsigned int)-1;
1379 else
1380 id = &temp; /* ensure driver never gets a NULL value */
1381
1382 /* implemented by driver */
1383 if (dev->dev_ops->xstats_get_by_name != NULL)
1384 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1385 return -ENOTSUP;
1386 }
1387
int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint64_t ids[], uint32_t nb_ids)
1391 {
1392 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1393 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1394
1395 if (dev->dev_ops->xstats_reset != NULL)
1396 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1397 ids, nb_ids);
1398 return -ENOTSUP;
1399 }
1400
1401 int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
1402
int rte_event_dev_selftest(uint8_t dev_id)
1404 {
1405 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1406 static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
1407 .name = "rte_event_pmd_selftest_seqn_dynfield",
1408 .size = sizeof(rte_event_pmd_selftest_seqn_t),
1409 .align = alignof(rte_event_pmd_selftest_seqn_t),
1410 };
1411 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1412
1413 if (dev->dev_ops->dev_selftest != NULL) {
1414 rte_event_pmd_selftest_seqn_dynfield_offset =
1415 rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
1416 if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
1417 return -ENOMEM;
1418 return (*dev->dev_ops->dev_selftest)();
1419 }
1420 return -ENOTSUP;
1421 }
1422
1423 struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
			     unsigned int cache_size, uint16_t nb_elem,
			     int socket_id)
1427 {
1428 const char *mp_ops_name;
1429 struct rte_mempool *mp;
1430 unsigned int elt_sz;
1431 int ret;
1432
1433 if (!nb_elem) {
1434 RTE_EDEV_LOG_ERR("Invalid number of elements=%d requested",
1435 nb_elem);
1436 rte_errno = EINVAL;
1437 return NULL;
1438 }
1439
1440 elt_sz =
1441 sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
1442 mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
1443 0);
1444 if (mp == NULL)
1445 return NULL;
1446
1447 mp_ops_name = rte_mbuf_best_mempool_ops();
1448 ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
1449 if (ret != 0) {
1450 RTE_EDEV_LOG_ERR("error setting mempool handler");
1451 goto err;
1452 }
1453
1454 ret = rte_mempool_populate_default(mp);
1455 if (ret < 0)
1456 goto err;
1457
1458 rte_eventdev_trace_vector_pool_create(mp, mp->name, mp->socket_id,
1459 mp->size, mp->cache_size, mp->elt_size);
1460
1461 return mp;
1462 err:
1463 rte_mempool_free(mp);
1464 rte_errno = -ret;
1465 return NULL;
1466 }
1467
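/*
 * Illustrative sketch (not part of the library): a vector mempool sized for
 * 64-event vectors, typically handed to an adapter's vectorization
 * configuration. The pool name and sizes below are assumptions.
 *
 *	struct rte_mempool *vp;
 *
 *	vp = rte_event_vector_pool_create("evt_vec_pool", 16 * 1024, 64,
 *					  64, rte_socket_id());
 *	if (vp == NULL)
 *		rte_exit(EXIT_FAILURE, "vector pool creation failed\n");
 */
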
1468 int
rte_event_dev_start(uint8_t dev_id)
1470 {
1471 struct rte_eventdev *dev;
1472 int diag;
1473
1474 RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1475
1476 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1477 dev = &rte_eventdevs[dev_id];
1478 if (*dev->dev_ops->dev_start == NULL)
1479 return -ENOTSUP;
1480
1481 if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1483 dev_id);
1484 return 0;
1485 }
1486
1487 diag = (*dev->dev_ops->dev_start)(dev);
1488 rte_eventdev_trace_start(dev_id, diag);
1489 if (diag == 0)
1490 dev->data->dev_started = 1;
1491 else
1492 return diag;
1493
1494 event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
1495
1496 return 0;
1497 }
1498
1499 int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		rte_eventdev_stop_flush_t callback,
		void *userdata)
1503 {
1504 struct rte_eventdev *dev;
1505
1506 RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1507
1508 rte_eventdev_trace_stop_flush_callback_register(dev_id, callback, userdata);
1509
1510 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1511 dev = &rte_eventdevs[dev_id];
1512
1513 dev->dev_ops->dev_stop_flush = callback;
1514 dev->data->dev_stop_flush_arg = userdata;
1515
1516 return 0;
1517 }
1518
1519 void
rte_event_dev_stop(uint8_t dev_id)
1521 {
1522 struct rte_eventdev *dev;
1523
1524 RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1525
1526 RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1527 dev = &rte_eventdevs[dev_id];
1528 if (*dev->dev_ops->dev_stop == NULL)
1529 return;
1530
1531 if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1533 dev_id);
1534 return;
1535 }
1536
1537 dev->data->dev_started = 0;
1538 (*dev->dev_ops->dev_stop)(dev);
1539 rte_eventdev_trace_stop(dev_id);
1540 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1541 }
1542
1543 int
rte_event_dev_close(uint8_t dev_id)
1545 {
1546 struct rte_eventdev *dev;
1547
1548 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1549 dev = &rte_eventdevs[dev_id];
1550 if (*dev->dev_ops->dev_close == NULL)
1551 return -ENOTSUP;
1552
1553 /* Device must be stopped before it can be closed */
1554 if (dev->data->dev_started == 1) {
1555 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1556 dev_id);
1557 return -EBUSY;
1558 }
1559
1560 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1561 rte_eventdev_trace_close(dev_id);
1562 return (*dev->dev_ops->dev_close)(dev);
1563 }
1564
1565 static inline int
eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		    int socket_id)
1568 {
1569 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1570 const struct rte_memzone *mz;
1571 int i, n;
1572
1573 /* Generate memzone name */
1574 n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1575 if (n >= (int)sizeof(mz_name))
1576 return -EINVAL;
1577
1578 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1579 mz = rte_memzone_reserve(mz_name,
1580 sizeof(struct rte_eventdev_data),
1581 socket_id, 0);
1582 } else
1583 mz = rte_memzone_lookup(mz_name);
1584
1585 if (mz == NULL)
1586 return -ENOMEM;
1587
1588 *data = mz->addr;
1589 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1590 memset(*data, 0, sizeof(struct rte_eventdev_data));
1591 for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++)
1592 for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV * RTE_EVENT_MAX_QUEUES_PER_DEV;
1593 n++)
1594 (*data)->links_map[i][n] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1595 }
1596
1597 return 0;
1598 }
1599
1600 static inline uint8_t
eventdev_find_free_device_index(void)
1602 {
1603 uint8_t dev_id;
1604
1605 for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1606 if (rte_eventdevs[dev_id].attached ==
1607 RTE_EVENTDEV_DETACHED)
1608 return dev_id;
1609 }
1610 return RTE_EVENT_MAX_DEVS;
1611 }
1612
1613 struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
1615 {
1616 struct rte_eventdev *eventdev;
1617 uint8_t dev_id;
1618
1619 if (rte_event_pmd_get_named_dev(name) != NULL) {
1620 RTE_EDEV_LOG_ERR("Event device with name %s already "
1621 "allocated!", name);
1622 return NULL;
1623 }
1624
1625 dev_id = eventdev_find_free_device_index();
1626 if (dev_id == RTE_EVENT_MAX_DEVS) {
1627 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1628 return NULL;
1629 }
1630
1631 eventdev = &rte_eventdevs[dev_id];
1632
1633 if (eventdev->data == NULL) {
1634 struct rte_eventdev_data *eventdev_data = NULL;
1635
1636 int retval =
1637 eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
1638
1639 if (retval < 0 || eventdev_data == NULL)
1640 return NULL;
1641
1642 eventdev->data = eventdev_data;
1643
1644 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1645
1646 strlcpy(eventdev->data->name, name,
1647 RTE_EVENTDEV_NAME_MAX_LEN);
1648
1649 eventdev->data->dev_id = dev_id;
1650 eventdev->data->socket_id = socket_id;
1651 eventdev->data->dev_started = 0;
1652 }
1653
1654 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1655 eventdev_globals.nb_devs++;
1656 }
1657
1658 return eventdev;
1659 }
1660
1661 int
rte_event_pmd_release(struct rte_eventdev *eventdev)
1663 {
1664 int ret;
1665 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1666 const struct rte_memzone *mz;
1667
1668 if (eventdev == NULL)
1669 return -EINVAL;
1670
1671 event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
1672 eventdev->attached = RTE_EVENTDEV_DETACHED;
1673 eventdev_globals.nb_devs--;
1674
1675 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1676 rte_free(eventdev->data->dev_private);
1677
1678 /* Generate memzone name */
1679 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1680 eventdev->data->dev_id);
1681 if (ret >= (int)sizeof(mz_name))
1682 return -EINVAL;
1683
1684 mz = rte_memzone_lookup(mz_name);
1685 if (mz == NULL)
1686 return -ENOMEM;
1687
1688 ret = rte_memzone_free(mz);
1689 if (ret)
1690 return ret;
1691 }
1692
1693 eventdev->data = NULL;
1694 return 0;
1695 }
1696
1697 void
event_dev_probing_finish(struct rte_eventdev *eventdev)
1699 {
1700 if (eventdev == NULL)
1701 return;
1702
1703 event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
1704 eventdev);
1705 }
1706
1707 static int
handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
1711 {
1712 uint8_t dev_id;
1713 int ndev = rte_event_dev_count();
1714
1715 if (ndev < 1)
1716 return -1;
1717
1718 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1719 for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1720 if (rte_eventdevs[dev_id].attached ==
1721 RTE_EVENTDEV_ATTACHED)
1722 rte_tel_data_add_array_int(d, dev_id);
1723 }
1724
1725 return 0;
1726 }
1727
1728 static int
handle_port_list(const char *cmd __rte_unused,
		 const char *params,
		 struct rte_tel_data *d)
1732 {
1733 int i;
1734 uint8_t dev_id;
1735 struct rte_eventdev *dev;
1736 char *end_param;
1737
1738 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1739 return -1;
1740
1741 dev_id = strtoul(params, &end_param, 10);
1742 if (*end_param != '\0')
1743 RTE_EDEV_LOG_DEBUG(
1744 "Extra parameters passed to eventdev telemetry command, ignoring");
1745
1746 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1747 dev = &rte_eventdevs[dev_id];
1748
1749 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1750 for (i = 0; i < dev->data->nb_ports; i++)
1751 rte_tel_data_add_array_int(d, i);
1752
1753 return 0;
1754 }
1755
1756 static int
handle_queue_list(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
1760 {
1761 int i;
1762 uint8_t dev_id;
1763 struct rte_eventdev *dev;
1764 char *end_param;
1765
1766 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1767 return -1;
1768
1769 dev_id = strtoul(params, &end_param, 10);
1770 if (*end_param != '\0')
1771 RTE_EDEV_LOG_DEBUG(
1772 "Extra parameters passed to eventdev telemetry command, ignoring");
1773
1774 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1775 dev = &rte_eventdevs[dev_id];
1776
1777 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1778 for (i = 0; i < dev->data->nb_queues; i++)
1779 rte_tel_data_add_array_int(d, i);
1780
1781 return 0;
1782 }
1783
1784 static int
handle_queue_links(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
1788 {
1789 int i, ret, port_id = 0;
1790 char *end_param;
1791 uint8_t dev_id;
1792 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1793 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1794 const char *p_param;
1795
1796 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1797 return -1;
1798
1799 /* Get dev ID from parameter string */
1800 dev_id = strtoul(params, &end_param, 10);
1801 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1802
1803 p_param = strtok(end_param, ",");
1804 if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1805 return -1;
1806
1807 port_id = strtoul(p_param, &end_param, 10);
1808 p_param = strtok(NULL, "\0");
1809 if (p_param != NULL)
1810 RTE_EDEV_LOG_DEBUG(
1811 "Extra parameters passed to eventdev telemetry command, ignoring");
1812
1813 ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
1814 if (ret < 0)
1815 return -1;
1816
1817 rte_tel_data_start_dict(d);
1818 for (i = 0; i < ret; i++) {
1819 char qid_name[32];
1820
1821 snprintf(qid_name, 31, "qid_%u", queues[i]);
1822 rte_tel_data_add_dict_uint(d, qid_name, priorities[i]);
1823 }
1824
1825 return 0;
1826 }
1827
1828 static int
eventdev_build_telemetry_data(int dev_id,
			      enum rte_event_dev_xstats_mode mode,
			      int port_queue_id,
			      struct rte_tel_data *d)
1833 {
1834 struct rte_event_dev_xstats_name *xstat_names;
1835 uint64_t *ids;
1836 uint64_t *values;
1837 int i, ret, num_xstats;
1838
1839 num_xstats = rte_event_dev_xstats_names_get(dev_id,
1840 mode,
1841 port_queue_id,
1842 NULL,
1843 NULL,
1844 0);
1845
1846 if (num_xstats < 0)
1847 return -1;
1848
1849 /* use one malloc for names */
1850 xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
1851 * num_xstats);
1852 if (xstat_names == NULL)
1853 return -1;
1854
1855 ids = malloc((sizeof(uint64_t)) * num_xstats);
1856 if (ids == NULL) {
1857 free(xstat_names);
1858 return -1;
1859 }
1860
1861 values = malloc((sizeof(uint64_t)) * num_xstats);
1862 if (values == NULL) {
1863 free(xstat_names);
1864 free(ids);
1865 return -1;
1866 }
1867
1868 ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
1869 xstat_names, ids, num_xstats);
1870 if (ret < 0 || ret > num_xstats) {
1871 free(xstat_names);
1872 free(ids);
1873 free(values);
1874 return -1;
1875 }
1876
1877 ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
1878 ids, values, num_xstats);
1879 if (ret < 0 || ret > num_xstats) {
1880 free(xstat_names);
1881 free(ids);
1882 free(values);
1883 return -1;
1884 }
1885
1886 rte_tel_data_start_dict(d);
1887 for (i = 0; i < num_xstats; i++)
1888 rte_tel_data_add_dict_uint(d, xstat_names[i].name, values[i]);
1889
1890 free(xstat_names);
1891 free(ids);
1892 free(values);
1893 return 0;
1894 }
1895
1896 static int
handle_dev_xstats(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
1900 {
1901 int dev_id;
1902 enum rte_event_dev_xstats_mode mode;
1903 char *end_param;
1904
1905 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1906 return -1;
1907
1908 /* Get dev ID from parameter string */
1909 dev_id = strtoul(params, &end_param, 10);
1910 if (*end_param != '\0')
1911 RTE_EDEV_LOG_DEBUG(
1912 "Extra parameters passed to eventdev telemetry command, ignoring");
1913
1914 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1915
1916 mode = RTE_EVENT_DEV_XSTATS_DEVICE;
1917 return eventdev_build_telemetry_data(dev_id, mode, 0, d);
1918 }
1919
1920 static int
handle_port_xstats(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
1924 {
1925 int dev_id;
1926 int port_queue_id = 0;
1927 enum rte_event_dev_xstats_mode mode;
1928 char *end_param;
1929 const char *p_param;
1930
1931 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1932 return -1;
1933
1934 /* Get dev ID from parameter string */
1935 dev_id = strtoul(params, &end_param, 10);
1936 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1937
1938 p_param = strtok(end_param, ",");
1939 mode = RTE_EVENT_DEV_XSTATS_PORT;
1940
1941 if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1942 return -1;
1943
1944 port_queue_id = strtoul(p_param, &end_param, 10);
1945
1946 p_param = strtok(NULL, "\0");
1947 if (p_param != NULL)
1948 RTE_EDEV_LOG_DEBUG(
1949 "Extra parameters passed to eventdev telemetry command, ignoring");
1950
1951 return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1952 }
1953
1954 static int
handle_queue_xstats(const char *cmd __rte_unused,
		    const char *params,
		    struct rte_tel_data *d)
1958 {
1959 int dev_id;
1960 int port_queue_id = 0;
1961 enum rte_event_dev_xstats_mode mode;
1962 char *end_param;
1963 const char *p_param;
1964
1965 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1966 return -1;
1967
1968 /* Get dev ID from parameter string */
1969 dev_id = strtoul(params, &end_param, 10);
1970 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1971
1972 p_param = strtok(end_param, ",");
1973 mode = RTE_EVENT_DEV_XSTATS_QUEUE;
1974
1975 if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1976 return -1;
1977
1978 port_queue_id = strtoul(p_param, &end_param, 10);
1979
1980 p_param = strtok(NULL, "\0");
1981 if (p_param != NULL)
1982 RTE_EDEV_LOG_DEBUG(
1983 "Extra parameters passed to eventdev telemetry command, ignoring");
1984
1985 return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1986 }
1987
1988 static int
handle_dev_dump(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
1992 {
1993 char *buf, *end_param;
1994 int dev_id, ret;
1995 FILE *f;
1996
1997 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1998 return -1;
1999
2000 /* Get dev ID from parameter string */
2001 dev_id = strtoul(params, &end_param, 10);
2002 if (*end_param != '\0')
2003 RTE_EDEV_LOG_DEBUG(
2004 "Extra parameters passed to eventdev telemetry command, ignoring");
2005
2006 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2007
2008 buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
2009 if (buf == NULL)
2010 return -ENOMEM;
2011
2012 f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
2013 if (f == NULL) {
2014 free(buf);
2015 return -EINVAL;
2016 }
2017
2018 ret = rte_event_dev_dump(dev_id, f);
2019 fclose(f);
2020 if (ret == 0) {
2021 rte_tel_data_start_dict(d);
2022 rte_tel_data_string(d, buf);
2023 }
2024
2025 free(buf);
2026 return ret;
2027 }
2028
RTE_INIT(eventdev_init_telemetry)
2030 {
2031 rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
2032 "Returns list of available eventdevs. Takes no parameters");
2033 rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
2034 "Returns list of available ports. Parameter: DevID");
2035 rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
2036 "Returns list of available queues. Parameter: DevID");
2037
2038 rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
2039 "Returns stats for an eventdev. Parameter: DevID");
2040 rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
2041 "Returns stats for an eventdev port. Params: DevID,PortID");
2042 rte_telemetry_register_cmd("/eventdev/queue_xstats",
2043 handle_queue_xstats,
2044 "Returns stats for an eventdev queue. Params: DevID,QueueID");
2045 rte_telemetry_register_cmd("/eventdev/dev_dump", handle_dev_dump,
2046 "Returns dump information for an eventdev. Parameter: DevID");
2047 rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
2048 "Returns links for an eventdev port. Params: DevID,QueueID");
2049 }
2050
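/*
 * Illustrative sketch (not part of the library): the commands registered
 * above can be exercised with the telemetry client shipped in usertools,
 * e.g.:
 *
 *	./usertools/dpdk-telemetry.py
 *	--> /eventdev/dev_list
 *	--> /eventdev/dev_xstats,0
 *	--> /eventdev/port_xstats,0,1
 *
 * The trailing numbers are the DevID (and PortID/QueueID) parameters
 * described in the registration strings above.
 */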