Lines matching refs:dev in lib/eventdev/rte_eventdev.c (DPDK). Each entry shows the source line number, the matching fragment, and the enclosing function.

65 			(rte_event_devices[i].dev ? (strncmp(  in rte_event_dev_get_dev_id()
66 rte_event_devices[i].dev->driver->name, name, in rte_event_dev_get_dev_id()
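rte_event_dev_get_dev_id() resolves a device id by comparing driver names, as the fragment above shows. A minimal caller sketch, assuming EAL is already initialized; the helper name and the vdev name "event_sw0" are illustrative:

#include <stdio.h>
#include <rte_eventdev.h>

/* Resolve an eventdev id from its name; negative return means not found. */
static int
find_eventdev(void)
{
	int dev_id = rte_event_dev_get_dev_id("event_sw0");

	if (dev_id < 0)
		printf("no such eventdev\n");
	return dev_id;
}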
80 struct rte_eventdev *dev; in rte_event_dev_socket_id() local
83 dev = &rte_eventdevs[dev_id]; in rte_event_dev_socket_id()
85 rte_eventdev_trace_socket_id(dev_id, dev, dev->data->socket_id); in rte_event_dev_socket_id()
87 return dev->data->socket_id; in rte_event_dev_socket_id()
93 struct rte_eventdev *dev; in rte_event_dev_info_get() local
96 dev = &rte_eventdevs[dev_id]; in rte_event_dev_info_get()
103 if (*dev->dev_ops->dev_infos_get == NULL) in rte_event_dev_info_get()
105 (*dev->dev_ops->dev_infos_get)(dev, dev_info); in rte_event_dev_info_get()
107 dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns; in rte_event_dev_info_get()
109 dev_info->dev = dev->dev; in rte_event_dev_info_get()
110 if (dev->dev != NULL && dev->dev->driver != NULL) in rte_event_dev_info_get()
111 dev_info->driver_name = dev->dev->driver->name; in rte_event_dev_info_get()
113 rte_eventdev_trace_info_get(dev_id, dev_info, dev_info->dev); in rte_event_dev_info_get()
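rte_event_dev_info_get() forwards to the driver's dev_infos_get op, then overlays dequeue_timeout_ns from the live config plus the driver name. A usage sketch pairing it with rte_event_dev_socket_id(); dev_id is assumed valid:

#include <stdio.h>
#include <rte_eventdev.h>

static void
print_dev_limits(uint8_t dev_id)
{
	struct rte_event_dev_info info;

	if (rte_event_dev_info_get(dev_id, &info) != 0)
		return;
	printf("driver=%s socket=%d max_queues=%u max_ports=%u\n",
	       info.driver_name, rte_event_dev_socket_id(dev_id),
	       info.max_event_queues, info.max_event_ports);
}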
122 struct rte_eventdev *dev; in rte_event_eth_rx_adapter_caps_get() local
129 dev = &rte_eventdevs[dev_id]; in rte_event_eth_rx_adapter_caps_get()
134 if (dev->dev_ops->eth_rx_adapter_caps_get == NULL) in rte_event_eth_rx_adapter_caps_get()
139 return dev->dev_ops->eth_rx_adapter_caps_get ? in rte_event_eth_rx_adapter_caps_get()
140 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev, in rte_event_eth_rx_adapter_caps_get()
149 struct rte_eventdev *dev; in rte_event_timer_adapter_caps_get() local
156 dev = &rte_eventdevs[dev_id]; in rte_event_timer_adapter_caps_get()
161 if (dev->dev_ops->timer_adapter_caps_get == NULL) in rte_event_timer_adapter_caps_get()
166 return dev->dev_ops->timer_adapter_caps_get ? in rte_event_timer_adapter_caps_get()
167 (*dev->dev_ops->timer_adapter_caps_get)(dev, in rte_event_timer_adapter_caps_get()
178 struct rte_eventdev *dev; in rte_event_crypto_adapter_caps_get() local
185 dev = &rte_eventdevs[dev_id]; in rte_event_crypto_adapter_caps_get()
188 rte_eventdev_trace_crypto_adapter_caps_get(dev_id, dev, cdev_id, cdev); in rte_event_crypto_adapter_caps_get()
193 if (dev->dev_ops->crypto_adapter_caps_get == NULL) in rte_event_crypto_adapter_caps_get()
198 return dev->dev_ops->crypto_adapter_caps_get ? in rte_event_crypto_adapter_caps_get()
199 (*dev->dev_ops->crypto_adapter_caps_get) in rte_event_crypto_adapter_caps_get()
200 (dev, cdev, caps) : 0; in rte_event_crypto_adapter_caps_get()
207 struct rte_eventdev *dev; in rte_event_eth_tx_adapter_caps_get() local
213 dev = &rte_eventdevs[dev_id]; in rte_event_eth_tx_adapter_caps_get()
216 rte_eventdev_trace_eth_tx_adapter_caps_get(dev_id, dev, eth_port_id, eth_dev); in rte_event_eth_tx_adapter_caps_get()
221 if (dev->dev_ops->eth_tx_adapter_caps_get == NULL) in rte_event_eth_tx_adapter_caps_get()
226 return dev->dev_ops->eth_tx_adapter_caps_get ? in rte_event_eth_tx_adapter_caps_get()
227 (*dev->dev_ops->eth_tx_adapter_caps_get)(dev, in rte_event_eth_tx_adapter_caps_get()
236 struct rte_eventdev *dev; in rte_event_dma_adapter_caps_get() local
242 dev = &rte_eventdevs[dev_id]; in rte_event_dma_adapter_caps_get()
249 if (dev->dev_ops->dma_adapter_caps_get) in rte_event_dma_adapter_caps_get()
250 return (*dev->dev_ops->dma_adapter_caps_get)(dev, dma_dev_id, caps); in rte_event_dma_adapter_caps_get()
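All five adapter caps getters above share one shape: validate the ids, then return the driver op's result, or a default capability set when the op is absent. A sketch for the Rx adapter case; eth_port is assumed to be a valid, probed ethdev port:

#include <rte_eventdev.h>

static int
rx_adapter_has_internal_port(uint8_t dev_id, uint16_t eth_port)
{
	uint32_t caps = 0;

	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port, &caps) != 0)
		return -1;
	/* Internal-port PMDs inject packets without a service core. */
	return !!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
}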
256 event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues) in event_dev_queue_config() argument
258 uint8_t old_nb_queues = dev->data->nb_queues; in event_dev_queue_config()
263 dev->data->dev_id); in event_dev_queue_config()
266 queues_cfg = dev->data->queues_cfg; in event_dev_queue_config()
267 if (*dev->dev_ops->queue_release == NULL) in event_dev_queue_config()
271 (*dev->dev_ops->queue_release)(dev, i); in event_dev_queue_config()
281 if (*dev->dev_ops->queue_release == NULL) in event_dev_queue_config()
285 (*dev->dev_ops->queue_release)(dev, i); in event_dev_queue_config()
288 dev->data->nb_queues = nb_queues; in event_dev_queue_config()
295 event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) in event_dev_port_config() argument
297 uint8_t old_nb_ports = dev->data->nb_ports; in event_dev_port_config()
304 dev->data->dev_id); in event_dev_port_config()
307 if (*dev->dev_ops->port_release == NULL) in event_dev_port_config()
310 ports = dev->data->ports; in event_dev_port_config()
311 ports_cfg = dev->data->ports_cfg; in event_dev_port_config()
314 (*dev->dev_ops->port_release)(ports[i]); in event_dev_port_config()
328 links_map = dev->data->links_map[i]; in event_dev_port_config()
334 if (*dev->dev_ops->port_release == NULL) in event_dev_port_config()
337 ports = dev->data->ports; in event_dev_port_config()
339 (*dev->dev_ops->port_release)(ports[i]); in event_dev_port_config()
344 dev->data->nb_ports = nb_ports; in event_dev_port_config()
353 struct rte_eventdev *dev; in rte_event_dev_configure() local
357 dev = &rte_eventdevs[dev_id]; in rte_event_dev_configure()
359 if (*dev->dev_ops->dev_infos_get == NULL) in rte_event_dev_configure()
361 if (*dev->dev_ops->dev_configure == NULL) in rte_event_dev_configure()
364 if (dev->data->dev_started) { in rte_event_dev_configure()
373 (*dev->dev_ops->dev_infos_get)(dev, &info); in rte_event_dev_configure()
505 memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf)); in rte_event_dev_configure()
508 diag = event_dev_queue_config(dev, dev_conf->nb_event_queues); in rte_event_dev_configure()
516 diag = event_dev_port_config(dev, dev_conf->nb_event_ports); in rte_event_dev_configure()
518 event_dev_queue_config(dev, 0); in rte_event_dev_configure()
527 diag = (*dev->dev_ops->dev_configure)(dev); in rte_event_dev_configure()
531 event_dev_queue_config(dev, 0); in rte_event_dev_configure()
532 event_dev_port_config(dev, 0); in rte_event_dev_configure()
535 dev->data->event_dev_cap = info.event_dev_cap; in rte_event_dev_configure()
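rte_event_dev_configure() checks the request against the dev_infos_get limits, resizes queues and ports, and unwinds both if the driver's dev_configure op fails. A minimal sketch that derives a config from the reported limits; the queue and port counts are illustrative, not tuned:

#include <rte_eventdev.h>

static int
configure_evdev(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config conf = {0};

	if (rte_event_dev_info_get(dev_id, &info) != 0)
		return -1;
	conf.nb_event_queues = 2;	/* assumption: two stages */
	conf.nb_event_ports = 2;	/* assumption: two workers */
	conf.nb_events_limit = info.max_num_events;
	conf.nb_event_queue_flows = info.max_event_queue_flows;
	conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
	return rte_event_dev_configure(dev_id, &conf);
}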
541 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id) in is_valid_queue() argument
543 if (queue_id < dev->data->nb_queues && queue_id < in is_valid_queue()
554 struct rte_eventdev *dev; in rte_event_queue_default_conf_get() local
557 dev = &rte_eventdevs[dev_id]; in rte_event_queue_default_conf_get()
562 if (!is_valid_queue(dev, queue_id)) { in rte_event_queue_default_conf_get()
567 if (*dev->dev_ops->queue_def_conf == NULL) in rte_event_queue_default_conf_get()
570 (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf); in rte_event_queue_default_conf_get()
572 rte_eventdev_trace_queue_default_conf_get(dev_id, dev, queue_id, queue_conf); in rte_event_queue_default_conf_get()
614 struct rte_eventdev *dev; in rte_event_queue_setup() local
618 dev = &rte_eventdevs[dev_id]; in rte_event_queue_setup()
620 if (!is_valid_queue(dev, queue_id)) { in rte_event_queue_setup()
629 dev->data->dev_conf.nb_event_queue_flows) { in rte_event_queue_setup()
633 dev->data->dev_conf.nb_event_queue_flows); in rte_event_queue_setup()
642 dev->data->dev_conf.nb_event_queue_flows) { in rte_event_queue_setup()
646 dev->data->dev_conf.nb_event_queue_flows); in rte_event_queue_setup()
651 if (dev->data->dev_started) { in rte_event_queue_setup()
657 if (*dev->dev_ops->queue_setup == NULL) in rte_event_queue_setup()
661 if (*dev->dev_ops->queue_def_conf == NULL) in rte_event_queue_setup()
663 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf); in rte_event_queue_setup()
667 dev->data->queues_cfg[queue_id] = *queue_conf; in rte_event_queue_setup()
669 return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf); in rte_event_queue_setup()
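rte_event_queue_setup() substitutes the driver's default conf for a NULL queue_conf and bounds the atomic flow and order-sequence counts by the configured nb_event_queue_flows. A sketch starting from the defaults:

#include <rte_eventdev.h>

static int
setup_queue(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf qconf;

	if (rte_event_queue_default_conf_get(dev_id, queue_id, &qconf) != 0)
		return -1;
	/* Priority is honored only with RTE_EVENT_DEV_CAP_QUEUE_QOS. */
	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	return rte_event_queue_setup(dev_id, queue_id, &qconf);
}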
673 is_valid_port(struct rte_eventdev *dev, uint8_t port_id) in is_valid_port() argument
675 if (port_id < dev->data->nb_ports) in is_valid_port()
685 struct rte_eventdev *dev; in rte_event_port_default_conf_get() local
688 dev = &rte_eventdevs[dev_id]; in rte_event_port_default_conf_get()
693 if (!is_valid_port(dev, port_id)) { in rte_event_port_default_conf_get()
698 if (*dev->dev_ops->port_def_conf == NULL) in rte_event_port_default_conf_get()
701 (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf); in rte_event_port_default_conf_get()
703 rte_eventdev_trace_port_default_conf_get(dev_id, dev, port_id, port_conf); in rte_event_port_default_conf_get()
712 struct rte_eventdev *dev; in rte_event_port_setup() local
717 dev = &rte_eventdevs[dev_id]; in rte_event_port_setup()
719 if (!is_valid_port(dev, port_id)) { in rte_event_port_setup()
727 dev->data->dev_conf.nb_events_limit)) { in rte_event_port_setup()
731 dev->data->dev_conf.nb_events_limit); in rte_event_port_setup()
738 dev->data->dev_conf.nb_event_port_dequeue_depth)) { in rte_event_port_setup()
742 dev->data->dev_conf.nb_event_port_dequeue_depth); in rte_event_port_setup()
749 dev->data->dev_conf.nb_event_port_enqueue_depth)) { in rte_event_port_setup()
753 dev->data->dev_conf.nb_event_port_enqueue_depth); in rte_event_port_setup()
759 !(dev->data->event_dev_cap & in rte_event_port_setup()
767 if (dev->data->dev_started) { in rte_event_port_setup()
773 if (*dev->dev_ops->port_setup == NULL) in rte_event_port_setup()
777 if (*dev->dev_ops->port_def_conf == NULL) in rte_event_port_setup()
779 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf); in rte_event_port_setup()
783 dev->data->ports_cfg[port_id] = *port_conf; in rte_event_port_setup()
785 diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf); in rte_event_port_setup()
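rte_event_port_setup() applies the same default-conf fallback and rejects event thresholds and depths beyond the configured device limits. Sketch:

#include <rte_eventdev.h>

static int
setup_port(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_port_conf pconf;

	if (rte_event_port_default_conf_get(dev_id, port_id, &pconf) != 0)
		return -1;
	/* The driver defaults already sit within the limits checked above. */
	return rte_event_port_setup(dev_id, port_id, &pconf);
}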
802 struct rte_eventdev *dev; in rte_event_port_quiesce() local
805 dev = &rte_eventdevs[dev_id]; in rte_event_port_quiesce()
807 rte_eventdev_trace_port_quiesce(dev_id, dev, port_id, args); in rte_event_port_quiesce()
809 if (!is_valid_port(dev, port_id)) { in rte_event_port_quiesce()
814 if (dev->dev_ops->port_quiesce) in rte_event_port_quiesce()
815 (*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id], in rte_event_port_quiesce()
823 struct rte_eventdev *dev; in rte_event_dev_attr_get() local
828 dev = &rte_eventdevs[dev_id]; in rte_event_dev_attr_get()
832 *attr_value = dev->data->nb_ports; in rte_event_dev_attr_get()
835 *attr_value = dev->data->nb_queues; in rte_event_dev_attr_get()
838 *attr_value = dev->data->dev_started; in rte_event_dev_attr_get()
844 rte_eventdev_trace_attr_get(dev_id, dev, attr_id, *attr_value); in rte_event_dev_attr_get()
853 struct rte_eventdev *dev; in rte_event_port_attr_get() local
859 dev = &rte_eventdevs[dev_id]; in rte_event_port_attr_get()
860 if (!is_valid_port(dev, port_id)) { in rte_event_port_attr_get()
867 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth; in rte_event_port_attr_get()
870 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth; in rte_event_port_attr_get()
873 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold; in rte_event_port_attr_get()
879 config = dev->data->ports_cfg[port_id].event_port_cfg; in rte_event_port_attr_get()
887 rte_eventdev_trace_port_attr_get(dev_id, dev, port_id, attr_id, *attr_value); in rte_event_port_attr_get()
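The attr getters read straight from dev->data, so they are cheap to poll. A sketch querying one device-level and one port-level attribute (helper name is illustrative):

#include <stdio.h>
#include <rte_eventdev.h>

static void
print_counts(uint8_t dev_id, uint8_t port_id)
{
	uint32_t v = 0;

	if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT, &v) == 0)
		printf("ports: %u\n", v);
	if (rte_event_port_attr_get(dev_id, port_id,
				    RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &v) == 0)
		printf("port %u dequeue depth: %u\n", port_id, v);
}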
897 struct rte_eventdev *dev; in rte_event_queue_attr_get() local
903 dev = &rte_eventdevs[dev_id]; in rte_event_queue_attr_get()
904 if (!is_valid_queue(dev, queue_id)) { in rte_event_queue_attr_get()
909 conf = &dev->data->queues_cfg[queue_id]; in rte_event_queue_attr_get()
914 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS) in rte_event_queue_attr_get()
934 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS) in rte_event_queue_attr_get()
939 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS) in rte_event_queue_attr_get()
946 rte_eventdev_trace_queue_attr_get(dev_id, dev, queue_id, attr_id, *attr_value); in rte_event_queue_attr_get()
955 struct rte_eventdev *dev; in rte_event_queue_attr_set() local
960 dev = &rte_eventdevs[dev_id]; in rte_event_queue_attr_set()
961 if (!is_valid_queue(dev, queue_id)) { in rte_event_queue_attr_set()
966 if (!(dev->data->event_dev_cap & in rte_event_queue_attr_set()
974 if (*dev->dev_ops->queue_attr_set == NULL) in rte_event_queue_attr_set()
976 return (*dev->dev_ops->queue_attr_set)(dev, queue_id, attr_id, in rte_event_queue_attr_set()
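Unlike the getter, rte_event_queue_attr_set() is gated on the capability checked above. A sketch that reads the current priority and raises it at runtime:

#include <stdio.h>
#include <rte_eventdev.h>

static int
raise_queue_priority(uint8_t dev_id, uint8_t queue_id)
{
	uint32_t prio = 0;

	if (rte_event_queue_attr_get(dev_id, queue_id,
				     RTE_EVENT_QUEUE_ATTR_PRIORITY, &prio) != 0)
		return -1;
	printf("old priority: %u\n", prio);
	/* Fails with -ENOTSUP without RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR. */
	return rte_event_queue_attr_set(dev_id, queue_id,
					RTE_EVENT_QUEUE_ATTR_PRIORITY,
					RTE_EVENT_DEV_PRIORITY_HIGHEST);
}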
995 struct rte_eventdev *dev; in rte_event_port_profile_links_set() local
1000 dev = &rte_eventdevs[dev_id]; in rte_event_port_profile_links_set()
1002 if (*dev->dev_ops->dev_infos_get == NULL) in rte_event_port_profile_links_set()
1005 (*dev->dev_ops->dev_infos_get)(dev, &info); in rte_event_port_profile_links_set()
1012 if (*dev->dev_ops->port_link == NULL) { in rte_event_port_profile_links_set()
1018 if (profile_id && *dev->dev_ops->port_link_profile == NULL) { in rte_event_port_profile_links_set()
1024 if (!is_valid_port(dev, port_id)) { in rte_event_port_profile_links_set()
1031 for (i = 0; i < dev->data->nb_queues; i++) in rte_event_port_profile_links_set()
1035 nb_links = dev->data->nb_queues; in rte_event_port_profile_links_set()
1046 if (queues[i] >= dev->data->nb_queues) { in rte_event_port_profile_links_set()
1052 diag = (*dev->dev_ops->port_link_profile)(dev, dev->data->ports[port_id], queues, in rte_event_port_profile_links_set()
1055 diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id], queues, in rte_event_port_profile_links_set()
1060 links_map = dev->data->links_map[profile_id]; in rte_event_port_profile_links_set()
1083 struct rte_eventdev *dev; in rte_event_port_profile_unlink() local
1088 dev = &rte_eventdevs[dev_id]; in rte_event_port_profile_unlink()
1090 if (*dev->dev_ops->dev_infos_get == NULL) in rte_event_port_profile_unlink()
1093 (*dev->dev_ops->dev_infos_get)(dev, &info); in rte_event_port_profile_unlink()
1100 if (*dev->dev_ops->port_unlink == NULL) { in rte_event_port_profile_unlink()
1106 if (profile_id && *dev->dev_ops->port_unlink_profile == NULL) { in rte_event_port_profile_unlink()
1112 if (!is_valid_port(dev, port_id)) { in rte_event_port_profile_unlink()
1118 links_map = dev->data->links_map[profile_id]; in rte_event_port_profile_unlink()
1124 for (i = 0; i < dev->data->nb_queues; i++) { in rte_event_port_profile_unlink()
1142 if (queues[i] >= dev->data->nb_queues) { in rte_event_port_profile_unlink()
1148 diag = (*dev->dev_ops->port_unlink_profile)(dev, dev->data->ports[port_id], queues, in rte_event_port_profile_unlink()
1151 diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id], queues, in rte_event_port_profile_unlink()
1166 struct rte_eventdev *dev; in rte_event_port_unlinks_in_progress() local
1171 dev = &rte_eventdevs[dev_id]; in rte_event_port_unlinks_in_progress()
1172 if (!is_valid_port(dev, port_id)) { in rte_event_port_unlinks_in_progress()
1181 if (*dev->dev_ops->port_unlinks_in_progress == NULL) in rte_event_port_unlinks_in_progress()
1184 return (*dev->dev_ops->port_unlinks_in_progress)(dev, in rte_event_port_unlinks_in_progress()
1185 dev->data->ports[port_id]); in rte_event_port_unlinks_in_progress()
1192 struct rte_eventdev *dev; in rte_event_port_links_get() local
1197 dev = &rte_eventdevs[dev_id]; in rte_event_port_links_get()
1198 if (!is_valid_port(dev, port_id)) { in rte_event_port_links_get()
1204 links_map = dev->data->links_map[0]; in rte_event_port_links_get()
1207 for (i = 0; i < dev->data->nb_queues; i++) { in rte_event_port_links_get()
1225 struct rte_eventdev *dev; in rte_event_port_profile_links_get() local
1231 dev = &rte_eventdevs[dev_id]; in rte_event_port_profile_links_get()
1232 if (*dev->dev_ops->dev_infos_get == NULL) in rte_event_port_profile_links_get()
1235 (*dev->dev_ops->dev_infos_get)(dev, &info); in rte_event_port_profile_links_get()
1242 if (!is_valid_port(dev, port_id)) { in rte_event_port_profile_links_get()
1247 links_map = dev->data->links_map[profile_id]; in rte_event_port_profile_links_get()
1250 for (i = 0; i < dev->data->nb_queues; i++) { in rte_event_port_profile_links_get()
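Passing NULL for queues takes the link-everything path above (nb_links = dev->data->nb_queues, normal priority). Note these calls report failure by returning a short count and setting rte_errno, not by returning a negative value. Sketch:

#include <rte_errno.h>
#include <rte_eventdev.h>

static int
link_all_queues(uint8_t dev_id, uint8_t port_id)
{
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t prios[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint32_t nb_queues = 0;
	int n;

	if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				   &nb_queues) != 0)
		return -1;
	n = rte_event_port_link(dev_id, port_id, NULL, NULL, 0);
	if (n != (int)nb_queues)
		return -rte_errno;	/* partial or failed link */
	return rte_event_port_links_get(dev_id, port_id, queues, prios);
}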
1267 struct rte_eventdev *dev; in rte_event_dequeue_timeout_ticks() local
1272 dev = &rte_eventdevs[dev_id]; in rte_event_dequeue_timeout_ticks()
1273 if (*dev->dev_ops->timeout_ticks == NULL) in rte_event_dequeue_timeout_ticks()
1279 return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks); in rte_event_dequeue_timeout_ticks()
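rte_event_dequeue_timeout_ticks() converts nanoseconds into the opaque unit rte_event_dequeue_burst() expects. Sketch:

#include <rte_eventdev.h>

static uint64_t
ns_to_dequeue_ticks(uint8_t dev_id, uint64_t ns)
{
	uint64_t ticks = 0;

	/* -ENOTSUP when the PMD has no timeout_ticks op. */
	if (rte_event_dequeue_timeout_ticks(dev_id, ns, &ticks) != 0)
		return 0;
	return ticks;
}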
1285 struct rte_eventdev *dev; in rte_event_dev_service_id_get() local
1288 dev = &rte_eventdevs[dev_id]; in rte_event_dev_service_id_get()
1293 if (dev->data->service_inited) in rte_event_dev_service_id_get()
1294 *service_id = dev->data->service_id; in rte_event_dev_service_id_get()
1298 return dev->data->service_inited ? 0 : -ESRCH; in rte_event_dev_service_id_get()
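A software eventdev needs its service mapped to a service lcore; -ESRCH above means the device runs without one. Sketch, assuming lcore_id was registered earlier with rte_service_lcore_add():

#include <rte_eventdev.h>
#include <rte_service.h>

static int
attach_evdev_service(uint8_t dev_id, uint32_t lcore_id)
{
	uint32_t service_id;

	if (rte_event_dev_service_id_get(dev_id, &service_id) != 0)
		return 0;	/* no service required */
	if (rte_service_map_lcore_set(service_id, lcore_id, 1) != 0)
		return -1;
	return rte_service_runstate_set(service_id, 1);
}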
1304 struct rte_eventdev *dev; in rte_event_dev_dump() local
1307 dev = &rte_eventdevs[dev_id]; in rte_event_dev_dump()
1308 if (*dev->dev_ops->dump == NULL) in rte_event_dev_dump()
1313 (*dev->dev_ops->dump)(dev, f); in rte_event_dev_dump()
1322 struct rte_eventdev *dev = &rte_eventdevs[dev_id]; in xstats_get_count() local
1323 if (dev->dev_ops->xstats_get_names != NULL) in xstats_get_count()
1324 return (*dev->dev_ops->xstats_get_names)(dev, mode, in xstats_get_count()
1344 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; in rte_event_dev_xstats_names_get() local
1346 if (dev->dev_ops->xstats_get_names != NULL) in rte_event_dev_xstats_names_get()
1347 return (*dev->dev_ops->xstats_get_names)(dev, mode, in rte_event_dev_xstats_names_get()
1360 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; in rte_event_dev_xstats_get() local
1363 if (dev->dev_ops->xstats_get != NULL) in rte_event_dev_xstats_get()
1364 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id, in rte_event_dev_xstats_get()
1374 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; in rte_event_dev_xstats_by_name_get() local
1383 if (dev->dev_ops->xstats_get_by_name != NULL) in rte_event_dev_xstats_by_name_get()
1384 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id); in rte_event_dev_xstats_by_name_get()
1393 struct rte_eventdev *dev = &rte_eventdevs[dev_id]; in rte_event_dev_xstats_reset() local
1395 if (dev->dev_ops->xstats_reset != NULL) in rte_event_dev_xstats_reset()
1396 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id, in rte_event_dev_xstats_reset()
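The xstats path is two-step: fetch names and ids, then fetch values by id. A sketch for device-scope stats; the fixed array size of 64 is illustrative, and the uint64_t id arrays assume a recent DPDK release (earlier prototypes used unsigned int ids):

#include <inttypes.h>
#include <stdio.h>
#include <rte_eventdev.h>

static void
dump_dev_xstats(uint8_t dev_id)
{
	struct rte_event_dev_xstats_name names[64];
	uint64_t ids[64], values[64];
	int n, i;

	n = rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
					   0, names, ids, 64);
	if (n <= 0 || n > 64)
		return;	/* error, or more stats than our buffer holds */
	n = rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
				     ids, values, n);
	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
}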
1411 struct rte_eventdev *dev = &rte_eventdevs[dev_id]; in rte_event_dev_selftest() local
1413 if (dev->dev_ops->dev_selftest != NULL) { in rte_event_dev_selftest()
1418 return (*dev->dev_ops->dev_selftest)(); in rte_event_dev_selftest()
1471 struct rte_eventdev *dev; in rte_event_dev_start() local
1477 dev = &rte_eventdevs[dev_id]; in rte_event_dev_start()
1478 if (*dev->dev_ops->dev_start == NULL) in rte_event_dev_start()
1481 if (dev->data->dev_started != 0) { in rte_event_dev_start()
1487 diag = (*dev->dev_ops->dev_start)(dev); in rte_event_dev_start()
1490 dev->data->dev_started = 1; in rte_event_dev_start()
1494 event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev); in rte_event_dev_start()
1504 struct rte_eventdev *dev; in rte_event_dev_stop_flush_callback_register() local
1511 dev = &rte_eventdevs[dev_id]; in rte_event_dev_stop_flush_callback_register()
1513 dev->dev_ops->dev_stop_flush = callback; in rte_event_dev_stop_flush_callback_register()
1514 dev->data->dev_stop_flush_arg = userdata; in rte_event_dev_stop_flush_callback_register()
1522 struct rte_eventdev *dev; in rte_event_dev_stop() local
1527 dev = &rte_eventdevs[dev_id]; in rte_event_dev_stop()
1528 if (*dev->dev_ops->dev_stop == NULL) in rte_event_dev_stop()
1531 if (dev->data->dev_started == 0) { in rte_event_dev_stop()
1537 dev->data->dev_started = 0; in rte_event_dev_stop()
1538 (*dev->dev_ops->dev_stop)(dev); in rte_event_dev_stop()
1546 struct rte_eventdev *dev; in rte_event_dev_close() local
1549 dev = &rte_eventdevs[dev_id]; in rte_event_dev_close()
1550 if (*dev->dev_ops->dev_close == NULL) in rte_event_dev_close()
1554 if (dev->data->dev_started == 1) { in rte_event_dev_close()
1562 return (*dev->dev_ops->dev_close)(dev); in rte_event_dev_close()
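rte_event_dev_stop() marks the device stopped before invoking the driver, and rte_event_dev_close() refuses a started device with -EBUSY, as the checks above show. A shutdown sketch with a stop-flush callback to drain in-flight events; the callback body is illustrative:

#include <rte_common.h>
#include <rte_eventdev.h>

static void
flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
{
	/* A real application would release ev.mbuf or similar here. */
	RTE_SET_USED(dev_id);
	RTE_SET_USED(ev);
	RTE_SET_USED(arg);
}

static int
shutdown_evdev(uint8_t dev_id)
{
	if (rte_event_dev_stop_flush_callback_register(dev_id, flush_cb,
						       NULL) != 0)
		return -1;
	rte_event_dev_stop(dev_id);
	return rte_event_dev_close(dev_id);
}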
1735 struct rte_eventdev *dev; in handle_port_list() local
1747 dev = &rte_eventdevs[dev_id]; in handle_port_list()
1750 for (i = 0; i < dev->data->nb_ports; i++) in handle_port_list()
1763 struct rte_eventdev *dev; in handle_queue_list() local
1775 dev = &rte_eventdevs[dev_id]; in handle_queue_list()
1778 for (i = 0; i < dev->data->nb_queues; i++) in handle_queue_list()