/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Interrupts handling for tap driver.
 */

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#include <rte_eth_tap.h>
#include <rte_errno.h>
#include <rte_interrupts.h>


/**
 * Unregister Rx interrupts and free the queue interrupt vector.
 *
 * @param dev
 *   Pointer to the tap rte_eth_dev structure.
 */
static void
tap_rx_intr_vec_uninstall(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = pmd->intr_handle;

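	/*
	 * Release everything tap_rx_intr_vec_install() may have set up:
	 * the per-queue epoll file descriptors, the interrupt vector list
	 * and the recorded number of event fds.
	 */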
	rte_intr_free_epoll_fd(intr_handle);
	rte_intr_vec_list_free(intr_handle);
	rte_intr_nb_efd_set(intr_handle, 0);
}

/**
 * Allocate Rx queue interrupt vector and register Rx interrupts.
 *
 * @param dev
 *   Pointer to the tap rte_eth_dev device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
tap_rx_intr_vec_install(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct pmd_process_private *process_private = dev->process_private;
	unsigned int rxqs_n = pmd->dev->data->nb_rx_queues;
	struct rte_intr_handle *intr_handle = pmd->intr_handle;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
	unsigned int i;
	unsigned int count = 0;

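	/* Nothing to install if Rx queue interrupts were not requested. */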
	if (!dev->data->dev_conf.intr_conf.rxq)
		return 0;

	if (rte_intr_vec_list_alloc(intr_handle, NULL, rxqs_n)) {
		rte_errno = ENOMEM;
		TAP_LOG(ERR,
			"failed to allocate memory for interrupt vector,"
			" Rx interrupts will not be supported");
		return -rte_errno;
	}
	for (i = 0; i < n; i++) {
		struct rx_queue *rxq = pmd->dev->data->rx_queues[i];
		int fd = process_private->fds[i];
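		/*
		 * The queue's tap file descriptor doubles as the interrupt
		 * event fd: it becomes readable when packets arrive, so it
		 * can be handed directly to the epoll based interrupt code.
		 */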

		/* Skip queues that cannot request interrupts. */
		if (!rxq || fd == -1) {
			/* Use invalid intr_vec[] index to disable entry. */
			if (rte_intr_vec_list_index_set(intr_handle, i,
			    RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
				return -rte_errno;
			continue;
		}
		if (rte_intr_vec_list_index_set(intr_handle, i,
				RTE_INTR_VEC_RXTX_OFFSET + count))
			return -rte_errno;
		if (rte_intr_efds_index_set(intr_handle, count, fd))
			return -rte_errno;
		count++;
	}
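	/*
	 * If no queue could supply an event fd, undo the allocation above;
	 * otherwise publish how many event fds are actually in use.
	 */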
	if (!count)
		tap_rx_intr_vec_uninstall(dev);
	else if (rte_intr_nb_efd_set(intr_handle, count))
		return -rte_errno;
	return 0;
}

/**
 * Register or unregister the Rx interrupts.
 *
 * @param dev
 *   Pointer to the tap rte_eth_dev device structure.
 * @param set
 *   Nonzero to register the Rx interrupts, zero to unregister them.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
tap_rx_intr_vec_set(struct rte_eth_dev *dev, int set)
{
	tap_rx_intr_vec_uninstall(dev);
	if (set)
		return tap_rx_intr_vec_install(dev);
	return 0;
}
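
/*
 * For reference only, not driver code: a minimal sketch of how an
 * application typically consumes the Rx interrupts registered above,
 * using the generic ethdev/EAL interrupt API. Error handling and the
 * surrounding event loop are omitted; port_id and queue_id are
 * placeholders.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	... then drain the queue with rte_eth_rx_burst() ...
 */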