/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Interrupt handling for mlx4 driver.
 */
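
/*
 * This file covers three interrupt paths: link state change (LSC) and
 * device removal (RMV) events taken from the Verbs asynchronous event
 * queue, and per-Rx-queue datapath interrupts driven by completion
 * channels and armed through the CQ doorbell.
 */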

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_alarm.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_io.h>
#include <rte_interrupts.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

static int mlx4_link_status_check(struct mlx4_priv *priv);

/**
 * Clean up Rx interrupts handler.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_rx_intr_vec_disable(struct mlx4_priv *priv)
{
	struct rte_intr_handle *intr_handle = priv->intr_handle;

	rte_intr_free_epoll_fd(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	rte_intr_nb_efd_set(intr_handle, 0);
}

/**
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
{
	unsigned int i;
	unsigned int rxqs_n = ETH_DEV(priv)->data->nb_rx_queues;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
	unsigned int count = 0;
	struct rte_intr_handle *intr_handle = priv->intr_handle;

	mlx4_rx_intr_vec_disable(priv);
	if (rte_intr_vec_list_alloc(intr_handle, NULL, n)) {
		rte_errno = ENOMEM;
		ERROR("failed to allocate memory for interrupt vector,"
		      " Rx interrupts will not be supported");
		return -rte_errno;
	}
	for (i = 0; i != n; ++i) {
		struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];

		/* Skip queues that cannot request interrupts. */
		if (!rxq || !rxq->channel) {
			/* Use invalid intr_vec[] index to disable entry. */
			if (rte_intr_vec_list_index_set(intr_handle, i,
			RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
				return -rte_errno;
			continue;
		}
		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
			rte_errno = E2BIG;
			ERROR("too many Rx queues for interrupt vector size"
			      " (%d), Rx interrupts cannot be enabled",
			      RTE_MAX_RXTX_INTR_VEC_ID);
			mlx4_rx_intr_vec_disable(priv);
			return -rte_errno;
		}

		if (rte_intr_vec_list_index_set(intr_handle, i,
					RTE_INTR_VEC_RXTX_OFFSET + count))
			return -rte_errno;

		if (rte_intr_efds_index_set(intr_handle, i,
					    rxq->channel->fd))
			return -rte_errno;

		count++;
	}
	if (!count)
		mlx4_rx_intr_vec_disable(priv);
	else if (rte_intr_nb_efd_set(intr_handle, count))
		return -rte_errno;
	return 0;
}
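
/*
 * Illustrative sketch (not part of the driver): with intr_conf.rxq set,
 * the event fds filled in above through rte_intr_efds_index_set() are the
 * ones the ethdev Rx interrupt helpers attach to an application epoll set,
 * roughly as follows ("port_id" and "queue_id" are placeholders):
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */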

/**
 * Process scheduled link status check.
 *
 * If LSC interrupts are requested, process the related callback.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_link_status_alarm(struct mlx4_priv *priv)
{
	const struct rte_eth_intr_conf *const intr_conf =
		&ETH_DEV(priv)->data->dev_conf.intr_conf;

	MLX4_ASSERT(priv->intr_alarm == 1);
	priv->intr_alarm = 0;
	if (intr_conf->lsc && !mlx4_link_status_check(priv))
		rte_eth_dev_callback_process(ETH_DEV(priv),
					     RTE_ETH_EVENT_INTR_LSC,
					     NULL);
}

/**
 * Check link status.
 *
 * In case of inconsistency, another check is scheduled.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success (link status is consistent), negative errno value
 *   otherwise and rte_errno is set.
 */
static int
mlx4_link_status_check(struct mlx4_priv *priv)
{
	struct rte_eth_link *link = &ETH_DEV(priv)->data->dev_link;
	int ret = mlx4_link_update(ETH_DEV(priv), 0);

	if (ret)
		return ret;
	if ((!link->link_speed && link->link_status) ||
	    (link->link_speed && !link->link_status)) {
		if (!priv->intr_alarm) {
			/* Inconsistent status, check again later. */
			ret = rte_eal_alarm_set(MLX4_INTR_ALARM_TIMEOUT,
						(void (*)(void *))
						mlx4_link_status_alarm,
						priv);
			if (ret)
				return ret;
			priv->intr_alarm = 1;
		}
		rte_errno = EINPROGRESS;
		return -rte_errno;
	}
	return 0;
}

/**
 * Handle interrupts from the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_interrupt_handler(struct mlx4_priv *priv)
{
	enum { LSC, RMV, };
	static const enum rte_eth_event_type type[] = {
		[LSC] = RTE_ETH_EVENT_INTR_LSC,
		[RMV] = RTE_ETH_EVENT_INTR_RMV,
	};
	uint32_t caught[RTE_DIM(type)] = { 0 };
	struct ibv_async_event event;
	const struct rte_eth_intr_conf *const intr_conf =
		&ETH_DEV(priv)->data->dev_conf.intr_conf;
	unsigned int i;

	/* Read all messages and acknowledge them. */
	while (!mlx4_glue->get_async_event(priv->ctx, &event)) {
		switch (event.event_type) {
		case IBV_EVENT_PORT_ACTIVE:
		case IBV_EVENT_PORT_ERR:
			if (intr_conf->lsc && !mlx4_link_status_check(priv))
				++caught[LSC];
			break;
		case IBV_EVENT_DEVICE_FATAL:
			if (intr_conf->rmv)
				++caught[RMV];
			break;
		default:
			DEBUG("event type %d on physical port %d not handled",
			      event.event_type, event.element.port_num);
		}
		mlx4_glue->ack_async_event(&event);
	}
	for (i = 0; i != RTE_DIM(caught); ++i)
		if (caught[i])
			rte_eth_dev_callback_process(ETH_DEV(priv), type[i],
						     NULL);
}
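
/*
 * Illustrative sketch (not part of the driver): the LSC and RMV events
 * forwarded above through rte_eth_dev_callback_process() reach a callback
 * that the application registers, roughly as follows ("port_id" and
 * "event_cb" are placeholders):
 *
 *	static int
 *	event_cb(uint16_t port_id, enum rte_eth_event_type type,
 *		 void *param, void *ret_param)
 *	{
 *		RTE_SET_USED(param);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)type);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      event_cb, NULL);
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RMV,
 *				      event_cb, NULL);
 */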

/**
 * MLX4 CQ notification.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 * @param solicited
 *   Whether the request is solicited.
 */
static void
mlx4_arm_cq(struct rxq *rxq, int solicited)
{
	struct mlx4_cq *cq = &rxq->mcq;
	uint64_t doorbell;
	uint32_t sn = cq->arm_sn & MLX4_CQ_DB_GEQ_N_MASK;
	uint32_t ci = cq->cons_index & MLX4_CQ_DB_CI_MASK;
	uint32_t cmd = solicited ? MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT;

	*cq->arm_db = rte_cpu_to_be_32(sn << 28 | cmd | ci);
	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	rte_wmb();
	doorbell = sn << 28 | cmd | cq->cqn;
	doorbell <<= 32;
	doorbell |= ci;
	rte_write64(rte_cpu_to_be_64(doorbell), cq->cq_db_reg);
}
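
/*
 * For reference, the 64-bit arming doorbell built above packs, in its
 * upper 32 bits, the masked arm sequence number (shifted to bit 28), the
 * request command and the CQ number, and carries the masked consumer
 * index in its lower 32 bits; the value is written to the device in
 * big-endian byte order once rte_wmb() has made the doorbell record
 * update visible.
 */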

/**
 * Uninstall interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_intr_uninstall(struct mlx4_priv *priv)
{
	int err = rte_errno; /* Make sure rte_errno remains unchanged. */

	if (rte_intr_fd_get(priv->intr_handle) != -1) {
		rte_intr_callback_unregister(priv->intr_handle,
					     (void (*)(void *))
					     mlx4_interrupt_handler,
					     priv);
		if (rte_intr_fd_set(priv->intr_handle, -1))
			return -rte_errno;
	}
	rte_eal_alarm_cancel((void (*)(void *))mlx4_link_status_alarm, priv);
	priv->intr_alarm = 0;
	mlx4_rxq_intr_disable(priv);
	rte_errno = err;
	return 0;
}

/**
 * Install interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_intr_install(struct mlx4_priv *priv)
{
	const struct rte_eth_intr_conf *const intr_conf =
		&ETH_DEV(priv)->data->dev_conf.intr_conf;
	int rc;

	mlx4_intr_uninstall(priv);
	if (intr_conf->lsc | intr_conf->rmv) {
		if (rte_intr_fd_set(priv->intr_handle, priv->ctx->async_fd))
			return -rte_errno;

		rc = rte_intr_callback_register(priv->intr_handle,
						(void (*)(void *))
						mlx4_interrupt_handler,
						priv);
		if (rc < 0) {
			rte_errno = -rc;
			goto error;
		}
	}
	return 0;
error:
	mlx4_intr_uninstall(priv);
	return -rte_errno;
}
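
/*
 * Illustrative sketch (not part of the driver): the asynchronous event
 * handler above is only registered when LSC or RMV interrupts are
 * requested in the port configuration passed to rte_eth_dev_configure(),
 * e.g. ("port_id", "nb_rxq" and "nb_txq" are placeholders):
 *
 *	struct rte_eth_conf conf = {
 *		.intr_conf = {
 *			.lsc = 1,
 *			.rmv = 1,
 *			.rxq = 1,
 *		},
 *	};
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */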

/**
 * DPDK callback for Rx queue interrupt disable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq = dev->data->rx_queues[idx];
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret;

	if (!rxq || !rxq->channel) {
		ret = EINVAL;
	} else {
		ret = mlx4_glue->get_cq_event(rxq->cq->channel, &ev_cq,
					      &ev_ctx);
		/*
		 * For a non-zero ret, save errno: it may be EAGAIN, which
		 * means get_cq_event() was called before an event was
		 * received.
		 */
		if (ret)
			ret = errno;
		else if (ev_cq != rxq->cq)
			ret = EINVAL;
	}
	if (ret) {
		rte_errno = ret;
		if (ret != EAGAIN)
			WARN("unable to disable interrupt on rx queue %d",
			     idx);
	} else {
		rxq->mcq.arm_sn++;
		mlx4_glue->ack_cq_events(rxq->cq, 1);
	}
	return -ret;
}

/**
 * DPDK callback for Rx queue interrupt enable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq = dev->data->rx_queues[idx];
	int ret = 0;

	if (!rxq || !rxq->channel) {
		ret = EINVAL;
		rte_errno = ret;
		WARN("unable to arm interrupt on rx queue %d", idx);
	} else {
		mlx4_arm_cq(rxq, 0);
	}
	return -ret;
}
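
/*
 * Illustrative sketch (not part of the driver): the two callbacks above
 * back rte_eth_dev_rx_intr_enable() and rte_eth_dev_rx_intr_disable(); a
 * typical interrupt-driven Rx loop alternates between polling and sleeping
 * roughly as follows ("port_id", "queue_id", "mbufs" and BURST are
 * placeholders):
 *
 *	struct rte_epoll_event ev;
 *
 *	for (;;) {
 *		uint16_t n = rte_eth_rx_burst(port_id, queue_id, mbufs, BURST);
 *
 *		if (n > 0)
 *			continue;
 *		rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *		rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	}
 */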

/**
 * Enable datapath interrupts.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rxq_intr_enable(struct mlx4_priv *priv)
{
	const struct rte_eth_intr_conf *const intr_conf =
		&ETH_DEV(priv)->data->dev_conf.intr_conf;

	if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
		goto error;
	return 0;
error:
	return -rte_errno;
}

/**
 * Disable datapath interrupts, keeping other interrupts intact.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_rxq_intr_disable(struct mlx4_priv *priv)
{
	int err = rte_errno; /* Make sure rte_errno remains unchanged. */

	mlx4_rx_intr_vec_disable(priv);
	rte_errno = err;
}