xref: /dpdk/drivers/net/failsafe/failsafe_intr.c (revision f665790a5dbad7b645ff46f31d65e977324e7bfc)
19e0360aeSMoti Haimovsky /* SPDX-License-Identifier: BSD-3-Clause
25feecc57SShahaf Shuler  * Copyright 2018 Mellanox Technologies, Ltd
39e0360aeSMoti Haimovsky  */
49e0360aeSMoti Haimovsky 
59e0360aeSMoti Haimovsky /**
69e0360aeSMoti Haimovsky  * @file
79e0360aeSMoti Haimovsky  * Interrupts handling for failsafe driver.
89e0360aeSMoti Haimovsky  */
99e0360aeSMoti Haimovsky 
10f234e5bdSMoti Haimovsky #if defined(LINUX)
11f234e5bdSMoti Haimovsky #include <sys/epoll.h>
12f234e5bdSMoti Haimovsky #endif
1372b452c5SDmitry Kozlyuk #include <stdlib.h>
149e0360aeSMoti Haimovsky #include <unistd.h>
159e0360aeSMoti Haimovsky 
16709676bcSMoti Haimovsky #include <rte_alarm.h>
17709676bcSMoti Haimovsky #include <rte_errno.h>
18709676bcSMoti Haimovsky #include <rte_ethdev.h>
19709676bcSMoti Haimovsky #include <rte_interrupts.h>
20709676bcSMoti Haimovsky #include <rte_io.h>
21709676bcSMoti Haimovsky #include <rte_service_component.h>
22709676bcSMoti Haimovsky 
239e0360aeSMoti Haimovsky #include "failsafe_private.h"
249e0360aeSMoti Haimovsky 
25f234e5bdSMoti Haimovsky #define NUM_RX_PROXIES (FAILSAFE_MAX_ETHPORTS * RTE_MAX_RXTX_INTR_VEC_ID)
26f234e5bdSMoti Haimovsky 
27f234e5bdSMoti Haimovsky 
/**
 * Open an epoll file descriptor.
 *
 * @param flags
 *   Flags for defining epoll behavior.
 * @return
 *   The new epoll file descriptor on success (Linux only),
 *   negative errno value otherwise.
 */
static int
fs_epoll_create1(int flags)
{
#if defined(LINUX)
	return epoll_create1(flags);
#else
	/*
	 * epoll is Linux-specific (e.g. FreeBSD lacks it). An explicit
	 * fallback also guarantees the function always returns a value
	 * when neither LINUX nor BSD is defined.
	 */
	(void)flags;
	return -ENOTSUP;
#endif
}
46f234e5bdSMoti Haimovsky 
/**
 * Rx event proxy service routine.
 * This is the callback run on a service core: it blocks on the proxy epoll
 * file descriptor waiting for Rx events generated by the subdevices, then
 * forwards each event to the matching failsafe Rx queue's event fd so the
 * application sees a failsafe-level Rx interrupt.
 *
 * @param data
 *   Pointer to failsafe private structure (struct fs_priv).
 * @return
 *   0 on success, negative errno value otherwise.
 */
static int
fs_rx_event_proxy_routine(void *data)
{
	struct fs_priv *priv;
	struct rxq *rxq;
	struct rte_epoll_event *events;
	uint64_t u64;
	int i, n;
	int rc = 0;

	/* Value written to wake up readers of the queue's event fd. */
	u64 = 1;
	priv = data;
	events = priv->rxp.evec;
	/* Block indefinitely (-1 timeout) until subdevice events arrive. */
	n = rte_epoll_wait(priv->rxp.efd, events, NUM_RX_PROXIES, -1);
	for (i = 0; i < n; i++) {
		rxq = events[i].epdata.data;
		/* Forward only for queues that enabled events and have a fd. */
		if (rxq->enable_events && rxq->event_fd != -1) {
			if (write(rxq->event_fd, &u64, sizeof(u64)) !=
			    sizeof(u64)) {
				ERROR("Failed to proxy Rx event to socket %d",
				       rxq->event_fd);
				rc = -EIO;
			}
		}
	}
	return rc;
}
84709676bcSMoti Haimovsky 
/**
 * Uninstall failsafe Rx event proxy service.
 *
 * The switch deliberately falls through: each service state implies all the
 * teardown steps of the states below it (RUNNING -> unmap core, READY ->
 * stop service, REGISTERED -> unregister component).
 *
 * @param priv
 *   Pointer to failsafe private structure.
 */
static void
fs_rx_event_proxy_service_uninstall(struct fs_priv *priv)
{
	/* Unregister the event service. */
	switch (priv->rxp.sstate) {
	case SS_RUNNING:
		rte_service_map_lcore_set(priv->rxp.sid, priv->rxp.scid, 0);
		/* fall through */
	case SS_READY:
		rte_service_runstate_set(priv->rxp.sid, 0);
		rte_service_set_stats_enable(priv->rxp.sid, 0);
		rte_service_component_runstate_set(priv->rxp.sid, 0);
		/* fall through */
	case SS_REGISTERED:
		rte_service_component_unregister(priv->rxp.sid);
		/* fall through */
	default:
		break;
	}
}
111709676bcSMoti Haimovsky 
112709676bcSMoti Haimovsky /**
113709676bcSMoti Haimovsky  * Install the failsafe Rx event proxy service.
114709676bcSMoti Haimovsky  *
115709676bcSMoti Haimovsky  * @param priv
116709676bcSMoti Haimovsky  *   Pointer to failsafe private structure.
117709676bcSMoti Haimovsky  * @return
118709676bcSMoti Haimovsky  *   0 on success, negative errno value otherwise.
119709676bcSMoti Haimovsky  */
120709676bcSMoti Haimovsky static int
121709676bcSMoti Haimovsky fs_rx_event_proxy_service_install(struct fs_priv *priv)
122709676bcSMoti Haimovsky {
123709676bcSMoti Haimovsky 	struct rte_service_spec service;
124709676bcSMoti Haimovsky 	int32_t num_service_cores;
125709676bcSMoti Haimovsky 	int ret = 0;
126709676bcSMoti Haimovsky 
127709676bcSMoti Haimovsky 	num_service_cores = rte_service_lcore_count();
128709676bcSMoti Haimovsky 	if (num_service_cores <= 0) {
129709676bcSMoti Haimovsky 		ERROR("Failed to install Rx interrupts, "
130709676bcSMoti Haimovsky 		      "no service core found");
131709676bcSMoti Haimovsky 		return -ENOTSUP;
132709676bcSMoti Haimovsky 	}
133709676bcSMoti Haimovsky 	/* prepare service info */
134709676bcSMoti Haimovsky 	memset(&service, 0, sizeof(struct rte_service_spec));
135709676bcSMoti Haimovsky 	snprintf(service.name, sizeof(service.name), "%s_Rx_service",
13608647012SRaslan Darawsheh 		 priv->data->name);
13708647012SRaslan Darawsheh 	service.socket_id = priv->data->numa_node;
138709676bcSMoti Haimovsky 	service.callback = fs_rx_event_proxy_routine;
139709676bcSMoti Haimovsky 	service.callback_userdata = priv;
140709676bcSMoti Haimovsky 
141709676bcSMoti Haimovsky 	if (priv->rxp.sstate == SS_NO_SERVICE) {
142709676bcSMoti Haimovsky 		uint32_t service_core_list[num_service_cores];
143709676bcSMoti Haimovsky 
144709676bcSMoti Haimovsky 		/* get a service core to work with */
145709676bcSMoti Haimovsky 		ret = rte_service_lcore_list(service_core_list,
146709676bcSMoti Haimovsky 					     num_service_cores);
147709676bcSMoti Haimovsky 		if (ret <= 0) {
148709676bcSMoti Haimovsky 			ERROR("Failed to install Rx interrupts, "
149709676bcSMoti Haimovsky 			      "service core list empty or corrupted");
150709676bcSMoti Haimovsky 			return -ENOTSUP;
151709676bcSMoti Haimovsky 		}
152709676bcSMoti Haimovsky 		priv->rxp.scid = service_core_list[0];
153709676bcSMoti Haimovsky 		ret = rte_service_lcore_add(priv->rxp.scid);
154709676bcSMoti Haimovsky 		if (ret && ret != -EALREADY) {
155709676bcSMoti Haimovsky 			ERROR("Failed adding service core");
156709676bcSMoti Haimovsky 			return ret;
157709676bcSMoti Haimovsky 		}
158709676bcSMoti Haimovsky 		/* service core may be in "stopped" state, start it */
159709676bcSMoti Haimovsky 		ret = rte_service_lcore_start(priv->rxp.scid);
160709676bcSMoti Haimovsky 		if (ret && (ret != -EALREADY)) {
161709676bcSMoti Haimovsky 			ERROR("Failed to install Rx interrupts, "
162709676bcSMoti Haimovsky 			      "service core not started");
163709676bcSMoti Haimovsky 			return ret;
164709676bcSMoti Haimovsky 		}
165709676bcSMoti Haimovsky 		/* register our service */
166709676bcSMoti Haimovsky 		int32_t ret = rte_service_component_register(&service,
167709676bcSMoti Haimovsky 							     &priv->rxp.sid);
168709676bcSMoti Haimovsky 		if (ret) {
169709676bcSMoti Haimovsky 			ERROR("service register() failed");
170709676bcSMoti Haimovsky 			return -ENOEXEC;
171709676bcSMoti Haimovsky 		}
172709676bcSMoti Haimovsky 		priv->rxp.sstate = SS_REGISTERED;
173709676bcSMoti Haimovsky 		/* run the service */
174709676bcSMoti Haimovsky 		ret = rte_service_component_runstate_set(priv->rxp.sid, 1);
175709676bcSMoti Haimovsky 		if (ret < 0) {
176*f665790aSDavid Marchand 			ERROR("Failed Setting component runstate");
177709676bcSMoti Haimovsky 			return ret;
178709676bcSMoti Haimovsky 		}
179709676bcSMoti Haimovsky 		ret = rte_service_set_stats_enable(priv->rxp.sid, 1);
180709676bcSMoti Haimovsky 		if (ret < 0) {
181*f665790aSDavid Marchand 			ERROR("Failed enabling stats");
182709676bcSMoti Haimovsky 			return ret;
183709676bcSMoti Haimovsky 		}
184709676bcSMoti Haimovsky 		ret = rte_service_runstate_set(priv->rxp.sid, 1);
185709676bcSMoti Haimovsky 		if (ret < 0) {
186*f665790aSDavid Marchand 			ERROR("Failed to run service");
187709676bcSMoti Haimovsky 			return ret;
188709676bcSMoti Haimovsky 		}
189709676bcSMoti Haimovsky 		priv->rxp.sstate = SS_READY;
190709676bcSMoti Haimovsky 		/* map the service with the service core */
191709676bcSMoti Haimovsky 		ret = rte_service_map_lcore_set(priv->rxp.sid,
192709676bcSMoti Haimovsky 						priv->rxp.scid, 1);
193709676bcSMoti Haimovsky 		if (ret) {
194709676bcSMoti Haimovsky 			ERROR("Failed to install Rx interrupts, "
195709676bcSMoti Haimovsky 			      "could not map service core");
196709676bcSMoti Haimovsky 			return ret;
197709676bcSMoti Haimovsky 		}
198709676bcSMoti Haimovsky 		priv->rxp.sstate = SS_RUNNING;
199709676bcSMoti Haimovsky 	}
200709676bcSMoti Haimovsky 	return 0;
201709676bcSMoti Haimovsky }
202709676bcSMoti Haimovsky 
203709676bcSMoti Haimovsky /**
204f234e5bdSMoti Haimovsky  * Install failsafe Rx event proxy subsystem.
205f234e5bdSMoti Haimovsky  * This is the way the failsafe PMD generates Rx events on behalf of its
206f234e5bdSMoti Haimovsky  * subdevices.
207f234e5bdSMoti Haimovsky  *
208f234e5bdSMoti Haimovsky  * @param priv
209f234e5bdSMoti Haimovsky  *   Pointer to failsafe private structure.
210f234e5bdSMoti Haimovsky  * @return
211f234e5bdSMoti Haimovsky  *   0 on success, negative errno value otherwise and rte_errno is set.
212f234e5bdSMoti Haimovsky  */
213f234e5bdSMoti Haimovsky static int
214f234e5bdSMoti Haimovsky fs_rx_event_proxy_install(struct fs_priv *priv)
215f234e5bdSMoti Haimovsky {
216f234e5bdSMoti Haimovsky 	int rc = 0;
217f234e5bdSMoti Haimovsky 
218f234e5bdSMoti Haimovsky 	/*
219f234e5bdSMoti Haimovsky 	 * Create the epoll fd and event vector for the proxy service to
220f234e5bdSMoti Haimovsky 	 * wait on for Rx events generated by the subdevices.
221f234e5bdSMoti Haimovsky 	 */
222f234e5bdSMoti Haimovsky 	priv->rxp.efd = fs_epoll_create1(0);
223f234e5bdSMoti Haimovsky 	if (priv->rxp.efd < 0) {
224f234e5bdSMoti Haimovsky 		rte_errno = errno;
225f234e5bdSMoti Haimovsky 		ERROR("Failed to create epoll,"
226f234e5bdSMoti Haimovsky 		      " Rx interrupts will not be supported");
227f234e5bdSMoti Haimovsky 		return -rte_errno;
228f234e5bdSMoti Haimovsky 	}
229f234e5bdSMoti Haimovsky 	priv->rxp.evec = calloc(NUM_RX_PROXIES, sizeof(*priv->rxp.evec));
230f234e5bdSMoti Haimovsky 	if (priv->rxp.evec == NULL) {
231f234e5bdSMoti Haimovsky 		ERROR("Failed to allocate memory for event vectors,"
232f234e5bdSMoti Haimovsky 		      " Rx interrupts will not be supported");
233f234e5bdSMoti Haimovsky 		rc = -ENOMEM;
234f234e5bdSMoti Haimovsky 		goto error;
235f234e5bdSMoti Haimovsky 	}
236709676bcSMoti Haimovsky 	rc = fs_rx_event_proxy_service_install(priv);
237709676bcSMoti Haimovsky 	if (rc < 0)
238709676bcSMoti Haimovsky 		goto error;
239f234e5bdSMoti Haimovsky 	return 0;
240f234e5bdSMoti Haimovsky error:
241f234e5bdSMoti Haimovsky 	if (priv->rxp.efd >= 0) {
242f234e5bdSMoti Haimovsky 		close(priv->rxp.efd);
243f234e5bdSMoti Haimovsky 		priv->rxp.efd = -1;
244f234e5bdSMoti Haimovsky 	}
245f234e5bdSMoti Haimovsky 	if (priv->rxp.evec != NULL) {
246f234e5bdSMoti Haimovsky 		free(priv->rxp.evec);
247f234e5bdSMoti Haimovsky 		priv->rxp.evec = NULL;
248f234e5bdSMoti Haimovsky 	}
249f234e5bdSMoti Haimovsky 	rte_errno = -rc;
250f234e5bdSMoti Haimovsky 	return rc;
251f234e5bdSMoti Haimovsky }
252f234e5bdSMoti Haimovsky 
253f234e5bdSMoti Haimovsky /**
254f234e5bdSMoti Haimovsky  * RX Interrupt control per subdevice.
255f234e5bdSMoti Haimovsky  *
256f234e5bdSMoti Haimovsky  * @param sdev
257f234e5bdSMoti Haimovsky  *   Pointer to sub-device structure.
258f234e5bdSMoti Haimovsky  * @param op
259f234e5bdSMoti Haimovsky  *   The operation be performed for the vector.
260f234e5bdSMoti Haimovsky  *   Operation type of {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
261f234e5bdSMoti Haimovsky  * @return
262f234e5bdSMoti Haimovsky  *   - On success, zero.
263f234e5bdSMoti Haimovsky  *   - On failure, a negative value.
264f234e5bdSMoti Haimovsky  */
265f234e5bdSMoti Haimovsky static int
266f234e5bdSMoti Haimovsky failsafe_eth_rx_intr_ctl_subdevice(struct sub_device *sdev, int op)
267f234e5bdSMoti Haimovsky {
268f234e5bdSMoti Haimovsky 	struct rte_eth_dev *dev;
269f234e5bdSMoti Haimovsky 	struct rte_eth_dev *fsdev;
270f234e5bdSMoti Haimovsky 	int epfd;
271f234e5bdSMoti Haimovsky 	uint16_t pid;
272f234e5bdSMoti Haimovsky 	uint16_t qid;
273f234e5bdSMoti Haimovsky 	struct rxq *fsrxq;
274f234e5bdSMoti Haimovsky 	int rc;
275f234e5bdSMoti Haimovsky 	int ret = 0;
276f234e5bdSMoti Haimovsky 
277fa7bb47aSRaslan Darawsheh 	fsdev = fs_dev(sdev);
278f234e5bdSMoti Haimovsky 	if (sdev == NULL || (ETH(sdev) == NULL) ||
279fa7bb47aSRaslan Darawsheh 		fsdev == NULL || (PRIV(fsdev) == NULL)) {
280f234e5bdSMoti Haimovsky 		ERROR("Called with invalid arguments");
281f234e5bdSMoti Haimovsky 		return -EINVAL;
282f234e5bdSMoti Haimovsky 	}
283f234e5bdSMoti Haimovsky 	dev = ETH(sdev);
284fa7bb47aSRaslan Darawsheh 	epfd = PRIV(fsdev)->rxp.efd;
285f234e5bdSMoti Haimovsky 	pid = PORT_ID(sdev);
286f234e5bdSMoti Haimovsky 
287f234e5bdSMoti Haimovsky 	if (epfd <= 0) {
288f234e5bdSMoti Haimovsky 		if (op == RTE_INTR_EVENT_ADD) {
289f234e5bdSMoti Haimovsky 			ERROR("Proxy events are not initialized");
290f234e5bdSMoti Haimovsky 			return -EBADF;
291f234e5bdSMoti Haimovsky 		} else {
292f234e5bdSMoti Haimovsky 			return 0;
293f234e5bdSMoti Haimovsky 		}
294f234e5bdSMoti Haimovsky 	}
295f234e5bdSMoti Haimovsky 	if (dev->data->nb_rx_queues > fsdev->data->nb_rx_queues) {
296f234e5bdSMoti Haimovsky 		ERROR("subdevice has too many queues,"
297f234e5bdSMoti Haimovsky 		      " Interrupts will not be enabled");
298f234e5bdSMoti Haimovsky 			return -E2BIG;
299f234e5bdSMoti Haimovsky 	}
300f234e5bdSMoti Haimovsky 	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
301f234e5bdSMoti Haimovsky 		fsrxq = fsdev->data->rx_queues[qid];
302f234e5bdSMoti Haimovsky 		rc = rte_eth_dev_rx_intr_ctl_q(pid, qid, epfd,
303f234e5bdSMoti Haimovsky 					       op, (void *)fsrxq);
304f234e5bdSMoti Haimovsky 		if (rc) {
305f234e5bdSMoti Haimovsky 			ERROR("rte_eth_dev_rx_intr_ctl_q failed for "
306f234e5bdSMoti Haimovsky 			      "port %d  queue %d, epfd %d, error %d",
307f234e5bdSMoti Haimovsky 			      pid, qid, epfd, rc);
308f234e5bdSMoti Haimovsky 			ret = rc;
309f234e5bdSMoti Haimovsky 		}
310f234e5bdSMoti Haimovsky 	}
311f234e5bdSMoti Haimovsky 	return ret;
312f234e5bdSMoti Haimovsky }
313f234e5bdSMoti Haimovsky 
314f234e5bdSMoti Haimovsky /**
315f234e5bdSMoti Haimovsky  * Install Rx interrupts subsystem for a subdevice.
316f234e5bdSMoti Haimovsky  * This is a support for dynamically adding subdevices.
317f234e5bdSMoti Haimovsky  *
318f234e5bdSMoti Haimovsky  * @param sdev
319f234e5bdSMoti Haimovsky  *   Pointer to subdevice structure.
320f234e5bdSMoti Haimovsky  *
321f234e5bdSMoti Haimovsky  * @return
322f234e5bdSMoti Haimovsky  *   0 on success, negative errno value otherwise and rte_errno is set.
323f234e5bdSMoti Haimovsky  */
324f234e5bdSMoti Haimovsky int failsafe_rx_intr_install_subdevice(struct sub_device *sdev)
325f234e5bdSMoti Haimovsky {
326f234e5bdSMoti Haimovsky 	int rc;
327f234e5bdSMoti Haimovsky 	int qid;
328f234e5bdSMoti Haimovsky 	struct rte_eth_dev *fsdev;
329f234e5bdSMoti Haimovsky 	struct rxq **rxq;
330295968d1SFerruh Yigit 	const struct rte_eth_intr_conf *const intr_conf =
331f234e5bdSMoti Haimovsky 				&ETH(sdev)->data->dev_conf.intr_conf;
332f234e5bdSMoti Haimovsky 
333fa7bb47aSRaslan Darawsheh 	fsdev = fs_dev(sdev);
334f234e5bdSMoti Haimovsky 	rxq = (struct rxq **)fsdev->data->rx_queues;
335f234e5bdSMoti Haimovsky 	if (intr_conf->rxq == 0)
336f234e5bdSMoti Haimovsky 		return 0;
337f234e5bdSMoti Haimovsky 	rc = failsafe_eth_rx_intr_ctl_subdevice(sdev, RTE_INTR_EVENT_ADD);
338f234e5bdSMoti Haimovsky 	if (rc)
339f234e5bdSMoti Haimovsky 		return rc;
340f234e5bdSMoti Haimovsky 	/* enable interrupts on already-enabled queues */
341f234e5bdSMoti Haimovsky 	for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) {
342f234e5bdSMoti Haimovsky 		if (rxq[qid]->enable_events) {
343f234e5bdSMoti Haimovsky 			int ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev),
344f234e5bdSMoti Haimovsky 							     qid);
345f234e5bdSMoti Haimovsky 			if (ret && (ret != -ENOTSUP)) {
346f234e5bdSMoti Haimovsky 				ERROR("Failed to enable interrupts on "
347f234e5bdSMoti Haimovsky 				      "port %d queue %d", PORT_ID(sdev), qid);
348f234e5bdSMoti Haimovsky 				rc = ret;
349f234e5bdSMoti Haimovsky 			}
350f234e5bdSMoti Haimovsky 		}
351f234e5bdSMoti Haimovsky 	}
352f234e5bdSMoti Haimovsky 	return rc;
353f234e5bdSMoti Haimovsky }
354f234e5bdSMoti Haimovsky 
/**
 * Uninstall Rx interrupts subsystem for a subdevice.
 * This is a support for dynamically removing subdevices.
 *
 * Disables interrupts on every queue the failsafe device had events enabled
 * for, then removes the subdevice queues from the proxy epoll.
 *
 * @param sdev
 *   Pointer to subdevice structure.
 */
void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev)
{
	int qid;
	struct rte_eth_dev *fsdev;
	struct rxq *fsrxq;

	fsdev = fs_dev(sdev);
	for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) {
		/* The subdevice may expose more queues than failsafe. */
		if (qid < fsdev->data->nb_rx_queues) {
			fsrxq = fsdev->data->rx_queues[qid];
			if (fsrxq != NULL && fsrxq->enable_events)
				rte_eth_dev_rx_intr_disable(PORT_ID(sdev),
							    qid);
		}
	}
	failsafe_eth_rx_intr_ctl_subdevice(sdev, RTE_INTR_EVENT_DEL);
}
382f234e5bdSMoti Haimovsky 
383f234e5bdSMoti Haimovsky /**
384f234e5bdSMoti Haimovsky  * Uninstall failsafe Rx event proxy.
385f234e5bdSMoti Haimovsky  *
386f234e5bdSMoti Haimovsky  * @param priv
387f234e5bdSMoti Haimovsky  *   Pointer to failsafe private structure.
388f234e5bdSMoti Haimovsky  */
389f234e5bdSMoti Haimovsky static void
390f234e5bdSMoti Haimovsky fs_rx_event_proxy_uninstall(struct fs_priv *priv)
391f234e5bdSMoti Haimovsky {
392709676bcSMoti Haimovsky 	fs_rx_event_proxy_service_uninstall(priv);
393f234e5bdSMoti Haimovsky 	if (priv->rxp.evec != NULL) {
394f234e5bdSMoti Haimovsky 		free(priv->rxp.evec);
395f234e5bdSMoti Haimovsky 		priv->rxp.evec = NULL;
396f234e5bdSMoti Haimovsky 	}
397b9663f60SYunjian Wang 	if (priv->rxp.efd >= 0) {
398f234e5bdSMoti Haimovsky 		close(priv->rxp.efd);
399f234e5bdSMoti Haimovsky 		priv->rxp.efd = -1;
400f234e5bdSMoti Haimovsky 	}
401f234e5bdSMoti Haimovsky }
402f234e5bdSMoti Haimovsky 
4039e0360aeSMoti Haimovsky /**
4049e0360aeSMoti Haimovsky  * Uninstall failsafe interrupt vector.
4059e0360aeSMoti Haimovsky  *
4069e0360aeSMoti Haimovsky  * @param priv
4079e0360aeSMoti Haimovsky  *   Pointer to failsafe private structure.
4089e0360aeSMoti Haimovsky  */
4099e0360aeSMoti Haimovsky static void
4109e0360aeSMoti Haimovsky fs_rx_intr_vec_uninstall(struct fs_priv *priv)
4119e0360aeSMoti Haimovsky {
4129e0360aeSMoti Haimovsky 	struct rte_intr_handle *intr_handle;
4139e0360aeSMoti Haimovsky 
414d61138d4SHarman Kalra 	intr_handle = priv->intr_handle;
415d61138d4SHarman Kalra 	rte_intr_vec_list_free(intr_handle);
416d61138d4SHarman Kalra 
417d61138d4SHarman Kalra 	rte_intr_nb_efd_set(intr_handle, 0);
4189e0360aeSMoti Haimovsky }
4199e0360aeSMoti Haimovsky 
4209e0360aeSMoti Haimovsky /**
4219e0360aeSMoti Haimovsky  * Installs failsafe interrupt vector to be registered with EAL later on.
4229e0360aeSMoti Haimovsky  *
4239e0360aeSMoti Haimovsky  * @param priv
4249e0360aeSMoti Haimovsky  *   Pointer to failsafe private structure.
4259e0360aeSMoti Haimovsky  *
4269e0360aeSMoti Haimovsky  * @return
4279e0360aeSMoti Haimovsky  *   0 on success, negative errno value otherwise and rte_errno is set.
4289e0360aeSMoti Haimovsky  */
4299e0360aeSMoti Haimovsky static int
4309e0360aeSMoti Haimovsky fs_rx_intr_vec_install(struct fs_priv *priv)
4319e0360aeSMoti Haimovsky {
4329e0360aeSMoti Haimovsky 	unsigned int i;
4339e0360aeSMoti Haimovsky 	unsigned int rxqs_n;
4349e0360aeSMoti Haimovsky 	unsigned int n;
4359e0360aeSMoti Haimovsky 	unsigned int count;
4369e0360aeSMoti Haimovsky 	struct rte_intr_handle *intr_handle;
4379e0360aeSMoti Haimovsky 
43808647012SRaslan Darawsheh 	rxqs_n = priv->data->nb_rx_queues;
4399e0360aeSMoti Haimovsky 	n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
4409e0360aeSMoti Haimovsky 	count = 0;
441d61138d4SHarman Kalra 	intr_handle = priv->intr_handle;
4429e0360aeSMoti Haimovsky 	/* Allocate the interrupt vector of the failsafe Rx proxy interrupts */
443d61138d4SHarman Kalra 	if (rte_intr_vec_list_alloc(intr_handle, NULL, n)) {
4449e0360aeSMoti Haimovsky 		fs_rx_intr_vec_uninstall(priv);
4459e0360aeSMoti Haimovsky 		rte_errno = ENOMEM;
4469e0360aeSMoti Haimovsky 		ERROR("Failed to allocate memory for interrupt vector,"
4479e0360aeSMoti Haimovsky 		      " Rx interrupts will not be supported");
4489e0360aeSMoti Haimovsky 		return -rte_errno;
4499e0360aeSMoti Haimovsky 	}
4509e0360aeSMoti Haimovsky 	for (i = 0; i < n; i++) {
45108647012SRaslan Darawsheh 		struct rxq *rxq = priv->data->rx_queues[i];
4529e0360aeSMoti Haimovsky 
4539e0360aeSMoti Haimovsky 		/* Skip queues that cannot request interrupts. */
4549e0360aeSMoti Haimovsky 		if (rxq == NULL || rxq->event_fd < 0) {
4559e0360aeSMoti Haimovsky 			/* Use invalid intr_vec[] index to disable entry. */
456d61138d4SHarman Kalra 			if (rte_intr_vec_list_index_set(intr_handle, i,
457d61138d4SHarman Kalra 			RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
458d61138d4SHarman Kalra 				return -rte_errno;
4599e0360aeSMoti Haimovsky 			continue;
4609e0360aeSMoti Haimovsky 		}
4619e0360aeSMoti Haimovsky 		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
4629e0360aeSMoti Haimovsky 			rte_errno = E2BIG;
4639e0360aeSMoti Haimovsky 			ERROR("Too many Rx queues for interrupt vector size"
4649e0360aeSMoti Haimovsky 			      " (%d), Rx interrupts cannot be enabled",
4659e0360aeSMoti Haimovsky 			      RTE_MAX_RXTX_INTR_VEC_ID);
4669e0360aeSMoti Haimovsky 			fs_rx_intr_vec_uninstall(priv);
4679e0360aeSMoti Haimovsky 			return -rte_errno;
4689e0360aeSMoti Haimovsky 		}
469d61138d4SHarman Kalra 		if (rte_intr_vec_list_index_set(intr_handle, i,
470d61138d4SHarman Kalra 					RTE_INTR_VEC_RXTX_OFFSET + count))
471d61138d4SHarman Kalra 			return -rte_errno;
472d61138d4SHarman Kalra 
473d61138d4SHarman Kalra 		if (rte_intr_efds_index_set(intr_handle, count,
474d61138d4SHarman Kalra 						   rxq->event_fd))
475d61138d4SHarman Kalra 			return -rte_errno;
4769e0360aeSMoti Haimovsky 		count++;
4779e0360aeSMoti Haimovsky 	}
4789e0360aeSMoti Haimovsky 	if (count == 0) {
4799e0360aeSMoti Haimovsky 		fs_rx_intr_vec_uninstall(priv);
4809e0360aeSMoti Haimovsky 	} else {
481d61138d4SHarman Kalra 		if (rte_intr_nb_efd_set(intr_handle, count))
482d61138d4SHarman Kalra 			return -rte_errno;
483d61138d4SHarman Kalra 
484d61138d4SHarman Kalra 		if (rte_intr_efd_counter_size_set(intr_handle,
485d61138d4SHarman Kalra 				sizeof(uint64_t)))
486d61138d4SHarman Kalra 			return -rte_errno;
4879e0360aeSMoti Haimovsky 	}
4889e0360aeSMoti Haimovsky 	return 0;
4899e0360aeSMoti Haimovsky }
4909e0360aeSMoti Haimovsky 
4919e0360aeSMoti Haimovsky 
/**
 * Uninstall failsafe Rx interrupts subsystem.
 *
 * @param dev
 *   Pointer to the failsafe Ethernet device.
 */
void
failsafe_rx_intr_uninstall(struct rte_eth_dev *dev)
{
	struct fs_priv *priv;
	struct rte_intr_handle *intr_handle;

	priv = PRIV(dev);
	intr_handle = priv->intr_handle;
	/* Release any epoll fds EAL created for this interrupt handle. */
	rte_intr_free_epoll_fd(intr_handle);
	fs_rx_event_proxy_uninstall(priv);
	fs_rx_intr_vec_uninstall(priv);
	/* Detach the handle so a later install starts from scratch. */
	dev->intr_handle = NULL;
}
5149e0360aeSMoti Haimovsky 
/**
 * Install failsafe Rx interrupts subsystem.
 *
 * @param dev
 *   Pointer to the failsafe Ethernet device.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
failsafe_rx_intr_install(struct rte_eth_dev *dev)
{
	struct fs_priv *priv = PRIV(dev);
	const struct rte_eth_intr_conf *const intr_conf =
			&priv->data->dev_conf.intr_conf;

	/* No-op when Rx interrupts are disabled or already installed. */
	if (intr_conf->rxq == 0 || dev->intr_handle != NULL)
		return 0;
	if (fs_rx_intr_vec_install(priv) < 0)
		return -rte_errno;
	if (fs_rx_event_proxy_install(priv) < 0) {
		/* Roll back the vector on proxy failure. */
		fs_rx_intr_vec_uninstall(priv);
		return -rte_errno;
	}
	dev->intr_handle = priv->intr_handle;
	return 0;
}
542