/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"

static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO,
	.flow_type_rss_offloads =
			ETH_RSS_IP |
			ETH_RSS_UDP |
			ETH_RSS_TCP,
	.dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP,
};

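/*
 * Configure each sub-device with the configuration requested on the
 * fail-safe port: propagate RMV/LSC interrupt settings when a
 * sub-device supports them, register the corresponding event
 * callbacks, and mark successfully configured sub-devices DEV_ACTIVE.
 */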
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
						RTE_ETH_EVENT_INTR_LSC,
						failsafe_eth_lsc_event_callback,
						dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}

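/*
 * Start every active sub-device, install the Rx interrupt proxy and the
 * per-sub-device interrupts, then elect the data-path sub-device via
 * fs_switch_dev(). Queues that are not deferred are marked as started.
 */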
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

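/*
 * Stop every started sub-device, uninstall their Rx interrupt proxying
 * and roll both the fail-safe and sub-device states back below
 * DEV_STARTED. All queue states are reported as stopped afterwards.
 */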
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
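/*
 * Close the fail-safe port: cancel the hotplug alarm, stop the port if
 * it is still started, unregister the sub-device callbacks, close every
 * active sub-device and release all fail-safe queues.
 */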
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
	fs_unlock(dev, 0);
}

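/*
 * Stop the given Rx queue on every active sub-device.
 * Returns 0 if the queue could be stopped on at least one sub-device,
 * otherwise the last reported error.
 */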
static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	fs_lock(dev, 0);
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
			SUBOPS(sdev, rx_queue_release)
				(ETH(sdev)->data->rx_queues[rxq->qid]);
		}
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}

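/*
 * Allocate the fail-safe Rx queue, attach an eventfd used to proxy Rx
 * interrupts, then set the queue up on every active sub-device.
 * Deferred start is refused if any probed sub-device cannot start a
 * queue at runtime.
 */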
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating an eventfd as an interrupt vector.
	 * For the time being, pretend we are using MSIX interrupts;
	 * this makes rte_intr_efd_enable allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		fs_unlock(dev, 0);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	fs_unlock(dev, 0);
	return ret;
}

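/*
 * Enable Rx interrupt events on the given queue for every active
 * sub-device. The interrupt proxy service must be running, since it is
 * what forwards sub-device events to the queue eventfd.
 */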
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if the proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
			SUBOPS(sdev, tx_queue_release)
				(ETH(sdev)->data->tx_queues[txq->qid]);
		}
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

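/*
 * Allocate the fail-safe Tx queue and set it up on every active
 * sub-device. Deferred start is refused if any probed sub-device
 * cannot start a queue at runtime.
 */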
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	fs_unlock(dev, 0);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

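/*
 * Refresh the link status of every active sub-device, then report the
 * link of the current Tx sub-device as the fail-safe link.
 * Returns 0 when the link information was updated from the Tx
 * sub-device, -1 otherwise.
 */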
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

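/*
 * Report the fail-safe statistics: start from the private stats
 * accumulator and add a fresh snapshot from each active sub-device.
 * A query failure on a sub-device that has not been removed aborts the
 * operation and is returned to the caller.
 */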
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
				  i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerics:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limit capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerics:
 *      Use values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Use a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Use a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		  struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint64_t rx_offload_capa;
		uint64_t rxq_offload_capa;
		uint64_t rss_hf_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		rxq_offload_capa = default_infos.rx_queue_offload_capa;
		rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
			rxq_offload_capa &=
					PRIV(dev)->infos.rx_queue_offload_capa;
			rss_hf_offload_capa &=
					PRIV(dev)->infos.flow_type_rss_offloads;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.tx_queue_offload_capa &=
					default_infos.tx_queue_offload_capa;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not allow a clean AND of all ptypes.
	 * It is also incomplete by design, and we do not really care
	 * about having the best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
				i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

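/*
 * Apply the multicast MAC address list to every active sub-device and
 * keep a private copy in the fail-safe private data. On failure, the
 * previously stored list is re-applied to the active sub-devices.
 */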
static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	fs_lock(dev, 0);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;

	fs_unlock(dev, 0);
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}

	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
				" failed for sub_device %d with error %d",
				i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

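/*
 * Generic filter control: expose the fail-safe rte_flow ops for the
 * GENERIC/GET request, otherwise forward the request to every active
 * sub-device.
 */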
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

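/* Ethdev operations exported by the fail-safe PMD. */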
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};