/* xref: /dpdk/drivers/net/failsafe/failsafe_ops.c (revision 5a4806d304e084573eb2193341add736ef0af50f) */
/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>

#include "failsafe_private.h"
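
/*
 * Note: the fail-safe PMD aggregates several sub-devices behind a single
 * ethdev port. An illustrative (not authoritative) testpmd invocation,
 * following the fail-safe guide, could look like:
 *
 *   testpmd -c 0xff -n 4 \
 *     --vdev 'net_failsafe0,mac=de:ad:be:ef:01:02,dev(84:00.0),dev(net_ring0)' \
 *     -- -i
 *
 * Exact parameter syntax may differ between DPDK releases; see
 * doc/guides/nics/fail_safe.rst for the reference.
 */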

static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO,
	.tx_offload_capa = 0x0,
	.flow_type_rss_offloads = 0x0,
};

/**
 * Check whether the offload capabilities requested in the device
 * configuration are supported by a sub_device.
 *
 * @return
 *   0: all requested capabilities are supported by the sub_device
 *   positive value: the capability flag(s) not supported by the sub_device
 */
static int
fs_port_offload_validate(struct rte_eth_dev *dev,
			 struct sub_device *sdev)
{
	struct rte_eth_dev_info infos = {0};
	struct rte_eth_conf *cf;
	uint32_t cap;

	cf = &dev->data->dev_conf;
	SUBOPS(sdev, dev_infos_get)(ETH(sdev), &infos);
	/* RX capabilities */
	cap = infos.rx_offload_capa;
	if (cf->rxmode.hw_vlan_strip &&
	    ((cap & DEV_RX_OFFLOAD_VLAN_STRIP) == 0)) {
		WARN("VLAN stripping offload requested but not supported by sub_device %d",
		      SUB_ID(sdev));
		return DEV_RX_OFFLOAD_VLAN_STRIP;
	}
	if (cf->rxmode.hw_ip_checksum &&
	    ((cap & (DEV_RX_OFFLOAD_IPV4_CKSUM |
		     DEV_RX_OFFLOAD_UDP_CKSUM |
		     DEV_RX_OFFLOAD_TCP_CKSUM)) !=
	     (DEV_RX_OFFLOAD_IPV4_CKSUM |
	      DEV_RX_OFFLOAD_UDP_CKSUM |
	      DEV_RX_OFFLOAD_TCP_CKSUM))) {
		WARN("IP checksum offload requested but not supported by sub_device %d",
		      SUB_ID(sdev));
		return DEV_RX_OFFLOAD_IPV4_CKSUM |
		       DEV_RX_OFFLOAD_UDP_CKSUM |
		       DEV_RX_OFFLOAD_TCP_CKSUM;
	}
	if (cf->rxmode.enable_lro &&
	    ((cap & DEV_RX_OFFLOAD_TCP_LRO) == 0)) {
		WARN("TCP LRO offload requested but not supported by sub_device %d",
		      SUB_ID(sdev));
		return DEV_RX_OFFLOAD_TCP_LRO;
	}
	if (cf->rxmode.hw_vlan_extend &&
	    ((cap & DEV_RX_OFFLOAD_QINQ_STRIP) == 0)) {
		WARN("Stacked VLAN stripping offload requested but not supported by sub_device %d",
		      SUB_ID(sdev));
		return DEV_RX_OFFLOAD_QINQ_STRIP;
	}
	/* TX capabilities */
	/* Nothing to do, no tx capa supported */
	return 0;
}

/*
 * Disable the dev_conf flag related to an offload capability flag
 * within an ethdev configuration.
 */
static int
fs_port_disable_offload(struct rte_eth_conf *cf,
			uint32_t ol_cap)
{
	switch (ol_cap) {
	case DEV_RX_OFFLOAD_VLAN_STRIP:
		INFO("Disabling VLAN stripping offload");
		cf->rxmode.hw_vlan_strip = 0;
		break;
	case DEV_RX_OFFLOAD_IPV4_CKSUM:
	case DEV_RX_OFFLOAD_UDP_CKSUM:
	case DEV_RX_OFFLOAD_TCP_CKSUM:
	case (DEV_RX_OFFLOAD_IPV4_CKSUM |
	      DEV_RX_OFFLOAD_UDP_CKSUM |
	      DEV_RX_OFFLOAD_TCP_CKSUM):
		INFO("Disabling IP checksum offload");
		cf->rxmode.hw_ip_checksum = 0;
		break;
	case DEV_RX_OFFLOAD_TCP_LRO:
		INFO("Disabling TCP LRO offload");
		cf->rxmode.enable_lro = 0;
		break;
	case DEV_RX_OFFLOAD_QINQ_STRIP:
		INFO("Disabling stacked VLAN stripping offload");
		cf->rxmode.hw_vlan_extend = 0;
		break;
	default:
		DEBUG("Unable to disable offload capability: %" PRIx32,
		      ol_cap);
		return -1;
	}
	return 0;
}

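/*
 * Validate the requested offloads against each probed sub_device and relax
 * unsupported offload flags, unless the fail-safe port has already been
 * configured while several sub_devices are present. Then propagate the
 * resulting configuration to every probed sub_device, registering RMV/LSC
 * event callbacks where the sub_device supports them.
 */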
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int capa_flag;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_PROBED)
			continue;
		DEBUG("Checking capabilities for sub_device %d", i);
		while ((capa_flag = fs_port_offload_validate(dev, sdev))) {
			/*
			 * Refuse to change configuration if multiple devices
			 * are present and we already have configured at least
			 * some of them.
			 */
			if (PRIV(dev)->state >= DEV_ACTIVE &&
			    PRIV(dev)->subs_tail > 1) {
				ERROR("device already configured, cannot fix live configuration");
				return -1;
			}
			ret = fs_port_disable_offload(&dev->data->dev_conf,
						      capa_flag);
			if (ret) {
				ERROR("Unable to disable offload capability");
				return ret;
			}
		}
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED)
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		sdev->remove = 0;
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
						RTE_ETH_EVENT_INTR_LSC,
						failsafe_eth_lsc_event_callback,
						dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}

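/*
 * Start every configured (DEV_ACTIVE) sub_device, then let fs_switch_dev()
 * elect the sub_device that will actually carry the traffic.
 */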
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret)
			return ret;
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	return 0;
}

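/*
 * Stop all started sub_devices and step both the fail-safe device and the
 * sub_devices back to the state just below DEV_STARTED.
 */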
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		sdev->state = DEV_STARTED - 1;
	}
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if (ret) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if (ret) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
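/*
 * Close the fail-safe port: cancel the hot-plug polling alarm, stop the
 * device if it is still started, close every active sub_device and release
 * all RX/TX queues.
 */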
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}

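/*
 * Set up an RX queue: release any previous queue with the same id, allocate
 * a fail-safe rxq (with one reference counter per potential sub_device),
 * record the setup parameters so they can be replayed on sub_devices plugged
 * in later, and propagate the setup to every currently active sub_device.
 */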
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if (ret) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}

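/*
 * Set up a TX queue following the same scheme as fs_rx_queue_setup():
 * release, allocate, record the parameters for later replay, then propagate
 * the setup to every active sub_device.
 */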
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if (ret) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

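/*
 * Promiscuous and all-multicast toggles are simply broadcast to every
 * active sub_device.
 */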
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}

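/*
 * Refresh the link status of every active sub_device, then mirror the link
 * of the current TX sub_device into the fail-safe port. Returns 0 when the
 * reported link status changed, -1 otherwise.
 */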
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}

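/*
 * Report the counters already folded into stats_accumulator plus the live
 * counters of every active sub_device.
 */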
static void
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct sub_device *sdev;
	uint8_t i;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_get(PORT_ID(sdev), &sdev->stats_snapshot);
		failsafe_stats_increment(stats, &sdev->stats_snapshot);
	}
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numeric limits:
 *      Use the maximum possible value for every field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limit capabilities to those the fail-safe PMD understands,
 *      i.e. those it is able to verify against the device configuration
 *      (struct rte_eth_conf) when a sub_device is configured.
 *
 * At least one probed sub_device:
 *   Numeric limits:
 *      Use the values of the active probed sub_device.
 *      The rationale is that if any sub_device is less capable
 *      (for example regarding the number of queues) than the active
 *      sub_device, its subsequent configuration will fail.
 *      This failure cannot be foreseen when the failing sub_device is
 *      plugged in later on, so the configuration step is the single
 *      point of failure and error reporting.
 *   Capabilities:
 *      RX capabilities are the logical AND of the capabilities of
 *      all sub_devices and the default capabilities.
 *      TX capabilities are the logical AND of the capabilities of
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		  struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint32_t rx_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads &=
					default_infos.flow_type_rss_offloads;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
		return NULL;
	/*
	 * The API does not permit a clean AND of all ptypes; the list is
	 * also incomplete by design, and a best-possible value is not
	 * really needed in this context.
	 * We simply return the ptypes of the highest-priority device,
	 * usually the PREFERRED sub_device.
	 */
	return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if (ret) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if (ret) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

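/*
 * Flow control parameters are read from the current TX sub_device only,
 * while setting them is propagated to every active sub_device.
 */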
static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL)
		return -ENOTSUP;
	return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if (ret) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}

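/*
 * Add a MAC address to every active sub_device and remember its VMDq pool in
 * mac_addr_pool, so that the address can be replayed on sub_devices that are
 * plugged in later.
 */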
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}

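/*
 * Generic filter control: the RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET
 * combination hands back the fail-safe rte_flow operations; every other
 * request is forwarded to all active sub_devices.
 */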
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if (ret) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

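/* Operations exported by the fail-safe PMD to the ethdev layer. */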
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.filter_ctrl = fs_filter_ctrl,
};