xref: /dpdk/drivers/net/failsafe/failsafe_rxtx.c (revision df96fd0d73955bdc7ca3909e772ff2ad903249c6)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <rte_atomic.h>
#include <rte_debug.h>
#include <rte_mbuf.h>
#include <ethdev_driver.h>

#include "failsafe_private.h"

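/*
 * Check whether Rx bursts can be safely issued on a sub-device.
 *
 * Rx is unsafe when the sub-device has no underlying ethdev, exposes no
 * Rx burst function, is not in the DEV_STARTED state, or is flagged for
 * removal.
 */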
static inline int
fs_rx_unsafe(struct sub_device *sdev)
{
	return (ETH(sdev) == NULL) ||
		(ETH(sdev)->rx_pkt_burst == NULL) ||
		(sdev->state != DEV_STARTED) ||
		(sdev->remove != 0);
}

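/*
 * Check whether Tx bursts can be safely issued on a sub-device.
 *
 * Unlike fs_rx_unsafe(), the sub-device pointer itself is checked, as
 * callers pass the result of TX_SUBDEV(), which may be NULL.
 */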
static inline int
fs_tx_unsafe(struct sub_device *sdev)
{
	return (sdev == NULL) ||
		(ETH(sdev) == NULL) ||
		(ETH(sdev)->tx_pkt_burst == NULL) ||
		(sdev->state != DEV_STARTED);
}

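/*
 * Select the Rx/Tx burst functions of the fail-safe port.
 *
 * Safe bursts revalidate sub-device state on every call; fast bursts
 * assume all sub-devices are usable. The safe variants are installed
 * whenever any sub-device is unsafe or force_safe is set, and the final
 * rte_wmb() commits the function pointer updates before the caller
 * proceeds. Typically invoked after sub-device state changes (start,
 * stop, hot-plug events).
 */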
void
failsafe_set_burst_fn(struct rte_eth_dev *dev, int force_safe)
{
	struct sub_device *sdev;
	uint8_t i;
	int need_safe;
	int safe_set;

	need_safe = force_safe;
	FOREACH_SUBDEV(sdev, i, dev)
		need_safe |= fs_rx_unsafe(sdev);
	safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst);
	if (need_safe && !safe_set) {
		DEBUG("Using safe RX bursts%s",
		      (force_safe ? " (forced)" : ""));
		dev->rx_pkt_burst = &failsafe_rx_burst;
	} else if (!need_safe && safe_set) {
		DEBUG("Using fast RX bursts");
		dev->rx_pkt_burst = &failsafe_rx_burst_fast;
	}
	need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev));
	safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst);
	if (need_safe && !safe_set) {
		DEBUG("Using safe TX bursts%s",
		      (force_safe ? " (forced)" : ""));
		dev->tx_pkt_burst = &failsafe_tx_burst;
	} else if (!need_safe && safe_set) {
		DEBUG("Using fast TX bursts");
		dev->tx_pkt_burst = &failsafe_tx_burst_fast;
	}
	rte_wmb();
}

/*
 * Override source port in Rx packets.
 *
 * Make Rx packets originate from this PMD instance instead of one of its
 * sub-devices. This is mandatory to avoid breaking applications: the port
 * field of an mbuf must match the port the application actually polled.
 */
static void
failsafe_rx_set_port(struct rte_mbuf **rx_pkts, uint16_t nb_pkts, uint16_t port)
{
	unsigned int i;

	for (i = 0; i != nb_pkts; ++i)
		rx_pkts[i]->port = port;
}

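/*
 * Safe Rx burst.
 *
 * Polls sub-devices in round-robin order, resuming from where the
 * previous burst stopped, and skips any sub-device on which Rx is
 * currently unsafe. Stops after one full cycle or as soon as a
 * sub-device returns packets.
 */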
uint16_t
failsafe_rx_burst(void *queue,
		  struct rte_mbuf **rx_pkts,
		  uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;

	rxq = queue;
	sdev = rxq->sdev;
	do {
		if (fs_rx_unsafe(sdev)) {
			nb_rx = 0;
			sdev = sdev->next;
			continue;
		}
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		/* Flag the sub-device queue as in use across the burst call. */
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		sdev = sdev->next;
	} while (nb_rx == 0 && sdev != rxq->sdev);
	/* Remember where to resume on the next burst. */
	rxq->sdev = sdev;
	if (nb_rx)
		failsafe_rx_set_port(rx_pkts, nb_rx,
				     rxq->priv->data->port_id);
	return nb_rx;
}

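/*
 * Fast Rx burst.
 *
 * Same round-robin scheme as failsafe_rx_burst(), minus the per-call
 * safety checks: every sub-device is assumed pollable, which RTE_ASSERT()
 * verifies in debug builds only.
 */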
uint16_t
failsafe_rx_burst_fast(void *queue,
			 struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;

	rxq = queue;
	sdev = rxq->sdev;
	do {
		RTE_ASSERT(!fs_rx_unsafe(sdev));
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		sdev = sdev->next;
	} while (nb_rx == 0 && sdev != rxq->sdev);
	rxq->sdev = sdev;
	if (nb_rx)
		failsafe_rx_set_port(rx_pkts, nb_rx,
				     rxq->priv->data->port_id);
	return nb_rx;
}

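/*
 * Safe Tx burst.
 *
 * Forwards the whole burst to the active Tx sub-device, returning 0
 * (nothing sent) when that sub-device is currently unsafe.
 */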
uint16_t
failsafe_tx_burst(void *queue,
		  struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct txq *txq;
	void *sub_txq;
	uint16_t nb_tx;

	txq = queue;
	sdev = TX_SUBDEV(&rte_eth_devices[txq->priv->data->port_id]);
	if (unlikely(fs_tx_unsafe(sdev)))
		return 0;
	sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
	FS_ATOMIC_P(txq->refcnt[sdev->sid]);
	nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
	FS_ATOMIC_V(txq->refcnt[sdev->sid]);
	return nb_tx;
}

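/*
 * Fast Tx burst.
 *
 * Same forwarding as failsafe_tx_burst() without the runtime safety
 * check; the sub-device is assumed valid and started (asserted in debug
 * builds only).
 */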
uint16_t
failsafe_tx_burst_fast(void *queue,
			 struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct txq *txq;
	void *sub_txq;
	uint16_t nb_tx;

	txq = queue;
	sdev = TX_SUBDEV(&rte_eth_devices[txq->priv->data->port_id]);
	RTE_ASSERT(!fs_tx_unsafe(sdev));
	sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
	FS_ATOMIC_P(txq->refcnt[sdev->sid]);
	nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
	FS_ATOMIC_V(txq->refcnt[sdev->sid]);
	return nb_tx;
}