/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <rte_mbuf.h>
#include <rte_io.h>
#include <ethdev_driver.h>
#include "enet_regs.h"
#include "enet_ethdev.h"
#include "enet_pmd_logs.h"

/* This function handles enetfec_rx_queue processing: it dequeues received
 * packets from the Rx ring and, while walking the ring, returns each processed
 * descriptor to the hardware by setting its empty indicator.
 */
uint16_t
enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct rte_mempool *pool;
	struct bufdesc *bdp;
	struct rte_mbuf *mbuf, *new_mbuf = NULL;
	unsigned short status;
	unsigned short pkt_len;
	int pkt_received = 0, index = 0;
	void *data, *mbuf_data;
	uint16_t vlan_tag;
	struct  bufdesc_ex *ebdp = NULL;
	bool    vlan_packet_rcvd = false;
	struct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;
	struct rte_eth_stats *stats = &rxq->fep->stats;
	struct rte_eth_conf *eth_conf = &rxq->fep->dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	pool = rxq->pool;
	bdp = rxq->bd.cur;

	/* Process the incoming packet */
	status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
	while ((status & RX_BD_EMPTY) == 0) {
		if (pkt_received >= nb_pkts)
			break;

		/* Check for errors. */
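		/* RX_BD_LAST is expected to be set on a complete frame, so the
		 * XOR below clears it in the normal case and leaves it set when
		 * the frame did not end in this descriptor; the error mask then
		 * catches a missing LAST along with the other error bits.
		 */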
		status ^= RX_BD_LAST;
		if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
			RX_BD_CR | RX_BD_OV | RX_BD_LAST |
			RX_BD_TR)) {
			stats->ierrors++;
			if (status & RX_BD_OV) {
				/* FIFO overrun */
				/* enet_dump_rx(rxq); */
				ENETFEC_DP_LOG(DEBUG, "rx_fifo_error");
				goto rx_processing_done;
			}
			if (status & (RX_BD_LG | RX_BD_SH
						| RX_BD_LAST)) {
				/* Frame too long or too short. */
				ENETFEC_DP_LOG(DEBUG, "rx_length_error");
				if (status & RX_BD_LAST)
					ENETFEC_DP_LOG(DEBUG, "rcv is not +last");
			}
			if (status & RX_BD_CR) {     /* CRC Error */
				ENETFEC_DP_LOG(DEBUG, "rx_crc_errors");
			}
			/* Report late collisions as a frame error. */
			if (status & (RX_BD_NO | RX_BD_TR))
				ENETFEC_DP_LOG(DEBUG, "rx_frame_error");
			goto rx_processing_done;
		}

		new_mbuf = rte_pktmbuf_alloc(pool);
		if (unlikely(new_mbuf == NULL)) {
			stats->rx_nombuf++;
			break;
		}

		/* Process the incoming frame. */
		stats->ipackets++;
		pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
		stats->ibytes += pkt_len;

		/* shows data with respect to the data_off field. */
		index = enet_get_bd_index(bdp, &rxq->bd);
		mbuf = rxq->rx_mbuf[index];

		data = rte_pktmbuf_mtod(mbuf, uint8_t *);
		mbuf_data = data;
		rte_prefetch0(data);
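		/* The length reported in bd_datlen includes the 4-byte
		 * Ethernet FCS, which is not counted in the mbuf data length.
		 */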
		rte_pktmbuf_append((struct rte_mbuf *)mbuf,
				pkt_len - 4);

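		/* With the receive accelerator (RACC) shift enabled, the
		 * hardware prepends two bytes of padding so the IP header is
		 * 32-bit aligned; drop them before handing the mbuf up.
		 */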
		if (rxq->fep->quirks & QUIRK_RACC)
			data = rte_pktmbuf_adj(mbuf, 2);

		rx_pkts[pkt_received] = mbuf;
		pkt_received++;

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (rxq->fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN) &&
				rxq->fep->bufdesc_ex &&
				(rte_read32(&ebdp->bd_esc) &
				rte_cpu_to_le_32(BD_ENETFEC_RX_VLAN))) {
			/* Push and remove the vlan tag */
			struct rte_vlan_hdr *vlan_header =
				(struct rte_vlan_hdr *)
				((uint8_t *)data + ETH_HLEN);
			vlan_tag = rte_be_to_cpu_16(vlan_header->vlan_tci);

			vlan_packet_rcvd = true;
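			/* Move the destination and source MAC addresses
			 * forward over the 4-byte VLAN header, then advance
			 * the mbuf data pointer so the tag disappears from
			 * the frame.
			 */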
			memmove((uint8_t *)mbuf_data + RTE_VLAN_HLEN,
				data, RTE_ETHER_ADDR_LEN * 2);
			rte_pktmbuf_adj(mbuf, RTE_VLAN_HLEN);
		}

		if (rxq->fep->bufdesc_ex &&
			(rxq->fep->flag_csum & RX_FLAG_CSUM_EN)) {
			if ((rte_read32(&ebdp->bd_esc) &
				rte_cpu_to_le_32(RX_FLAG_CSUM_ERR)) == 0) {
				/* Hardware reported no checksum error */
				mbuf->ol_flags = RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			} else {
				mbuf->ol_flags = RTE_MBUF_F_RX_IP_CKSUM_BAD;
			}
		}

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd) {
			mbuf->vlan_tci = vlan_tag;
			mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED
						| RTE_MBUF_F_RX_VLAN;
		}

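		/* Hand the ring slot a freshly allocated mbuf and program its
		 * IOVA into the descriptor so the hardware can refill it.
		 */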
		rxq->rx_mbuf[index] = new_mbuf;
		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
				&bdp->bd_bufaddr);
rx_processing_done:
		/* Processing of this buffer is complete; clear its status
		 * flags before handing it back to the hardware.
		 */
		status &= ~RX_BD_STATS;

		/* Mark the buffer empty */
		status |= RX_BD_EMPTY;

		if (rxq->fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			rte_write32(rte_cpu_to_le_32(RX_BD_INT),
				    &ebdp->bd_esc);
			rte_write32(0, &ebdp->bd_prot);
			rte_write32(0, &ebdp->bd_bdu);
		}

		/* Make sure the updates to the rest of the descriptor are
		 * performed before transferring ownership.
		 */
		rte_wmb();
		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);

		/* Update BD pointer to next entry */
		bdp = enet_get_nextdesc(bdp, &rxq->bd);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames.
		 */
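		/* A write to the Rx descriptor active register (the written
		 * value itself is not significant) tells the controller that
		 * empty descriptors are available again.
		 */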
		rte_write32(0, rxq->bd.active_reg_desc);
		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
	}
	rxq->bd.cur = bdp;
	return pkt_received;
}

uint16_t
enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct enetfec_priv_tx_q *txq  =
			(struct enetfec_priv_tx_q *)tx_queue;
	struct rte_eth_stats *stats = &txq->fep->stats;
	struct bufdesc *bdp, *last_bdp;
	struct rte_mbuf *mbuf;
	unsigned short status;
	unsigned short buflen;
	unsigned int index, estatus = 0;
	unsigned int i, pkt_transmitted = 0;
	uint8_t *data;
	int tx_st = 1;

	while (tx_st) {
		if (pkt_transmitted >= nb_pkts) {
			tx_st = 0;
			break;
		}

		mbuf = *(tx_pkts);
		if (mbuf->nb_segs > 1) {
			ENETFEC_DP_LOG(DEBUG, "SG not supported");
			return pkt_transmitted;
		}

		tx_pkts++;
		bdp = txq->bd.cur;

		/* First clean the ring */
		index = enet_get_bd_index(bdp, &txq->bd);
		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));

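		/* If the descriptor is still owned by the hardware the ring is
		 * full: count an output error and stop the burst; the caller
		 * keeps ownership of the unsent mbufs.
		 */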
		if (status & TX_BD_READY) {
			stats->oerrors++;
			break;
		}
		if (txq->tx_mbuf[index]) {
			rte_pktmbuf_free(txq->tx_mbuf[index]);
			txq->tx_mbuf[index] = NULL;
		}

		/* Fill in a Tx ring entry */
		last_bdp = bdp;
		status &= ~TX_BD_STATS;

		/* Set buffer length and buffer pointer */
		buflen = rte_pktmbuf_pkt_len(mbuf);
		stats->opackets++;
		stats->obytes += buflen;

		status |= (TX_BD_LAST);
		data = rte_pktmbuf_mtod(mbuf, void *);
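		/* Flush the frame contents from the CPU data cache so the
		 * DMA engine reads up-to-date bytes from memory.
		 */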
		for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
			dcbf(data + i);

		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
			    &bdp->bd_bufaddr);
		rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);

		if (txq->fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

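			/* TX_BD_IINS/TX_BD_PINS request insertion of the IP
			 * header and protocol (L4) checksums by the MAC.
			 */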
			if (mbuf->ol_flags == RTE_MBUF_F_RX_IP_CKSUM_GOOD)
				estatus |= TX_BD_PINS | TX_BD_IINS;

			rte_write32(0, &ebdp->bd_bdu);
			rte_write32(rte_cpu_to_le_32(estatus),
				    &ebdp->bd_esc);
		}

		index = enet_get_bd_index(last_bdp, &txq->bd);
		/* Save mbuf pointer */
		txq->tx_mbuf[index] = mbuf;

		/* Make sure the updates to the rest of the descriptor are
		 * performed before transferring ownership.
		 */
		status |= (TX_BD_READY | TX_BD_TC);
		rte_wmb();
		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);

		/* Trigger transmission start */
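		/* As on the Rx side, a write to the Tx descriptor active
		 * register (value ignored) tells the controller a ready
		 * descriptor is waiting in the ring.
		 */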
		rte_write32(0, txq->bd.active_reg_desc);
		pkt_transmitted++;

		/* If this was the last BD in the ring, start at the
		 * beginning again.
		 */
		bdp = enet_get_nextdesc(last_bdp, &txq->bd);

		/* Make sure the updates to bdp and tx_mbuf are performed
		 * before updating txq->bd.cur.
		 */
		txq->bd.cur = bdp;
	}
	return pkt_transmitted;
}