/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <rte_mbuf.h>
#include <rte_io.h>
#include <ethdev_driver.h>
#include "enet_regs.h"
#include "enet_ethdev.h"
#include "enet_pmd_logs.h"

12ecae7157SApeksha Gupta /* This function does enetfec_rx_queue processing. Dequeue packet from Rx queue
13ecae7157SApeksha Gupta * When update through the ring, just set the empty indicator.
14ecae7157SApeksha Gupta */
15ecae7157SApeksha Gupta uint16_t
enetfec_recv_pkts(void * rxq1,struct rte_mbuf ** rx_pkts,uint16_t nb_pkts)16ecae7157SApeksha Gupta enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
17ecae7157SApeksha Gupta uint16_t nb_pkts)
18ecae7157SApeksha Gupta {
19ecae7157SApeksha Gupta struct rte_mempool *pool;
20ecae7157SApeksha Gupta struct bufdesc *bdp;
21ecae7157SApeksha Gupta struct rte_mbuf *mbuf, *new_mbuf = NULL;
22ecae7157SApeksha Gupta unsigned short status;
23ecae7157SApeksha Gupta unsigned short pkt_len;
24ecae7157SApeksha Gupta int pkt_received = 0, index = 0;
25c75b9c3aSApeksha Gupta void *data, *mbuf_data;
26c75b9c3aSApeksha Gupta uint16_t vlan_tag;
27c75b9c3aSApeksha Gupta struct bufdesc_ex *ebdp = NULL;
28c75b9c3aSApeksha Gupta bool vlan_packet_rcvd = false;
29ecae7157SApeksha Gupta struct enetfec_priv_rx_q *rxq = (struct enetfec_priv_rx_q *)rxq1;
30ecae7157SApeksha Gupta struct rte_eth_stats *stats = &rxq->fep->stats;
31c75b9c3aSApeksha Gupta struct rte_eth_conf *eth_conf = &rxq->fep->dev->data->dev_conf;
32c75b9c3aSApeksha Gupta uint64_t rx_offloads = eth_conf->rxmode.offloads;
33ecae7157SApeksha Gupta pool = rxq->pool;
34ecae7157SApeksha Gupta bdp = rxq->bd.cur;
35ecae7157SApeksha Gupta
36ecae7157SApeksha Gupta /* Process the incoming packet */
37ecae7157SApeksha Gupta status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
38ecae7157SApeksha Gupta while ((status & RX_BD_EMPTY) == 0) {
39ecae7157SApeksha Gupta if (pkt_received >= nb_pkts)
40ecae7157SApeksha Gupta break;
41ecae7157SApeksha Gupta
42ecae7157SApeksha Gupta /* Check for errors. */
43ecae7157SApeksha Gupta status ^= RX_BD_LAST;
44ecae7157SApeksha Gupta if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
45ecae7157SApeksha Gupta RX_BD_CR | RX_BD_OV | RX_BD_LAST |
46ecae7157SApeksha Gupta RX_BD_TR)) {
47ecae7157SApeksha Gupta stats->ierrors++;
48ecae7157SApeksha Gupta if (status & RX_BD_OV) {
49ecae7157SApeksha Gupta /* FIFO overrun */
50ecae7157SApeksha Gupta /* enet_dump_rx(rxq); */
51ecae7157SApeksha Gupta ENETFEC_DP_LOG(DEBUG, "rx_fifo_error");
52ecae7157SApeksha Gupta goto rx_processing_done;
53ecae7157SApeksha Gupta }
54ecae7157SApeksha Gupta if (status & (RX_BD_LG | RX_BD_SH
55ecae7157SApeksha Gupta | RX_BD_LAST)) {
56ecae7157SApeksha Gupta /* Frame too long or too short. */
57ecae7157SApeksha Gupta ENETFEC_DP_LOG(DEBUG, "rx_length_error");
58ecae7157SApeksha Gupta if (status & RX_BD_LAST)
59ecae7157SApeksha Gupta ENETFEC_DP_LOG(DEBUG, "rcv is not +last");
60ecae7157SApeksha Gupta }
61ecae7157SApeksha Gupta if (status & RX_BD_CR) { /* CRC Error */
62ecae7157SApeksha Gupta ENETFEC_DP_LOG(DEBUG, "rx_crc_errors");
63ecae7157SApeksha Gupta }
64ecae7157SApeksha Gupta /* Report late collisions as a frame error. */
65ecae7157SApeksha Gupta if (status & (RX_BD_NO | RX_BD_TR))
66ecae7157SApeksha Gupta ENETFEC_DP_LOG(DEBUG, "rx_frame_error");
67ecae7157SApeksha Gupta goto rx_processing_done;
68ecae7157SApeksha Gupta }
69ecae7157SApeksha Gupta
70*fe10f6ccSApeksha Gupta new_mbuf = rte_pktmbuf_alloc(pool);
71*fe10f6ccSApeksha Gupta if (unlikely(new_mbuf == NULL)) {
72*fe10f6ccSApeksha Gupta stats->rx_nombuf++;
73*fe10f6ccSApeksha Gupta break;
74*fe10f6ccSApeksha Gupta }
75*fe10f6ccSApeksha Gupta
76ecae7157SApeksha Gupta /* Process the incoming frame. */
77ecae7157SApeksha Gupta stats->ipackets++;
78ecae7157SApeksha Gupta pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
79ecae7157SApeksha Gupta stats->ibytes += pkt_len;
80ecae7157SApeksha Gupta
81ecae7157SApeksha Gupta /* shows data with respect to the data_off field. */
82ecae7157SApeksha Gupta index = enet_get_bd_index(bdp, &rxq->bd);
83ecae7157SApeksha Gupta mbuf = rxq->rx_mbuf[index];
84ecae7157SApeksha Gupta
85ecae7157SApeksha Gupta data = rte_pktmbuf_mtod(mbuf, uint8_t *);
86c75b9c3aSApeksha Gupta mbuf_data = data;
87ecae7157SApeksha Gupta rte_prefetch0(data);
88ecae7157SApeksha Gupta rte_pktmbuf_append((struct rte_mbuf *)mbuf,
89ecae7157SApeksha Gupta pkt_len - 4);
90ecae7157SApeksha Gupta
91ecae7157SApeksha Gupta if (rxq->fep->quirks & QUIRK_RACC)
92ecae7157SApeksha Gupta data = rte_pktmbuf_adj(mbuf, 2);
93ecae7157SApeksha Gupta
94ecae7157SApeksha Gupta rx_pkts[pkt_received] = mbuf;
95ecae7157SApeksha Gupta pkt_received++;
96c75b9c3aSApeksha Gupta
97c75b9c3aSApeksha Gupta /* Extract the enhanced buffer descriptor */
98c75b9c3aSApeksha Gupta ebdp = NULL;
99c75b9c3aSApeksha Gupta if (rxq->fep->bufdesc_ex)
100c75b9c3aSApeksha Gupta ebdp = (struct bufdesc_ex *)bdp;
101c75b9c3aSApeksha Gupta
102c75b9c3aSApeksha Gupta /* If this is a VLAN packet remove the VLAN Tag */
103c75b9c3aSApeksha Gupta vlan_packet_rcvd = false;
104c75b9c3aSApeksha Gupta if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN) &&
105c75b9c3aSApeksha Gupta rxq->fep->bufdesc_ex &&
106c75b9c3aSApeksha Gupta (rte_read32(&ebdp->bd_esc) &
107c75b9c3aSApeksha Gupta rte_cpu_to_le_32(BD_ENETFEC_RX_VLAN))) {
108c75b9c3aSApeksha Gupta /* Push and remove the vlan tag */
109c75b9c3aSApeksha Gupta struct rte_vlan_hdr *vlan_header =
110c75b9c3aSApeksha Gupta (struct rte_vlan_hdr *)
111c75b9c3aSApeksha Gupta ((uint8_t *)data + ETH_HLEN);
112c75b9c3aSApeksha Gupta vlan_tag = rte_be_to_cpu_16(vlan_header->vlan_tci);
113c75b9c3aSApeksha Gupta
114c75b9c3aSApeksha Gupta vlan_packet_rcvd = true;
11525cf2630SFerruh Yigit memmove((uint8_t *)mbuf_data + RTE_VLAN_HLEN,
116c75b9c3aSApeksha Gupta data, RTE_ETHER_ADDR_LEN * 2);
11725cf2630SFerruh Yigit rte_pktmbuf_adj(mbuf, RTE_VLAN_HLEN);
118c75b9c3aSApeksha Gupta }
119c75b9c3aSApeksha Gupta
120c75b9c3aSApeksha Gupta if (rxq->fep->bufdesc_ex &&
121c75b9c3aSApeksha Gupta (rxq->fep->flag_csum & RX_FLAG_CSUM_EN)) {
122c75b9c3aSApeksha Gupta if ((rte_read32(&ebdp->bd_esc) &
123c75b9c3aSApeksha Gupta rte_cpu_to_le_32(RX_FLAG_CSUM_ERR)) == 0) {
124c75b9c3aSApeksha Gupta /* don't check it */
125c75b9c3aSApeksha Gupta mbuf->ol_flags = RTE_MBUF_F_RX_IP_CKSUM_BAD;
126c75b9c3aSApeksha Gupta } else {
127c75b9c3aSApeksha Gupta mbuf->ol_flags = RTE_MBUF_F_RX_IP_CKSUM_GOOD;
128c75b9c3aSApeksha Gupta }
129c75b9c3aSApeksha Gupta }
130c75b9c3aSApeksha Gupta
131c75b9c3aSApeksha Gupta /* Handle received VLAN packets */
132c75b9c3aSApeksha Gupta if (vlan_packet_rcvd) {
133c75b9c3aSApeksha Gupta mbuf->vlan_tci = vlan_tag;
134c75b9c3aSApeksha Gupta mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED
135c75b9c3aSApeksha Gupta | RTE_MBUF_F_RX_VLAN;
136c75b9c3aSApeksha Gupta }
137c75b9c3aSApeksha Gupta
138ecae7157SApeksha Gupta rxq->rx_mbuf[index] = new_mbuf;
139ecae7157SApeksha Gupta rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
140ecae7157SApeksha Gupta &bdp->bd_bufaddr);
141ecae7157SApeksha Gupta rx_processing_done:
142ecae7157SApeksha Gupta /* when rx_processing_done clear the status flags
143ecae7157SApeksha Gupta * for this buffer
144ecae7157SApeksha Gupta */
145ecae7157SApeksha Gupta status &= ~RX_BD_STATS;
146ecae7157SApeksha Gupta
147ecae7157SApeksha Gupta /* Mark the buffer empty */
148ecae7157SApeksha Gupta status |= RX_BD_EMPTY;
149ecae7157SApeksha Gupta
150ecae7157SApeksha Gupta if (rxq->fep->bufdesc_ex) {
151ecae7157SApeksha Gupta struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
152ecae7157SApeksha Gupta rte_write32(rte_cpu_to_le_32(RX_BD_INT),
153ecae7157SApeksha Gupta &ebdp->bd_esc);
154ecae7157SApeksha Gupta rte_write32(0, &ebdp->bd_prot);
155ecae7157SApeksha Gupta rte_write32(0, &ebdp->bd_bdu);
156ecae7157SApeksha Gupta }
157ecae7157SApeksha Gupta
158ecae7157SApeksha Gupta /* Make sure the updates to rest of the descriptor are
159ecae7157SApeksha Gupta * performed before transferring ownership.
160ecae7157SApeksha Gupta */
161ecae7157SApeksha Gupta rte_wmb();
162ecae7157SApeksha Gupta rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
163ecae7157SApeksha Gupta
164ecae7157SApeksha Gupta /* Update BD pointer to next entry */
165ecae7157SApeksha Gupta bdp = enet_get_nextdesc(bdp, &rxq->bd);
166ecae7157SApeksha Gupta
167ecae7157SApeksha Gupta /* Doing this here will keep the FEC running while we process
168ecae7157SApeksha Gupta * incoming frames.
169ecae7157SApeksha Gupta */
170ecae7157SApeksha Gupta rte_write32(0, rxq->bd.active_reg_desc);
171ecae7157SApeksha Gupta status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
172ecae7157SApeksha Gupta }
173ecae7157SApeksha Gupta rxq->bd.cur = bdp;
174ecae7157SApeksha Gupta return pkt_received;
175ecae7157SApeksha Gupta }
176ecae7157SApeksha Gupta
/* Tx burst function: enqueue up to nb_pkts packets onto the Tx buffer
 * descriptor ring, one descriptor per packet. Multi-segment mbufs are not
 * supported; on the first multi-segment packet the burst stops and the
 * count of packets already queued is returned (remaining mbufs stay owned
 * by the caller). Descriptor ordering is enforced with rte_wmb() before
 * the READY bit is handed to hardware.
 */
uint16_t
enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct enetfec_priv_tx_q *txq =
			(struct enetfec_priv_tx_q *)tx_queue;
	struct rte_eth_stats *stats = &txq->fep->stats;
	struct bufdesc *bdp, *last_bdp;
	struct rte_mbuf *mbuf;
	unsigned short status;
	unsigned short buflen;
	unsigned int index, estatus = 0;
	unsigned int i, pkt_transmitted = 0;
	uint8_t *data;
	int tx_st = 1;

	while (tx_st) {
		if (pkt_transmitted >= nb_pkts) {
			tx_st = 0;
			break;
		}

		mbuf = *(tx_pkts);
		/* Scatter-gather frames are not supported by this driver */
		if (mbuf->nb_segs > 1) {
			ENETFEC_DP_LOG(DEBUG, "SG not supported");
			return pkt_transmitted;
		}

		tx_pkts++;
		bdp = txq->bd.cur;

		/* First clean the ring */
		index = enet_get_bd_index(bdp, &txq->bd);
		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));

		/* Descriptor still owned by hardware: ring is full */
		if (status & TX_BD_READY) {
			stats->oerrors++;
			break;
		}
		/* Free the mbuf of the previously completed transmit that
		 * occupied this ring slot.
		 */
		if (txq->tx_mbuf[index]) {
			rte_pktmbuf_free(txq->tx_mbuf[index]);
			txq->tx_mbuf[index] = NULL;
		}

		/* Fill in a Tx ring entry */
		last_bdp = bdp;
		status &= ~TX_BD_STATS;

		/* Set buffer length and buffer pointer */
		buflen = rte_pktmbuf_pkt_len(mbuf);
		stats->opackets++;
		stats->obytes += buflen;

		/* Single descriptor carries the whole frame */
		status |= (TX_BD_LAST);
		data = rte_pktmbuf_mtod(mbuf, void *);
		/* Flush the packet payload from cache so DMA reads the
		 * up-to-date bytes. NOTE(review): the `<=` bound flushes one
		 * cache line past the payload end — presumably harmless
		 * within the mbuf data room, but verify.
		 */
		for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
			dcbf(data + i);

		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
			&bdp->bd_bufaddr);
		rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);

		if (txq->fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			/* NOTE(review): this compares the whole ol_flags word
			 * for equality against an Rx-side flag; any additional
			 * flag on the mbuf defeats the test. Looks like it
			 * should be a bitwise test against the Tx checksum
			 * request flags — confirm intended offload trigger.
			 */
			if (mbuf->ol_flags == RTE_MBUF_F_RX_IP_CKSUM_GOOD)
				estatus |= TX_BD_PINS | TX_BD_IINS;

			rte_write32(0, &ebdp->bd_bdu);
			rte_write32(rte_cpu_to_le_32(estatus),
				    &ebdp->bd_esc);
		}

		index = enet_get_bd_index(last_bdp, &txq->bd);
		/* Save mbuf pointer */
		txq->tx_mbuf[index] = mbuf;

		/* Make sure the updates to rest of the descriptor are performed
		 * before transferring ownership.
		 */
		status |= (TX_BD_READY | TX_BD_TC);
		rte_wmb();
		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);

		/* Trigger transmission start */
		rte_write32(0, txq->bd.active_reg_desc);
		pkt_transmitted++;

		/* If this was the last BD in the ring, start at the
		 * beginning again.
		 */
		bdp = enet_get_nextdesc(last_bdp, &txq->bd);

		/* Make sure the update to bdp and tx_skbuff are performed
		 * before txq->bd.cur.
		 */
		txq->bd.cur = bdp;
	}
	return pkt_transmitted;
}
276