xref: /dpdk/drivers/net/enetc/enetc_rxtx.c (revision 25d11a86c56d50947af33d0b79ede622809bd8b9)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 NXP
3  */
4 
5 #include <stdbool.h>
6 #include <stdint.h>
7 #include <unistd.h>
8 
9 #include "rte_ethdev.h"
10 #include "rte_malloc.h"
11 #include "rte_memzone.h"
12 
13 #include "base/enetc_hw.h"
14 #include "enetc.h"
15 #include "enetc_logs.h"
16 
17 #define ENETC_RXBD_BUNDLE 8 /* Number of BDs to update at once */
18 
19 static int
20 enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
21 {
22 	int tx_frm_cnt = 0;
23 	struct enetc_swbd *tx_swbd;
24 	int i;
25 
26 	i = tx_ring->next_to_clean;
27 	tx_swbd = &tx_ring->q_swbd[i];
28 	while ((int)(enetc_rd_reg(tx_ring->tcisr) &
29 	       ENETC_TBCISR_IDX_MASK) != i) {
30 		rte_pktmbuf_free(tx_swbd->buffer_addr);
31 		tx_swbd->buffer_addr = NULL;
32 		tx_swbd++;
33 		i++;
34 		if (unlikely(i == tx_ring->bd_count)) {
35 			i = 0;
36 			tx_swbd = &tx_ring->q_swbd[0];
37 		}
38 
39 		tx_frm_cnt++;
40 	}
41 
42 	tx_ring->next_to_clean = i;
43 	return tx_frm_cnt++;
44 }
45 
/*
 * Burst-transmit entry point for the enetc PMD.
 *
 * Places up to @nb_pkts mbufs from @tx_pkts onto the Tx BD ring owned by
 * @tx_queue and rings the hardware doorbell (TCIR) once at the end.
 * Returns the number of packets queued (always nb_pkts here: no ring-full
 * check is performed before writing a descriptor -- NOTE(review): this
 * relies on enetc_clean_tx_ring() freeing slots fast enough; confirm the
 * ring can never be overrun under sustained load).
 */
uint16_t
enetc_xmit_pkts(void *tx_queue,
		struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_swbd *tx_swbd;
	int i, start;
	struct enetc_tx_bd *txbd;
	struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;

	i = tx_ring->next_to_use;
	start = 0;
	while (nb_pkts--) {
		/* Reclaim completed descriptors before consuming a new one. */
		enetc_clean_tx_ring(tx_ring);
		/* Remember the mbuf so the clean path can free it later. */
		tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
		txbd = ENETC_TXBD(*tx_ring, i);
		tx_swbd = &tx_ring->q_swbd[i];
		/* Single-BD frame: buffer length equals full frame length. */
		txbd->frm_len = tx_pkts[start]->pkt_len;
		txbd->buf_len = txbd->frm_len;
		/* F flag marks the final (here: only) BD of the frame. */
		txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
		/* DMA address of the packet data (buf base + headroom).
		 * NOTE(review): uses the virtual buf_addr -- presumably the
		 * device is IOVA==VA here; confirm.
		 */
		txbd->addr = (uint64_t)(uintptr_t)
		rte_cpu_to_le_64((size_t)tx_swbd->buffer_addr->buf_addr +
				 tx_swbd->buffer_addr->data_off);
		i++;
		start++;
		/* wrap around at the end of the ring */
		if (unlikely(i == tx_ring->bd_count))
			i = 0;
	}

	/* Publish the new producer index to software state and hardware. */
	tx_ring->next_to_use = i;
	enetc_wr_reg(tx_ring->tcir, i);
	return start;
}
79 
80 int
81 enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
82 {
83 	struct enetc_swbd *rx_swbd;
84 	union enetc_rx_bd *rxbd;
85 	int i, j;
86 
87 	i = rx_ring->next_to_use;
88 	rx_swbd = &rx_ring->q_swbd[i];
89 	rxbd = ENETC_RXBD(*rx_ring, i);
90 	for (j = 0; j < buff_cnt; j++) {
91 		rx_swbd->buffer_addr =
92 			rte_cpu_to_le_64(rte_mbuf_raw_alloc(rx_ring->mb_pool));
93 		rxbd->w.addr = (uint64_t)(uintptr_t)
94 			       rx_swbd->buffer_addr->buf_addr +
95 			       rx_swbd->buffer_addr->data_off;
96 		/* clear 'R" as well */
97 		rxbd->r.lstatus = 0;
98 		rx_swbd++;
99 		rxbd++;
100 		i++;
101 		if (unlikely(i == rx_ring->bd_count)) {
102 			i = 0;
103 			rxbd = ENETC_RXBD(*rx_ring, 0);
104 			rx_swbd = &rx_ring->q_swbd[i];
105 		}
106 	}
107 
108 	if (likely(j)) {
109 		rx_ring->next_to_alloc = i;
110 		rx_ring->next_to_use = i;
111 		enetc_wr_reg(rx_ring->rcir, i);
112 	}
113 
114 	return j;
115 }
116 
117 
118 static inline void __attribute__((hot))
119 enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
120 {
121 	ENETC_PMD_DP_DEBUG("parse summary = 0x%x   ", parse_results);
122 
123 	m->packet_type = RTE_PTYPE_UNKNOWN;
124 	switch (parse_results) {
125 	case ENETC_PKT_TYPE_ETHER:
126 		m->packet_type = RTE_PTYPE_L2_ETHER;
127 		break;
128 	case ENETC_PKT_TYPE_IPV4:
129 		m->packet_type = RTE_PTYPE_L2_ETHER |
130 				 RTE_PTYPE_L3_IPV4;
131 		break;
132 	case ENETC_PKT_TYPE_IPV6:
133 		m->packet_type = RTE_PTYPE_L2_ETHER |
134 				 RTE_PTYPE_L3_IPV6;
135 		break;
136 	case ENETC_PKT_TYPE_IPV4_TCP:
137 		m->packet_type = RTE_PTYPE_L2_ETHER |
138 				 RTE_PTYPE_L3_IPV4 |
139 				 RTE_PTYPE_L4_TCP;
140 		break;
141 	case ENETC_PKT_TYPE_IPV6_TCP:
142 		m->packet_type = RTE_PTYPE_L2_ETHER |
143 				 RTE_PTYPE_L3_IPV6 |
144 				 RTE_PTYPE_L4_TCP;
145 		break;
146 	case ENETC_PKT_TYPE_IPV4_UDP:
147 		m->packet_type = RTE_PTYPE_L2_ETHER |
148 				 RTE_PTYPE_L3_IPV4 |
149 				 RTE_PTYPE_L4_UDP;
150 		break;
151 	case ENETC_PKT_TYPE_IPV6_UDP:
152 		m->packet_type = RTE_PTYPE_L2_ETHER |
153 				 RTE_PTYPE_L3_IPV6 |
154 				 RTE_PTYPE_L4_UDP;
155 		break;
156 	case ENETC_PKT_TYPE_IPV4_SCTP:
157 		m->packet_type = RTE_PTYPE_L2_ETHER |
158 				 RTE_PTYPE_L3_IPV4 |
159 				 RTE_PTYPE_L4_SCTP;
160 		break;
161 	case ENETC_PKT_TYPE_IPV6_SCTP:
162 		m->packet_type = RTE_PTYPE_L2_ETHER |
163 				 RTE_PTYPE_L3_IPV6 |
164 				 RTE_PTYPE_L4_SCTP;
165 		break;
166 	case ENETC_PKT_TYPE_IPV4_ICMP:
167 		m->packet_type = RTE_PTYPE_L2_ETHER |
168 				 RTE_PTYPE_L3_IPV4 |
169 				 RTE_PTYPE_L4_ICMP;
170 		break;
171 	case ENETC_PKT_TYPE_IPV6_ICMP:
172 		m->packet_type = RTE_PTYPE_L2_ETHER |
173 				 RTE_PTYPE_L3_IPV6 |
174 				 RTE_PTYPE_L4_ICMP;
175 		break;
176 	/* More switch cases can be added */
177 	default:
178 		m->packet_type = RTE_PTYPE_UNKNOWN;
179 	}
180 }
181 
/*
 * Harvest up to @work_limit received frames from @rx_ring into @rx_pkts.
 *
 * Walks the Rx BD ring from next_to_clean, stopping at the first
 * descriptor whose status word is still zero (not yet written back by
 * hardware) or when @work_limit frames have been collected.  Consumed
 * descriptors are periodically refilled in bundles.  Returns the number
 * of frames delivered to @rx_pkts.
 */
static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
		    struct rte_mbuf **rx_pkts,
		    int work_limit)
{
	int rx_frm_cnt = 0;
	int cleaned_cnt, i;
	struct enetc_swbd *rx_swbd;

	/* Descriptors already free and awaiting new buffers. */
	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;
	rx_swbd = &rx_ring->q_swbd[i];
	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		uint32_t bd_status;

		/* Refill in bundles rather than one BD at a time to keep
		 * doorbell writes off the per-packet path.
		 */
		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);

			cleaned_cnt -= count;
		}

		rxbd = ENETC_RXBD(*rx_ring, i);
		bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
		/* Zero status: hardware has not completed this BD yet. */
		if (!bd_status)
			break;

		/* Single-buffer frame: copy HW write-back fields into the
		 * mbuf that was posted for this slot.
		 */
		rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len;
		rx_swbd->buffer_addr->data_len = rxbd->r.buf_len;
		rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
		rx_swbd->buffer_addr->ol_flags = 0;
		enetc_dev_rx_parse(rx_swbd->buffer_addr,
				   rxbd->r.parse_summary);
		rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
		cleaned_cnt++;
		rx_swbd++;
		i++;
		/* wrap around at the end of the ring */
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = &rx_ring->q_swbd[i];
		}

		/* Publish progress each iteration so next_to_clean is
		 * consistent even if we exit via the work limit.
		 */
		rx_ring->next_to_clean = i;
		rx_frm_cnt++;
	}

	return rx_frm_cnt;
}
231 
/*
 * Burst-receive entry point: thin wrapper that forwards the request to
 * the ring cleaner and returns the number of packets harvested.
 */
uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	return enetc_clean_rx_ring((struct enetc_bdr *)rxq, rx_pkts, nb_pkts);
}
240