/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include "rte_ethdev.h"
#include "rte_malloc.h"
#include "rte_memzone.h"

#include "base/enetc_hw.h"
#include "enetc.h"
#include "enetc_logs.h"

#define ENETC_RXBD_BUNDLE 8 /* Number of BDs to update at once */

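/* Free mbufs for Tx descriptors the hardware has completed, i.e. up to the
 * consumer index reported in the TBCISR register, and advance next_to_clean
 * accordingly.  Returns the number of frames reclaimed.
 */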
static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
	int tx_frm_cnt = 0;
	struct enetc_swbd *tx_swbd;
	int i;

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while ((int)(enetc_rd_reg(tx_ring->tcisr) &
	       ENETC_TBCISR_IDX_MASK) != i) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[0];
		}

		tx_frm_cnt++;
	}

	tx_ring->next_to_clean = i;
	return tx_frm_cnt;
}

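/* Tx burst callback: post one buffer descriptor per mbuf, up to nb_pkts or
 * the number of free descriptors, then notify the hardware by writing the
 * producer index register (TCIR).  Returns the number of packets queued.
 */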
uint16_t
enetc_xmit_pkts(void *tx_queue,
		struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_swbd *tx_swbd;
	int i, start, bds_to_use;
	struct enetc_tx_bd *txbd;
	struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;

	i = tx_ring->next_to_use;

	bds_to_use = enetc_bd_unused(tx_ring);
	if (bds_to_use < nb_pkts)
		nb_pkts = bds_to_use;

	start = 0;
	while (nb_pkts--) {
		enetc_clean_tx_ring(tx_ring);
		tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
		txbd = ENETC_TXBD(*tx_ring, i);
		tx_swbd = &tx_ring->q_swbd[i];
		txbd->frm_len = tx_pkts[start]->pkt_len;
		txbd->buf_len = txbd->frm_len;
		txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
		txbd->addr = (uint64_t)(uintptr_t)
			rte_cpu_to_le_64((size_t)tx_swbd->buffer_addr->buf_iova +
					 tx_swbd->buffer_addr->data_off);
		i++;
		start++;
		if (unlikely(i == tx_ring->bd_count))
			i = 0;
	}

	tx_ring->next_to_use = i;
	enetc_wr_reg(tx_ring->tcir, i);
	return start;
}

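/* Allocate up to buff_cnt fresh mbufs from the queue's mempool, program them
 * into the Rx buffer descriptors starting at next_to_use, and publish the new
 * tail by writing the RCIR register.  Returns the number of buffers refilled.
 */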
int
enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->q_swbd[i];
	rxbd = ENETC_RXBD(*rx_ring, i);
	for (j = 0; j < buff_cnt; j++) {
		rx_swbd->buffer_addr = (void *)(uintptr_t)
			rte_cpu_to_le_64((uint64_t)(uintptr_t)
					rte_pktmbuf_alloc(rx_ring->mb_pool));
		/* Stop refilling if the mempool is exhausted; the descriptors
		 * programmed so far remain valid.
		 */
		if (unlikely(!rx_swbd->buffer_addr))
			break;
		rxbd->w.addr = (uint64_t)(uintptr_t)
			       rx_swbd->buffer_addr->buf_iova +
			       rx_swbd->buffer_addr->data_off;
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;
		rx_swbd++;
		rxbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rxbd = ENETC_RXBD(*rx_ring, 0);
			rx_swbd = &rx_ring->q_swbd[i];
		}
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i;
		rx_ring->next_to_use = i;
		enetc_wr_reg(rx_ring->rcir, i);
	}

	return j;
}

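/* Slow path for frames flagged with a parse error: set the packet type from
 * the parse results and mark the corresponding checksum as bad in ol_flags.
 */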
static inline void enetc_slow_parsing(struct rte_mbuf *m,
				      uint64_t parse_results)
{
	m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);

	switch (parse_results) {
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	/* More switch cases can be added */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
		m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
			       PKT_RX_L4_CKSUM_UNKNOWN;
	}
}

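/* Fast-path translation of the hardware parse summary into an mbuf packet
 * type, assuming good checksums; anything unexpected is handed off to
 * enetc_slow_parsing().
 */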
static inline void __attribute__((hot))
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
	ENETC_PMD_DP_DEBUG("parse summary = 0x%x   ", parse_results);
	m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;

	switch (parse_results) {
	case ENETC_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		return;
	case ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		return;
	case ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		return;
	case ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		return;
	case ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		return;
	/* More switch cases can be added */
	default:
		enetc_slow_parsing(m, parse_results);
	}
}

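/* Process received frames on the Rx ring: translate completed descriptors
 * into mbufs, refill the ring whenever at least ENETC_RXBD_BUNDLE descriptors
 * are free, and stop after work_limit frames or at the first descriptor that
 * is not yet ready.  Returns the number of frames delivered to rx_pkts.
 */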
static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
		    struct rte_mbuf **rx_pkts,
		    int work_limit)
{
	int rx_frm_cnt = 0;
	int cleaned_cnt, i;
	struct enetc_swbd *rx_swbd;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;
	rx_swbd = &rx_ring->q_swbd[i];
	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		uint32_t bd_status;

		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);

			cleaned_cnt -= count;
		}

		rxbd = ENETC_RXBD(*rx_ring, i);
		bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
		if (!bd_status)
			break;

		rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
						rx_ring->crc_len;
		rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
						 rx_ring->crc_len;
		rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
		rx_swbd->buffer_addr->ol_flags = 0;
		enetc_dev_rx_parse(rx_swbd->buffer_addr,
				   rxbd->r.parse_summary);
		rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
		cleaned_cnt++;
		rx_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = &rx_ring->q_swbd[i];
		}

		rx_ring->next_to_clean = i;
		rx_frm_cnt++;
	}

	return rx_frm_cnt;
}

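/* Rx burst callback registered with the ethdev layer; delegates to
 * enetc_clean_rx_ring() for the given queue.
 */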
uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;

	return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
}