/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include "rte_ethdev.h"
#include "rte_malloc.h"
#include "rte_memzone.h"

#include "base/enetc_hw.h"
#include "enetc.h"
#include "enetc_logs.h"

#define ENETC_CACHE_LINE_RXBDS	(RTE_CACHE_LINE_SIZE / \
				 sizeof(union enetc_rx_bd))
#define ENETC_RXBD_BUNDLE 16 /* Number of buffers to allocate at once */

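/* Reclaim completed Tx descriptors: walk the ring from next_to_clean up to
 * the consumer index reported by hardware and bulk-free the transmitted
 * mbufs in bundles of ENETC_RXBD_BUNDLE.
 */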
static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
	int tx_frm_cnt = 0;
	struct enetc_swbd *tx_swbd, *tx_swbd_base;
	int i, hwci, bd_count;
	struct rte_mbuf *m[ENETC_RXBD_BUNDLE];

	/* we don't need barriers here, we just want a relatively current value
	 * from HW.
	 */
	hwci = (int)(rte_read32_relaxed(tx_ring->tcisr) &
		     ENETC_TBCISR_IDX_MASK);

	tx_swbd_base = tx_ring->q_swbd;
	bd_count = tx_ring->bd_count;
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_swbd_base[i];

	/* we're only reading the CI index once here, which means HW may update
	 * it while we're doing clean-up. We could read the register in a loop
	 * but for now I assume it's OK to leave a few Tx frames for next call.
	 * The issue with reading the register in a loop is that we're stalling
	 * here trying to catch up with HW which keeps sending traffic as long
	 * as it has traffic to send, so in effect we could be waiting here for
	 * the Tx ring to be drained by HW, instead of us doing Rx in that
	 * meantime.
	 */
	while (i != hwci) {
		/* Calling rte_pktmbuf_free() once per frame wastes a lot of
		 * cycles; collect the mbufs in a list and free them in bulk
		 * once the list is full or the loop is done.
		 */
		if (tx_frm_cnt == ENETC_RXBD_BUNDLE) {
			rte_pktmbuf_free_bulk(m, tx_frm_cnt);
			tx_frm_cnt = 0;
		}

		m[tx_frm_cnt] = tx_swbd->buffer_addr;
		tx_swbd->buffer_addr = NULL;

		i++;
		tx_swbd++;
		if (unlikely(i == bd_count)) {
			i = 0;
			tx_swbd = tx_swbd_base;
		}

		tx_frm_cnt++;
	}

	if (tx_frm_cnt)
		rte_pktmbuf_free_bulk(m, tx_frm_cnt);

	tx_ring->next_to_clean = i;

	return 0;
}

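/* Transmit burst: each frame consumes a single BD, and the number of frames
 * accepted is capped by the free BDs in the ring, so the return value may be
 * smaller than nb_pkts.
 */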
uint16_t
enetc_xmit_pkts(void *tx_queue,
		struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_swbd *tx_swbd;
	int i, start, bds_to_use;
	struct enetc_tx_bd *txbd;
	struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;

	i = tx_ring->next_to_use;

	bds_to_use = enetc_bd_unused(tx_ring);
	if (bds_to_use < nb_pkts)
		nb_pkts = bds_to_use;

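	/* Fill one BD per frame: the whole packet is described by a single
	 * buffer, so frm_len and buf_len are identical and ENETC_TXBD_FLAGS_F
	 * marks the BD as the last one of the frame.
	 */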
	start = 0;
	while (nb_pkts--) {
		tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
		txbd = ENETC_TXBD(*tx_ring, i);
		tx_swbd = &tx_ring->q_swbd[i];
		txbd->frm_len = tx_pkts[start]->pkt_len;
		txbd->buf_len = txbd->frm_len;
		txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
		txbd->addr = (uint64_t)(uintptr_t)
			     rte_cpu_to_le_64((size_t)tx_swbd->buffer_addr->buf_iova +
					      tx_swbd->buffer_addr->data_off);
		i++;
		start++;
		if (unlikely(i == tx_ring->bd_count))
			i = 0;
	}

	/* we're only cleaning up the Tx ring here, on the assumption that
	 * software is slower than hardware and hardware completed sending
	 * older frames out by now.
	 * We're also cleaning up the ring before kicking off Tx for the new
	 * batch to minimize chances of contention on the Tx ring.
	 */
	enetc_clean_tx_ring(tx_ring);

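	/* Advance the software producer index and publish it to hardware so
	 * that it starts sending the frames queued above.
	 */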
	tx_ring->next_to_use = i;
	enetc_wr_reg(tx_ring->tcir, i);
	return start;
}

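/* Re-arm up to buff_cnt Rx BDs starting at next_to_use, allocating fresh
 * mbufs from the ring's mempool in bundles of ENETC_RXBD_BUNDLE. Returns the
 * number of BDs refilled, or -1 if mbuf allocation fails.
 */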
int
enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j, k = ENETC_RXBD_BUNDLE;
	struct rte_mbuf *m[ENETC_RXBD_BUNDLE];
	struct rte_mempool *mb_pool;

	i = rx_ring->next_to_use;
	mb_pool = rx_ring->mb_pool;
	rx_swbd = &rx_ring->q_swbd[i];
	rxbd = ENETC_RXBD(*rx_ring, i);
	for (j = 0; j < buff_cnt; j++) {
		/* bulk alloc for the next up to ENETC_RXBD_BUNDLE BDs */
		if (k == ENETC_RXBD_BUNDLE) {
			k = 0;
			int m_cnt = RTE_MIN(buff_cnt - j, ENETC_RXBD_BUNDLE);

			if (rte_pktmbuf_alloc_bulk(mb_pool, m, m_cnt))
				return -1;
		}

		rx_swbd->buffer_addr = m[k];
		rxbd->w.addr = (uint64_t)(uintptr_t)
			       rx_swbd->buffer_addr->buf_iova +
			       rx_swbd->buffer_addr->data_off;
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;
		rx_swbd++;
		rxbd++;
		i++;
		k++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rxbd = ENETC_RXBD(*rx_ring, 0);
			rx_swbd = &rx_ring->q_swbd[i];
		}
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i;
		rx_ring->next_to_use = i;
		enetc_wr_reg(rx_ring->rcir, i);
	}

	return j;
}

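/* Slow path for parse results that carry the parse-error bit: the packet
 * type is still reported, but the corresponding checksum flag is set to bad
 * (or to unknown for unrecognized results).
 */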
static inline void enetc_slow_parsing(struct rte_mbuf *m,
				      uint64_t parse_results)
{
	m->ol_flags &= ~(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);

	switch (parse_results) {
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
			       RTE_MBUF_F_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
			       RTE_MBUF_F_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
			       RTE_MBUF_F_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
			       RTE_MBUF_F_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
			       RTE_MBUF_F_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
			       RTE_MBUF_F_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
			       RTE_MBUF_F_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
			       RTE_MBUF_F_RX_L4_CKSUM_BAD;
		return;
	/* More switch cases can be added */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN |
			       RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
	}
}

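/* Fast-path Rx parsing: assume good L3/L4 checksums and map the common parse
 * results straight to packet types; anything else (including results with the
 * parse-error bit set) is handed off to enetc_slow_parsing().
 */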
static inline void __rte_hot
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
	ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results);
	m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	switch (parse_results) {
	case ENETC_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		return;
	case ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		return;
	case ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		return;
	case ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		return;
	case ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		return;
	/* More switch cases can be added */
	default:
		enetc_slow_parsing(m, parse_results);
	}
}

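/* Process up to work_limit received frames: hand the completed mbufs over to
 * the caller, then refill the BDs that were consumed (plus any that were
 * already free) with fresh buffers.
 */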
static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
		    struct rte_mbuf **rx_pkts,
		    int work_limit)
{
	int rx_frm_cnt = 0;
	int cleaned_cnt, i, bd_count;
	struct enetc_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;

	/* next descriptor to process */
	i = rx_ring->next_to_clean;
	rxbd = ENETC_RXBD(*rx_ring, i);
	rte_prefetch0(rxbd);
	bd_count = rx_ring->bd_count;
	/* LS1028A does not have platform cache so any software access following
	 * a hardware write will go directly to DDR. Latency of such a read is
	 * in excess of 100 core cycles, so try to prefetch more in advance to
	 * mitigate this.
	 * How much is worth prefetching really depends on traffic conditions.
	 * With congested Rx this could go up to 4 cache lines or so. But if
	 * software keeps up with hardware and follows behind Rx PI by a cache
	 * line or less then it's harmful in terms of performance to cache more.
	 * We would only prefetch BDs that have yet to be written by ENETC,
	 * which will have to be evicted again anyway.
	 */
	rte_prefetch0(ENETC_RXBD(*rx_ring,
				 (i + ENETC_CACHE_LINE_RXBDS) % bd_count));
	rte_prefetch0(ENETC_RXBD(*rx_ring,
				 (i + ENETC_CACHE_LINE_RXBDS * 2) % bd_count));

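	/* Number of BDs we can hand back to hardware: everything that is
	 * already unused plus each BD consumed in the loop below.
	 */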
	cleaned_cnt = enetc_bd_unused(rx_ring);
	rx_swbd = &rx_ring->q_swbd[i];
	while (likely(rx_frm_cnt < work_limit)) {
		uint32_t bd_status;

		bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
		if (!bd_status)
			break;

		rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
						rx_ring->crc_len;
		rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
						 rx_ring->crc_len;
		rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
		rx_swbd->buffer_addr->ol_flags = 0;
		enetc_dev_rx_parse(rx_swbd->buffer_addr,
				   rxbd->r.parse_summary);
		rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
		cleaned_cnt++;
		rx_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = &rx_ring->q_swbd[i];
		}
		rxbd = ENETC_RXBD(*rx_ring, i);
		rte_prefetch0(ENETC_RXBD(*rx_ring,
					 (i + ENETC_CACHE_LINE_RXBDS) %
					 bd_count));
		rte_prefetch0(ENETC_RXBD(*rx_ring,
					 (i + ENETC_CACHE_LINE_RXBDS * 2) %
					 bd_count));

		rx_frm_cnt++;
	}

	rx_ring->next_to_clean = i;
	enetc_refill_rx_ring(rx_ring, cleaned_cnt);

	return rx_frm_cnt;
}

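/* Rx burst entry point: a thin wrapper around enetc_clean_rx_ring(). */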
uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;

	return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
}