/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include "rte_ethdev.h"
#include "rte_malloc.h"
#include "rte_memzone.h"

#include "base/enetc_hw.h"
#include "enetc.h"
#include "enetc_logs.h"

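/* Reclaim completed Tx descriptors: walk the SW ring from next_to_clean up to
 * the consumer index reported by hardware, freeing each transmitted mbuf.
 * Returns the number of frames reclaimed.
 */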
static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
	int tx_frm_cnt = 0;
	struct enetc_swbd *tx_swbd;
	int i, hwci;

	/* we don't need barriers here, we just want a relatively current value
	 * from HW.
	 */
	hwci = (int)(rte_read32_relaxed(tx_ring->tcisr) &
		     ENETC_TBCISR_IDX_MASK);

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];

	/* we're only reading the CI index once here, which means HW may update
	 * it while we're doing clean-up.  We could read the register in a loop
	 * but for now we assume it's OK to leave a few Tx frames for the next
	 * call.  The issue with reading the register in a loop is that we'd be
	 * stalling here trying to catch up with HW, which keeps sending
	 * traffic as long as it has traffic to send, so in effect we could be
	 * waiting here for the Tx ring to be drained by HW instead of doing
	 * Rx in the meantime.
	 */
	while (i != hwci) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[0];
		}

		tx_frm_cnt++;
	}

	tx_ring->next_to_clean = i;
	return tx_frm_cnt;
}

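/* Burst transmit: queue up to nb_pkts single-segment mbufs, one Tx BD per
 * frame, then reclaim completed BDs and notify hardware of the new producer
 * position.  Returns the number of frames actually queued.
 */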
uint16_t
enetc_xmit_pkts(void *tx_queue,
		struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_swbd *tx_swbd;
	int i, start, bds_to_use;
	struct enetc_tx_bd *txbd;
	struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;

	i = tx_ring->next_to_use;

	bds_to_use = enetc_bd_unused(tx_ring);
	if (bds_to_use < nb_pkts)
		nb_pkts = bds_to_use;

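	/* Fill one BD per mbuf: remember the mbuf in the SW ring so it can be
	 * freed after transmission, program the frame/buffer length and the
	 * IOVA of the mbuf data, and set ENETC_TXBD_FLAGS_F (final BD of the
	 * frame; this path assumes single-segment mbufs since buf_len is
	 * taken straight from pkt_len).
	 */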
	start = 0;
	while (nb_pkts--) {
		tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
		txbd = ENETC_TXBD(*tx_ring, i);
		tx_swbd = &tx_ring->q_swbd[i];
		txbd->frm_len = tx_pkts[start]->pkt_len;
		txbd->buf_len = txbd->frm_len;
		txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
		txbd->addr = (uint64_t)(uintptr_t)
			rte_cpu_to_le_64((size_t)tx_swbd->buffer_addr->buf_iova +
					 tx_swbd->buffer_addr->data_off);
		i++;
		start++;
		if (unlikely(i == tx_ring->bd_count))
			i = 0;
	}
91469c6111SGagandeep Singh 
929a6d2faeSAlex Marginean 	/* we're only cleaning up the Tx ring here, on the assumption that
939a6d2faeSAlex Marginean 	 * software is slower than hardware and hardware completed sending
949a6d2faeSAlex Marginean 	 * older frames out by now.
959a6d2faeSAlex Marginean 	 * We're also cleaning up the ring before kicking off Tx for the new
969a6d2faeSAlex Marginean 	 * batch to minimize chances of contention on the Tx ring
979a6d2faeSAlex Marginean 	 */
989a6d2faeSAlex Marginean 	enetc_clean_tx_ring(tx_ring);
999a6d2faeSAlex Marginean 
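	/* Publish the new producer position to hardware; the write to the
	 * ring's tcir register is what kicks off transmission of the frames
	 * queued above.
	 */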
	tx_ring->next_to_use = i;
	enetc_wr_reg(tx_ring->tcir, i);
	return start;
}

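/* Attach up to buff_cnt fresh mbufs from the ring's mempool to consecutive
 * Rx BDs starting at next_to_use, then let hardware know about the new
 * buffers.  Returns the number of buffers actually refilled.
 */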
int
enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->q_swbd[i];
	rxbd = ENETC_RXBD(*rx_ring, i);
	for (j = 0; j < buff_cnt; j++) {
		rx_swbd->buffer_addr = rte_pktmbuf_alloc(rx_ring->mb_pool);
		/* stop refilling if the mempool is exhausted */
		if (unlikely(rx_swbd->buffer_addr == NULL))
			break;
		rxbd->w.addr = (uint64_t)(uintptr_t)
			       rx_swbd->buffer_addr->buf_iova +
			       rx_swbd->buffer_addr->data_off;
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;
		rx_swbd++;
		rxbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rxbd = ENETC_RXBD(*rx_ring, 0);
			rx_swbd = &rx_ring->q_swbd[i];
		}
	}

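	/* Only advance the ring state and notify hardware (rcir write) when
	 * at least one buffer was attached.
	 */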
	if (likely(j)) {
		rx_ring->next_to_alloc = i;
		rx_ring->next_to_use = i;
		enetc_wr_reg(rx_ring->rcir, i);
	}

	return j;
}

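/* Fallback for parse results carrying the ENETC_PARSE_ERROR bit: classify the
 * packet type as far as possible and flag the corresponding IP/L4 checksum
 * error in ol_flags.
 */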
static inline void enetc_slow_parsing(struct rte_mbuf *m,
				     uint64_t parse_results)
{
	m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);

	switch (parse_results) {
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	/* More switch cases can be added */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
		m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
			       PKT_RX_L4_CKSUM_UNKNOWN;
	}
}

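/* Translate the hardware parse summary into an rte_mbuf packet_type plus
 * checksum ol_flags.  The common, error-free results are handled inline;
 * anything else is punted to enetc_slow_parsing().
 */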
static inline void __attribute__((hot))
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
	ENETC_PMD_DP_DEBUG("parse summary = 0x%x   ", parse_results);
	m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;

	switch (parse_results) {
	case ENETC_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		return;
	case ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		return;
	case ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		return;
	case ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		return;
	case ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		return;
	/* More switch cases can be added */
	default:
		enetc_slow_parsing(m, parse_results);
	}
}

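/* Receive up to work_limit frames: harvest completed Rx BDs starting at
 * next_to_clean, hand their mbufs to the caller and refill the slots that
 * were consumed.  Returns the number of frames received.
 */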
static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
		    struct rte_mbuf **rx_pkts,
		    int work_limit)
{
	int rx_frm_cnt = 0;
	int cleaned_cnt, i;
	struct enetc_swbd *rx_swbd;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;
	rx_swbd = &rx_ring->q_swbd[i];
	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		uint32_t bd_status;

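		/* lstatus is zeroed by software at refill time, so a non-zero
		 * value means hardware has written this BD back with a
		 * completed frame.
		 */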
		rxbd = ENETC_RXBD(*rx_ring, i);
		bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
		if (!bd_status)
			break;

		rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
						rx_ring->crc_len;
		rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
						 rx_ring->crc_len;
		rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
		rx_swbd->buffer_addr->ol_flags = 0;
		enetc_dev_rx_parse(rx_swbd->buffer_addr,
				   rxbd->r.parse_summary);
		rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
		cleaned_cnt++;
		rx_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = &rx_ring->q_swbd[i];
		}

		rx_ring->next_to_clean = i;
		rx_frm_cnt++;
	}

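	/* Every BD consumed above (plus any slack that existed on entry) is
	 * now free; refill those slots with fresh buffers before returning.
	 */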
	enetc_refill_rx_ring(rx_ring, cleaned_cnt);

	return rx_frm_cnt;
}

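/* Rx burst handler: thin wrapper around enetc_clean_rx_ring(). */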
uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;

	return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
}