/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2022 Intel Corporation
 */

#include "gve_ethdev.h"
#include "base/gve_adminq.h"

#define GVE_PKT_CONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))

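/*
 * Replenish the RX descriptor ring with fresh mbufs. The refill is done
 * in at most two contiguous chunks: first from the current index up to
 * the end of the ring (the wrap-around chunk), then from the ring base.
 * The doorbell is written only if at least one buffer was posted.
 */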
static inline void
gve_rx_refill(struct gve_rx_queue *rxq)
{
	uint16_t mask = rxq->nb_rx_desc - 1;
	uint16_t idx = rxq->next_avail & mask;
	uint32_t next_avail = rxq->next_avail;
	uint16_t nb_alloc, i;
	struct rte_mbuf *nmb;
	int diag;

	/* First chunk: fill from the current index up to the end of the ring (wrap around). */
	nb_alloc = rxq->nb_rx_desc - idx;
	if (nb_alloc <= rxq->nb_avail) {
		diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[idx], nb_alloc);
		if (diag < 0) {
			rxq->stats.no_mbufs_bulk++;
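			/* Bulk allocation failed; fall back to allocating mbufs one at a time. */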
			for (i = 0; i < nb_alloc; i++) {
				nmb = rte_pktmbuf_alloc(rxq->mpool);
				if (!nmb)
					break;
				rxq->sw_ring[idx + i] = nmb;
			}
			if (i != nb_alloc) {
				rxq->stats.no_mbufs += nb_alloc - i;
				nb_alloc = i;
			}
		}
		rxq->nb_avail -= nb_alloc;
		next_avail += nb_alloc;

		/* Queue page list mode posts fixed page addresses, so no per-buffer refill is needed. */
		if (rxq->is_gqi_qpl) {
			idx += nb_alloc;
		} else {
			for (i = 0; i < nb_alloc; i++) {
				nmb = rxq->sw_ring[idx];
				rxq->rx_data_ring[idx].addr =
					rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
				idx++;
			}
		}
		if (idx == rxq->nb_rx_desc)
			idx = 0;
	}

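	/* Second chunk: continue the refill from the base of the ring. */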
	if (rxq->nb_avail > 0) {
		nb_alloc = rxq->nb_avail;
		if (rxq->nb_rx_desc < idx + rxq->nb_avail)
			nb_alloc = rxq->nb_rx_desc - idx;
		diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[idx], nb_alloc);
		if (diag < 0) {
			rxq->stats.no_mbufs_bulk++;
			for (i = 0; i < nb_alloc; i++) {
				nmb = rte_pktmbuf_alloc(rxq->mpool);
				if (!nmb)
					break;
				rxq->sw_ring[idx + i] = nmb;
			}
			if (i != nb_alloc) {
				rxq->stats.no_mbufs += nb_alloc - i;
				nb_alloc = i;
			}
		}
		rxq->nb_avail -= nb_alloc;
		next_avail += nb_alloc;

		if (!rxq->is_gqi_qpl) {
			for (i = 0; i < nb_alloc; i++) {
				nmb = rxq->sw_ring[idx];
				rxq->rx_data_ring[idx].addr =
					rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
				idx++;
			}
		}
	}

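	/* Write the doorbell only if new buffers were posted. */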
	if (next_avail != rxq->next_avail) {
		rte_write32(rte_cpu_to_be_32(next_avail), rxq->qrx_tail);
		rxq->next_avail = next_avail;
	}
}

/*
 * This method processes a single rte_mbuf and handles packet segmentation.
 * In QPL mode it copies the packet data from the queue page list into the
 * mbuf.
 */
static void
gve_rx_mbuf(struct gve_rx_queue *rxq, struct rte_mbuf *rxe, uint16_t len,
	    uint16_t rx_id)
{
	uint16_t padding = 0;
	uint64_t addr;

	rxe->data_len = len;
	if (!rxq->ctx.mbuf_head) {
		rxq->ctx.mbuf_head = rxe;
		rxq->ctx.mbuf_tail = rxe;
		rxe->nb_segs = 1;
		rxe->pkt_len = len;
		rxe->port = rxq->port_id;
		rxe->ol_flags = 0;
		padding = GVE_RX_PAD;
	} else {
		rxq->ctx.mbuf_head->pkt_len += len;
		rxq->ctx.mbuf_head->nb_segs += 1;
		rxq->ctx.mbuf_tail->next = rxe;
		rxq->ctx.mbuf_tail = rxe;
	}
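	/*
	 * In QPL mode the device DMAs into the queue page list, so the
	 * fragment has to be copied from the QPL page into the mbuf.
	 */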
	if (rxq->is_gqi_qpl) {
		addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + padding;
		rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
			   (void *)(size_t)addr, len);
	}
}

/*
 * This method processes a single packet fragment associated with the
 * passed packet descriptor.
 * This method returns whether the fragment is the last fragment of a
 * packet.
 */
static bool
gve_rx(struct gve_rx_queue *rxq, volatile struct gve_rx_desc *rxd, uint16_t rx_id)
{
	bool is_last_frag = !GVE_PKT_CONT_BIT_IS_SET(rxd->flags_seq);
	uint16_t frag_size = rte_be_to_cpu_16(rxd->len);
	struct gve_rx_ctx *ctx = &rxq->ctx;
	bool is_first_frag = ctx->total_frags == 0;
	struct rte_mbuf *rxe;

	if (ctx->drop_pkt)
		goto finish_frag;

	if (rxd->flags_seq & GVE_RXF_ERR) {
		ctx->drop_pkt = true;
		rxq->stats.errors++;
		goto finish_frag;
	}

	if (is_first_frag)
		frag_size -= GVE_RX_PAD;

	rxe = rxq->sw_ring[rx_id];
	gve_rx_mbuf(rxq, rxe, frag_size, rx_id);
	rxq->stats.bytes += frag_size;

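	/* Only the first fragment carries the packet type and RSS metadata. */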
	if (is_first_frag) {
		if (rxd->flags_seq & GVE_RXF_TCP)
			rxe->packet_type |= RTE_PTYPE_L4_TCP;
		if (rxd->flags_seq & GVE_RXF_UDP)
			rxe->packet_type |= RTE_PTYPE_L4_UDP;
		if (rxd->flags_seq & GVE_RXF_IPV4)
			rxe->packet_type |= RTE_PTYPE_L3_IPV4;
		if (rxd->flags_seq & GVE_RXF_IPV6)
			rxe->packet_type |= RTE_PTYPE_L3_IPV6;

		if (gve_needs_rss(rxd->flags_seq)) {
			rxe->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
			rxe->hash.rss = rte_be_to_cpu_32(rxd->rss_hash);
		}
	}

finish_frag:
	ctx->total_frags++;
	return is_last_frag;
}

static void
gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
	ctx->mbuf_head = NULL;
	ctx->mbuf_tail = NULL;
	ctx->drop_pkt = false;
	ctx->total_frags = 0;
}

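/*
 * Receive burst: descriptors are consumed in order for as long as their
 * sequence number matches the expected one; the fragments of a packet
 * are chained until the last fragment completes it.
 */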
uint16_t
gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	volatile struct gve_rx_desc *rxr, *rxd;
	struct gve_rx_queue *rxq = rx_queue;
	struct gve_rx_ctx *ctx = &rxq->ctx;
	uint16_t rx_id = rxq->rx_tail;
	uint16_t nb_rx;

	rxr = rxq->rx_desc_ring;
	nb_rx = 0;

	while (nb_rx < nb_pkts) {
		rxd = &rxr[rx_id];
		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
			break;

		if (gve_rx(rxq, rxd, rx_id)) {
			if (!ctx->drop_pkt)
				rx_pkts[nb_rx++] = ctx->mbuf_head;
			rxq->nb_avail += ctx->total_frags;
			gve_rx_ctx_clear(ctx);
		}

		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
	}

	rxq->rx_tail = rx_id;

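	/* Refill only once enough descriptors have been consumed. */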
	if (rxq->nb_avail > rxq->free_thresh)
		gve_rx_refill(rxq);

	if (nb_rx)
		rxq->stats.packets += nb_rx;

	return nb_rx;
}

static inline void
gve_reset_rxq(struct gve_rx_queue *rxq)
{
	struct rte_mbuf **sw_ring;
	uint32_t size, i;

	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "pointer to rxq is NULL");
		return;
	}

	size = rxq->nb_rx_desc * sizeof(struct gve_rx_desc);
	for (i = 0; i < size; i++)
		((volatile char *)rxq->rx_desc_ring)[i] = 0;

	size = rxq->nb_rx_desc * sizeof(union gve_rx_data_slot);
	for (i = 0; i < size; i++)
		((volatile char *)rxq->rx_data_ring)[i] = 0;

	sw_ring = rxq->sw_ring;
	for (i = 0; i < rxq->nb_rx_desc; i++)
		sw_ring[i] = NULL;

	rxq->rx_tail = 0;
	rxq->next_avail = 0;
	rxq->nb_avail = rxq->nb_rx_desc;
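	/* The device writes the first descriptor with sequence number 1. */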
	rxq->expected_seqno = 1;
}

static inline void
gve_release_rxq_mbufs(struct gve_rx_queue *rxq)
{
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->sw_ring[i]) {
			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
			rxq->sw_ring[i] = NULL;
		}
	}

	rxq->nb_avail = rxq->nb_rx_desc;
}

void
gve_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct gve_rx_queue *q = dev->data->rx_queues[qid];

	if (!q)
		return;

	if (q->is_gqi_qpl) {
		gve_teardown_queue_page_list(q->hw, q->qpl);
		q->qpl = NULL;
	}

	gve_release_rxq_mbufs(q);
	rte_free(q->sw_ring);
	rte_memzone_free(q->data_mz);
	rte_memzone_free(q->mz);
	rte_memzone_free(q->qres_mz);
	q->qres = NULL;
	rte_free(q);
}

int
gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
		uint16_t nb_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *conf, struct rte_mempool *pool)
{
	struct gve_priv *hw = dev->data->dev_private;
	const struct rte_memzone *mz;
	struct gve_rx_queue *rxq;
	uint16_t free_thresh;
	uint32_t mbuf_len;
	int err = 0;

	/* Ring size is required to be a power of two. */
	if (!rte_is_power_of_2(nb_desc)) {
		PMD_DRV_LOG(ERR, "Invalid ring size %u. GVE ring size must be a power of 2.",
			    nb_desc);
		return -EINVAL;
	}

	/* Free memory if needed. */
	if (dev->data->rx_queues[queue_id]) {
		gve_rx_queue_release(dev, queue_id);
		dev->data->rx_queues[queue_id] = NULL;
	}

	/* Allocate the RX queue data structure. */
	rxq = rte_zmalloc_socket("gve rxq",
				 sizeof(struct gve_rx_queue),
				 RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (!rxq) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for rx queue structure");
		err = -ENOMEM;
		goto err_rxq;
	}

	free_thresh = conf->rx_free_thresh ? conf->rx_free_thresh : GVE_DEFAULT_RX_FREE_THRESH;
	if (free_thresh >= nb_desc) {
		PMD_DRV_LOG(ERR, "rx_free_thresh (%u) must be less than nb_desc (%u).",
			    free_thresh, nb_desc);
		err = -EINVAL;
		goto err_rxq;
	}

	rxq->nb_rx_desc = nb_desc;
	rxq->free_thresh = free_thresh;
	rxq->queue_id = queue_id;
	rxq->port_id = dev->data->port_id;
	rxq->ntfy_id = hw->num_ntfy_blks / 2 + queue_id;
	rxq->is_gqi_qpl = hw->queue_format == GVE_GQI_QPL_FORMAT;
	rxq->mpool = pool;
	rxq->hw = hw;
	rxq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[rxq->ntfy_id].id)];

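	/*
	 * Derive the receive buffer size from the mempool: clamp it to the
	 * GQI maximum and align it down to the GQI buffer alignment.
	 */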
	mbuf_len =
		rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_buf_len =
		RTE_MIN((uint16_t)GVE_RX_MAX_BUF_SIZE_GQI,
			RTE_ALIGN_FLOOR(mbuf_len, GVE_RX_BUF_ALIGN_GQI));

	/* Allocate software ring */
	rxq->sw_ring = rte_zmalloc_socket("gve rx sw ring", sizeof(struct rte_mbuf *) * nb_desc,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->sw_ring) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW RX ring");
		err = -ENOMEM;
		goto err_rxq;
	}

	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
				      nb_desc * sizeof(struct gve_rx_desc),
				      PAGE_SIZE, socket_id);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
		err = -ENOMEM;
		goto err_sw_ring;
	}
	rxq->rx_desc_ring = (struct gve_rx_desc *)mz->addr;
	rxq->rx_ring_phys_addr = mz->iova;
	rxq->mz = mz;

	mz = rte_eth_dma_zone_reserve(dev, "gve rx data ring", queue_id,
				      sizeof(union gve_rx_data_slot) * nb_desc,
				      PAGE_SIZE, socket_id);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for RX data ring");
		err = -ENOMEM;
		goto err_rx_ring;
	}
	rxq->rx_data_ring = (union gve_rx_data_slot *)mz->addr;
	rxq->data_mz = mz;

	/* Allocate and register QPL for the queue. */
	if (rxq->is_gqi_qpl) {
		rxq->qpl = gve_setup_queue_page_list(hw, queue_id, true,
						     nb_desc);
		if (!rxq->qpl) {
			err = -ENOMEM;
			PMD_DRV_LOG(ERR,
				    "Failed to alloc rx qpl for queue %hu.",
				    queue_id);
			goto err_data_ring;
		}
	}

	mz = rte_eth_dma_zone_reserve(dev, "rxq_res", queue_id,
				      sizeof(struct gve_queue_resources),
				      PAGE_SIZE, socket_id);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX resource");
		err = -ENOMEM;
		goto err_qpl;
	}
	rxq->qres = (struct gve_queue_resources *)mz->addr;
	rxq->qres_mz = mz;

	gve_reset_rxq(rxq);

	dev->data->rx_queues[queue_id] = rxq;

	return 0;
err_qpl:
	if (rxq->is_gqi_qpl) {
		gve_teardown_queue_page_list(hw, rxq->qpl);
		rxq->qpl = NULL;
	}
err_data_ring:
	rte_memzone_free(rxq->data_mz);
err_rx_ring:
	rte_memzone_free(rxq->mz);
err_sw_ring:
	rte_free(rxq->sw_ring);
err_rxq:
	rte_free(rxq);
	return err;
}

static int
gve_rxq_mbufs_alloc(struct gve_rx_queue *rxq)
{
	struct rte_mbuf *nmb;
	uint16_t i;
	int diag;

	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
	if (diag < 0) {
		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
			nmb = rte_pktmbuf_alloc(rxq->mpool);
			if (!nmb)
				break;
			rxq->sw_ring[i] = nmb;
		}
		if (i < rxq->nb_rx_desc - 1)
			return -ENOMEM;
	}
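	/* Only nb_rx_desc - 1 buffers are posted to the device; one slot stays empty. */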
	rxq->nb_avail = 0;
	rxq->next_avail = rxq->nb_rx_desc - 1;

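	/*
	 * In QPL mode each slot carries a fixed offset into the queue page
	 * list; otherwise the slots carry the IOVAs of the posted mbufs.
	 */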
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->is_gqi_qpl) {
			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(i * PAGE_SIZE);
		} else {
			if (i == rxq->nb_rx_desc - 1)
				break;
			nmb = rxq->sw_ring[i];
			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
		}
	}

	rte_write32(rte_cpu_to_be_32(rxq->next_avail), rxq->qrx_tail);

	return 0;
}

int
gve_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct gve_priv *hw = dev->data->dev_private;
	struct gve_rx_queue *rxq;
	int ret;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];

	rxq->qrx_tail = &hw->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];

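	/* Keep the queue interrupt masked: the PMD receives by polling. */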
	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);

	ret = gve_rxq_mbufs_alloc(rxq);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to alloc Rx queue mbuf");
		return ret;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

int
gve_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct gve_rx_queue *rxq;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];
	gve_release_rxq_mbufs(rxq);
	gve_reset_rxq(rxq);

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

void
gve_stop_rx_queues(struct rte_eth_dev *dev)
{
	struct gve_priv *hw = dev->data->dev_private;
	uint16_t i;
	int err;

	if (!gve_is_gqi(hw))
		return gve_stop_rx_queues_dqo(dev);

	err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);
	if (err != 0)
		PMD_DRV_LOG(WARNING, "failed to destroy rxqs");

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (gve_rx_queue_stop(dev, i) != 0)
			PMD_DRV_LOG(WARNING, "Failed to stop Rx queue %d", i);
}

void
gve_set_rx_function(struct rte_eth_dev *dev)
{
	dev->rx_pkt_burst = gve_rx_burst;
}