/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022-2023 Google LLC
 * Copyright (c) 2022-2023 Intel Corporation
 */

#include "gve_ethdev.h"
#include "base/gve_adminq.h"
#include <rte_mbuf_ptype.h>

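/*
 * Refill the buffer queue with fresh mbufs once at least free_thresh
 * buffers have been consumed. mbufs are bulk-allocated from the queue's
 * mempool; on allocation failure the refill is skipped and retried on a
 * later burst. nb_rx_desc is a power of two, so indices wrap by mask.
 */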
static inline void
gve_rx_refill_dqo(struct gve_rx_queue *rxq)
{
	volatile struct gve_rx_desc_dqo *rx_buf_desc;
	struct rte_mbuf *nmb[rxq->nb_rx_hold];
	uint16_t nb_refill = rxq->nb_rx_hold;
	uint16_t next_avail = rxq->bufq_tail;
	struct rte_eth_dev *dev;
	uint64_t dma_addr;
	int i;

	if (rxq->nb_rx_hold < rxq->free_thresh)
		return;

	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, nb_refill))) {
		rxq->stats.no_mbufs_bulk++;
		rxq->stats.no_mbufs += nb_refill;
		dev = &rte_eth_devices[rxq->port_id];
		dev->data->rx_mbuf_alloc_failed += nb_refill;
		PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
			    rxq->port_id, rxq->queue_id);
		return;
	}

	for (i = 0; i < nb_refill; i++) {
		rx_buf_desc = &rxq->rx_ring[next_avail];
		rxq->sw_ring[next_avail] = nmb[i];
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
		rx_buf_desc->header_buf_addr = 0;
		rx_buf_desc->buf_addr = dma_addr;
		next_avail = (next_avail + 1) & (rxq->nb_rx_desc - 1);
	}
	rxq->nb_rx_hold -= nb_refill;
	rte_write32(next_avail, rxq->qrx_tail);

	rxq->bufq_tail = next_avail;
}

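/*
 * Translate the completion descriptor's checksum error bits into mbuf
 * ol_flags. Checksums are reported only when the device marked the packet
 * as L3/L4 processed, and only for packet types it recognized.
 */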
static inline void
gve_parse_csum_ol_flags(struct rte_mbuf *rx_mbuf,
	volatile struct gve_rx_compl_desc_dqo *rx_desc)
{
	if (!rx_desc->l3_l4_processed)
		return;

	if (rx_mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
		if (rx_desc->csum_ip_err)
			rx_mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		else
			rx_mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	}

	if (rx_desc->csum_l4_err) {
		rx_mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		return;
	}
	if (rx_mbuf->packet_type & RTE_PTYPE_L4_MASK)
		rx_mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}

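/*
 * Map the descriptor's packet type index through the ptype LUT negotiated
 * with the device onto DPDK RTE_PTYPE_* values.
 */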
static inline void
gve_rx_set_mbuf_ptype(struct gve_priv *priv, struct rte_mbuf *rx_mbuf,
		      volatile struct gve_rx_compl_desc_dqo *rx_desc)
{
	struct gve_ptype ptype =
		priv->ptype_lut_dqo->ptypes[rx_desc->packet_type];
	rx_mbuf->packet_type = 0;

	switch (ptype.l3_type) {
	case GVE_L3_TYPE_IPV4:
		rx_mbuf->packet_type |= RTE_PTYPE_L3_IPV4;
		break;
	case GVE_L3_TYPE_IPV6:
		rx_mbuf->packet_type |= RTE_PTYPE_L3_IPV6;
		break;
	default:
		break;
	}

	switch (ptype.l4_type) {
	case GVE_L4_TYPE_TCP:
		rx_mbuf->packet_type |= RTE_PTYPE_L4_TCP;
		break;
	case GVE_L4_TYPE_UDP:
		rx_mbuf->packet_type |= RTE_PTYPE_L4_UDP;
		break;
	case GVE_L4_TYPE_ICMP:
		rx_mbuf->packet_type |= RTE_PTYPE_L4_ICMP;
		break;
	case GVE_L4_TYPE_SCTP:
		rx_mbuf->packet_type |= RTE_PTYPE_L4_SCTP;
		break;
	default:
		break;
	}
}

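/*
 * Burst receive. A completion descriptor is valid once its generation bit
 * matches the queue's current generation; the bit flips each time the
 * completion ring wraps. The read barrier keeps the descriptor payload
 * reads ordered after the generation check. Consumed buffers are counted
 * in nb_rx_hold and replenished via gve_rx_refill_dqo() before returning.
 */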
uint16_t
gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	volatile struct gve_rx_compl_desc_dqo *rx_compl_ring;
	volatile struct gve_rx_compl_desc_dqo *rx_desc;
	struct gve_rx_queue *rxq;
	struct rte_mbuf *rxm;
	uint16_t rx_id_bufq;
	uint16_t pkt_len;
	uint16_t rx_id;
	uint16_t nb_rx;
	uint64_t bytes;

	bytes = 0;
	nb_rx = 0;
	rxq = rx_queue;
	rx_id = rxq->rx_tail;
	rx_id_bufq = rxq->next_avail;
	rx_compl_ring = rxq->compl_ring;

	while (nb_rx < nb_pkts) {
		rx_desc = &rx_compl_ring[rx_id];

		/* check status */
		if (rx_desc->generation != rxq->cur_gen_bit)
			break;

		rte_io_rmb();

		if (unlikely(rx_desc->rx_error)) {
			rxq->stats.errors++;
			/* Consume the errored completion and reclaim its
			 * buffer: a bare continue would re-read the same
			 * valid descriptor and spin forever, and skipping
			 * the buffer-queue index would pair later
			 * completions with the wrong mbufs.
			 */
			rx_id++;
			if (rx_id == rxq->nb_rx_desc) {
				rx_id = 0;
				rxq->cur_gen_bit ^= 1;
			}
			rte_pktmbuf_free_seg(rxq->sw_ring[rx_id_bufq]);
			rxq->sw_ring[rx_id_bufq] = NULL;
			rx_id_bufq++;
			if (rx_id_bufq == rxq->nb_rx_desc)
				rx_id_bufq = 0;
			rxq->nb_rx_hold++;
			continue;
		}

		pkt_len = rx_desc->packet_len;

		rx_id++;
		if (rx_id == rxq->nb_rx_desc) {
			rx_id = 0;
			rxq->cur_gen_bit ^= 1;
		}

		rxm = rxq->sw_ring[rx_id_bufq];
		rx_id_bufq++;
		if (rx_id_bufq == rxq->nb_rx_desc)
			rx_id_bufq = 0;
		rxq->nb_rx_hold++;

		rxm->pkt_len = pkt_len;
		rxm->data_len = pkt_len;
		rxm->port = rxq->port_id;
		gve_rx_set_mbuf_ptype(rxq->hw, rxm, rx_desc);
		rxm->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
		gve_parse_csum_ol_flags(rxm, rx_desc);
		rxm->hash.rss = rte_le_to_cpu_32(rx_desc->hash);

		rx_pkts[nb_rx++] = rxm;
		bytes += pkt_len;
	}

	/* Errored completions advance the ring even when nb_rx == 0, so
	 * always publish the ring indices; update stats only for delivered
	 * packets.
	 */
	rxq->rx_tail = rx_id;
	rxq->next_avail = rx_id_bufq;
	if (nb_rx > 0) {
		rxq->stats.packets += nb_rx;
		rxq->stats.bytes += bytes;
	}
	gve_rx_refill_dqo(rxq);

	return nb_rx;
}

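/* Free all mbufs still held in the software ring. */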
static inline void
gve_release_rxq_mbufs_dqo(struct gve_rx_queue *rxq)
{
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->sw_ring[i]) {
			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
			rxq->sw_ring[i] = NULL;
		}
	}

	rxq->nb_avail = rxq->nb_rx_desc;
}

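/* Release a queue's mbufs, rings, and queue-resource memory. */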
void
gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid)
{
	struct gve_rx_queue *q = dev->data->rx_queues[qid];

	if (q == NULL)
		return;

	gve_release_rxq_mbufs_dqo(q);
	rte_free(q->sw_ring);
	rte_memzone_free(q->compl_ring_mz);
	rte_memzone_free(q->mz);
	rte_memzone_free(q->qres_mz);
	q->qres = NULL;
	rte_free(q);
}

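/*
 * Zero both descriptor rings byte by byte (they are device-visible, hence
 * the volatile accesses), clear the software ring, and reset the queue
 * indices. cur_gen_bit starts at 1 so that freshly zeroed completion
 * descriptors read as not yet valid.
 */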
static void
gve_reset_rxq_dqo(struct gve_rx_queue *rxq)
{
	struct rte_mbuf **sw_ring;
	uint32_t size, i;

	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "pointer to rxq is NULL");
		return;
	}

	size = rxq->nb_rx_desc * sizeof(struct gve_rx_desc_dqo);
	for (i = 0; i < size; i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	size = rxq->nb_rx_desc * sizeof(struct gve_rx_compl_desc_dqo);
	for (i = 0; i < size; i++)
		((volatile char *)rxq->compl_ring)[i] = 0;

	sw_ring = rxq->sw_ring;
	for (i = 0; i < rxq->nb_rx_desc; i++)
		sw_ring[i] = NULL;

	rxq->bufq_tail = 0;
	rxq->next_avail = 0;
	rxq->nb_rx_hold = rxq->nb_rx_desc - 1;

	rxq->rx_tail = 0;
	rxq->cur_gen_bit = 1;
}

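/*
 * Allocate and initialize an RX queue: the software ring of mbuf pointers,
 * the DMA zones for the buffer queue, the completion queue, and the queue
 * resources shared with the device. Errors unwind the allocations in
 * reverse order.
 */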
int
gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
		       uint16_t nb_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *conf,
		       struct rte_mempool *pool)
{
	struct gve_priv *hw = dev->data->dev_private;
	const struct rte_memzone *mz;
	struct gve_rx_queue *rxq;
	uint16_t free_thresh;
	uint32_t mbuf_len;
	int err = 0;

	/* Free memory if needed */
	if (dev->data->rx_queues[queue_id]) {
		gve_rx_queue_release_dqo(dev, queue_id);
		dev->data->rx_queues[queue_id] = NULL;
	}

	/* Allocate the RX queue data structure. */
	rxq = rte_zmalloc_socket("gve rxq",
				 sizeof(struct gve_rx_queue),
				 RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for rx queue structure");
		return -ENOMEM;
	}

	/* check free_thresh here */
	free_thresh = conf->rx_free_thresh ?
			conf->rx_free_thresh : GVE_DEFAULT_RX_FREE_THRESH;
	if (free_thresh >= nb_desc) {
		/* Log the requested nb_desc; rxq->nb_rx_desc is still zero
		 * at this point.
		 */
		PMD_DRV_LOG(ERR, "rx_free_thresh (%u) must be less than nb_desc (%u).",
			    free_thresh, nb_desc);
		err = -EINVAL;
		goto free_rxq;
	}

	rxq->nb_rx_desc = nb_desc;
	rxq->free_thresh = free_thresh;
	rxq->queue_id = queue_id;
	rxq->port_id = dev->data->port_id;
	rxq->ntfy_id = hw->num_ntfy_blks / 2 + queue_id;

	rxq->mpool = pool;
	rxq->hw = hw;
	rxq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[rxq->ntfy_id].id)];

	mbuf_len =
		rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_buf_len =
		RTE_MIN((uint16_t)GVE_RX_MAX_BUF_SIZE_DQO,
			RTE_ALIGN_FLOOR(mbuf_len, GVE_RX_BUF_ALIGN_DQO));

	/* Allocate software ring */
	rxq->sw_ring = rte_zmalloc_socket("gve rx sw ring",
					  nb_desc * sizeof(struct rte_mbuf *),
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW RX ring");
		err = -ENOMEM;
		goto free_rxq;
	}

	/* Allocate RX buffer queue */
	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
				      nb_desc * sizeof(struct gve_rx_desc_dqo),
				      PAGE_SIZE, socket_id);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue");
		err = -ENOMEM;
		goto free_rxq_sw_ring;
	}
	rxq->rx_ring = (struct gve_rx_desc_dqo *)mz->addr;
	rxq->rx_ring_phys_addr = mz->iova;
	rxq->mz = mz;

	/* Allocate RX completion queue */
	mz = rte_eth_dma_zone_reserve(dev, "compl_ring", queue_id,
				      nb_desc * sizeof(struct gve_rx_compl_desc_dqo),
				      PAGE_SIZE, socket_id);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX completion queue");
		err = -ENOMEM;
		goto free_rxq_mz;
	}
	/* Zero all the descriptors in the ring */
	memset(mz->addr, 0, nb_desc * sizeof(struct gve_rx_compl_desc_dqo));
	rxq->compl_ring = (struct gve_rx_compl_desc_dqo *)mz->addr;
	rxq->compl_ring_phys_addr = mz->iova;
	rxq->compl_ring_mz = mz;

	mz = rte_eth_dma_zone_reserve(dev, "rxq_res", queue_id,
				      sizeof(struct gve_queue_resources),
				      PAGE_SIZE, socket_id);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX resource");
		err = -ENOMEM;
		goto free_rxq_cq_mz;
	}
	rxq->qres = (struct gve_queue_resources *)mz->addr;
	rxq->qres_mz = mz;

	gve_reset_rxq_dqo(rxq);

	dev->data->rx_queues[queue_id] = rxq;

	return 0;

free_rxq_cq_mz:
	rte_memzone_free(rxq->compl_ring_mz);
free_rxq_mz:
	rte_memzone_free(rxq->mz);
free_rxq_sw_ring:
	rte_free(rxq->sw_ring);
free_rxq:
	rte_free(rxq);
	return err;
}

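/*
 * Populate the buffer queue at start time. nb_rx_desc - 1 buffers are
 * posted, leaving one slot empty so the tail never catches the head; a
 * per-mbuf fallback is attempted if the bulk allocation fails.
 */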
static int
gve_rxq_mbufs_alloc_dqo(struct gve_rx_queue *rxq)
{
	struct rte_mbuf *nmb;
	uint16_t rx_mask;
	uint16_t i;
	int diag;

	rx_mask = rxq->nb_rx_desc - 1;
	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0],
				      rx_mask);
	if (diag < 0) {
		rxq->stats.no_mbufs_bulk++;
		for (i = 0; i < rx_mask; i++) {
			nmb = rte_pktmbuf_alloc(rxq->mpool);
			if (!nmb)
				break;
			rxq->sw_ring[i] = nmb;
		}
		if (i < rx_mask) {
			rxq->stats.no_mbufs += rx_mask - i;
			return -ENOMEM;
		}
	}

	for (i = 0; i < rx_mask; i++) {
		nmb = rxq->sw_ring[i];
		rxq->rx_ring[i].buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
		rxq->rx_ring[i].buf_id = rte_cpu_to_le_16(i);
	}
	rxq->rx_ring[rx_mask].buf_id = rte_cpu_to_le_16(rx_mask);

	rxq->nb_rx_hold = 0;
	rxq->bufq_tail = rx_mask;

	rte_write32(rxq->bufq_tail, rxq->qrx_tail);

	return 0;
}

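/*
 * Start a queue: resolve its doorbell address, mask the queue interrupt
 * (the PMD polls), and post the initial buffers.
 */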
int
gve_rx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct gve_priv *hw = dev->data->dev_private;
	struct gve_rx_queue *rxq;
	int ret;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];

	rxq->qrx_tail = &hw->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];

	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);

	ret = gve_rxq_mbufs_alloc_dqo(rxq);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to alloc Rx queue mbuf");
		return ret;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

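/* Stop a queue: free its mbufs and reset it to its initial state. */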
int
gve_rx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct gve_rx_queue *rxq;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];
	gve_release_rxq_mbufs_dqo(rxq);
	gve_reset_rxq_dqo(rxq);

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

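/*
 * Stop all RX queues: tear them down on the device via the admin queue,
 * then stop each software queue.
 */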
void
gve_stop_rx_queues_dqo(struct rte_eth_dev *dev)
{
	struct gve_priv *hw = dev->data->dev_private;
	uint16_t i;
	int err;

	err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);
	if (err != 0)
		PMD_DRV_LOG(WARNING, "failed to destroy rxqs");

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (gve_rx_queue_stop_dqo(dev, i) != 0)
			PMD_DRV_LOG(WARNING, "Failed to stop Rx queue %d", i);
}

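/* Install the DQO burst receive handler on the device. */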
void
gve_set_rx_function_dqo(struct rte_eth_dev *dev)
{
	dev->rx_pkt_burst = gve_rx_burst_dqo;
}