xref: /dpdk/drivers/net/gve/gve_ethdev.c (revision 7bb1168d984aaa7e204c52d13c4701eac0f82989)
1457967cdSJunfeng Guo /* SPDX-License-Identifier: BSD-3-Clause
2457967cdSJunfeng Guo  * Copyright(C) 2022 Intel Corporation
3457967cdSJunfeng Guo  */
4457967cdSJunfeng Guo 
5457967cdSJunfeng Guo #include "gve_ethdev.h"
6457967cdSJunfeng Guo #include "base/gve_adminq.h"
7457967cdSJunfeng Guo #include "base/gve_register.h"
8748d0e7fSRushil Gupta #include "base/gve_osdep.h"
9748d0e7fSRushil Gupta #include "gve_version.h"
10457967cdSJunfeng Guo 
/* Write the driver version string, byte by byte, into the device's
 * driver-version register, terminated by a newline character.
 */
static void
gve_write_version(uint8_t *driver_version_register)
{
	const char *ver;

	for (ver = gve_version_string(); *ver != '\0'; ver++)
		writeb(*ver, driver_version_register);
	writeb('\n', driver_version_register);
}
21457967cdSJunfeng Guo 
22457967cdSJunfeng Guo static int
234bec2d0bSJunfeng Guo gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
244bec2d0bSJunfeng Guo {
254bec2d0bSJunfeng Guo 	char z_name[RTE_MEMZONE_NAMESIZE];
264bec2d0bSJunfeng Guo 	struct gve_queue_page_list *qpl;
274bec2d0bSJunfeng Guo 	const struct rte_memzone *mz;
284bec2d0bSJunfeng Guo 	dma_addr_t page_bus;
294bec2d0bSJunfeng Guo 	uint32_t i;
304bec2d0bSJunfeng Guo 
314bec2d0bSJunfeng Guo 	if (priv->num_registered_pages + pages >
324bec2d0bSJunfeng Guo 	    priv->max_registered_pages) {
334bec2d0bSJunfeng Guo 		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
344bec2d0bSJunfeng Guo 			    priv->num_registered_pages + pages,
354bec2d0bSJunfeng Guo 			    priv->max_registered_pages);
364bec2d0bSJunfeng Guo 		return -EINVAL;
374bec2d0bSJunfeng Guo 	}
384bec2d0bSJunfeng Guo 	qpl = &priv->qpl[id];
394bec2d0bSJunfeng Guo 	snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
404bec2d0bSJunfeng Guo 	mz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,
414bec2d0bSJunfeng Guo 					 rte_socket_id(),
424bec2d0bSJunfeng Guo 					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
434bec2d0bSJunfeng Guo 	if (mz == NULL) {
444bec2d0bSJunfeng Guo 		PMD_DRV_LOG(ERR, "Failed to alloc %s.", z_name);
454bec2d0bSJunfeng Guo 		return -ENOMEM;
464bec2d0bSJunfeng Guo 	}
474bec2d0bSJunfeng Guo 	qpl->page_buses = rte_zmalloc("qpl page buses", pages * sizeof(dma_addr_t), 0);
484bec2d0bSJunfeng Guo 	if (qpl->page_buses == NULL) {
494bec2d0bSJunfeng Guo 		PMD_DRV_LOG(ERR, "Failed to alloc qpl %u page buses", id);
504bec2d0bSJunfeng Guo 		return -ENOMEM;
514bec2d0bSJunfeng Guo 	}
524bec2d0bSJunfeng Guo 	page_bus = mz->iova;
534bec2d0bSJunfeng Guo 	for (i = 0; i < pages; i++) {
544bec2d0bSJunfeng Guo 		qpl->page_buses[i] = page_bus;
554bec2d0bSJunfeng Guo 		page_bus += PAGE_SIZE;
564bec2d0bSJunfeng Guo 	}
574bec2d0bSJunfeng Guo 	qpl->id = id;
584bec2d0bSJunfeng Guo 	qpl->mz = mz;
594bec2d0bSJunfeng Guo 	qpl->num_entries = pages;
604bec2d0bSJunfeng Guo 
614bec2d0bSJunfeng Guo 	priv->num_registered_pages += pages;
624bec2d0bSJunfeng Guo 
634bec2d0bSJunfeng Guo 	return 0;
644bec2d0bSJunfeng Guo }
654bec2d0bSJunfeng Guo 
664bec2d0bSJunfeng Guo static void
674bec2d0bSJunfeng Guo gve_free_qpls(struct gve_priv *priv)
684bec2d0bSJunfeng Guo {
694bec2d0bSJunfeng Guo 	uint16_t nb_txqs = priv->max_nb_txq;
704bec2d0bSJunfeng Guo 	uint16_t nb_rxqs = priv->max_nb_rxq;
714bec2d0bSJunfeng Guo 	uint32_t i;
724bec2d0bSJunfeng Guo 
7388cfffc6SJunfeng Guo 	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
7488cfffc6SJunfeng Guo 		return;
7588cfffc6SJunfeng Guo 
764bec2d0bSJunfeng Guo 	for (i = 0; i < nb_txqs + nb_rxqs; i++) {
774bec2d0bSJunfeng Guo 		if (priv->qpl[i].mz != NULL)
784bec2d0bSJunfeng Guo 			rte_memzone_free(priv->qpl[i].mz);
794bec2d0bSJunfeng Guo 		rte_free(priv->qpl[i].page_buses);
804bec2d0bSJunfeng Guo 	}
814bec2d0bSJunfeng Guo 
824bec2d0bSJunfeng Guo 	rte_free(priv->qpl);
834bec2d0bSJunfeng Guo }
844bec2d0bSJunfeng Guo 
854bec2d0bSJunfeng Guo static int
8671dea04cSJunfeng Guo gve_dev_configure(struct rte_eth_dev *dev)
87457967cdSJunfeng Guo {
8871dea04cSJunfeng Guo 	struct gve_priv *priv = dev->data->dev_private;
8971dea04cSJunfeng Guo 
9071dea04cSJunfeng Guo 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
9171dea04cSJunfeng Guo 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
9271dea04cSJunfeng Guo 
9371dea04cSJunfeng Guo 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
9471dea04cSJunfeng Guo 		priv->enable_rsc = 1;
9571dea04cSJunfeng Guo 
96457967cdSJunfeng Guo 	return 0;
97457967cdSJunfeng Guo }
98457967cdSJunfeng Guo 
/* ethdev .link_update: report link state.  While the port is stopped
 * the link is reported down with no speed; once started the link is
 * reported up and the speed is queried from the device via adminq.
 */
static int
gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct gve_priv *priv = dev->data->dev_private;
	struct rte_eth_link link;
	int err;

	memset(&link, 0, sizeof(link));
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = RTE_ETH_LINK_AUTONEG;

	if (!dev->data->dev_started) {
		link.link_status = RTE_ETH_LINK_DOWN;
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	} else {
		link.link_status = RTE_ETH_LINK_UP;
		PMD_DRV_LOG(DEBUG, "Get link status from hw");
		err = gve_adminq_report_link_speed(priv);
		if (err) {
			/* Best effort: keep link up, mark speed unknown. */
			PMD_DRV_LOG(ERR, "Failed to get link speed.");
			priv->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
		}
		link.link_speed = priv->link_speed;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
126440f551dSJunfeng Guo 
127440f551dSJunfeng Guo static int
128b044845bSJunfeng Guo gve_start_queues(struct rte_eth_dev *dev)
129457967cdSJunfeng Guo {
1304bec2d0bSJunfeng Guo 	struct gve_priv *priv = dev->data->dev_private;
131b044845bSJunfeng Guo 	uint16_t num_queues;
1324bec2d0bSJunfeng Guo 	uint16_t i;
133b044845bSJunfeng Guo 	int ret;
1344bec2d0bSJunfeng Guo 
135b044845bSJunfeng Guo 	num_queues = dev->data->nb_tx_queues;
1364bec2d0bSJunfeng Guo 	priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;
137b044845bSJunfeng Guo 	ret = gve_adminq_create_tx_queues(priv, num_queues);
138b044845bSJunfeng Guo 	if (ret != 0) {
139b044845bSJunfeng Guo 		PMD_DRV_LOG(ERR, "Failed to create %u tx queues.", num_queues);
140b044845bSJunfeng Guo 		return ret;
1414bec2d0bSJunfeng Guo 	}
142b044845bSJunfeng Guo 	for (i = 0; i < num_queues; i++)
143b044845bSJunfeng Guo 		if (gve_tx_queue_start(dev, i) != 0) {
144b044845bSJunfeng Guo 			PMD_DRV_LOG(ERR, "Fail to start Tx queue %d", i);
145b044845bSJunfeng Guo 			goto err_tx;
1464bec2d0bSJunfeng Guo 		}
1474bec2d0bSJunfeng Guo 
1484bec2d0bSJunfeng Guo 	num_queues = dev->data->nb_rx_queues;
1494bec2d0bSJunfeng Guo 	priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;
150b044845bSJunfeng Guo 	ret = gve_adminq_create_rx_queues(priv, num_queues);
151b044845bSJunfeng Guo 	if (ret != 0) {
152b044845bSJunfeng Guo 		PMD_DRV_LOG(ERR, "Failed to create %u rx queues.", num_queues);
1534bec2d0bSJunfeng Guo 		goto err_tx;
1544bec2d0bSJunfeng Guo 	}
1554bec2d0bSJunfeng Guo 	for (i = 0; i < num_queues; i++) {
15688cfffc6SJunfeng Guo 		if (gve_is_gqi(priv))
157b044845bSJunfeng Guo 			ret = gve_rx_queue_start(dev, i);
15888cfffc6SJunfeng Guo 		else
159b044845bSJunfeng Guo 			ret = gve_rx_queue_start_dqo(dev, i);
160b044845bSJunfeng Guo 		if (ret != 0) {
161b044845bSJunfeng Guo 			PMD_DRV_LOG(ERR, "Fail to start Rx queue %d", i);
1624bec2d0bSJunfeng Guo 			goto err_rx;
1634bec2d0bSJunfeng Guo 		}
1644bec2d0bSJunfeng Guo 	}
1654bec2d0bSJunfeng Guo 
166457967cdSJunfeng Guo 	return 0;
1674bec2d0bSJunfeng Guo 
1684bec2d0bSJunfeng Guo err_rx:
1694bec2d0bSJunfeng Guo 	gve_stop_rx_queues(dev);
1704bec2d0bSJunfeng Guo err_tx:
1714bec2d0bSJunfeng Guo 	gve_stop_tx_queues(dev);
172b044845bSJunfeng Guo 	return ret;
173b044845bSJunfeng Guo }
174b044845bSJunfeng Guo 
175b044845bSJunfeng Guo static int
176b044845bSJunfeng Guo gve_dev_start(struct rte_eth_dev *dev)
177b044845bSJunfeng Guo {
178b044845bSJunfeng Guo 	int ret;
179b044845bSJunfeng Guo 
180b044845bSJunfeng Guo 	ret = gve_start_queues(dev);
181b044845bSJunfeng Guo 	if (ret != 0) {
182b044845bSJunfeng Guo 		PMD_DRV_LOG(ERR, "Failed to start queues");
183b044845bSJunfeng Guo 		return ret;
184b044845bSJunfeng Guo 	}
185b044845bSJunfeng Guo 
186b044845bSJunfeng Guo 	dev->data->dev_started = 1;
187b044845bSJunfeng Guo 	gve_link_update(dev, 0);
188b044845bSJunfeng Guo 
189b044845bSJunfeng Guo 	return 0;
190457967cdSJunfeng Guo }
191457967cdSJunfeng Guo 
/* ethdev .dev_stop: mark link down, stop all Tx then Rx queues, then
 * clear the started flag.  Always returns 0.
 */
static int
gve_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	gve_stop_tx_queues(dev);
	gve_stop_rx_queues(dev);

	dev->data->dev_started = 0;

	return 0;
}
204457967cdSJunfeng Guo 
/* ethdev .dev_close: stop the port if still running, release every
 * queue (format-specific release path), free the QPLs and the adminq.
 * Returns the error from gve_dev_stop(), if any.
 */
static int
gve_dev_close(struct rte_eth_dev *dev)
{
	struct gve_priv *priv = dev->data->dev_private;
	int err = 0;
	uint16_t i;

	if (dev->data->dev_started) {
		err = gve_dev_stop(dev);
		if (err != 0)
			PMD_DRV_LOG(ERR, "Failed to stop dev.");
	}

	/* Queue release differs between GQI and DQO queue formats. */
	if (gve_is_gqi(priv)) {
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			gve_tx_queue_release(dev, i);

		for (i = 0; i < dev->data->nb_rx_queues; i++)
			gve_rx_queue_release(dev, i);
	} else {
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			gve_tx_queue_release_dqo(dev, i);

		for (i = 0; i < dev->data->nb_rx_queues; i++)
			gve_rx_queue_release_dqo(dev, i);
	}

	gve_free_qpls(priv);
	rte_free(priv->adminq);

	/* NOTE(review): presumably mac_addrs points into priv and must not
	 * be freed by the ethdev layer — confirm against the probe path.
	 */
	dev->data->mac_addrs = NULL;

	return err;
}
239457967cdSJunfeng Guo 
/* Tell the device which driver/OS version is running by filling a
 * gve_driver_info structure in DMA-able memory and passing its IOVA to
 * the adminq.  A device that does not implement the command (-EOPNOTSUPP)
 * is treated as compatible.  Returns 0 or a negative errno.
 */
static int
gve_verify_driver_compatibility(struct gve_priv *priv)
{
	const struct rte_memzone *driver_info_mem;
	struct gve_driver_info *driver_info;
	int err;

	driver_info_mem = rte_memzone_reserve_aligned("verify_driver_compatibility",
			sizeof(struct gve_driver_info),
			rte_socket_id(),
			RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);

	if (driver_info_mem == NULL) {
		PMD_DRV_LOG(ERR,
		    "Could not alloc memzone for driver compatibility");
		return -ENOMEM;
	}
	driver_info = (struct gve_driver_info *)driver_info_mem->addr;

	/* Multi-byte fields are big-endian on the wire. */
	*driver_info = (struct gve_driver_info) {
		.os_type = 5, /* DPDK */
		.driver_major = GVE_VERSION_MAJOR,
		.driver_minor = GVE_VERSION_MINOR,
		.driver_sub = GVE_VERSION_SUB,
		.os_version_major = cpu_to_be32(DPDK_VERSION_MAJOR),
		.os_version_minor = cpu_to_be32(DPDK_VERSION_MINOR),
		.os_version_sub = cpu_to_be32(DPDK_VERSION_SUB),
		.driver_capability_flags = {
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
		},
	};

	populate_driver_version_strings((char *)driver_info->os_version_str1,
			(char *)driver_info->os_version_str2);

	err = gve_adminq_verify_driver_compatibility(priv,
		sizeof(struct gve_driver_info),
		(dma_addr_t)driver_info_mem->iova);
	/* It's ok if the device doesn't support this */
	if (err == -EOPNOTSUPP)
		err = 0;

	rte_memzone_free(driver_info_mem);
	return err;
}
288748d0e7fSRushil Gupta 
289748d0e7fSRushil Gupta static int
29071dea04cSJunfeng Guo gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
29171dea04cSJunfeng Guo {
29271dea04cSJunfeng Guo 	struct gve_priv *priv = dev->data->dev_private;
29371dea04cSJunfeng Guo 
29471dea04cSJunfeng Guo 	dev_info->device = dev->device;
29571dea04cSJunfeng Guo 	dev_info->max_mac_addrs = 1;
29671dea04cSJunfeng Guo 	dev_info->max_rx_queues = priv->max_nb_rxq;
29771dea04cSJunfeng Guo 	dev_info->max_tx_queues = priv->max_nb_txq;
29871dea04cSJunfeng Guo 	dev_info->min_rx_bufsize = GVE_MIN_BUF_SIZE;
29971dea04cSJunfeng Guo 	dev_info->max_rx_pktlen = GVE_MAX_RX_PKTLEN;
30071dea04cSJunfeng Guo 	dev_info->max_mtu = GVE_MAX_MTU;
30171dea04cSJunfeng Guo 	dev_info->min_mtu = GVE_MIN_MTU;
30271dea04cSJunfeng Guo 
30371dea04cSJunfeng Guo 	dev_info->rx_offload_capa = 0;
304a46583cfSJunfeng Guo 	dev_info->tx_offload_capa =
305a46583cfSJunfeng Guo 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS	|
306a46583cfSJunfeng Guo 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
307a46583cfSJunfeng Guo 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
308a46583cfSJunfeng Guo 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	|
309a46583cfSJunfeng Guo 		RTE_ETH_TX_OFFLOAD_TCP_TSO;
31071dea04cSJunfeng Guo 
31171dea04cSJunfeng Guo 	if (priv->queue_format == GVE_DQO_RDA_FORMAT)
31271dea04cSJunfeng Guo 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
31371dea04cSJunfeng Guo 
31471dea04cSJunfeng Guo 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
31571dea04cSJunfeng Guo 		.rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH,
31671dea04cSJunfeng Guo 		.rx_drop_en = 0,
31771dea04cSJunfeng Guo 		.offloads = 0,
31871dea04cSJunfeng Guo 	};
31971dea04cSJunfeng Guo 
32071dea04cSJunfeng Guo 	dev_info->default_txconf = (struct rte_eth_txconf) {
32171dea04cSJunfeng Guo 		.tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH,
322a14d391cSJunfeng Guo 		.tx_rs_thresh = GVE_DEFAULT_TX_RS_THRESH,
32371dea04cSJunfeng Guo 		.offloads = 0,
32471dea04cSJunfeng Guo 	};
32571dea04cSJunfeng Guo 
32671dea04cSJunfeng Guo 	dev_info->default_rxportconf.ring_size = priv->rx_desc_cnt;
32771dea04cSJunfeng Guo 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
32871dea04cSJunfeng Guo 		.nb_max = priv->rx_desc_cnt,
32971dea04cSJunfeng Guo 		.nb_min = priv->rx_desc_cnt,
33071dea04cSJunfeng Guo 		.nb_align = 1,
33171dea04cSJunfeng Guo 	};
33271dea04cSJunfeng Guo 
33371dea04cSJunfeng Guo 	dev_info->default_txportconf.ring_size = priv->tx_desc_cnt;
33471dea04cSJunfeng Guo 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
33571dea04cSJunfeng Guo 		.nb_max = priv->tx_desc_cnt,
33671dea04cSJunfeng Guo 		.nb_min = priv->tx_desc_cnt,
33771dea04cSJunfeng Guo 		.nb_align = 1,
33871dea04cSJunfeng Guo 	};
33971dea04cSJunfeng Guo 
34071dea04cSJunfeng Guo 	return 0;
34171dea04cSJunfeng Guo }
34271dea04cSJunfeng Guo 
34371dea04cSJunfeng Guo static int
3444f6b1dd8SJunfeng Guo gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3454f6b1dd8SJunfeng Guo {
3464f6b1dd8SJunfeng Guo 	uint16_t i;
3474f6b1dd8SJunfeng Guo 
3484f6b1dd8SJunfeng Guo 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
3494f6b1dd8SJunfeng Guo 		struct gve_tx_queue *txq = dev->data->tx_queues[i];
3504f6b1dd8SJunfeng Guo 		if (txq == NULL)
3514f6b1dd8SJunfeng Guo 			continue;
3524f6b1dd8SJunfeng Guo 
353c222ea9cSLevend Sayar 		stats->opackets += txq->stats.packets;
354c222ea9cSLevend Sayar 		stats->obytes += txq->stats.bytes;
355c222ea9cSLevend Sayar 		stats->oerrors += txq->stats.errors;
3564f6b1dd8SJunfeng Guo 	}
3574f6b1dd8SJunfeng Guo 
3584f6b1dd8SJunfeng Guo 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
3594f6b1dd8SJunfeng Guo 		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
3604f6b1dd8SJunfeng Guo 		if (rxq == NULL)
3614f6b1dd8SJunfeng Guo 			continue;
3624f6b1dd8SJunfeng Guo 
363c222ea9cSLevend Sayar 		stats->ipackets += rxq->stats.packets;
364c222ea9cSLevend Sayar 		stats->ibytes += rxq->stats.bytes;
365c222ea9cSLevend Sayar 		stats->ierrors += rxq->stats.errors;
366c222ea9cSLevend Sayar 		stats->rx_nombuf += rxq->stats.no_mbufs;
3674f6b1dd8SJunfeng Guo 	}
3684f6b1dd8SJunfeng Guo 
3694f6b1dd8SJunfeng Guo 	return 0;
3704f6b1dd8SJunfeng Guo }
3714f6b1dd8SJunfeng Guo 
3724f6b1dd8SJunfeng Guo static int
3734f6b1dd8SJunfeng Guo gve_dev_stats_reset(struct rte_eth_dev *dev)
3744f6b1dd8SJunfeng Guo {
3754f6b1dd8SJunfeng Guo 	uint16_t i;
3764f6b1dd8SJunfeng Guo 
3774f6b1dd8SJunfeng Guo 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
3784f6b1dd8SJunfeng Guo 		struct gve_tx_queue *txq = dev->data->tx_queues[i];
3794f6b1dd8SJunfeng Guo 		if (txq == NULL)
3804f6b1dd8SJunfeng Guo 			continue;
3814f6b1dd8SJunfeng Guo 
382c222ea9cSLevend Sayar 		memset(&txq->stats, 0, sizeof(txq->stats));
3834f6b1dd8SJunfeng Guo 	}
3844f6b1dd8SJunfeng Guo 
3854f6b1dd8SJunfeng Guo 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
3864f6b1dd8SJunfeng Guo 		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
3874f6b1dd8SJunfeng Guo 		if (rxq == NULL)
3884f6b1dd8SJunfeng Guo 			continue;
3894f6b1dd8SJunfeng Guo 
390c222ea9cSLevend Sayar 		memset(&rxq->stats, 0, sizeof(rxq->stats));
3914f6b1dd8SJunfeng Guo 	}
3924f6b1dd8SJunfeng Guo 
3934f6b1dd8SJunfeng Guo 	return 0;
3944f6b1dd8SJunfeng Guo }
3954f6b1dd8SJunfeng Guo 
3964f6b1dd8SJunfeng Guo static int
397f19c864eSJunfeng Guo gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
398f19c864eSJunfeng Guo {
399f19c864eSJunfeng Guo 	struct gve_priv *priv = dev->data->dev_private;
400f19c864eSJunfeng Guo 	int err;
401f19c864eSJunfeng Guo 
402f19c864eSJunfeng Guo 	if (mtu < RTE_ETHER_MIN_MTU || mtu > priv->max_mtu) {
403f19c864eSJunfeng Guo 		PMD_DRV_LOG(ERR, "MIN MTU is %u, MAX MTU is %u",
404f19c864eSJunfeng Guo 			    RTE_ETHER_MIN_MTU, priv->max_mtu);
405f19c864eSJunfeng Guo 		return -EINVAL;
406f19c864eSJunfeng Guo 	}
407f19c864eSJunfeng Guo 
408f19c864eSJunfeng Guo 	/* mtu setting is forbidden if port is start */
409f19c864eSJunfeng Guo 	if (dev->data->dev_started) {
410f19c864eSJunfeng Guo 		PMD_DRV_LOG(ERR, "Port must be stopped before configuration");
411f19c864eSJunfeng Guo 		return -EBUSY;
412f19c864eSJunfeng Guo 	}
413f19c864eSJunfeng Guo 
414f19c864eSJunfeng Guo 	err = gve_adminq_set_mtu(priv, mtu);
415f19c864eSJunfeng Guo 	if (err) {
416f19c864eSJunfeng Guo 		PMD_DRV_LOG(ERR, "Failed to set mtu as %u err = %d", mtu, err);
417f19c864eSJunfeng Guo 		return err;
418f19c864eSJunfeng Guo 	}
419f19c864eSJunfeng Guo 
420f19c864eSJunfeng Guo 	return 0;
421f19c864eSJunfeng Guo }
422f19c864eSJunfeng Guo 
/* Helpers mapping an xstat display name to the byte offset of the
 * matching uint64_t counter inside the per-queue stats structs.
 */
#define TX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_tx_stats, x)
#define RX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_rx_stats, x)

/* Per-Tx-queue extended statistics. */
static const struct gve_xstats_name_offset tx_xstats_name_offset[] = {
	{ "packets", TX_QUEUE_STATS_OFFSET(packets) },
	{ "bytes",   TX_QUEUE_STATS_OFFSET(bytes) },
	{ "errors",  TX_QUEUE_STATS_OFFSET(errors) },
};

/* Per-Rx-queue extended statistics. */
static const struct gve_xstats_name_offset rx_xstats_name_offset[] = {
	{ "packets",                RX_QUEUE_STATS_OFFSET(packets) },
	{ "bytes",                  RX_QUEUE_STATS_OFFSET(bytes) },
	{ "errors",                 RX_QUEUE_STATS_OFFSET(errors) },
	{ "mbuf_alloc_errors",      RX_QUEUE_STATS_OFFSET(no_mbufs) },
	{ "mbuf_alloc_errors_bulk", RX_QUEUE_STATS_OFFSET(no_mbufs_bulk) },
};
439c222ea9cSLevend Sayar 
440c222ea9cSLevend Sayar static int
441c222ea9cSLevend Sayar gve_xstats_count(struct rte_eth_dev *dev)
442c222ea9cSLevend Sayar {
443c222ea9cSLevend Sayar 	uint16_t i, count = 0;
444c222ea9cSLevend Sayar 
445c222ea9cSLevend Sayar 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
446c222ea9cSLevend Sayar 		if (dev->data->tx_queues[i])
447c222ea9cSLevend Sayar 			count += RTE_DIM(tx_xstats_name_offset);
448c222ea9cSLevend Sayar 	}
449c222ea9cSLevend Sayar 
450c222ea9cSLevend Sayar 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
451c222ea9cSLevend Sayar 		if (dev->data->rx_queues[i])
452c222ea9cSLevend Sayar 			count += RTE_DIM(rx_xstats_name_offset);
453c222ea9cSLevend Sayar 	}
454c222ea9cSLevend Sayar 
455c222ea9cSLevend Sayar 	return count;
456c222ea9cSLevend Sayar }
457c222ea9cSLevend Sayar 
/* ethdev .xstats_get: fill @xstats with every per-queue counter value,
 * Tx queues first then Rx, in the same order as gve_xstats_get_names().
 * If @xstats is NULL or @size is too small, returns the required count
 * without writing anything (standard ethdev xstats contract).
 */
static int
gve_xstats_get(struct rte_eth_dev *dev,
			struct rte_eth_xstat *xstats,
			unsigned int size)
{
	uint16_t i, j, count = gve_xstats_count(dev);
	const char *stats;

	if (xstats == NULL || size < count)
		return count;

	count = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct gve_tx_queue *txq = dev->data->tx_queues[i];
		if (txq == NULL)
			continue;

		/* Read each counter via its byte offset into the stats struct. */
		stats = (const char *)&txq->stats;
		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++, count++) {
			xstats[count].id = count;
			xstats[count].value = *(const uint64_t *)
				(stats + tx_xstats_name_offset[j].offset);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct gve_rx_queue *rxq = dev->data->rx_queues[i];
		if (rxq == NULL)
			continue;

		stats = (const char *)&rxq->stats;
		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++, count++) {
			xstats[count].id = count;
			xstats[count].value = *(const uint64_t *)
				(stats + rx_xstats_name_offset[j].offset);
		}
	}

	return count;
}
499c222ea9cSLevend Sayar 
/* ethdev .xstats_get_names: fill @xstats_names with "tx_q%u_<name>" /
 * "rx_q%u_<name>" labels in the same order as gve_xstats_get().  If the
 * buffer is NULL or too small, returns the required count instead.
 */
static int
gve_xstats_get_names(struct rte_eth_dev *dev,
			struct rte_eth_xstat_name *xstats_names,
			unsigned int size)
{
	uint16_t i, j, count = gve_xstats_count(dev);

	if (xstats_names == NULL || size < count)
		return count;

	count = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;

		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "tx_q%u_%s", i, tx_xstats_name_offset[j].name);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;

		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "rx_q%u_%s", i, rx_xstats_name_offset[j].name);
	}

	return count;
}
534c222ea9cSLevend Sayar 
/* ethdev ops for devices using the GQI queue format. */
static const struct eth_dev_ops gve_eth_dev_ops = {
	.dev_configure        = gve_dev_configure,
	.dev_start            = gve_dev_start,
	.dev_stop             = gve_dev_stop,
	.dev_close            = gve_dev_close,
	.dev_infos_get        = gve_dev_info_get,
	.rx_queue_setup       = gve_rx_queue_setup,
	.tx_queue_setup       = gve_tx_queue_setup,
	.rx_queue_release     = gve_rx_queue_release,
	.tx_queue_release     = gve_tx_queue_release,
	.rx_queue_start       = gve_rx_queue_start,
	.tx_queue_start       = gve_tx_queue_start,
	.rx_queue_stop        = gve_rx_queue_stop,
	.tx_queue_stop        = gve_tx_queue_stop,
	.link_update          = gve_link_update,
	.stats_get            = gve_dev_stats_get,
	.stats_reset          = gve_dev_stats_reset,
	.mtu_set              = gve_dev_mtu_set,
	.xstats_get           = gve_xstats_get,
	.xstats_get_names     = gve_xstats_get_names,
};
556457967cdSJunfeng Guo 
/* ethdev ops for devices using the DQO queue format; only the queue
 * setup/release/start/stop callbacks differ from the GQI table.
 */
static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
	.dev_configure        = gve_dev_configure,
	.dev_start            = gve_dev_start,
	.dev_stop             = gve_dev_stop,
	.dev_close            = gve_dev_close,
	.dev_infos_get        = gve_dev_info_get,
	.rx_queue_setup       = gve_rx_queue_setup_dqo,
	.tx_queue_setup       = gve_tx_queue_setup_dqo,
	.rx_queue_release     = gve_rx_queue_release_dqo,
	.tx_queue_release     = gve_tx_queue_release_dqo,
	.rx_queue_start       = gve_rx_queue_start_dqo,
	.tx_queue_start       = gve_tx_queue_start_dqo,
	.rx_queue_stop        = gve_rx_queue_stop_dqo,
	.tx_queue_stop        = gve_tx_queue_stop_dqo,
	.link_update          = gve_link_update,
	.stats_get            = gve_dev_stats_get,
	.stats_reset          = gve_dev_stats_reset,
	.mtu_set              = gve_dev_mtu_set,
	.xstats_get           = gve_xstats_get,
	.xstats_get_names     = gve_xstats_get_names,
};
578a14d391cSJunfeng Guo 
/* Release the event counter array memzone and clear the pointer. */
static void
gve_free_counter_array(struct gve_priv *priv)
{
	rte_memzone_free(priv->cnt_array_mz);
	priv->cnt_array = NULL;
}
585457967cdSJunfeng Guo 
/* Release the interrupt doorbell memzone and clear the pointer. */
static void
gve_free_irq_db(struct gve_priv *priv)
{
	rte_memzone_free(priv->irq_dbs_mz);
	priv->irq_dbs = NULL;
}
592457967cdSJunfeng Guo 
/* Undo gve_setup_device_resources(): tell the device its resources are
 * going away (only if they were successfully configured), then free the
 * counter array and IRQ doorbells and clear the resources-ok flag.
 */
static void
gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err)
			PMD_DRV_LOG(ERR, "Could not deconfigure device resources: err=%d", err);
	}
	gve_free_counter_array(priv);
	gve_free_irq_db(priv);
	gve_clear_device_resources_ok(priv);
}
608457967cdSJunfeng Guo 
/* Return the number of MSI-X vectors the PCI device supports, derived
 * from the table-size field of the MSI-X message control word (which is
 * encoded as N-1, hence the +1).  Returns 0 when the capability is
 * absent or the config-space read fails.
 */
static int
pci_dev_msix_vec_count(struct rte_pci_device *pdev)
{
	off_t msix_pos = rte_pci_find_capability(pdev, RTE_PCI_CAP_ID_MSIX);
	uint16_t control;

	if (msix_pos > 0 && rte_pci_read_config(pdev, &control, sizeof(control),
			msix_pos + RTE_PCI_MSIX_FLAGS) == sizeof(control))
		return (control & RTE_PCI_MSIX_FLAGS_QSIZE) + 1;

	return 0;
}
621457967cdSJunfeng Guo 
/* Allocate the DMA-able resources shared with the device — the event
 * counter array and the per-notify-block IRQ doorbells — and register
 * their IOVAs with the device via adminq.  On failure all allocations
 * made so far are released.  Returns 0 or a negative errno.
 */
static int
gve_setup_device_resources(struct gve_priv *priv)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int err = 0;

	/* Event counter array, page-aligned and IOVA-contiguous for DMA. */
	snprintf(z_name, sizeof(z_name), "gve_%s_cnt_arr", priv->pci_dev->device.name);
	mz = rte_memzone_reserve_aligned(z_name,
					 priv->num_event_counters * sizeof(*priv->cnt_array),
					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
					 PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Could not alloc memzone for count array");
		return -ENOMEM;
	}
	priv->cnt_array = (rte_be32_t *)mz->addr;
	priv->cnt_array_mz = mz;

	/* IRQ doorbell block, same DMA constraints. */
	snprintf(z_name, sizeof(z_name), "gve_%s_irqmz", priv->pci_dev->device.name);
	mz = rte_memzone_reserve_aligned(z_name,
					 sizeof(*priv->irq_dbs) * (priv->num_ntfy_blks),
					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
					 PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Could not alloc memzone for irq_dbs");
		err = -ENOMEM;
		goto free_cnt_array;
	}
	priv->irq_dbs = (struct gve_irq_db *)mz->addr;
	priv->irq_dbs_mz = mz;

	err = gve_adminq_configure_device_resources(priv,
						    priv->cnt_array_mz->iova,
						    priv->num_event_counters,
						    priv->irq_dbs_mz->iova,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		PMD_DRV_LOG(ERR, "Could not config device resources: err=%d", err);
		goto free_irq_dbs;
	}
	return 0;

free_irq_dbs:
	gve_free_irq_db(priv);
free_cnt_array:
	gve_free_counter_array(priv);

	return err;
}
672457967cdSJunfeng Guo 
673457967cdSJunfeng Guo static int
674457967cdSJunfeng Guo gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
675457967cdSJunfeng Guo {
6764bec2d0bSJunfeng Guo 	uint16_t pages;
677457967cdSJunfeng Guo 	int num_ntfy;
6784bec2d0bSJunfeng Guo 	uint32_t i;
679457967cdSJunfeng Guo 	int err;
680457967cdSJunfeng Guo 
681457967cdSJunfeng Guo 	/* Set up the adminq */
682457967cdSJunfeng Guo 	err = gve_adminq_alloc(priv);
683457967cdSJunfeng Guo 	if (err) {
684457967cdSJunfeng Guo 		PMD_DRV_LOG(ERR, "Failed to alloc admin queue: err=%d", err);
685457967cdSJunfeng Guo 		return err;
686457967cdSJunfeng Guo 	}
687748d0e7fSRushil Gupta 	err = gve_verify_driver_compatibility(priv);
688748d0e7fSRushil Gupta 	if (err) {
689748d0e7fSRushil Gupta 		PMD_DRV_LOG(ERR, "Could not verify driver compatibility: err=%d", err);
690748d0e7fSRushil Gupta 		goto free_adminq;
691748d0e7fSRushil Gupta 	}
692457967cdSJunfeng Guo 
693457967cdSJunfeng Guo 	if (skip_describe_device)
694457967cdSJunfeng Guo 		goto setup_device;
695457967cdSJunfeng Guo 
696457967cdSJunfeng Guo 	/* Get the initial information we need from the device */
697457967cdSJunfeng Guo 	err = gve_adminq_describe_device(priv);
698457967cdSJunfeng Guo 	if (err) {
699457967cdSJunfeng Guo 		PMD_DRV_LOG(ERR, "Could not get device information: err=%d", err);
700457967cdSJunfeng Guo 		goto free_adminq;
701457967cdSJunfeng Guo 	}
702457967cdSJunfeng Guo 
703457967cdSJunfeng Guo 	num_ntfy = pci_dev_msix_vec_count(priv->pci_dev);
704457967cdSJunfeng Guo 	if (num_ntfy <= 0) {
705457967cdSJunfeng Guo 		PMD_DRV_LOG(ERR, "Could not count MSI-x vectors");
706457967cdSJunfeng Guo 		err = -EIO;
707457967cdSJunfeng Guo 		goto free_adminq;
708457967cdSJunfeng Guo 	} else if (num_ntfy < GVE_MIN_MSIX) {
709457967cdSJunfeng Guo 		PMD_DRV_LOG(ERR, "GVE needs at least %d MSI-x vectors, but only has %d",
710457967cdSJunfeng Guo 			    GVE_MIN_MSIX, num_ntfy);
711457967cdSJunfeng Guo 		err = -EINVAL;
712457967cdSJunfeng Guo 		goto free_adminq;
713457967cdSJunfeng Guo 	}
714457967cdSJunfeng Guo 
715457967cdSJunfeng Guo 	priv->num_registered_pages = 0;
716457967cdSJunfeng Guo 
717457967cdSJunfeng Guo 	/* gvnic has one Notification Block per MSI-x vector, except for the
718457967cdSJunfeng Guo 	 * management vector
719457967cdSJunfeng Guo 	 */
720457967cdSJunfeng Guo 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
721457967cdSJunfeng Guo 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
722457967cdSJunfeng Guo 
723457967cdSJunfeng Guo 	priv->max_nb_txq = RTE_MIN(priv->max_nb_txq, priv->num_ntfy_blks / 2);
724457967cdSJunfeng Guo 	priv->max_nb_rxq = RTE_MIN(priv->max_nb_rxq, priv->num_ntfy_blks / 2);
725457967cdSJunfeng Guo 
726457967cdSJunfeng Guo 	if (priv->default_num_queues > 0) {
727457967cdSJunfeng Guo 		priv->max_nb_txq = RTE_MIN(priv->default_num_queues, priv->max_nb_txq);
728457967cdSJunfeng Guo 		priv->max_nb_rxq = RTE_MIN(priv->default_num_queues, priv->max_nb_rxq);
729457967cdSJunfeng Guo 	}
730457967cdSJunfeng Guo 
731457967cdSJunfeng Guo 	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
732457967cdSJunfeng Guo 		    priv->max_nb_txq, priv->max_nb_rxq);
733457967cdSJunfeng Guo 
7344bec2d0bSJunfeng Guo 	/* In GQI_QPL queue format:
7354bec2d0bSJunfeng Guo 	 * Allocate queue page lists according to max queue number
7364bec2d0bSJunfeng Guo 	 * tx qpl id should start from 0 while rx qpl id should start
7374bec2d0bSJunfeng Guo 	 * from priv->max_nb_txq
7384bec2d0bSJunfeng Guo 	 */
7394bec2d0bSJunfeng Guo 	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
7404bec2d0bSJunfeng Guo 		priv->qpl = rte_zmalloc("gve_qpl",
7414bec2d0bSJunfeng Guo 					(priv->max_nb_txq + priv->max_nb_rxq) *
7424bec2d0bSJunfeng Guo 					sizeof(struct gve_queue_page_list), 0);
7434bec2d0bSJunfeng Guo 		if (priv->qpl == NULL) {
7444bec2d0bSJunfeng Guo 			PMD_DRV_LOG(ERR, "Failed to alloc qpl.");
7454bec2d0bSJunfeng Guo 			err = -ENOMEM;
7464bec2d0bSJunfeng Guo 			goto free_adminq;
7474bec2d0bSJunfeng Guo 		}
7484bec2d0bSJunfeng Guo 
7494bec2d0bSJunfeng Guo 		for (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {
7504bec2d0bSJunfeng Guo 			if (i < priv->max_nb_txq)
7514bec2d0bSJunfeng Guo 				pages = priv->tx_pages_per_qpl;
7524bec2d0bSJunfeng Guo 			else
7534bec2d0bSJunfeng Guo 				pages = priv->rx_data_slot_cnt;
7544bec2d0bSJunfeng Guo 			err = gve_alloc_queue_page_list(priv, i, pages);
7554bec2d0bSJunfeng Guo 			if (err != 0) {
7564bec2d0bSJunfeng Guo 				PMD_DRV_LOG(ERR, "Failed to alloc qpl %u.", i);
7574bec2d0bSJunfeng Guo 				goto err_qpl;
7584bec2d0bSJunfeng Guo 			}
7594bec2d0bSJunfeng Guo 		}
7604bec2d0bSJunfeng Guo 	}
7614bec2d0bSJunfeng Guo 
762457967cdSJunfeng Guo setup_device:
763457967cdSJunfeng Guo 	err = gve_setup_device_resources(priv);
764457967cdSJunfeng Guo 	if (!err)
765457967cdSJunfeng Guo 		return 0;
7664bec2d0bSJunfeng Guo err_qpl:
7674bec2d0bSJunfeng Guo 	gve_free_qpls(priv);
768457967cdSJunfeng Guo free_adminq:
769457967cdSJunfeng Guo 	gve_adminq_free(priv);
770457967cdSJunfeng Guo 	return err;
771457967cdSJunfeng Guo }
772457967cdSJunfeng Guo 
773457967cdSJunfeng Guo static void
774457967cdSJunfeng Guo gve_teardown_priv_resources(struct gve_priv *priv)
775457967cdSJunfeng Guo {
776457967cdSJunfeng Guo 	gve_teardown_device_resources(priv);
777457967cdSJunfeng Guo 	gve_adminq_free(priv);
778457967cdSJunfeng Guo }
779457967cdSJunfeng Guo 
780457967cdSJunfeng Guo static int
781457967cdSJunfeng Guo gve_dev_init(struct rte_eth_dev *eth_dev)
782457967cdSJunfeng Guo {
783457967cdSJunfeng Guo 	struct gve_priv *priv = eth_dev->data->dev_private;
784457967cdSJunfeng Guo 	int max_tx_queues, max_rx_queues;
785457967cdSJunfeng Guo 	struct rte_pci_device *pci_dev;
786457967cdSJunfeng Guo 	struct gve_registers *reg_bar;
787457967cdSJunfeng Guo 	rte_be32_t *db_bar;
788457967cdSJunfeng Guo 	int err;
789457967cdSJunfeng Guo 
790457967cdSJunfeng Guo 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
791457967cdSJunfeng Guo 		return 0;
792457967cdSJunfeng Guo 
793457967cdSJunfeng Guo 	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
794457967cdSJunfeng Guo 
795457967cdSJunfeng Guo 	reg_bar = pci_dev->mem_resource[GVE_REG_BAR].addr;
796457967cdSJunfeng Guo 	if (!reg_bar) {
797457967cdSJunfeng Guo 		PMD_DRV_LOG(ERR, "Failed to map pci bar!");
798457967cdSJunfeng Guo 		return -ENOMEM;
799457967cdSJunfeng Guo 	}
800457967cdSJunfeng Guo 
801457967cdSJunfeng Guo 	db_bar = pci_dev->mem_resource[GVE_DB_BAR].addr;
802457967cdSJunfeng Guo 	if (!db_bar) {
803457967cdSJunfeng Guo 		PMD_DRV_LOG(ERR, "Failed to map doorbell bar!");
804457967cdSJunfeng Guo 		return -ENOMEM;
805457967cdSJunfeng Guo 	}
806457967cdSJunfeng Guo 
807457967cdSJunfeng Guo 	gve_write_version(&reg_bar->driver_version);
808457967cdSJunfeng Guo 	/* Get max queues to alloc etherdev */
809457967cdSJunfeng Guo 	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
810457967cdSJunfeng Guo 	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
811457967cdSJunfeng Guo 
812457967cdSJunfeng Guo 	priv->reg_bar0 = reg_bar;
813457967cdSJunfeng Guo 	priv->db_bar2 = db_bar;
814457967cdSJunfeng Guo 	priv->pci_dev = pci_dev;
815457967cdSJunfeng Guo 	priv->state_flags = 0x0;
816457967cdSJunfeng Guo 
817457967cdSJunfeng Guo 	priv->max_nb_txq = max_tx_queues;
818457967cdSJunfeng Guo 	priv->max_nb_rxq = max_rx_queues;
819457967cdSJunfeng Guo 
820457967cdSJunfeng Guo 	err = gve_init_priv(priv, false);
821457967cdSJunfeng Guo 	if (err)
822457967cdSJunfeng Guo 		return err;
823457967cdSJunfeng Guo 
824a46583cfSJunfeng Guo 	if (gve_is_gqi(priv)) {
825a14d391cSJunfeng Guo 		eth_dev->dev_ops = &gve_eth_dev_ops;
826b044845bSJunfeng Guo 		gve_set_rx_function(eth_dev);
827b044845bSJunfeng Guo 		gve_set_tx_function(eth_dev);
828a46583cfSJunfeng Guo 	} else {
829a14d391cSJunfeng Guo 		eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
830b044845bSJunfeng Guo 		gve_set_rx_function_dqo(eth_dev);
831b044845bSJunfeng Guo 		gve_set_tx_function_dqo(eth_dev);
832a46583cfSJunfeng Guo 	}
833a46583cfSJunfeng Guo 
834457967cdSJunfeng Guo 	eth_dev->data->mac_addrs = &priv->dev_addr;
835457967cdSJunfeng Guo 
836457967cdSJunfeng Guo 	return 0;
837457967cdSJunfeng Guo }
838457967cdSJunfeng Guo 
839457967cdSJunfeng Guo static int
840457967cdSJunfeng Guo gve_dev_uninit(struct rte_eth_dev *eth_dev)
841457967cdSJunfeng Guo {
842457967cdSJunfeng Guo 	struct gve_priv *priv = eth_dev->data->dev_private;
843457967cdSJunfeng Guo 
844457967cdSJunfeng Guo 	gve_teardown_priv_resources(priv);
845457967cdSJunfeng Guo 
846457967cdSJunfeng Guo 	eth_dev->data->mac_addrs = NULL;
847457967cdSJunfeng Guo 
848457967cdSJunfeng Guo 	return 0;
849457967cdSJunfeng Guo }
850457967cdSJunfeng Guo 
851457967cdSJunfeng Guo static int
852457967cdSJunfeng Guo gve_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
853457967cdSJunfeng Guo 	      struct rte_pci_device *pci_dev)
854457967cdSJunfeng Guo {
855457967cdSJunfeng Guo 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct gve_priv), gve_dev_init);
856457967cdSJunfeng Guo }
857457967cdSJunfeng Guo 
858457967cdSJunfeng Guo static int
859457967cdSJunfeng Guo gve_pci_remove(struct rte_pci_device *pci_dev)
860457967cdSJunfeng Guo {
861457967cdSJunfeng Guo 	return rte_eth_dev_pci_generic_remove(pci_dev, gve_dev_uninit);
862457967cdSJunfeng Guo }
863457967cdSJunfeng Guo 
864457967cdSJunfeng Guo static const struct rte_pci_id pci_id_gve_map[] = {
865457967cdSJunfeng Guo 	{ RTE_PCI_DEVICE(GOOGLE_VENDOR_ID, GVE_DEV_ID) },
866457967cdSJunfeng Guo 	{ .device_id = 0 },
867457967cdSJunfeng Guo };
868457967cdSJunfeng Guo 
869457967cdSJunfeng Guo static struct rte_pci_driver rte_gve_pmd = {
870457967cdSJunfeng Guo 	.id_table = pci_id_gve_map,
871457967cdSJunfeng Guo 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
872457967cdSJunfeng Guo 	.probe = gve_pci_probe,
873457967cdSJunfeng Guo 	.remove = gve_pci_remove,
874457967cdSJunfeng Guo };
875457967cdSJunfeng Guo 
876457967cdSJunfeng Guo RTE_PMD_REGISTER_PCI(net_gve, rte_gve_pmd);
877457967cdSJunfeng Guo RTE_PMD_REGISTER_PCI_TABLE(net_gve, pci_id_gve_map);
878457967cdSJunfeng Guo RTE_PMD_REGISTER_KMOD_DEP(net_gve, "* igb_uio | vfio-pci");
879457967cdSJunfeng Guo RTE_LOG_REGISTER_SUFFIX(gve_logtype_driver, driver, NOTICE);
880