xref: /dpdk/drivers/net/gve/gve_ethdev.c (revision 7174c8891dcfb2a148e03c5fe2f200742b2dadbe)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2022-2023 Intel Corporation
3  * Copyright(C) 2023 Google LLC
4  */
5 
6 #include "gve_ethdev.h"
7 #include "base/gve_adminq.h"
8 #include "base/gve_register.h"
9 #include "base/gve_osdep.h"
10 #include "gve_version.h"
11 #include "rte_ether.h"
12 #include "gve_rss.h"
13 
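/* Report the driver version to the device register byte-by-byte, terminated by a newline. */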
14 static void
15 gve_write_version(uint8_t *driver_version_register)
16 {
17 	const char *c = gve_version_string();
18 	while (*c) {
19 		writeb(*c, driver_version_register);
20 		c++;
21 	}
22 	writeb('\n', driver_version_register);
23 }
24 
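/* Allocate a queue page list backed by a single IOVA-contiguous memzone and
 * record the bus address of each page.
 */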
25 static struct gve_queue_page_list *
26 gve_alloc_queue_page_list(const char *name, uint32_t num_pages)
27 {
28 	struct gve_queue_page_list *qpl;
29 	const struct rte_memzone *mz;
30 	dma_addr_t page_bus;
31 	uint32_t i;
32 
33 	qpl = rte_zmalloc("qpl struct", sizeof(struct gve_queue_page_list), 0);
34 	if (!qpl)
35 		return NULL;
36 
37 	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
38 					 rte_socket_id(),
39 					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
40 	if (mz == NULL) {
41 		PMD_DRV_LOG(ERR, "Failed to alloc %s.", name);
42 		goto free_qpl_struct;
43 	}
44 	qpl->page_buses = rte_zmalloc("qpl page buses",
45 		num_pages * sizeof(dma_addr_t), 0);
46 	if (qpl->page_buses == NULL) {
47 		PMD_DRV_LOG(ERR, "Failed to alloc qpl page buses");
48 		goto free_qpl_memzone;
49 	}
50 	page_bus = mz->iova;
51 	for (i = 0; i < num_pages; i++) {
52 		qpl->page_buses[i] = page_bus;
53 		page_bus += PAGE_SIZE;
54 	}
55 	qpl->mz = mz;
56 	qpl->num_entries = num_pages;
57 	return qpl;
58 
59 free_qpl_memzone:
60 	rte_memzone_free(qpl->mz);
61 free_qpl_struct:
62 	rte_free(qpl);
63 	return NULL;
64 }
65 
66 static void
67 gve_free_queue_page_list(struct gve_queue_page_list *qpl)
68 {
69 	if (qpl->mz) {
70 		rte_memzone_free(qpl->mz);
71 		qpl->mz = NULL;
72 	}
73 	if (qpl->page_buses) {
74 		rte_free(qpl->page_buses);
75 		qpl->page_buses = NULL;
76 	}
77 	rte_free(qpl);
78 }
79 
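/* Allocate a QPL for the given queue and register its pages with the device. */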
80 struct gve_queue_page_list *
81 gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
82 	uint32_t num_pages)
83 {
84 	const char *queue_type_string = is_rx ? "rx" : "tx";
85 	char qpl_name[RTE_MEMZONE_NAMESIZE];
86 	struct gve_queue_page_list *qpl;
87 	int err;
88 
89 	/* Allocate a new QPL. */
90 	snprintf(qpl_name, sizeof(qpl_name), "gve_%s_%s_qpl%d",
91 		priv->pci_dev->device.name, queue_type_string, queue_id);
92 	qpl = gve_alloc_queue_page_list(qpl_name, num_pages);
93 	if (!qpl) {
94 		PMD_DRV_LOG(ERR,
95 			    "Failed to alloc %s qpl for queue %hu.",
96 			    queue_type_string, queue_id);
97 		return NULL;
98 	}
99 
100 	/* Assign the QPL an ID. */
101 	qpl->id = queue_id;
102 	if (is_rx)
103 		qpl->id += priv->max_nb_txq;
104 
105 	/* Validate page registration limit and register QPLs. */
106 	if (priv->num_registered_pages + qpl->num_entries >
107 	    priv->max_registered_pages) {
108 		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
109 			    priv->num_registered_pages + qpl->num_entries,
110 			    priv->max_registered_pages);
111 		goto cleanup_qpl;
112 	}
113 	err = gve_adminq_register_page_list(priv, qpl);
114 	if (err) {
115 		PMD_DRV_LOG(ERR,
116 			    "Failed to register %s qpl for queue %hu.",
117 			    queue_type_string, queue_id);
118 		goto cleanup_qpl;
119 	}
120 	priv->num_registered_pages += qpl->num_entries;
121 	return qpl;
122 
123 cleanup_qpl:
124 	gve_free_queue_page_list(qpl);
125 	return NULL;
126 }
127 
128 int
129 gve_teardown_queue_page_list(struct gve_priv *priv,
130 	struct gve_queue_page_list *qpl)
131 {
132 	int err = gve_adminq_unregister_page_list(priv, qpl->id);
133 	if (err) {
134 		PMD_DRV_LOG(CRIT, "Unable to unregister qpl %d!", qpl->id);
135 		return err;
136 	}
137 	priv->num_registered_pages -= qpl->num_entries;
138 	gve_free_queue_page_list(qpl);
139 	return 0;
140 }
141 
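/* Apply the port configuration: record RSS/LRO settings and, if RSS was
 * already configured, regenerate the redirection table for the new queue counts.
 */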
142 static int
143 gve_dev_configure(struct rte_eth_dev *dev)
144 {
145 	struct gve_priv *priv = dev->data->dev_private;
146 
147 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
148 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
149 		priv->rss_config.alg = GVE_RSS_HASH_TOEPLITZ;
150 	}
151 
152 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
153 		priv->enable_rsc = 1;
154 
155 	/* Reset RSS RETA in case number of queues changed. */
156 	if (priv->rss_config.indir) {
157 		struct gve_rss_config update_reta_config;
158 		gve_init_rss_config_from_priv(priv, &update_reta_config);
159 		gve_generate_rss_reta(dev, &update_reta_config);
160 
161 		int err = gve_adminq_configure_rss(priv, &update_reta_config);
162 		if (err)
163 			PMD_DRV_LOG(ERR,
164 				"Could not reconfigure RSS redirection table.");
165 		else
166 			gve_update_priv_rss_config(priv, &update_reta_config);
167 
168 		gve_free_rss_config(&update_reta_config);
169 		return err;
170 	}
171 
172 	return 0;
173 }
174 
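/* Report the link as down until the port is started; afterwards query the
 * device for the current link speed.
 */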
175 static int
176 gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
177 {
178 	struct gve_priv *priv = dev->data->dev_private;
179 	struct rte_eth_link link;
180 	int err;
181 
182 	memset(&link, 0, sizeof(link));
183 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
184 	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
185 
186 	if (!dev->data->dev_started) {
187 		link.link_status = RTE_ETH_LINK_DOWN;
188 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
189 	} else {
190 		link.link_status = RTE_ETH_LINK_UP;
191 		PMD_DRV_LOG(DEBUG, "Get link status from hw");
192 		err = gve_adminq_report_link_speed(priv);
193 		if (err) {
194 			PMD_DRV_LOG(ERR, "Failed to get link speed.");
195 			priv->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
196 		}
197 		link.link_speed = priv->link_speed;
198 	}
199 
200 	return rte_eth_linkstatus_set(dev, &link);
201 }
202 
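/* Reserve the DMA region shared with the device for stats reporting and
 * record the index range of the NIC-written stats.
 */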
203 static int
204 gve_alloc_stats_report(struct gve_priv *priv,
205 		uint16_t nb_tx_queues, uint16_t nb_rx_queues)
206 {
207 	char z_name[RTE_MEMZONE_NAMESIZE];
208 	int tx_stats_cnt;
209 	int rx_stats_cnt;
210 
211 	tx_stats_cnt = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
212 		nb_tx_queues;
213 	rx_stats_cnt = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
214 		nb_rx_queues;
215 	priv->stats_report_len = sizeof(struct gve_stats_report) +
216 		sizeof(struct stats) * (tx_stats_cnt + rx_stats_cnt);
217 
218 	snprintf(z_name, sizeof(z_name), "gve_stats_report_%s",
219 			priv->pci_dev->device.name);
220 	priv->stats_report_mem = rte_memzone_reserve_aligned(z_name,
221 			priv->stats_report_len,
222 			rte_socket_id(),
223 			RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
224 
225 	if (!priv->stats_report_mem)
226 		return -ENOMEM;
227 
228 	/* Skip the stats written by the gve driver; only NIC-written stats follow. */
229 	priv->stats_start_idx = (GVE_TX_STATS_REPORT_NUM * nb_tx_queues) +
230 		(GVE_RX_STATS_REPORT_NUM * nb_rx_queues);
231 	priv->stats_end_idx = priv->stats_start_idx +
232 		(NIC_TX_STATS_REPORT_NUM * nb_tx_queues) +
233 		(NIC_RX_STATS_REPORT_NUM * nb_rx_queues) - 1;
234 
235 	return 0;
236 }
237 
238 static void
239 gve_free_stats_report(struct rte_eth_dev *dev)
240 {
241 	struct gve_priv *priv = dev->data->dev_private;
242 	rte_memzone_free(priv->stats_report_mem);
243 	priv->stats_report_mem = NULL;
244 }
245 
246 /* Read Rx NIC stats from shared region */
247 static void
248 gve_get_imissed_from_nic(struct rte_eth_dev *dev)
249 {
250 	struct gve_stats_report *stats_report;
251 	struct gve_rx_queue *rxq;
252 	struct gve_priv *priv;
253 	struct stats stat;
254 	int queue_id;
255 	int stat_id;
256 	int i;
257 
258 	priv = dev->data->dev_private;
259 	if (!priv->stats_report_mem)
260 		return;
261 	stats_report = (struct gve_stats_report *)
262 		priv->stats_report_mem->addr;
263 	for (i = priv->stats_start_idx; i <= priv->stats_end_idx; i++) {
264 		stat = stats_report->stats[i];
265 		queue_id = be32_to_cpu(stat.queue_id);
266 		rxq = dev->data->rx_queues[queue_id];
267 		if (rxq == NULL)
268 			continue;
269 		stat_id = be32_to_cpu(stat.stat_name);
270 		/* Update imissed. */
271 		if (stat_id == RX_NO_BUFFERS_POSTED)
272 			rxq->stats.imissed = be64_to_cpu(stat.value);
273 	}
274 }
275 
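/* Create and start all Tx queues, then all Rx queues, rolling everything back on failure. */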
276 static int
277 gve_start_queues(struct rte_eth_dev *dev)
278 {
279 	struct gve_priv *priv = dev->data->dev_private;
280 	uint16_t num_queues;
281 	uint16_t i;
282 	int ret;
283 
284 	num_queues = dev->data->nb_tx_queues;
285 	priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;
286 	ret = gve_adminq_create_tx_queues(priv, num_queues);
287 	if (ret != 0) {
288 		PMD_DRV_LOG(ERR, "Failed to create %u tx queues.", num_queues);
289 		return ret;
290 	}
291 	for (i = 0; i < num_queues; i++) {
292 		if (gve_is_gqi(priv))
293 			ret = gve_tx_queue_start(dev, i);
294 		else
295 			ret = gve_tx_queue_start_dqo(dev, i);
296 		if (ret != 0) {
297 			PMD_DRV_LOG(ERR, "Failed to start Tx queue %d", i);
298 			goto err_tx;
299 		}
300 	}
301 
302 	num_queues = dev->data->nb_rx_queues;
303 	priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;
304 	ret = gve_adminq_create_rx_queues(priv, num_queues);
305 	if (ret != 0) {
306 		PMD_DRV_LOG(ERR, "Failed to create %u rx queues.", num_queues);
307 		goto err_tx;
308 	}
309 	for (i = 0; i < num_queues; i++) {
310 		if (gve_is_gqi(priv))
311 			ret = gve_rx_queue_start(dev, i);
312 		else
313 			ret = gve_rx_queue_start_dqo(dev, i);
314 		if (ret != 0) {
315 			PMD_DRV_LOG(ERR, "Failed to start Rx queue %d", i);
316 			goto err_rx;
317 		}
318 	}
319 
320 	return 0;
321 
322 err_rx:
323 	if (gve_is_gqi(priv))
324 		gve_stop_rx_queues(dev);
325 	else
326 		gve_stop_rx_queues_dqo(dev);
327 err_tx:
328 	if (gve_is_gqi(priv))
329 		gve_stop_tx_queues(dev);
330 	else
331 		gve_stop_tx_queues_dqo(dev);
332 	return ret;
333 }
334 
335 static int
336 gve_dev_start(struct rte_eth_dev *dev)
337 {
338 	struct gve_priv *priv;
339 	int ret;
340 
341 	ret = gve_start_queues(dev);
342 	if (ret != 0) {
343 		PMD_DRV_LOG(ERR, "Failed to start queues");
344 		return ret;
345 	}
346 
347 	dev->data->dev_started = 1;
348 	gve_link_update(dev, 0);
349 
350 	priv = dev->data->dev_private;
351 	/* No stats available yet for Dqo. */
352 	if (gve_is_gqi(priv)) {
353 		ret = gve_alloc_stats_report(priv,
354 				dev->data->nb_tx_queues,
355 				dev->data->nb_rx_queues);
356 		if (ret != 0) {
357 			PMD_DRV_LOG(ERR,
358 				"Failed to allocate region for stats reporting.");
359 			return ret;
360 		}
361 		ret = gve_adminq_report_stats(priv, priv->stats_report_len,
362 				priv->stats_report_mem->iova,
363 				GVE_STATS_REPORT_TIMER_PERIOD);
364 		if (ret != 0) {
365 			PMD_DRV_LOG(ERR, "gve_adminq_report_stats command failed.");
366 			return ret;
367 		}
368 	}
369 
370 	return 0;
371 }
372 
373 static int
374 gve_dev_stop(struct rte_eth_dev *dev)
375 {
376 	struct gve_priv *priv = dev->data->dev_private;
377 	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
378 
379 	if (gve_is_gqi(priv)) {
380 		gve_stop_tx_queues(dev);
381 		gve_stop_rx_queues(dev);
382 	} else {
383 		gve_stop_tx_queues_dqo(dev);
384 		gve_stop_rx_queues_dqo(dev);
385 	}
386 
387 	dev->data->dev_started = 0;
388 
389 	if (gve_is_gqi(priv))
390 		gve_free_stats_report(dev);
391 
392 	return 0;
393 }
394 
395 static int
396 gve_dev_close(struct rte_eth_dev *dev)
397 {
398 	struct gve_priv *priv = dev->data->dev_private;
399 	int err = 0;
400 	uint16_t i;
401 
402 	if (dev->data->dev_started) {
403 		err = gve_dev_stop(dev);
404 		if (err != 0)
405 			PMD_DRV_LOG(ERR, "Failed to stop dev.");
406 	}
407 
408 	if (gve_is_gqi(priv)) {
409 		for (i = 0; i < dev->data->nb_tx_queues; i++)
410 			gve_tx_queue_release(dev, i);
411 
412 		for (i = 0; i < dev->data->nb_rx_queues; i++)
413 			gve_rx_queue_release(dev, i);
414 	} else {
415 		for (i = 0; i < dev->data->nb_tx_queues; i++)
416 			gve_tx_queue_release_dqo(dev, i);
417 
418 		for (i = 0; i < dev->data->nb_rx_queues; i++)
419 			gve_rx_queue_release_dqo(dev, i);
420 	}
421 
422 	rte_free(priv->adminq);
423 
424 	dev->data->mac_addrs = NULL;
425 
426 	return err;
427 }
428 
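/* Report driver and OS version information to the device via the admin queue;
 * devices that do not support the command are tolerated.
 */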
429 static int
430 gve_verify_driver_compatibility(struct gve_priv *priv)
431 {
432 	const struct rte_memzone *driver_info_mem;
433 	struct gve_driver_info *driver_info;
434 	int err;
435 
436 	driver_info_mem = rte_memzone_reserve_aligned("verify_driver_compatibility",
437 			sizeof(struct gve_driver_info),
438 			rte_socket_id(),
439 			RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
440 
441 	if (driver_info_mem == NULL) {
442 		PMD_DRV_LOG(ERR,
443 		    "Could not alloc memzone for driver compatibility");
444 		return -ENOMEM;
445 	}
446 	driver_info = (struct gve_driver_info *)driver_info_mem->addr;
447 
448 	*driver_info = (struct gve_driver_info) {
449 		.os_type = 5, /* DPDK */
450 		.driver_major = GVE_VERSION_MAJOR,
451 		.driver_minor = GVE_VERSION_MINOR,
452 		.driver_sub = GVE_VERSION_SUB,
453 		.os_version_major = cpu_to_be32(DPDK_VERSION_MAJOR),
454 		.os_version_minor = cpu_to_be32(DPDK_VERSION_MINOR),
455 		.os_version_sub = cpu_to_be32(DPDK_VERSION_SUB),
456 		.driver_capability_flags = {
457 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
458 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
459 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
460 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
461 		},
462 	};
463 
464 	populate_driver_version_strings((char *)driver_info->os_version_str1,
465 			(char *)driver_info->os_version_str2);
466 
467 	err = gve_adminq_verify_driver_compatibility(priv,
468 		sizeof(struct gve_driver_info),
469 		(dma_addr_t)driver_info_mem->iova);
470 	/* It's ok if the device doesn't support this */
471 	if (err == -EOPNOTSUPP)
472 		err = 0;
473 
474 	rte_memzone_free(driver_info_mem);
475 	return err;
476 }
477 
478 static int
479 gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
480 {
481 	struct gve_priv *priv = dev->data->dev_private;
482 
483 	dev_info->device = dev->device;
484 	dev_info->max_mac_addrs = 1;
485 	dev_info->max_rx_queues = priv->max_nb_rxq;
486 	dev_info->max_tx_queues = priv->max_nb_txq;
487 	if (gve_is_gqi(priv)) {
488 		dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_GQI;
489 		dev_info->max_rx_bufsize = GVE_RX_MAX_BUF_SIZE_GQI;
490 	} else {
491 		dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_DQO;
492 		dev_info->max_rx_bufsize = GVE_RX_MAX_BUF_SIZE_DQO;
493 	}
494 
495 	dev_info->max_rx_pktlen = priv->max_mtu + RTE_ETHER_HDR_LEN;
496 	dev_info->max_mtu = priv->max_mtu;
497 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
498 
499 	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_RSS_HASH;
500 	dev_info->tx_offload_capa =
501 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS	|
502 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
503 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
504 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	|
505 		RTE_ETH_TX_OFFLOAD_TCP_TSO;
506 
507 	if (!gve_is_gqi(priv)) {
508 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
509 		dev_info->rx_offload_capa |=
510 				RTE_ETH_RX_OFFLOAD_IPV4_CKSUM   |
511 				RTE_ETH_RX_OFFLOAD_UDP_CKSUM	|
512 				RTE_ETH_RX_OFFLOAD_TCP_CKSUM	|
513 				RTE_ETH_RX_OFFLOAD_TCP_LRO;
514 	}
515 
516 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
517 		.rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH,
518 		.rx_drop_en = 0,
519 		.offloads = 0,
520 	};
521 
522 	dev_info->default_txconf = (struct rte_eth_txconf) {
523 		.tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH,
524 		.tx_rs_thresh = GVE_DEFAULT_TX_RS_THRESH,
525 		.offloads = 0,
526 	};
527 
528 	dev_info->default_rxportconf.ring_size = priv->default_rx_desc_cnt;
529 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
530 		.nb_max = priv->max_rx_desc_cnt,
531 		.nb_min = priv->min_rx_desc_cnt,
532 		.nb_align = 1,
533 	};
534 
535 	dev_info->default_txportconf.ring_size = priv->default_tx_desc_cnt;
536 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
537 		.nb_max = priv->max_tx_desc_cnt,
538 		.nb_min = priv->min_tx_desc_cnt,
539 		.nb_align = 1,
540 	};
541 
542 	dev_info->flow_type_rss_offloads = GVE_RTE_RSS_OFFLOAD_ALL;
543 	dev_info->hash_key_size = GVE_RSS_HASH_KEY_SIZE;
544 	dev_info->reta_size = GVE_RSS_INDIR_SIZE;
545 
546 	return 0;
547 }
548 
549 static int
550 gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
551 {
552 	uint16_t i;
553 	if (gve_is_gqi(dev->data->dev_private))
554 		gve_get_imissed_from_nic(dev);
555 
556 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
557 		struct gve_tx_queue *txq = dev->data->tx_queues[i];
558 		if (txq == NULL)
559 			continue;
560 
561 		stats->opackets += txq->stats.packets;
562 		stats->obytes += txq->stats.bytes;
563 		stats->oerrors += txq->stats.errors;
564 	}
565 
566 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
567 		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
568 		if (rxq == NULL)
569 			continue;
570 
571 		stats->ipackets += rxq->stats.packets;
572 		stats->ibytes += rxq->stats.bytes;
573 		stats->ierrors += rxq->stats.errors;
574 		stats->rx_nombuf += rxq->stats.no_mbufs;
575 		stats->imissed += rxq->stats.imissed;
576 	}
577 
578 	return 0;
579 }
580 
581 static int
582 gve_dev_stats_reset(struct rte_eth_dev *dev)
583 {
584 	uint16_t i;
585 
586 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
587 		struct gve_tx_queue *txq = dev->data->tx_queues[i];
588 		if (txq == NULL)
589 			continue;
590 
591 		memset(&txq->stats, 0, sizeof(txq->stats));
592 	}
593 
594 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
595 		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
596 		if (rxq == NULL)
597 			continue;
598 
599 		memset(&rxq->stats, 0, sizeof(rxq->stats));
600 	}
601 
602 	return 0;
603 }
604 
605 static int
606 gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
607 {
608 	struct gve_priv *priv = dev->data->dev_private;
609 	int err;
610 
611 	if (mtu < RTE_ETHER_MIN_MTU || mtu > priv->max_mtu) {
612 		PMD_DRV_LOG(ERR, "MIN MTU is %u, MAX MTU is %u",
613 			    RTE_ETHER_MIN_MTU, priv->max_mtu);
614 		return -EINVAL;
615 	}
616 
617 	/* MTU cannot be changed while the port is started. */
618 	if (dev->data->dev_started) {
619 		PMD_DRV_LOG(ERR, "Port must be stopped before configuration");
620 		return -EBUSY;
621 	}
622 
623 	err = gve_adminq_set_mtu(priv, mtu);
624 	if (err) {
625 		PMD_DRV_LOG(ERR, "Failed to set MTU to %u, err = %d", mtu, err);
626 		return err;
627 	}
628 
629 	return 0;
630 }
631 
632 #define TX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_tx_stats, x)
633 #define RX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_rx_stats, x)
634 
635 static const struct gve_xstats_name_offset tx_xstats_name_offset[] = {
636 	{ "packets", TX_QUEUE_STATS_OFFSET(packets) },
637 	{ "bytes",   TX_QUEUE_STATS_OFFSET(bytes) },
638 	{ "errors",  TX_QUEUE_STATS_OFFSET(errors) },
639 };
640 
641 static const struct gve_xstats_name_offset rx_xstats_name_offset[] = {
642 	{ "packets",                RX_QUEUE_STATS_OFFSET(packets) },
643 	{ "bytes",                  RX_QUEUE_STATS_OFFSET(bytes) },
644 	{ "errors",                 RX_QUEUE_STATS_OFFSET(errors) },
645 	{ "mbuf_alloc_errors",      RX_QUEUE_STATS_OFFSET(no_mbufs) },
646 	{ "mbuf_alloc_errors_bulk", RX_QUEUE_STATS_OFFSET(no_mbufs_bulk) },
647 	{ "imissed",                RX_QUEUE_STATS_OFFSET(imissed) },
648 };
649 
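/* Count the extended stats exposed across all configured Rx and Tx queues. */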
650 static int
651 gve_xstats_count(struct rte_eth_dev *dev)
652 {
653 	uint16_t i, count = 0;
654 
655 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
656 		if (dev->data->tx_queues[i])
657 			count += RTE_DIM(tx_xstats_name_offset);
658 	}
659 
660 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
661 		if (dev->data->rx_queues[i])
662 			count += RTE_DIM(rx_xstats_name_offset);
663 	}
664 
665 	return count;
666 }
667 
668 static int
669 gve_xstats_get(struct rte_eth_dev *dev,
670 			struct rte_eth_xstat *xstats,
671 			unsigned int size)
672 {
673 	uint16_t i, j, count = gve_xstats_count(dev);
674 	const char *stats;
675 
676 	if (gve_is_gqi(dev->data->dev_private))
677 		gve_get_imissed_from_nic(dev);
678 
679 	if (xstats == NULL || size < count)
680 		return count;
681 
682 	count = 0;
683 
684 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
685 		const struct gve_tx_queue *txq = dev->data->tx_queues[i];
686 		if (txq == NULL)
687 			continue;
688 
689 		stats = (const char *)&txq->stats;
690 		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++, count++) {
691 			xstats[count].id = count;
692 			xstats[count].value = *(const uint64_t *)
693 				(stats + tx_xstats_name_offset[j].offset);
694 		}
695 	}
696 
697 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
698 		const struct gve_rx_queue *rxq = dev->data->rx_queues[i];
699 		if (rxq == NULL)
700 			continue;
701 
702 		stats = (const char *)&rxq->stats;
703 		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++, count++) {
704 			xstats[count].id = count;
705 			xstats[count].value = *(const uint64_t *)
706 				(stats + rx_xstats_name_offset[j].offset);
707 		}
708 	}
709 
710 	return count;
711 }
712 
713 static int
714 gve_xstats_get_names(struct rte_eth_dev *dev,
715 			struct rte_eth_xstat_name *xstats_names,
716 			unsigned int size)
717 {
718 	uint16_t i, j, count = gve_xstats_count(dev);
719 
720 	if (xstats_names == NULL || size < count)
721 		return count;
722 
723 	count = 0;
724 
725 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
726 		if (dev->data->tx_queues[i] == NULL)
727 			continue;
728 
729 		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++)
730 			snprintf(xstats_names[count++].name,
731 				 RTE_ETH_XSTATS_NAME_SIZE,
732 				 "tx_q%u_%s", i, tx_xstats_name_offset[j].name);
733 	}
734 
735 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
736 		if (dev->data->rx_queues[i] == NULL)
737 			continue;
738 
739 		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++)
740 			snprintf(xstats_names[count++].name,
741 				 RTE_ETH_XSTATS_NAME_SIZE,
742 				 "rx_q%u_%s", i, rx_xstats_name_offset[j].name);
743 	}
744 
745 	return count;
746 }
747 
748 
749 static int
750 gve_rss_hash_update(struct rte_eth_dev *dev,
751 			struct rte_eth_rss_conf *rss_conf)
752 {
753 	struct gve_priv *priv = dev->data->dev_private;
754 	struct gve_rss_config gve_rss_conf;
755 	int rss_reta_size;
756 	int err;
757 
758 	if (gve_validate_rss_hf(rss_conf->rss_hf)) {
759 		PMD_DRV_LOG(ERR, "Unsupported hash function.");
760 		return -EINVAL;
761 	}
762 
763 	if (rss_conf->algorithm != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
764 		rss_conf->algorithm != RTE_ETH_HASH_FUNCTION_DEFAULT) {
765 		PMD_DRV_LOG(ERR, "Device only supports Toeplitz algorithm.");
766 		return -EINVAL;
767 	}
768 
769 	if (rss_conf->rss_key_len) {
770 		if (rss_conf->rss_key_len != GVE_RSS_HASH_KEY_SIZE) {
771 			PMD_DRV_LOG(ERR,
772 				"Invalid hash key size. Only RSS hash key size "
773 				"of %u supported", GVE_RSS_HASH_KEY_SIZE);
774 			return -EINVAL;
775 		}
776 
777 		if (!rss_conf->rss_key) {
778 			PMD_DRV_LOG(ERR, "RSS key must be non-null.");
779 			return -EINVAL;
780 		}
781 	} else {
782 		if (!priv->rss_config.key_size) {
783 			PMD_DRV_LOG(ERR, "RSS key must be initialized before "
784 				"any other configuration.");
785 			return -EINVAL;
786 		}
787 		rss_conf->rss_key_len = priv->rss_config.key_size;
788 	}
789 
790 	rss_reta_size = priv->rss_config.indir ?
791 			priv->rss_config.indir_size :
792 			GVE_RSS_INDIR_SIZE;
793 	err = gve_init_rss_config(&gve_rss_conf, rss_conf->rss_key_len,
794 		rss_reta_size);
795 	if (err)
796 		return err;
797 
798 	gve_rss_conf.alg = GVE_RSS_HASH_TOEPLITZ;
799 	err = gve_update_rss_hash_types(priv, &gve_rss_conf, rss_conf);
800 	if (err)
801 		goto err;
802 	err = gve_update_rss_key(priv, &gve_rss_conf, rss_conf);
803 	if (err)
804 		goto err;
805 
806 	/* Set redirection table to default or preexisting. */
807 	if (!priv->rss_config.indir)
808 		gve_generate_rss_reta(dev, &gve_rss_conf);
809 	else
810 		memcpy(gve_rss_conf.indir, priv->rss_config.indir,
811 			gve_rss_conf.indir_size * sizeof(*priv->rss_config.indir));
812 
813 	err = gve_adminq_configure_rss(priv, &gve_rss_conf);
814 	if (!err)
815 		gve_update_priv_rss_config(priv, &gve_rss_conf);
816 
817 err:
818 	gve_free_rss_config(&gve_rss_conf);
819 	return err;
820 }
821 
822 static int
823 gve_rss_hash_conf_get(struct rte_eth_dev *dev,
824 			struct rte_eth_rss_conf *rss_conf)
825 {
826 	struct gve_priv *priv = dev->data->dev_private;
827 
828 	if (!(dev->data->dev_conf.rxmode.offloads &
829 			RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
830 		PMD_DRV_LOG(ERR, "RSS not configured.");
831 		return -ENOTSUP;
832 	}
833 
834 
835 	gve_to_rte_rss_hf(priv->rss_config.hash_types, rss_conf);
836 	rss_conf->rss_key_len = priv->rss_config.key_size;
837 	if (rss_conf->rss_key) {
838 		if (!priv->rss_config.key) {
839 			PMD_DRV_LOG(ERR, "Unable to retrieve default RSS hash key.");
840 			return -ENOTSUP;
841 		}
842 		memcpy(rss_conf->rss_key, priv->rss_config.key,
843 			rss_conf->rss_key_len * sizeof(*rss_conf->rss_key));
844 	}
845 
846 	return 0;
847 }
848 
849 static int
850 gve_rss_reta_update(struct rte_eth_dev *dev,
851 	struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
852 {
853 	struct gve_priv *priv = dev->data->dev_private;
854 	struct gve_rss_config gve_rss_conf;
855 	int table_id;
856 	int err;
857 	int i;
858 
859 	/* RSS key must be set before the redirection table can be set. */
860 	if (!priv->rss_config.key || priv->rss_config.key_size == 0) {
861 		PMD_DRV_LOG(ERR, "RSS hash key must be set before the "
862 			"redirection table can be updated.");
863 		return -ENOTSUP;
864 	}
865 
866 	if (reta_size != GVE_RSS_INDIR_SIZE) {
867 		PMD_DRV_LOG(ERR, "Redirection table must have %hu elements",
868 			(uint16_t)GVE_RSS_INDIR_SIZE);
869 		return -EINVAL;
870 	}
871 
872 	err = gve_init_rss_config_from_priv(priv, &gve_rss_conf);
873 	if (err) {
874 		PMD_DRV_LOG(ERR, "Error allocating new RSS config.");
875 		return err;
876 	}
877 
878 	table_id = 0;
879 	for (i = 0; i < priv->rss_config.indir_size; i++) {
880 		int table_entry = i % RTE_ETH_RETA_GROUP_SIZE;
881 		if (reta_conf[table_id].mask & (1ULL << table_entry))
882 			gve_rss_conf.indir[i] =
883 				reta_conf[table_id].reta[table_entry];
884 
885 		if (table_entry == RTE_ETH_RETA_GROUP_SIZE - 1)
886 			table_id++;
887 	}
888 
889 	err = gve_adminq_configure_rss(priv, &gve_rss_conf);
890 	if (err)
891 		PMD_DRV_LOG(ERR, "Problem configuring RSS with device.");
892 	else
893 		gve_update_priv_rss_config(priv, &gve_rss_conf);
894 
895 	gve_free_rss_config(&gve_rss_conf);
896 	return err;
897 }
898 
899 static int
900 gve_rss_reta_query(struct rte_eth_dev *dev,
901 	struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
902 {
903 	struct gve_priv *priv = dev->data->dev_private;
904 	int table_id;
905 	int i;
906 
907 	if (!(dev->data->dev_conf.rxmode.offloads &
908 		RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
909 		PMD_DRV_LOG(ERR, "RSS not configured.");
910 		return -ENOTSUP;
911 	}
912 
913 	/* RSS key must be set before the redirection table can be queried. */
914 	if (!priv->rss_config.key) {
915 		PMD_DRV_LOG(ERR, "RSS hash key must be set before the "
916 			"redirection table can be queried.");
917 		return -ENOTSUP;
918 	}
919 
920 	if (reta_size != priv->rss_config.indir_size) {
921 		PMD_DRV_LOG(ERR, "RSS redirection table must have %d entries.",
922 			priv->rss_config.indir_size);
923 		return -EINVAL;
924 	}
925 
926 	table_id = 0;
927 	for (i = 0; i < priv->rss_config.indir_size; i++) {
928 		int table_entry = i % RTE_ETH_RETA_GROUP_SIZE;
929 		if (reta_conf[table_id].mask & (1ULL << table_entry))
930 			reta_conf[table_id].reta[table_entry] =
931 				priv->rss_config.indir[i];
932 
933 		if (table_entry == RTE_ETH_RETA_GROUP_SIZE - 1)
934 			table_id++;
935 	}
936 
937 	return 0;
938 }
939 
940 static const struct eth_dev_ops gve_eth_dev_ops = {
941 	.dev_configure        = gve_dev_configure,
942 	.dev_start            = gve_dev_start,
943 	.dev_stop             = gve_dev_stop,
944 	.dev_close            = gve_dev_close,
945 	.dev_infos_get        = gve_dev_info_get,
946 	.rx_queue_setup       = gve_rx_queue_setup,
947 	.tx_queue_setup       = gve_tx_queue_setup,
948 	.rx_queue_release     = gve_rx_queue_release,
949 	.tx_queue_release     = gve_tx_queue_release,
950 	.rx_queue_start       = gve_rx_queue_start,
951 	.tx_queue_start       = gve_tx_queue_start,
952 	.rx_queue_stop        = gve_rx_queue_stop,
953 	.tx_queue_stop        = gve_tx_queue_stop,
954 	.link_update          = gve_link_update,
955 	.stats_get            = gve_dev_stats_get,
956 	.stats_reset          = gve_dev_stats_reset,
957 	.mtu_set              = gve_dev_mtu_set,
958 	.xstats_get           = gve_xstats_get,
959 	.xstats_get_names     = gve_xstats_get_names,
960 	.rss_hash_update      = gve_rss_hash_update,
961 	.rss_hash_conf_get    = gve_rss_hash_conf_get,
962 	.reta_update          = gve_rss_reta_update,
963 	.reta_query           = gve_rss_reta_query,
964 };
965 
966 static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
967 	.dev_configure        = gve_dev_configure,
968 	.dev_start            = gve_dev_start,
969 	.dev_stop             = gve_dev_stop,
970 	.dev_close            = gve_dev_close,
971 	.dev_infos_get        = gve_dev_info_get,
972 	.rx_queue_setup       = gve_rx_queue_setup_dqo,
973 	.tx_queue_setup       = gve_tx_queue_setup_dqo,
974 	.rx_queue_release     = gve_rx_queue_release_dqo,
975 	.tx_queue_release     = gve_tx_queue_release_dqo,
976 	.rx_queue_start       = gve_rx_queue_start_dqo,
977 	.tx_queue_start       = gve_tx_queue_start_dqo,
978 	.rx_queue_stop        = gve_rx_queue_stop_dqo,
979 	.tx_queue_stop        = gve_tx_queue_stop_dqo,
980 	.link_update          = gve_link_update,
981 	.stats_get            = gve_dev_stats_get,
982 	.stats_reset          = gve_dev_stats_reset,
983 	.mtu_set              = gve_dev_mtu_set,
984 	.xstats_get           = gve_xstats_get,
985 	.xstats_get_names     = gve_xstats_get_names,
986 	.rss_hash_update      = gve_rss_hash_update,
987 	.rss_hash_conf_get    = gve_rss_hash_conf_get,
988 	.reta_update          = gve_rss_reta_update,
989 	.reta_query           = gve_rss_reta_query,
990 };
991 
992 static void
993 gve_free_counter_array(struct gve_priv *priv)
994 {
995 	rte_memzone_free(priv->cnt_array_mz);
996 	priv->cnt_array = NULL;
997 }
998 
999 static void
1000 gve_free_irq_db(struct gve_priv *priv)
1001 {
1002 	rte_memzone_free(priv->irq_dbs_mz);
1003 	priv->irq_dbs = NULL;
1004 }
1005 
1006 static void
1007 gve_teardown_device_resources(struct gve_priv *priv)
1008 {
1009 	int err;
1010 
1011 	/* Tell device its resources are being freed */
1012 	if (gve_get_device_resources_ok(priv)) {
1013 		err = gve_adminq_deconfigure_device_resources(priv);
1014 		if (err)
1015 			PMD_DRV_LOG(ERR, "Could not deconfigure device resources: err=%d", err);
1016 	}
1017 
1018 	if (!gve_is_gqi(priv)) {
1019 		rte_free(priv->ptype_lut_dqo);
1020 		priv->ptype_lut_dqo = NULL;
1021 	}
1022 	gve_free_counter_array(priv);
1023 	gve_free_irq_db(priv);
1024 	gve_clear_device_resources_ok(priv);
1025 }
1026 
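/* Return the MSI-X table size advertised in the PCI MSI-X capability, or 0 if absent. */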
1027 static int
1028 pci_dev_msix_vec_count(struct rte_pci_device *pdev)
1029 {
1030 	off_t msix_pos = rte_pci_find_capability(pdev, RTE_PCI_CAP_ID_MSIX);
1031 	uint16_t control;
1032 
1033 	if (msix_pos > 0 && rte_pci_read_config(pdev, &control, sizeof(control),
1034 			msix_pos + RTE_PCI_MSIX_FLAGS) == sizeof(control))
1035 		return (control & RTE_PCI_MSIX_FLAGS_QSIZE) + 1;
1036 
1037 	return 0;
1038 }
1039 
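/* Allocate the event counter array and IRQ doorbell memzones, register them
 * with the device, and fetch the packet type map when running in DQO mode.
 */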
1040 static int
1041 gve_setup_device_resources(struct gve_priv *priv)
1042 {
1043 	char z_name[RTE_MEMZONE_NAMESIZE];
1044 	const struct rte_memzone *mz;
1045 	int err = 0;
1046 
1047 	snprintf(z_name, sizeof(z_name), "gve_%s_cnt_arr", priv->pci_dev->device.name);
1048 	mz = rte_memzone_reserve_aligned(z_name,
1049 					 priv->num_event_counters * sizeof(*priv->cnt_array),
1050 					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
1051 					 PAGE_SIZE);
1052 	if (mz == NULL) {
1053 		PMD_DRV_LOG(ERR, "Could not alloc memzone for count array");
1054 		return -ENOMEM;
1055 	}
1056 	priv->cnt_array = (rte_be32_t *)mz->addr;
1057 	priv->cnt_array_mz = mz;
1058 
1059 	snprintf(z_name, sizeof(z_name), "gve_%s_irqmz", priv->pci_dev->device.name);
1060 	mz = rte_memzone_reserve_aligned(z_name,
1061 					 sizeof(*priv->irq_dbs) * (priv->num_ntfy_blks),
1062 					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
1063 					 PAGE_SIZE);
1064 	if (mz == NULL) {
1065 		PMD_DRV_LOG(ERR, "Could not alloc memzone for irq_dbs");
1066 		err = -ENOMEM;
1067 		goto free_cnt_array;
1068 	}
1069 	priv->irq_dbs = (struct gve_irq_db *)mz->addr;
1070 	priv->irq_dbs_mz = mz;
1071 
1072 	err = gve_adminq_configure_device_resources(priv,
1073 						    priv->cnt_array_mz->iova,
1074 						    priv->num_event_counters,
1075 						    priv->irq_dbs_mz->iova,
1076 						    priv->num_ntfy_blks);
1077 	if (unlikely(err)) {
1078 		PMD_DRV_LOG(ERR, "Could not config device resources: err=%d", err);
1079 		goto free_irq_dbs;
1080 	}
1081 	if (!gve_is_gqi(priv)) {
1082 		priv->ptype_lut_dqo = rte_zmalloc("gve_ptype_lut_dqo",
1083 			sizeof(struct gve_ptype_lut), 0);
1084 		if (priv->ptype_lut_dqo == NULL) {
1085 			PMD_DRV_LOG(ERR, "Failed to alloc ptype lut.");
1086 			err = -ENOMEM;
1087 			goto free_irq_dbs;
1088 		}
1089 		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
1090 		if (unlikely(err)) {
1091 			PMD_DRV_LOG(ERR, "Failed to get ptype map: err=%d", err);
1092 			goto free_ptype_lut;
1093 		}
1094 	}
1095 
1096 	return 0;
1097 free_ptype_lut:
1098 	rte_free(priv->ptype_lut_dqo);
1099 	priv->ptype_lut_dqo = NULL;
1100 free_irq_dbs:
1101 	gve_free_irq_db(priv);
1102 free_cnt_array:
1103 	gve_free_counter_array(priv);
1104 
1105 	return err;
1106 }
1107 
1108 static void
1109 gve_set_default_ring_size_bounds(struct gve_priv *priv)
1110 {
1111 	priv->max_tx_desc_cnt = GVE_DEFAULT_MAX_RING_SIZE;
1112 	priv->max_rx_desc_cnt = GVE_DEFAULT_MAX_RING_SIZE;
1113 	priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
1114 	priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
1115 }
1116 
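/* Bring up the admin queue, verify driver compatibility, query the device
 * description, derive queue limits from the available MSI-X vectors, and set
 * up device resources.
 */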
1117 static int
1118 gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1119 {
1120 	int num_ntfy;
1121 	int err;
1122 
1123 	/* Set up the adminq */
1124 	err = gve_adminq_alloc(priv);
1125 	if (err) {
1126 		PMD_DRV_LOG(ERR, "Failed to alloc admin queue: err=%d", err);
1127 		return err;
1128 	}
1129 	err = gve_verify_driver_compatibility(priv);
1130 	if (err) {
1131 		PMD_DRV_LOG(ERR, "Could not verify driver compatibility: err=%d", err);
1132 		goto free_adminq;
1133 	}
1134 
1135 	/* Set default descriptor counts */
1136 	gve_set_default_ring_size_bounds(priv);
1137 
1138 	if (skip_describe_device)
1139 		goto setup_device;
1140 
1141 	/* Get the initial information we need from the device */
1142 	err = gve_adminq_describe_device(priv);
1143 	if (err) {
1144 		PMD_DRV_LOG(ERR, "Could not get device information: err=%d", err);
1145 		goto free_adminq;
1146 	}
1147 
1148 	num_ntfy = pci_dev_msix_vec_count(priv->pci_dev);
1149 	if (num_ntfy <= 0) {
1150 		PMD_DRV_LOG(ERR, "Could not count MSI-x vectors");
1151 		err = -EIO;
1152 		goto free_adminq;
1153 	} else if (num_ntfy < GVE_MIN_MSIX) {
1154 		PMD_DRV_LOG(ERR, "GVE needs at least %d MSI-x vectors, but only has %d",
1155 			    GVE_MIN_MSIX, num_ntfy);
1156 		err = -EINVAL;
1157 		goto free_adminq;
1158 	}
1159 
1160 	priv->num_registered_pages = 0;
1161 
1162 	/* gvnic has one Notification Block per MSI-x vector, except for the
1163 	 * management vector
1164 	 */
1165 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1166 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
1167 
1168 	priv->max_nb_txq = RTE_MIN(priv->max_nb_txq, priv->num_ntfy_blks / 2);
1169 	priv->max_nb_rxq = RTE_MIN(priv->max_nb_rxq, priv->num_ntfy_blks / 2);
1170 
1171 	if (priv->default_num_queues > 0) {
1172 		priv->max_nb_txq = RTE_MIN(priv->default_num_queues, priv->max_nb_txq);
1173 		priv->max_nb_rxq = RTE_MIN(priv->default_num_queues, priv->max_nb_rxq);
1174 	}
1175 
1176 	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
1177 		    priv->max_nb_txq, priv->max_nb_rxq);
1178 
1179 setup_device:
1180 	err = gve_setup_device_resources(priv);
1181 	if (!err)
1182 		return 0;
1183 free_adminq:
1184 	gve_adminq_free(priv);
1185 	return err;
1186 }
1187 
1188 static void
1189 gve_teardown_priv_resources(struct gve_priv *priv)
1190 {
1191 	gve_teardown_device_resources(priv);
1192 	gve_adminq_free(priv);
1193 }
1194 
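/* Primary process: map BARs, read the max queue counts, initialize the private
 * data and install the GQI or DQO ops; secondary processes only attach the ops
 * and Rx/Tx burst functions.
 */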
1195 static int
1196 gve_dev_init(struct rte_eth_dev *eth_dev)
1197 {
1198 	struct gve_priv *priv = eth_dev->data->dev_private;
1199 	int max_tx_queues, max_rx_queues;
1200 	struct rte_pci_device *pci_dev;
1201 	struct gve_registers *reg_bar;
1202 	rte_be32_t *db_bar;
1203 	int err;
1204 
1205 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1206 		if (gve_is_gqi(priv)) {
1207 			gve_set_rx_function(eth_dev);
1208 			gve_set_tx_function(eth_dev);
1209 			eth_dev->dev_ops = &gve_eth_dev_ops;
1210 		} else {
1211 			gve_set_rx_function_dqo(eth_dev);
1212 			gve_set_tx_function_dqo(eth_dev);
1213 			eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
1214 		}
1215 		return 0;
1216 	}
1217 
1218 	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1219 
1220 	reg_bar = pci_dev->mem_resource[GVE_REG_BAR].addr;
1221 	if (!reg_bar) {
1222 		PMD_DRV_LOG(ERR, "Failed to map pci bar!");
1223 		return -ENOMEM;
1224 	}
1225 
1226 	db_bar = pci_dev->mem_resource[GVE_DB_BAR].addr;
1227 	if (!db_bar) {
1228 		PMD_DRV_LOG(ERR, "Failed to map doorbell bar!");
1229 		return -ENOMEM;
1230 	}
1231 
1232 	gve_write_version(&reg_bar->driver_version);
1233 	/* Get max queues to alloc etherdev */
1234 	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1235 	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1236 
1237 	priv->reg_bar0 = reg_bar;
1238 	priv->db_bar2 = db_bar;
1239 	priv->pci_dev = pci_dev;
1240 	priv->state_flags = 0x0;
1241 
1242 	priv->max_nb_txq = max_tx_queues;
1243 	priv->max_nb_rxq = max_rx_queues;
1244 
1245 	err = gve_init_priv(priv, false);
1246 	if (err)
1247 		return err;
1248 
1249 	if (gve_is_gqi(priv)) {
1250 		eth_dev->dev_ops = &gve_eth_dev_ops;
1251 		gve_set_rx_function(eth_dev);
1252 		gve_set_tx_function(eth_dev);
1253 	} else {
1254 		eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
1255 		gve_set_rx_function_dqo(eth_dev);
1256 		gve_set_tx_function_dqo(eth_dev);
1257 	}
1258 
1259 	eth_dev->data->mac_addrs = &priv->dev_addr;
1260 
1261 	return 0;
1262 }
1263 
1264 static int
1265 gve_dev_uninit(struct rte_eth_dev *eth_dev)
1266 {
1267 	struct gve_priv *priv = eth_dev->data->dev_private;
1268 
1269 	gve_teardown_priv_resources(priv);
1270 
1271 	eth_dev->data->mac_addrs = NULL;
1272 
1273 	return 0;
1274 }
1275 
1276 static int
1277 gve_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
1278 	      struct rte_pci_device *pci_dev)
1279 {
1280 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct gve_priv), gve_dev_init);
1281 }
1282 
1283 static int
1284 gve_pci_remove(struct rte_pci_device *pci_dev)
1285 {
1286 	return rte_eth_dev_pci_generic_remove(pci_dev, gve_dev_uninit);
1287 }
1288 
1289 static const struct rte_pci_id pci_id_gve_map[] = {
1290 	{ RTE_PCI_DEVICE(GOOGLE_VENDOR_ID, GVE_DEV_ID) },
1291 	{ .device_id = 0 },
1292 };
1293 
1294 static struct rte_pci_driver rte_gve_pmd = {
1295 	.id_table = pci_id_gve_map,
1296 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1297 	.probe = gve_pci_probe,
1298 	.remove = gve_pci_remove,
1299 };
1300 
1301 RTE_PMD_REGISTER_PCI(net_gve, rte_gve_pmd);
1302 RTE_PMD_REGISTER_PCI_TABLE(net_gve, pci_id_gve_map);
1303 RTE_PMD_REGISTER_KMOD_DEP(net_gve, "* igb_uio | vfio-pci");
1304 RTE_LOG_REGISTER_SUFFIX(gve_logtype_driver, driver, NOTICE);
1305