xref: /dpdk/drivers/net/gve/gve_ethdev.c (revision b9a87346b05c562dd6005ee025eca67a1a80bea8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2022-2023 Intel Corporation
3  * Copyright(C) 2023 Google LLC
4  */
5 
6 #include "gve_ethdev.h"
7 #include "base/gve_adminq.h"
8 #include "base/gve_register.h"
9 #include "base/gve_osdep.h"
10 #include "gve_version.h"
11 #include <rte_ether.h>
12 #include "gve_rss.h"
13 
14 static void
15 gve_write_version(uint8_t *driver_version_register)
16 {
17 	const char *c = gve_version_string();
18 	while (*c) {
19 		writeb(*c, driver_version_register);
20 		c++;
21 	}
22 	writeb('\n', driver_version_register);
23 }
24 
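/*
 * Reserve an IOVA-contiguous memzone for one queue page list (QPL) and
 * record the bus address of each page; QPLs back the Tx/Rx queues when the
 * device uses the GQI_QPL queue format.
 */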
25 static int
26 gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
27 {
28 	char z_name[RTE_MEMZONE_NAMESIZE];
29 	struct gve_queue_page_list *qpl;
30 	const struct rte_memzone *mz;
31 	dma_addr_t page_bus;
32 	uint32_t i;
33 
34 	if (priv->num_registered_pages + pages >
35 	    priv->max_registered_pages) {
36 		PMD_DRV_LOG(ERR, "Needed pages %" PRIu64 " exceed max registered pages %" PRIu64,
37 			    priv->num_registered_pages + pages,
38 			    priv->max_registered_pages);
39 		return -EINVAL;
40 	}
41 	qpl = &priv->qpl[id];
42 	snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
43 	mz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,
44 					 rte_socket_id(),
45 					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
46 	if (mz == NULL) {
47 		PMD_DRV_LOG(ERR, "Failed to alloc %s.", z_name);
48 		return -ENOMEM;
49 	}
50 	qpl->page_buses = rte_zmalloc("qpl page buses", pages * sizeof(dma_addr_t), 0);
51 	if (qpl->page_buses == NULL) {
52 		PMD_DRV_LOG(ERR, "Failed to alloc qpl %u page buses", id);
		rte_memzone_free(mz);
53 		return -ENOMEM;
54 	}
55 	page_bus = mz->iova;
56 	for (i = 0; i < pages; i++) {
57 		qpl->page_buses[i] = page_bus;
58 		page_bus += PAGE_SIZE;
59 	}
60 	qpl->id = id;
61 	qpl->mz = mz;
62 	qpl->num_entries = pages;
63 
64 	priv->num_registered_pages += pages;
65 
66 	return 0;
67 }
68 
69 static void
70 gve_free_qpls(struct gve_priv *priv)
71 {
72 	uint16_t nb_txqs = priv->max_nb_txq;
73 	uint16_t nb_rxqs = priv->max_nb_rxq;
74 	uint32_t i;
75 
76 	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
77 		return;
78 
79 	for (i = 0; i < nb_txqs + nb_rxqs; i++) {
80 		if (priv->qpl[i].mz != NULL)
81 			rte_memzone_free(priv->qpl[i].mz);
82 		rte_free(priv->qpl[i].page_buses);
83 	}
84 
85 	rte_free(priv->qpl);
86 }
87 
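/*
 * dev_configure: turn on the RSS hash offload whenever multi-queue RSS is
 * requested, flag receive segment coalescing (enable_rsc) when LRO is
 * requested, and, if an RSS redirection table already exists, regenerate it
 * so it matches the possibly changed queue count.
 */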
88 static int
89 gve_dev_configure(struct rte_eth_dev *dev)
90 {
91 	struct gve_priv *priv = dev->data->dev_private;
92 
93 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
94 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
95 		priv->rss_config.alg = GVE_RSS_HASH_TOEPLITZ;
96 	}
97 
98 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
99 		priv->enable_rsc = 1;
100 
101 	/* Reset RSS RETA in case number of queues changed. */
102 	if (priv->rss_config.indir) {
103 		struct gve_rss_config update_reta_config;
104 		gve_init_rss_config_from_priv(priv, &update_reta_config);
105 		gve_generate_rss_reta(dev, &update_reta_config);
106 
107 		int err = gve_adminq_configure_rss(priv, &update_reta_config);
108 		if (err)
109 			PMD_DRV_LOG(ERR,
110 				"Could not reconfigure RSS redirection table.");
111 		else
112 			gve_update_priv_rss_config(priv, &update_reta_config);
113 
114 		gve_free_rss_config(&update_reta_config);
115 		return err;
116 	}
117 
118 	return 0;
119 }
120 
121 static int
122 gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
123 {
124 	struct gve_priv *priv = dev->data->dev_private;
125 	struct rte_eth_link link;
126 	int err;
127 
128 	memset(&link, 0, sizeof(link));
129 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
130 	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
131 
132 	if (!dev->data->dev_started) {
133 		link.link_status = RTE_ETH_LINK_DOWN;
134 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
135 	} else {
136 		link.link_status = RTE_ETH_LINK_UP;
137 		PMD_DRV_LOG(DEBUG, "Get link speed from hw");
138 		err = gve_adminq_report_link_speed(priv);
139 		if (err) {
140 			PMD_DRV_LOG(ERR, "Failed to get link speed.");
141 			priv->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
142 		}
143 		link.link_speed = priv->link_speed;
144 	}
145 
146 	return rte_eth_linkstatus_set(dev, &link);
147 }
148 
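/*
 * The stats report memzone is shared with the device: the first
 * GVE_*_STATS_REPORT_NUM entries per queue are written by the driver and the
 * trailing NIC_*_STATS_REPORT_NUM entries by the NIC. stats_start_idx and
 * stats_end_idx bracket the NIC-written range read back in
 * gve_get_imissed_from_nic().
 */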
149 static int
150 gve_alloc_stats_report(struct gve_priv *priv,
151 		uint16_t nb_tx_queues, uint16_t nb_rx_queues)
152 {
153 	char z_name[RTE_MEMZONE_NAMESIZE];
154 	int tx_stats_cnt;
155 	int rx_stats_cnt;
156 
157 	tx_stats_cnt = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
158 		nb_tx_queues;
159 	rx_stats_cnt = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
160 		nb_rx_queues;
161 	priv->stats_report_len = sizeof(struct gve_stats_report) +
162 		sizeof(struct stats) * (tx_stats_cnt + rx_stats_cnt);
163 
164 	snprintf(z_name, sizeof(z_name), "gve_stats_report_%s",
165 			priv->pci_dev->device.name);
166 	priv->stats_report_mem = rte_memzone_reserve_aligned(z_name,
167 			priv->stats_report_len,
168 			rte_socket_id(),
169 			RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
170 
171 	if (!priv->stats_report_mem)
172 		return -ENOMEM;
173 
174 	/* Start past the driver-written stats; only NIC-written stats are read back. */
175 	priv->stats_start_idx = (GVE_TX_STATS_REPORT_NUM * nb_tx_queues) +
176 		(GVE_RX_STATS_REPORT_NUM * nb_rx_queues);
177 	priv->stats_end_idx = priv->stats_start_idx +
178 		(NIC_TX_STATS_REPORT_NUM * nb_tx_queues) +
179 		(NIC_RX_STATS_REPORT_NUM * nb_rx_queues) - 1;
180 
181 	return 0;
182 }
183 
184 static void
185 gve_free_stats_report(struct rte_eth_dev *dev)
186 {
187 	struct gve_priv *priv = dev->data->dev_private;
188 	rte_memzone_free(priv->stats_report_mem);
189 	priv->stats_report_mem = NULL;
190 }
191 
192 /* Read Rx NIC stats from shared region */
193 static void
194 gve_get_imissed_from_nic(struct rte_eth_dev *dev)
195 {
196 	struct gve_stats_report *stats_report;
197 	struct gve_rx_queue *rxq;
198 	struct gve_priv *priv;
199 	struct stats stat;
200 	int queue_id;
201 	int stat_id;
202 	int i;
203 
204 	priv = dev->data->dev_private;
205 	if (!priv->stats_report_mem)
206 		return;
207 	stats_report = (struct gve_stats_report *)
208 		priv->stats_report_mem->addr;
209 	for (i = priv->stats_start_idx; i <= priv->stats_end_idx; i++) {
210 		stat = stats_report->stats[i];
211 		queue_id = cpu_to_be32(stat.queue_id);
212 		rxq = dev->data->rx_queues[queue_id];
213 		if (rxq == NULL)
214 			continue;
215 		stat_id = cpu_to_be32(stat.stat_name);
216 		/* Update imissed. */
217 		if (stat_id == RX_NO_BUFFERS_POSTED)
218 			rxq->stats.imissed = cpu_to_be64(stat.value);
219 	}
220 }
221 
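/*
 * Create Tx then Rx queues through the admin queue and start each one; on
 * any failure, stop whatever was already started before returning the error
 * to dev_start.
 */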
222 static int
223 gve_start_queues(struct rte_eth_dev *dev)
224 {
225 	struct gve_priv *priv = dev->data->dev_private;
226 	uint16_t num_queues;
227 	uint16_t i;
228 	int ret;
229 
230 	num_queues = dev->data->nb_tx_queues;
231 	priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;
232 	ret = gve_adminq_create_tx_queues(priv, num_queues);
233 	if (ret != 0) {
234 		PMD_DRV_LOG(ERR, "Failed to create %u tx queues.", num_queues);
235 		return ret;
236 	}
237 	for (i = 0; i < num_queues; i++) {
238 		ret = gve_tx_queue_start(dev, i);
		if (ret != 0) {
239 			PMD_DRV_LOG(ERR, "Failed to start Tx queue %d", i);
240 			goto err_tx;
241 		}
	}
242 
243 	num_queues = dev->data->nb_rx_queues;
244 	priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;
245 	ret = gve_adminq_create_rx_queues(priv, num_queues);
246 	if (ret != 0) {
247 		PMD_DRV_LOG(ERR, "Failed to create %u rx queues.", num_queues);
248 		goto err_tx;
249 	}
250 	for (i = 0; i < num_queues; i++) {
251 		if (gve_is_gqi(priv))
252 			ret = gve_rx_queue_start(dev, i);
253 		else
254 			ret = gve_rx_queue_start_dqo(dev, i);
255 		if (ret != 0) {
256 			PMD_DRV_LOG(ERR, "Failed to start Rx queue %d", i);
257 			goto err_rx;
258 		}
259 	}
260 
261 	return 0;
262 
263 err_rx:
264 	gve_stop_rx_queues(dev);
265 err_tx:
266 	gve_stop_tx_queues(dev);
267 	return ret;
268 }
269 
270 static int
271 gve_dev_start(struct rte_eth_dev *dev)
272 {
273 	struct gve_priv *priv;
274 	int ret;
275 
276 	ret = gve_start_queues(dev);
277 	if (ret != 0) {
278 		PMD_DRV_LOG(ERR, "Failed to start queues");
279 		return ret;
280 	}
281 
282 	dev->data->dev_started = 1;
283 	gve_link_update(dev, 0);
284 
285 	priv = dev->data->dev_private;
286 	/* Stats reporting is not available yet for the DQO queue format. */
287 	if (gve_is_gqi(priv)) {
288 		ret = gve_alloc_stats_report(priv,
289 				dev->data->nb_tx_queues,
290 				dev->data->nb_rx_queues);
291 		if (ret != 0) {
292 			PMD_DRV_LOG(ERR,
293 				"Failed to allocate region for stats reporting.");
294 			return ret;
295 		}
296 		ret = gve_adminq_report_stats(priv, priv->stats_report_len,
297 				priv->stats_report_mem->iova,
298 				GVE_STATS_REPORT_TIMER_PERIOD);
299 		if (ret != 0) {
300 			PMD_DRV_LOG(ERR, "gve_adminq_report_stats command failed.");
301 			return ret;
302 		}
303 	}
304 
305 	return 0;
306 }
307 
308 static int
309 gve_dev_stop(struct rte_eth_dev *dev)
310 {
311 	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
312 
313 	gve_stop_tx_queues(dev);
314 	gve_stop_rx_queues(dev);
315 
316 	dev->data->dev_started = 0;
317 
318 	if (gve_is_gqi(dev->data->dev_private))
319 		gve_free_stats_report(dev);
320 
321 	return 0;
322 }
323 
324 static int
325 gve_dev_close(struct rte_eth_dev *dev)
326 {
327 	struct gve_priv *priv = dev->data->dev_private;
328 	int err = 0;
329 	uint16_t i;
330 
331 	if (dev->data->dev_started) {
332 		err = gve_dev_stop(dev);
333 		if (err != 0)
334 			PMD_DRV_LOG(ERR, "Failed to stop dev.");
335 	}
336 
337 	if (gve_is_gqi(priv)) {
338 		for (i = 0; i < dev->data->nb_tx_queues; i++)
339 			gve_tx_queue_release(dev, i);
340 
341 		for (i = 0; i < dev->data->nb_rx_queues; i++)
342 			gve_rx_queue_release(dev, i);
343 	} else {
344 		for (i = 0; i < dev->data->nb_tx_queues; i++)
345 			gve_tx_queue_release_dqo(dev, i);
346 
347 		for (i = 0; i < dev->data->nb_rx_queues; i++)
348 			gve_rx_queue_release_dqo(dev, i);
349 	}
350 
351 	gve_free_qpls(priv);
352 	rte_free(priv->adminq);
353 
354 	dev->data->mac_addrs = NULL;
355 
356 	return err;
357 }
358 
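/*
 * Hand the device a gve_driver_info page (DPDK os_type, driver and DPDK
 * versions, capability flags) through the admin queue. Devices that do not
 * implement the command return -EOPNOTSUPP, which is tolerated.
 */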
359 static int
360 gve_verify_driver_compatibility(struct gve_priv *priv)
361 {
362 	const struct rte_memzone *driver_info_mem;
363 	struct gve_driver_info *driver_info;
364 	int err;
365 
366 	driver_info_mem = rte_memzone_reserve_aligned("verify_driver_compatibility",
367 			sizeof(struct gve_driver_info),
368 			rte_socket_id(),
369 			RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
370 
371 	if (driver_info_mem == NULL) {
372 		PMD_DRV_LOG(ERR,
373 		    "Could not alloc memzone for driver compatibility");
374 		return -ENOMEM;
375 	}
376 	driver_info = (struct gve_driver_info *)driver_info_mem->addr;
377 
378 	*driver_info = (struct gve_driver_info) {
379 		.os_type = 5, /* DPDK */
380 		.driver_major = GVE_VERSION_MAJOR,
381 		.driver_minor = GVE_VERSION_MINOR,
382 		.driver_sub = GVE_VERSION_SUB,
383 		.os_version_major = cpu_to_be32(DPDK_VERSION_MAJOR),
384 		.os_version_minor = cpu_to_be32(DPDK_VERSION_MINOR),
385 		.os_version_sub = cpu_to_be32(DPDK_VERSION_SUB),
386 		.driver_capability_flags = {
387 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
388 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
389 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
390 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
391 		},
392 	};
393 
394 	populate_driver_version_strings((char *)driver_info->os_version_str1,
395 			(char *)driver_info->os_version_str2);
396 
397 	err = gve_adminq_verify_driver_compatibility(priv,
398 		sizeof(struct gve_driver_info),
399 		(dma_addr_t)driver_info_mem->iova);
400 	/* It's ok if the device doesn't support this */
401 	if (err == -EOPNOTSUPP)
402 		err = 0;
403 
404 	rte_memzone_free(driver_info_mem);
405 	return err;
406 }
407 
408 static int
409 gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
410 {
411 	struct gve_priv *priv = dev->data->dev_private;
412 
413 	dev_info->device = dev->device;
414 	dev_info->max_mac_addrs = 1;
415 	dev_info->max_rx_queues = priv->max_nb_rxq;
416 	dev_info->max_tx_queues = priv->max_nb_txq;
417 	if (gve_is_gqi(priv)) {
418 		dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_GQI;
419 		dev_info->max_rx_bufsize = GVE_RX_MAX_BUF_SIZE_GQI;
420 	} else {
421 		dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_DQO;
422 		dev_info->max_rx_bufsize = GVE_RX_MAX_BUF_SIZE_DQO;
423 	}
424 
425 	dev_info->max_rx_pktlen = priv->max_mtu + RTE_ETHER_HDR_LEN;
426 	dev_info->max_mtu = priv->max_mtu;
427 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
428 
429 	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_RSS_HASH;
430 	dev_info->tx_offload_capa =
431 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS	|
432 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
433 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
434 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	|
435 		RTE_ETH_TX_OFFLOAD_TCP_TSO;
436 
437 	if (!gve_is_gqi(priv)) {
438 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
439 		dev_info->rx_offload_capa |=
440 				RTE_ETH_RX_OFFLOAD_IPV4_CKSUM   |
441 				RTE_ETH_RX_OFFLOAD_UDP_CKSUM	|
442 				RTE_ETH_RX_OFFLOAD_TCP_CKSUM	|
443 				RTE_ETH_RX_OFFLOAD_TCP_LRO;
444 	}
445 
446 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
447 		.rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH,
448 		.rx_drop_en = 0,
449 		.offloads = 0,
450 	};
451 
452 	dev_info->default_txconf = (struct rte_eth_txconf) {
453 		.tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH,
454 		.tx_rs_thresh = GVE_DEFAULT_TX_RS_THRESH,
455 		.offloads = 0,
456 	};
457 
458 	dev_info->default_rxportconf.ring_size = priv->rx_desc_cnt;
459 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
460 		.nb_max = gve_is_gqi(priv) ? priv->rx_desc_cnt : GVE_MAX_QUEUE_SIZE_DQO,
461 		.nb_min = priv->rx_desc_cnt,
462 		.nb_align = 1,
463 	};
464 
465 	dev_info->default_txportconf.ring_size = priv->tx_desc_cnt;
466 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
467 		.nb_max = gve_is_gqi(priv) ? priv->tx_desc_cnt : GVE_MAX_QUEUE_SIZE_DQO,
468 		.nb_min = priv->tx_desc_cnt,
469 		.nb_align = 1,
470 	};
471 
472 	dev_info->flow_type_rss_offloads = GVE_RTE_RSS_OFFLOAD_ALL;
473 	dev_info->hash_key_size = GVE_RSS_HASH_KEY_SIZE;
474 	dev_info->reta_size = GVE_RSS_INDIR_SIZE;
475 
476 	return 0;
477 }
478 
479 static int
480 gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
481 {
482 	uint16_t i;
483 	if (gve_is_gqi(dev->data->dev_private))
484 		gve_get_imissed_from_nic(dev);
485 
486 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
487 		struct gve_tx_queue *txq = dev->data->tx_queues[i];
488 		if (txq == NULL)
489 			continue;
490 
491 		stats->opackets += txq->stats.packets;
492 		stats->obytes += txq->stats.bytes;
493 		stats->oerrors += txq->stats.errors;
494 	}
495 
496 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
497 		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
498 		if (rxq == NULL)
499 			continue;
500 
501 		stats->ipackets += rxq->stats.packets;
502 		stats->ibytes += rxq->stats.bytes;
503 		stats->ierrors += rxq->stats.errors;
504 		stats->rx_nombuf += rxq->stats.no_mbufs;
505 		stats->imissed += rxq->stats.imissed;
506 	}
507 
508 	return 0;
509 }
510 
511 static int
512 gve_dev_stats_reset(struct rte_eth_dev *dev)
513 {
514 	uint16_t i;
515 
516 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
517 		struct gve_tx_queue *txq = dev->data->tx_queues[i];
518 		if (txq == NULL)
519 			continue;
520 
521 		memset(&txq->stats, 0, sizeof(txq->stats));
522 	}
523 
524 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
525 		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
526 		if (rxq == NULL)
527 			continue;
528 
529 		memset(&rxq->stats, 0, sizeof(rxq->stats));
530 	}
531 
532 	return 0;
533 }
534 
535 static int
536 gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
537 {
538 	struct gve_priv *priv = dev->data->dev_private;
539 	int err;
540 
541 	if (mtu < RTE_ETHER_MIN_MTU || mtu > priv->max_mtu) {
542 		PMD_DRV_LOG(ERR, "MIN MTU is %u, MAX MTU is %u",
543 			    RTE_ETHER_MIN_MTU, priv->max_mtu);
544 		return -EINVAL;
545 	}
546 
547 	/* MTU cannot be changed while the port is started. */
548 	if (dev->data->dev_started) {
549 		PMD_DRV_LOG(ERR, "Port must be stopped before configuration");
550 		return -EBUSY;
551 	}
552 
553 	err = gve_adminq_set_mtu(priv, mtu);
554 	if (err) {
555 		PMD_DRV_LOG(ERR, "Failed to set MTU to %u, err = %d", mtu, err);
556 		return err;
557 	}
558 
559 	return 0;
560 }
561 
562 #define TX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_tx_stats, x)
563 #define RX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_rx_stats, x)
564 
565 static const struct gve_xstats_name_offset tx_xstats_name_offset[] = {
566 	{ "packets", TX_QUEUE_STATS_OFFSET(packets) },
567 	{ "bytes",   TX_QUEUE_STATS_OFFSET(bytes) },
568 	{ "errors",  TX_QUEUE_STATS_OFFSET(errors) },
569 };
570 
571 static const struct gve_xstats_name_offset rx_xstats_name_offset[] = {
572 	{ "packets",                RX_QUEUE_STATS_OFFSET(packets) },
573 	{ "bytes",                  RX_QUEUE_STATS_OFFSET(bytes) },
574 	{ "errors",                 RX_QUEUE_STATS_OFFSET(errors) },
575 	{ "mbuf_alloc_errors",      RX_QUEUE_STATS_OFFSET(no_mbufs) },
576 	{ "mbuf_alloc_errors_bulk", RX_QUEUE_STATS_OFFSET(no_mbufs_bulk) },
577 	{ "imissed",                RX_QUEUE_STATS_OFFSET(imissed) },
578 };
579 
580 static int
581 gve_xstats_count(struct rte_eth_dev *dev)
582 {
583 	uint16_t i, count = 0;
584 
585 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
586 		if (dev->data->tx_queues[i])
587 			count += RTE_DIM(tx_xstats_name_offset);
588 	}
589 
590 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
591 		if (dev->data->rx_queues[i])
592 			count += RTE_DIM(rx_xstats_name_offset);
593 	}
594 
595 	return count;
596 }
597 
598 static int
599 gve_xstats_get(struct rte_eth_dev *dev,
600 			struct rte_eth_xstat *xstats,
601 			unsigned int size)
602 {
603 	uint16_t i, j, count = gve_xstats_count(dev);
604 	const char *stats;
605 
606 	if (gve_is_gqi(dev->data->dev_private))
607 		gve_get_imissed_from_nic(dev);
608 
609 	if (xstats == NULL || size < count)
610 		return count;
611 
612 	count = 0;
613 
614 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
615 		const struct gve_tx_queue *txq = dev->data->tx_queues[i];
616 		if (txq == NULL)
617 			continue;
618 
619 		stats = (const char *)&txq->stats;
620 		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++, count++) {
621 			xstats[count].id = count;
622 			xstats[count].value = *(const uint64_t *)
623 				(stats + tx_xstats_name_offset[j].offset);
624 		}
625 	}
626 
627 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
628 		const struct gve_rx_queue *rxq = dev->data->rx_queues[i];
629 		if (rxq == NULL)
630 			continue;
631 
632 		stats = (const char *)&rxq->stats;
633 		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++, count++) {
634 			xstats[count].id = count;
635 			xstats[count].value = *(const uint64_t *)
636 				(stats + rx_xstats_name_offset[j].offset);
637 		}
638 	}
639 
640 	return count;
641 }
642 
643 static int
644 gve_xstats_get_names(struct rte_eth_dev *dev,
645 			struct rte_eth_xstat_name *xstats_names,
646 			unsigned int size)
647 {
648 	uint16_t i, j, count = gve_xstats_count(dev);
649 
650 	if (xstats_names == NULL || size < count)
651 		return count;
652 
653 	count = 0;
654 
655 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
656 		if (dev->data->tx_queues[i] == NULL)
657 			continue;
658 
659 		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++)
660 			snprintf(xstats_names[count++].name,
661 				 RTE_ETH_XSTATS_NAME_SIZE,
662 				 "tx_q%u_%s", i, tx_xstats_name_offset[j].name);
663 	}
664 
665 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
666 		if (dev->data->rx_queues[i] == NULL)
667 			continue;
668 
669 		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++)
670 			snprintf(xstats_names[count++].name,
671 				 RTE_ETH_XSTATS_NAME_SIZE,
672 				 "rx_q%u_%s", i, rx_xstats_name_offset[j].name);
673 	}
674 
675 	return count;
676 }
677 
678 
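/*
 * Update the RSS hash configuration. Only the Toeplitz hash is accepted, the
 * key length must be exactly GVE_RSS_HASH_KEY_SIZE, and a key must already be
 * programmed before a keyless update; an existing redirection table is
 * preserved, otherwise a default one is generated.
 */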
679 static int
680 gve_rss_hash_update(struct rte_eth_dev *dev,
681 			struct rte_eth_rss_conf *rss_conf)
682 {
683 	struct gve_priv *priv = dev->data->dev_private;
684 	struct gve_rss_config gve_rss_conf;
685 	int rss_reta_size;
686 	int err;
687 
688 	if (gve_validate_rss_hf(rss_conf->rss_hf)) {
689 		PMD_DRV_LOG(ERR, "Unsupported hash function.");
690 		return -EINVAL;
691 	}
692 
693 	if (rss_conf->algorithm != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
694 		rss_conf->algorithm != RTE_ETH_HASH_FUNCTION_DEFAULT) {
695 		PMD_DRV_LOG(ERR, "Device only supports Toeplitz algorithm.");
696 		return -EINVAL;
697 	}
698 
699 	if (rss_conf->rss_key_len) {
700 		if (rss_conf->rss_key_len != GVE_RSS_HASH_KEY_SIZE) {
701 			PMD_DRV_LOG(ERR,
702 				"Invalid hash key size. Only RSS hash key size "
703 				"of %u supported", GVE_RSS_HASH_KEY_SIZE);
704 			return -EINVAL;
705 		}
706 
707 		if (!rss_conf->rss_key) {
708 			PMD_DRV_LOG(ERR, "RSS key must be non-null.");
709 			return -EINVAL;
710 		}
711 	} else {
712 		if (!priv->rss_config.key_size) {
713 			PMD_DRV_LOG(ERR, "RSS key must be initialized before "
714 				"any other configuration.");
715 			return -EINVAL;
716 		}
717 		rss_conf->rss_key_len = priv->rss_config.key_size;
718 	}
719 
720 	rss_reta_size = priv->rss_config.indir ?
721 			priv->rss_config.indir_size :
722 			GVE_RSS_INDIR_SIZE;
723 	err = gve_init_rss_config(&gve_rss_conf, rss_conf->rss_key_len,
724 		rss_reta_size);
725 	if (err)
726 		return err;
727 
728 	gve_rss_conf.alg = GVE_RSS_HASH_TOEPLITZ;
729 	err = gve_update_rss_hash_types(priv, &gve_rss_conf, rss_conf);
730 	if (err)
731 		goto err;
732 	err = gve_update_rss_key(priv, &gve_rss_conf, rss_conf);
733 	if (err)
734 		goto err;
735 
736 	/* Set redirection table to default or preexisting. */
737 	if (!priv->rss_config.indir)
738 		gve_generate_rss_reta(dev, &gve_rss_conf);
739 	else
740 		memcpy(gve_rss_conf.indir, priv->rss_config.indir,
741 			gve_rss_conf.indir_size * sizeof(*priv->rss_config.indir));
742 
743 	err = gve_adminq_configure_rss(priv, &gve_rss_conf);
744 	if (!err)
745 		gve_update_priv_rss_config(priv, &gve_rss_conf);
746 
747 err:
748 	gve_free_rss_config(&gve_rss_conf);
749 	return err;
750 }
751 
752 static int
753 gve_rss_hash_conf_get(struct rte_eth_dev *dev,
754 			struct rte_eth_rss_conf *rss_conf)
755 {
756 	struct gve_priv *priv = dev->data->dev_private;
757 
758 	if (!(dev->data->dev_conf.rxmode.offloads &
759 			RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
760 		PMD_DRV_LOG(ERR, "RSS not configured.");
761 		return -ENOTSUP;
762 	}
763 
764 
765 	gve_to_rte_rss_hf(priv->rss_config.hash_types, rss_conf);
766 	rss_conf->rss_key_len = priv->rss_config.key_size;
767 	if (rss_conf->rss_key) {
768 		if (!priv->rss_config.key) {
769 			PMD_DRV_LOG(ERR, "Unable to retrieve default RSS hash key.");
770 			return -ENOTSUP;
771 		}
772 		memcpy(rss_conf->rss_key, priv->rss_config.key,
773 			rss_conf->rss_key_len * sizeof(*rss_conf->rss_key));
774 	}
775 
776 	return 0;
777 }
778 
779 static int
780 gve_rss_reta_update(struct rte_eth_dev *dev,
781 	struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
782 {
783 	struct gve_priv *priv = dev->data->dev_private;
784 	struct gve_rss_config gve_rss_conf;
785 	int table_id;
786 	int err;
787 	int i;
788 
789 	/* RSS key must be set before the redirection table can be set. */
790 	if (!priv->rss_config.key || priv->rss_config.key_size == 0) {
791 		PMD_DRV_LOG(ERR, "RSS hash key must be set before the "
792 			"redirection table can be updated.");
793 		return -ENOTSUP;
794 	}
795 
796 	if (reta_size != GVE_RSS_INDIR_SIZE) {
797 		PMD_DRV_LOG(ERR, "Redirection table must have %hu elements",
798 			(uint16_t)GVE_RSS_INDIR_SIZE);
799 		return -EINVAL;
800 	}
801 
802 	err = gve_init_rss_config_from_priv(priv, &gve_rss_conf);
803 	if (err) {
804 		PMD_DRV_LOG(ERR, "Error allocating new RSS config.");
805 		return err;
806 	}
807 
808 	table_id = 0;
809 	for (i = 0; i < priv->rss_config.indir_size; i++) {
810 		int table_entry = i % RTE_ETH_RETA_GROUP_SIZE;
811 		if (reta_conf[table_id].mask & (1ULL << table_entry))
812 			gve_rss_conf.indir[i] =
813 				reta_conf[table_id].reta[table_entry];
814 
815 		if (table_entry == RTE_ETH_RETA_GROUP_SIZE - 1)
816 			table_id++;
817 	}
818 
819 	err = gve_adminq_configure_rss(priv, &gve_rss_conf);
820 	if (err)
821 		PMD_DRV_LOG(ERR, "Problem configuring RSS with device.");
822 	else
823 		gve_update_priv_rss_config(priv, &gve_rss_conf);
824 
825 	gve_free_rss_config(&gve_rss_conf);
826 	return err;
827 }
828 
829 static int
830 gve_rss_reta_query(struct rte_eth_dev *dev,
831 	struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
832 {
833 	struct gve_priv *priv = dev->data->dev_private;
834 	int table_id;
835 	int i;
836 
837 	if (!(dev->data->dev_conf.rxmode.offloads &
838 		RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
839 		PMD_DRV_LOG(ERR, "RSS not configured.");
840 		return -ENOTSUP;
841 	}
842 
843 	/* RSS key must be set before the redirection table can be queried. */
844 	if (!priv->rss_config.key) {
845 		PMD_DRV_LOG(ERR, "RSS hash key must be set before the "
846 			"redirection table can be queried.");
847 		return -ENOTSUP;
848 	}
849 
850 	if (reta_size != priv->rss_config.indir_size) {
851 		PMD_DRV_LOG(ERR, "RSS redirection table must have %d entries.",
852 			priv->rss_config.indir_size);
853 		return -EINVAL;
854 	}
855 
856 	table_id = 0;
857 	for (i = 0; i < priv->rss_config.indir_size; i++) {
858 		int table_entry = i % RTE_ETH_RETA_GROUP_SIZE;
859 		if (reta_conf[table_id].mask & (1ULL << table_entry))
860 			reta_conf[table_id].reta[table_entry] =
861 				priv->rss_config.indir[i];
862 
863 		if (table_entry == RTE_ETH_RETA_GROUP_SIZE - 1)
864 			table_id++;
865 	}
866 
867 	return 0;
868 }
869 
870 static const struct eth_dev_ops gve_eth_dev_ops = {
871 	.dev_configure        = gve_dev_configure,
872 	.dev_start            = gve_dev_start,
873 	.dev_stop             = gve_dev_stop,
874 	.dev_close            = gve_dev_close,
875 	.dev_infos_get        = gve_dev_info_get,
876 	.rx_queue_setup       = gve_rx_queue_setup,
877 	.tx_queue_setup       = gve_tx_queue_setup,
878 	.rx_queue_release     = gve_rx_queue_release,
879 	.tx_queue_release     = gve_tx_queue_release,
880 	.rx_queue_start       = gve_rx_queue_start,
881 	.tx_queue_start       = gve_tx_queue_start,
882 	.rx_queue_stop        = gve_rx_queue_stop,
883 	.tx_queue_stop        = gve_tx_queue_stop,
884 	.link_update          = gve_link_update,
885 	.stats_get            = gve_dev_stats_get,
886 	.stats_reset          = gve_dev_stats_reset,
887 	.mtu_set              = gve_dev_mtu_set,
888 	.xstats_get           = gve_xstats_get,
889 	.xstats_get_names     = gve_xstats_get_names,
890 	.rss_hash_update      = gve_rss_hash_update,
891 	.rss_hash_conf_get    = gve_rss_hash_conf_get,
892 	.reta_update          = gve_rss_reta_update,
893 	.reta_query           = gve_rss_reta_query,
894 };
895 
896 static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
897 	.dev_configure        = gve_dev_configure,
898 	.dev_start            = gve_dev_start,
899 	.dev_stop             = gve_dev_stop,
900 	.dev_close            = gve_dev_close,
901 	.dev_infos_get        = gve_dev_info_get,
902 	.rx_queue_setup       = gve_rx_queue_setup_dqo,
903 	.tx_queue_setup       = gve_tx_queue_setup_dqo,
904 	.rx_queue_release     = gve_rx_queue_release_dqo,
905 	.tx_queue_release     = gve_tx_queue_release_dqo,
906 	.rx_queue_start       = gve_rx_queue_start_dqo,
907 	.tx_queue_start       = gve_tx_queue_start_dqo,
908 	.rx_queue_stop        = gve_rx_queue_stop_dqo,
909 	.tx_queue_stop        = gve_tx_queue_stop_dqo,
910 	.link_update          = gve_link_update,
911 	.stats_get            = gve_dev_stats_get,
912 	.stats_reset          = gve_dev_stats_reset,
913 	.mtu_set              = gve_dev_mtu_set,
914 	.xstats_get           = gve_xstats_get,
915 	.xstats_get_names     = gve_xstats_get_names,
916 	.rss_hash_update      = gve_rss_hash_update,
917 	.rss_hash_conf_get    = gve_rss_hash_conf_get,
918 	.reta_update          = gve_rss_reta_update,
919 	.reta_query           = gve_rss_reta_query,
920 };
921 
922 static void
923 gve_free_counter_array(struct gve_priv *priv)
924 {
925 	rte_memzone_free(priv->cnt_array_mz);
926 	priv->cnt_array = NULL;
927 }
928 
929 static void
930 gve_free_irq_db(struct gve_priv *priv)
931 {
932 	rte_memzone_free(priv->irq_dbs_mz);
933 	priv->irq_dbs = NULL;
934 }
935 
936 static void
937 gve_teardown_device_resources(struct gve_priv *priv)
938 {
939 	int err;
940 
941 	/* Tell device its resources are being freed */
942 	if (gve_get_device_resources_ok(priv)) {
943 		err = gve_adminq_deconfigure_device_resources(priv);
944 		if (err)
945 			PMD_DRV_LOG(ERR, "Could not deconfigure device resources: err=%d", err);
946 	}
947 
948 	if (!gve_is_gqi(priv)) {
949 		rte_free(priv->ptype_lut_dqo);
950 		priv->ptype_lut_dqo = NULL;
951 	}
952 	gve_free_counter_array(priv);
953 	gve_free_irq_db(priv);
954 	gve_clear_device_resources_ok(priv);
955 }
956 
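/*
 * Read the MSI-X table size from the PCI MSI-X capability; the field encodes
 * "table size - 1", hence the +1.
 */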
957 static int
958 pci_dev_msix_vec_count(struct rte_pci_device *pdev)
959 {
960 	off_t msix_pos = rte_pci_find_capability(pdev, RTE_PCI_CAP_ID_MSIX);
961 	uint16_t control;
962 
963 	if (msix_pos > 0 && rte_pci_read_config(pdev, &control, sizeof(control),
964 			msix_pos + RTE_PCI_MSIX_FLAGS) == sizeof(control))
965 		return (control & RTE_PCI_MSIX_FLAGS_QSIZE) + 1;
966 
967 	return 0;
968 }
969 
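/*
 * Allocate the event-counter array and per-notification-block interrupt
 * doorbells in IOVA-contiguous memzones and hand their addresses to the
 * device; for the DQO format also fetch the packet-type LUT used by the Rx
 * path.
 */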
970 static int
971 gve_setup_device_resources(struct gve_priv *priv)
972 {
973 	char z_name[RTE_MEMZONE_NAMESIZE];
974 	const struct rte_memzone *mz;
975 	int err = 0;
976 
977 	snprintf(z_name, sizeof(z_name), "gve_%s_cnt_arr", priv->pci_dev->device.name);
978 	mz = rte_memzone_reserve_aligned(z_name,
979 					 priv->num_event_counters * sizeof(*priv->cnt_array),
980 					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
981 					 PAGE_SIZE);
982 	if (mz == NULL) {
983 		PMD_DRV_LOG(ERR, "Could not alloc memzone for count array");
984 		return -ENOMEM;
985 	}
986 	priv->cnt_array = (rte_be32_t *)mz->addr;
987 	priv->cnt_array_mz = mz;
988 
989 	snprintf(z_name, sizeof(z_name), "gve_%s_irqmz", priv->pci_dev->device.name);
990 	mz = rte_memzone_reserve_aligned(z_name,
991 					 sizeof(*priv->irq_dbs) * (priv->num_ntfy_blks),
992 					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
993 					 PAGE_SIZE);
994 	if (mz == NULL) {
995 		PMD_DRV_LOG(ERR, "Could not alloc memzone for irq_dbs");
996 		err = -ENOMEM;
997 		goto free_cnt_array;
998 	}
999 	priv->irq_dbs = (struct gve_irq_db *)mz->addr;
1000 	priv->irq_dbs_mz = mz;
1001 
1002 	err = gve_adminq_configure_device_resources(priv,
1003 						    priv->cnt_array_mz->iova,
1004 						    priv->num_event_counters,
1005 						    priv->irq_dbs_mz->iova,
1006 						    priv->num_ntfy_blks);
1007 	if (unlikely(err)) {
1008 		PMD_DRV_LOG(ERR, "Could not config device resources: err=%d", err);
1009 		goto free_irq_dbs;
1010 	}
1011 	if (!gve_is_gqi(priv)) {
1012 		priv->ptype_lut_dqo = rte_zmalloc("gve_ptype_lut_dqo",
1013 			sizeof(struct gve_ptype_lut), 0);
1014 		if (priv->ptype_lut_dqo == NULL) {
1015 			PMD_DRV_LOG(ERR, "Failed to alloc ptype lut.");
1016 			err = -ENOMEM;
1017 			goto free_irq_dbs;
1018 		}
1019 		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
1020 		if (unlikely(err)) {
1021 			PMD_DRV_LOG(ERR, "Failed to get ptype map: err=%d", err);
1022 			goto free_ptype_lut;
1023 		}
1024 	}
1025 
1026 	return 0;
1027 free_ptype_lut:
1028 	rte_free(priv->ptype_lut_dqo);
1029 	priv->ptype_lut_dqo = NULL;
1030 free_irq_dbs:
1031 	gve_free_irq_db(priv);
1032 free_cnt_array:
1033 	gve_free_counter_array(priv);
1034 
1035 	return err;
1036 }
1037 
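/*
 * One-time device bring-up: allocate the admin queue, verify driver
 * compatibility, query the device description, size the queue limits from
 * the number of MSI-X vectors, pre-allocate QPLs for the GQI_QPL format
 * (Tx QPL ids first, then Rx), and finally set up shared device resources.
 */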
1038 static int
1039 gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1040 {
1041 	uint16_t pages;
1042 	int num_ntfy;
1043 	uint32_t i;
1044 	int err;
1045 
1046 	/* Set up the adminq */
1047 	err = gve_adminq_alloc(priv);
1048 	if (err) {
1049 		PMD_DRV_LOG(ERR, "Failed to alloc admin queue: err=%d", err);
1050 		return err;
1051 	}
1052 	err = gve_verify_driver_compatibility(priv);
1053 	if (err) {
1054 		PMD_DRV_LOG(ERR, "Could not verify driver compatibility: err=%d", err);
1055 		goto free_adminq;
1056 	}
1057 
1058 	if (skip_describe_device)
1059 		goto setup_device;
1060 
1061 	/* Get the initial information we need from the device */
1062 	err = gve_adminq_describe_device(priv);
1063 	if (err) {
1064 		PMD_DRV_LOG(ERR, "Could not get device information: err=%d", err);
1065 		goto free_adminq;
1066 	}
1067 
1068 	num_ntfy = pci_dev_msix_vec_count(priv->pci_dev);
1069 	if (num_ntfy <= 0) {
1070 		PMD_DRV_LOG(ERR, "Could not count MSI-x vectors");
1071 		err = -EIO;
1072 		goto free_adminq;
1073 	} else if (num_ntfy < GVE_MIN_MSIX) {
1074 		PMD_DRV_LOG(ERR, "GVE needs at least %d MSI-x vectors, but only has %d",
1075 			    GVE_MIN_MSIX, num_ntfy);
1076 		err = -EINVAL;
1077 		goto free_adminq;
1078 	}
1079 
1080 	priv->num_registered_pages = 0;
1081 
1082 	/* gvnic has one Notification Block per MSI-x vector, except for the
1083 	 * management vector
1084 	 */
1085 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1086 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
1087 
1088 	priv->max_nb_txq = RTE_MIN(priv->max_nb_txq, priv->num_ntfy_blks / 2);
1089 	priv->max_nb_rxq = RTE_MIN(priv->max_nb_rxq, priv->num_ntfy_blks / 2);
1090 
1091 	if (priv->default_num_queues > 0) {
1092 		priv->max_nb_txq = RTE_MIN(priv->default_num_queues, priv->max_nb_txq);
1093 		priv->max_nb_rxq = RTE_MIN(priv->default_num_queues, priv->max_nb_rxq);
1094 	}
1095 
1096 	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
1097 		    priv->max_nb_txq, priv->max_nb_rxq);
1098 
1099 	/* In GQI_QPL queue format:
1100 	 * Allocate queue page lists according to max queue number
1101 	 * tx qpl id should start from 0 while rx qpl id should start
1102 	 * from priv->max_nb_txq
1103 	 */
1104 	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
1105 		priv->qpl = rte_zmalloc("gve_qpl",
1106 					(priv->max_nb_txq + priv->max_nb_rxq) *
1107 					sizeof(struct gve_queue_page_list), 0);
1108 		if (priv->qpl == NULL) {
1109 			PMD_DRV_LOG(ERR, "Failed to alloc qpl.");
1110 			err = -ENOMEM;
1111 			goto free_adminq;
1112 		}
1113 
1114 		for (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {
1115 			if (i < priv->max_nb_txq)
1116 				pages = priv->tx_pages_per_qpl;
1117 			else
1118 				pages = priv->rx_data_slot_cnt;
1119 			err = gve_alloc_queue_page_list(priv, i, pages);
1120 			if (err != 0) {
1121 				PMD_DRV_LOG(ERR, "Failed to alloc qpl %u.", i);
1122 				goto err_qpl;
1123 			}
1124 		}
1125 	}
1126 
1127 setup_device:
1128 	err = gve_setup_device_resources(priv);
1129 	if (!err)
1130 		return 0;
1131 err_qpl:
1132 	gve_free_qpls(priv);
1133 free_adminq:
1134 	gve_adminq_free(priv);
1135 	return err;
1136 }
1137 
1138 static void
1139 gve_teardown_priv_resources(struct gve_priv *priv)
1140 {
1141 	gve_teardown_device_resources(priv);
1142 	gve_adminq_free(priv);
1143 }
1144 
1145 static int
1146 gve_dev_init(struct rte_eth_dev *eth_dev)
1147 {
1148 	struct gve_priv *priv = eth_dev->data->dev_private;
1149 	int max_tx_queues, max_rx_queues;
1150 	struct rte_pci_device *pci_dev;
1151 	struct gve_registers *reg_bar;
1152 	rte_be32_t *db_bar;
1153 	int err;
1154 
1155 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1156 		return 0;
1157 
1158 	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1159 
1160 	reg_bar = pci_dev->mem_resource[GVE_REG_BAR].addr;
1161 	if (!reg_bar) {
1162 		PMD_DRV_LOG(ERR, "Failed to map pci bar!");
1163 		return -ENOMEM;
1164 	}
1165 
1166 	db_bar = pci_dev->mem_resource[GVE_DB_BAR].addr;
1167 	if (!db_bar) {
1168 		PMD_DRV_LOG(ERR, "Failed to map doorbell bar!");
1169 		return -ENOMEM;
1170 	}
1171 
1172 	gve_write_version(&reg_bar->driver_version);
1173 	/* Get max queues to alloc etherdev */
1174 	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1175 	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1176 
1177 	priv->reg_bar0 = reg_bar;
1178 	priv->db_bar2 = db_bar;
1179 	priv->pci_dev = pci_dev;
1180 	priv->state_flags = 0x0;
1181 
1182 	priv->max_nb_txq = max_tx_queues;
1183 	priv->max_nb_rxq = max_rx_queues;
1184 
1185 	err = gve_init_priv(priv, false);
1186 	if (err)
1187 		return err;
1188 
1189 	if (gve_is_gqi(priv)) {
1190 		eth_dev->dev_ops = &gve_eth_dev_ops;
1191 		gve_set_rx_function(eth_dev);
1192 		gve_set_tx_function(eth_dev);
1193 	} else {
1194 		eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
1195 		gve_set_rx_function_dqo(eth_dev);
1196 		gve_set_tx_function_dqo(eth_dev);
1197 	}
1198 
1199 	eth_dev->data->mac_addrs = &priv->dev_addr;
1200 
1201 	return 0;
1202 }
1203 
1204 static int
1205 gve_dev_uninit(struct rte_eth_dev *eth_dev)
1206 {
1207 	struct gve_priv *priv = eth_dev->data->dev_private;
1208 
1209 	gve_teardown_priv_resources(priv);
1210 
1211 	eth_dev->data->mac_addrs = NULL;
1212 
1213 	return 0;
1214 }
1215 
1216 static int
1217 gve_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
1218 	      struct rte_pci_device *pci_dev)
1219 {
1220 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct gve_priv), gve_dev_init);
1221 }
1222 
1223 static int
1224 gve_pci_remove(struct rte_pci_device *pci_dev)
1225 {
1226 	return rte_eth_dev_pci_generic_remove(pci_dev, gve_dev_uninit);
1227 }
1228 
1229 static const struct rte_pci_id pci_id_gve_map[] = {
1230 	{ RTE_PCI_DEVICE(GOOGLE_VENDOR_ID, GVE_DEV_ID) },
1231 	{ .device_id = 0 },
1232 };
1233 
1234 static struct rte_pci_driver rte_gve_pmd = {
1235 	.id_table = pci_id_gve_map,
1236 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1237 	.probe = gve_pci_probe,
1238 	.remove = gve_pci_remove,
1239 };
1240 
1241 RTE_PMD_REGISTER_PCI(net_gve, rte_gve_pmd);
1242 RTE_PMD_REGISTER_PCI_TABLE(net_gve, pci_id_gve_map);
1243 RTE_PMD_REGISTER_KMOD_DEP(net_gve, "* igb_uio | vfio-pci");
1244 RTE_LOG_REGISTER_SUFFIX(gve_logtype_driver, driver, NOTICE);
1245