xref: /dpdk/drivers/net/gve/gve_ethdev.c (revision d67bce4b4197f7f7cddb182c7c08bd00ec06adbb)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2022-2023 Intel Corporation
3  * Copyright(C) 2023 Google LLC
4  */
5 
6 #include "gve_ethdev.h"
7 #include "base/gve_adminq.h"
8 #include "base/gve_register.h"
9 #include "base/gve_osdep.h"
10 #include "gve_version.h"
11 #include <rte_ether.h>
12 #include "gve_rss.h"
13 
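/* Write the driver version string to the device one byte at a time,
 * terminated by a newline.
 */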
14 static void
15 gve_write_version(uint8_t *driver_version_register)
16 {
17 	const char *c = gve_version_string();
18 	while (*c) {
19 		writeb(*c, driver_version_register);
20 		c++;
21 	}
22 	writeb('\n', driver_version_register);
23 }
24 
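/* Allocate a queue page list backed by a single IOVA-contiguous memzone of
 * num_pages pages and record the bus address of every page.
 */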
25 static struct gve_queue_page_list *
26 gve_alloc_queue_page_list(const char *name, uint32_t num_pages)
27 {
28 	struct gve_queue_page_list *qpl;
29 	const struct rte_memzone *mz;
30 	dma_addr_t page_bus;
31 	uint32_t i;
32 
33 	qpl = rte_zmalloc("qpl struct", sizeof(struct gve_queue_page_list), 0);
34 	if (!qpl)
35 		return NULL;
36 
37 	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
38 					 rte_socket_id(),
39 					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
40 	if (mz == NULL) {
41 		PMD_DRV_LOG(ERR, "Failed to alloc %s.", name);
42 		goto free_qpl_struct;
43 	}
44 	qpl->page_buses = rte_zmalloc("qpl page buses",
45 		num_pages * sizeof(dma_addr_t), 0);
46 	if (qpl->page_buses == NULL) {
47 		PMD_DRV_LOG(ERR, "Failed to alloc qpl page buses");
48 		goto free_qpl_memzone;
49 	}
50 	page_bus = mz->iova;
51 	for (i = 0; i < num_pages; i++) {
52 		qpl->page_buses[i] = page_bus;
53 		page_bus += PAGE_SIZE;
54 	}
55 	qpl->mz = mz;
56 	qpl->num_entries = num_pages;
57 	return qpl;
58 
59 free_qpl_memzone:
60 	rte_memzone_free(qpl->mz);
61 free_qpl_struct:
62 	rte_free(qpl);
63 	return NULL;
64 }
65 
66 static void
67 gve_free_queue_page_list(struct gve_queue_page_list *qpl)
68 {
69 	if (qpl->mz) {
70 		rte_memzone_free(qpl->mz);
71 		qpl->mz = NULL;
72 	}
73 	if (qpl->page_buses) {
74 		rte_free(qpl->page_buses);
75 		qpl->page_buses = NULL;
76 	}
77 	rte_free(qpl);
78 }
79 
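/* Allocate a QPL for a queue, assign its ID (Rx QPL IDs follow all Tx QPL
 * IDs), enforce the device's registered-page limit and register the pages
 * with the device through the admin queue.
 */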
80 struct gve_queue_page_list *
81 gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
82 	uint32_t num_pages)
83 {
84 	const char *queue_type_string = is_rx ? "rx" : "tx";
85 	char qpl_name[RTE_MEMZONE_NAMESIZE];
86 	struct gve_queue_page_list *qpl;
87 	int err;
88 
89 	/* Allocate a new QPL. */
90 	snprintf(qpl_name, sizeof(qpl_name), "gve_%s_%s_qpl%d",
91 		priv->pci_dev->device.name, queue_type_string, queue_id);
92 	qpl = gve_alloc_queue_page_list(qpl_name, num_pages);
93 	if (!qpl) {
94 		PMD_DRV_LOG(ERR,
95 			    "Failed to alloc %s qpl for queue %hu.",
96 			    queue_type_string, queue_id);
97 		return NULL;
98 	}
99 
100 	/* Assign the QPL an ID. */
101 	qpl->id = queue_id;
102 	if (is_rx)
103 		qpl->id += priv->max_nb_txq;
104 
105 	/* Validate page registration limit and register QPLs. */
106 	if (priv->num_registered_pages + qpl->num_entries >
107 	    priv->max_registered_pages) {
108 		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
109 			    priv->num_registered_pages + qpl->num_entries,
110 			    priv->max_registered_pages);
111 		goto cleanup_qpl;
112 	}
113 	err = gve_adminq_register_page_list(priv, qpl);
114 	if (err) {
115 		PMD_DRV_LOG(ERR,
116 			    "Failed to register %s qpl for queue %hu.",
117 			    queue_type_string, queue_id);
118 		goto cleanup_qpl;
119 	}
120 	priv->num_registered_pages += qpl->num_entries;
121 	return qpl;
122 
123 cleanup_qpl:
124 	gve_free_queue_page_list(qpl);
125 	return NULL;
126 }
127 
128 int
129 gve_teardown_queue_page_list(struct gve_priv *priv,
130 	struct gve_queue_page_list *qpl)
131 {
132 	int err = gve_adminq_unregister_page_list(priv, qpl->id);
133 	if (err) {
134 		PMD_DRV_LOG(CRIT, "Unable to unregister qpl %d!", qpl->id);
135 		return err;
136 	}
137 	priv->num_registered_pages -= qpl->num_entries;
138 	gve_free_queue_page_list(qpl);
139 	return 0;
140 }
141 
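/* Apply the port configuration: force the RSS hash offload when RSS
 * multi-queue mode is requested, enable RSC for LRO, and regenerate the RSS
 * redirection table if one already exists, since the number of queues may
 * have changed.
 */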
142 static int
143 gve_dev_configure(struct rte_eth_dev *dev)
144 {
145 	struct gve_priv *priv = dev->data->dev_private;
146 
147 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
148 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
149 		priv->rss_config.alg = GVE_RSS_HASH_TOEPLITZ;
150 	}
151 
152 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
153 		priv->enable_rsc = 1;
154 
155 	/* Reset RSS RETA in case number of queues changed. */
156 	if (priv->rss_config.indir) {
157 		struct gve_rss_config update_reta_config;
158 		gve_init_rss_config_from_priv(priv, &update_reta_config);
159 		gve_generate_rss_reta(dev, &update_reta_config);
160 
161 		int err = gve_adminq_configure_rss(priv, &update_reta_config);
162 		if (err)
163 			PMD_DRV_LOG(ERR,
164 				"Could not reconfigure RSS redirection table.");
165 		else
166 			gve_update_priv_rss_config(priv, &update_reta_config);
167 
168 		gve_free_rss_config(&update_reta_config);
169 		return err;
170 	}
171 
172 	return 0;
173 }
174 
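/* Report link status. The link is considered up whenever the port is
 * started; the link speed is queried from the device via the admin queue.
 */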
175 static int
176 gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
177 {
178 	struct gve_priv *priv = dev->data->dev_private;
179 	struct rte_eth_link link;
180 	int err;
181 
182 	memset(&link, 0, sizeof(link));
183 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
184 	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
185 
186 	if (!dev->data->dev_started) {
187 		link.link_status = RTE_ETH_LINK_DOWN;
188 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
189 	} else {
190 		link.link_status = RTE_ETH_LINK_UP;
191 		PMD_DRV_LOG(DEBUG, "Get link status from hw");
192 		err = gve_adminq_report_link_speed(priv);
193 		if (err) {
194 			PMD_DRV_LOG(ERR, "Failed to get link speed.");
195 			priv->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
196 		}
197 		link.link_speed = priv->link_speed;
198 	}
199 
200 	return rte_eth_linkstatus_set(dev, &link);
201 }
202 
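/* Reserve a DMA-able stats report shared with the device and remember the
 * index range of the NIC-written entries (the driver-written entries come
 * first in the report).
 */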
203 static int
204 gve_alloc_stats_report(struct gve_priv *priv,
205 		uint16_t nb_tx_queues, uint16_t nb_rx_queues)
206 {
207 	char z_name[RTE_MEMZONE_NAMESIZE];
208 	int tx_stats_cnt;
209 	int rx_stats_cnt;
210 
211 	tx_stats_cnt = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
212 		nb_tx_queues;
213 	rx_stats_cnt = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
214 		nb_rx_queues;
215 	priv->stats_report_len = sizeof(struct gve_stats_report) +
216 		sizeof(struct stats) * (tx_stats_cnt + rx_stats_cnt);
217 
218 	snprintf(z_name, sizeof(z_name), "gve_stats_report_%s",
219 			priv->pci_dev->device.name);
220 	priv->stats_report_mem = rte_memzone_reserve_aligned(z_name,
221 			priv->stats_report_len,
222 			rte_socket_id(),
223 			RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
224 
225 	if (!priv->stats_report_mem)
226 		return -ENOMEM;
227 
228 	/* Offset past the driver-written stats; the NIC-written stats follow. */
229 	priv->stats_start_idx = (GVE_TX_STATS_REPORT_NUM * nb_tx_queues) +
230 		(GVE_RX_STATS_REPORT_NUM * nb_rx_queues);
231 	priv->stats_end_idx = priv->stats_start_idx +
232 		(NIC_TX_STATS_REPORT_NUM * nb_tx_queues) +
233 		(NIC_RX_STATS_REPORT_NUM * nb_rx_queues) - 1;
234 
235 	return 0;
236 }
237 
238 static void
239 gve_free_stats_report(struct rte_eth_dev *dev)
240 {
241 	struct gve_priv *priv = dev->data->dev_private;
242 	rte_memzone_free(priv->stats_report_mem);
243 	priv->stats_report_mem = NULL;
244 }
245 
246 /* Read Rx NIC stats from shared region */
247 static void
248 gve_get_imissed_from_nic(struct rte_eth_dev *dev)
249 {
250 	struct gve_stats_report *stats_report;
251 	struct gve_rx_queue *rxq;
252 	struct gve_priv *priv;
253 	struct stats stat;
254 	int queue_id;
255 	int stat_id;
256 	int i;
257 
258 	priv = dev->data->dev_private;
259 	if (!priv->stats_report_mem)
260 		return;
261 	stats_report = (struct gve_stats_report *)
262 		priv->stats_report_mem->addr;
263 	for (i = priv->stats_start_idx; i <= priv->stats_end_idx; i++) {
264 		stat = stats_report->stats[i];
265 		queue_id = cpu_to_be32(stat.queue_id);
266 		rxq = dev->data->rx_queues[queue_id];
267 		if (rxq == NULL)
268 			continue;
269 		stat_id = cpu_to_be32(stat.stat_name);
270 		/* Update imissed. */
271 		if (stat_id == RX_NO_BUFFERS_POSTED)
272 			rxq->stats.imissed = cpu_to_be64(stat.value);
273 	}
274 }
275 
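/* Create the Tx and Rx queues through the admin queue and start every ring;
 * on failure, any queues already started are stopped again.
 */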
276 static int
277 gve_start_queues(struct rte_eth_dev *dev)
278 {
279 	struct gve_priv *priv = dev->data->dev_private;
280 	uint16_t num_queues;
281 	uint16_t i;
282 	int ret;
283 
284 	num_queues = dev->data->nb_tx_queues;
285 	priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;
286 	ret = gve_adminq_create_tx_queues(priv, num_queues);
287 	if (ret != 0) {
288 		PMD_DRV_LOG(ERR, "Failed to create %u tx queues.", num_queues);
289 		return ret;
290 	}
291 	for (i = 0; i < num_queues; i++)
292 		if (gve_tx_queue_start(dev, i) != 0) {
293 			PMD_DRV_LOG(ERR, "Failed to start Tx queue %d", i);
294 			goto err_tx;
295 		}
296 
297 	num_queues = dev->data->nb_rx_queues;
298 	priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;
299 	ret = gve_adminq_create_rx_queues(priv, num_queues);
300 	if (ret != 0) {
301 		PMD_DRV_LOG(ERR, "Failed to create %u rx queues.", num_queues);
302 		goto err_tx;
303 	}
304 	for (i = 0; i < num_queues; i++) {
305 		if (gve_is_gqi(priv))
306 			ret = gve_rx_queue_start(dev, i);
307 		else
308 			ret = gve_rx_queue_start_dqo(dev, i);
309 		if (ret != 0) {
310 			PMD_DRV_LOG(ERR, "Failed to start Rx queue %d", i);
311 			goto err_rx;
312 		}
313 	}
314 
315 	return 0;
316 
317 err_rx:
318 	gve_stop_rx_queues(dev);
319 err_tx:
320 	gve_stop_tx_queues(dev);
321 	return ret;
322 }
323 
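/* Start all queues, mark the port as started, refresh the link status and,
 * on GQI devices, set up periodic stats reporting to the device.
 */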
324 static int
325 gve_dev_start(struct rte_eth_dev *dev)
326 {
327 	struct gve_priv *priv;
328 	int ret;
329 
330 	ret = gve_start_queues(dev);
331 	if (ret != 0) {
332 		PMD_DRV_LOG(ERR, "Failed to start queues");
333 		return ret;
334 	}
335 
336 	dev->data->dev_started = 1;
337 	gve_link_update(dev, 0);
338 
339 	priv = dev->data->dev_private;
340 	/* No stats available yet for DQO. */
341 	if (gve_is_gqi(priv)) {
342 		ret = gve_alloc_stats_report(priv,
343 				dev->data->nb_tx_queues,
344 				dev->data->nb_rx_queues);
345 		if (ret != 0) {
346 			PMD_DRV_LOG(ERR,
347 				"Failed to allocate region for stats reporting.");
348 			return ret;
349 		}
350 		ret = gve_adminq_report_stats(priv, priv->stats_report_len,
351 				priv->stats_report_mem->iova,
352 				GVE_STATS_REPORT_TIMER_PERIOD);
353 		if (ret != 0) {
354 			PMD_DRV_LOG(ERR, "gve_adminq_report_stats command failed.");
355 			return ret;
356 		}
357 	}
358 
359 	return 0;
360 }
361 
362 static int
363 gve_dev_stop(struct rte_eth_dev *dev)
364 {
365 	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
366 
367 	gve_stop_tx_queues(dev);
368 	gve_stop_rx_queues(dev);
369 
370 	dev->data->dev_started = 0;
371 
372 	if (gve_is_gqi(dev->data->dev_private))
373 		gve_free_stats_report(dev);
374 
375 	return 0;
376 }
377 
378 static int
379 gve_dev_close(struct rte_eth_dev *dev)
380 {
381 	struct gve_priv *priv = dev->data->dev_private;
382 	int err = 0;
383 	uint16_t i;
384 
385 	if (dev->data->dev_started) {
386 		err = gve_dev_stop(dev);
387 		if (err != 0)
388 			PMD_DRV_LOG(ERR, "Failed to stop dev.");
389 	}
390 
391 	if (gve_is_gqi(priv)) {
392 		for (i = 0; i < dev->data->nb_tx_queues; i++)
393 			gve_tx_queue_release(dev, i);
394 
395 		for (i = 0; i < dev->data->nb_rx_queues; i++)
396 			gve_rx_queue_release(dev, i);
397 	} else {
398 		for (i = 0; i < dev->data->nb_tx_queues; i++)
399 			gve_tx_queue_release_dqo(dev, i);
400 
401 		for (i = 0; i < dev->data->nb_rx_queues; i++)
402 			gve_rx_queue_release_dqo(dev, i);
403 	}
404 
405 	rte_free(priv->adminq);
406 
407 	dev->data->mac_addrs = NULL;
408 
409 	return err;
410 }
411 
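/* Describe this driver to the device: fill a gve_driver_info structure
 * (OS type, driver and DPDK versions, capability flags) in a DMA-able
 * memzone and submit it over the admin queue. Devices that do not implement
 * the command are not treated as an error.
 */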
412 static int
413 gve_verify_driver_compatibility(struct gve_priv *priv)
414 {
415 	const struct rte_memzone *driver_info_mem;
416 	struct gve_driver_info *driver_info;
417 	int err;
418 
419 	driver_info_mem = rte_memzone_reserve_aligned("verify_driver_compatibility",
420 			sizeof(struct gve_driver_info),
421 			rte_socket_id(),
422 			RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
423 
424 	if (driver_info_mem == NULL) {
425 		PMD_DRV_LOG(ERR,
426 		    "Could not alloc memzone for driver compatibility");
427 		return -ENOMEM;
428 	}
429 	driver_info = (struct gve_driver_info *)driver_info_mem->addr;
430 
431 	*driver_info = (struct gve_driver_info) {
432 		.os_type = 5, /* DPDK */
433 		.driver_major = GVE_VERSION_MAJOR,
434 		.driver_minor = GVE_VERSION_MINOR,
435 		.driver_sub = GVE_VERSION_SUB,
436 		.os_version_major = cpu_to_be32(DPDK_VERSION_MAJOR),
437 		.os_version_minor = cpu_to_be32(DPDK_VERSION_MINOR),
438 		.os_version_sub = cpu_to_be32(DPDK_VERSION_SUB),
439 		.driver_capability_flags = {
440 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
441 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
442 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
443 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
444 		},
445 	};
446 
447 	populate_driver_version_strings((char *)driver_info->os_version_str1,
448 			(char *)driver_info->os_version_str2);
449 
450 	err = gve_adminq_verify_driver_compatibility(priv,
451 		sizeof(struct gve_driver_info),
452 		(dma_addr_t)driver_info_mem->iova);
453 	/* It's ok if the device doesn't support this */
454 	if (err == -EOPNOTSUPP)
455 		err = 0;
456 
457 	rte_memzone_free(driver_info_mem);
458 	return err;
459 }
460 
461 static int
462 gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
463 {
464 	struct gve_priv *priv = dev->data->dev_private;
465 
466 	dev_info->device = dev->device;
467 	dev_info->max_mac_addrs = 1;
468 	dev_info->max_rx_queues = priv->max_nb_rxq;
469 	dev_info->max_tx_queues = priv->max_nb_txq;
470 	if (gve_is_gqi(priv)) {
471 		dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_GQI;
472 		dev_info->max_rx_bufsize = GVE_RX_MAX_BUF_SIZE_GQI;
473 	} else {
474 		dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_DQO;
475 		dev_info->max_rx_bufsize = GVE_RX_MAX_BUF_SIZE_DQO;
476 	}
477 
478 	dev_info->max_rx_pktlen = priv->max_mtu + RTE_ETHER_HDR_LEN;
479 	dev_info->max_mtu = priv->max_mtu;
480 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
481 
482 	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_RSS_HASH;
483 	dev_info->tx_offload_capa =
484 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS	|
485 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
486 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
487 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	|
488 		RTE_ETH_TX_OFFLOAD_TCP_TSO;
489 
490 	if (!gve_is_gqi(priv)) {
491 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
492 		dev_info->rx_offload_capa |=
493 				RTE_ETH_RX_OFFLOAD_IPV4_CKSUM   |
494 				RTE_ETH_RX_OFFLOAD_UDP_CKSUM	|
495 				RTE_ETH_RX_OFFLOAD_TCP_CKSUM	|
496 				RTE_ETH_RX_OFFLOAD_TCP_LRO;
497 	}
498 
499 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
500 		.rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH,
501 		.rx_drop_en = 0,
502 		.offloads = 0,
503 	};
504 
505 	dev_info->default_txconf = (struct rte_eth_txconf) {
506 		.tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH,
507 		.tx_rs_thresh = GVE_DEFAULT_TX_RS_THRESH,
508 		.offloads = 0,
509 	};
510 
511 	dev_info->default_rxportconf.ring_size = priv->default_rx_desc_cnt;
512 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
513 		.nb_max = priv->max_rx_desc_cnt,
514 		.nb_min = priv->min_rx_desc_cnt,
515 		.nb_align = 1,
516 	};
517 
518 	dev_info->default_txportconf.ring_size = priv->default_tx_desc_cnt;
519 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
520 		.nb_max = priv->max_tx_desc_cnt,
521 		.nb_min = priv->min_tx_desc_cnt,
522 		.nb_align = 1,
523 	};
524 
525 	dev_info->flow_type_rss_offloads = GVE_RTE_RSS_OFFLOAD_ALL;
526 	dev_info->hash_key_size = GVE_RSS_HASH_KEY_SIZE;
527 	dev_info->reta_size = GVE_RSS_INDIR_SIZE;
528 
529 	return 0;
530 }
531 
532 static int
533 gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
534 {
535 	uint16_t i;
536 	if (gve_is_gqi(dev->data->dev_private))
537 		gve_get_imissed_from_nic(dev);
538 
539 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
540 		struct gve_tx_queue *txq = dev->data->tx_queues[i];
541 		if (txq == NULL)
542 			continue;
543 
544 		stats->opackets += txq->stats.packets;
545 		stats->obytes += txq->stats.bytes;
546 		stats->oerrors += txq->stats.errors;
547 	}
548 
549 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
550 		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
551 		if (rxq == NULL)
552 			continue;
553 
554 		stats->ipackets += rxq->stats.packets;
555 		stats->ibytes += rxq->stats.bytes;
556 		stats->ierrors += rxq->stats.errors;
557 		stats->rx_nombuf += rxq->stats.no_mbufs;
558 		stats->imissed += rxq->stats.imissed;
559 	}
560 
561 	return 0;
562 }
563 
564 static int
565 gve_dev_stats_reset(struct rte_eth_dev *dev)
566 {
567 	uint16_t i;
568 
569 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
570 		struct gve_tx_queue *txq = dev->data->tx_queues[i];
571 		if (txq == NULL)
572 			continue;
573 
574 		memset(&txq->stats, 0, sizeof(txq->stats));
575 	}
576 
577 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
578 		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
579 		if (rxq == NULL)
580 			continue;
581 
582 		memset(&rxq->stats, 0, sizeof(rxq->stats));
583 	}
584 
585 	return 0;
586 }
587 
588 static int
589 gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
590 {
591 	struct gve_priv *priv = dev->data->dev_private;
592 	int err;
593 
594 	if (mtu < RTE_ETHER_MIN_MTU || mtu > priv->max_mtu) {
595 		PMD_DRV_LOG(ERR, "MIN MTU is %u, MAX MTU is %u",
596 			    RTE_ETHER_MIN_MTU, priv->max_mtu);
597 		return -EINVAL;
598 	}
599 
600 	/* MTU can only be changed while the port is stopped. */
601 	if (dev->data->dev_started) {
602 		PMD_DRV_LOG(ERR, "Port must be stopped before configuration");
603 		return -EBUSY;
604 	}
605 
606 	err = gve_adminq_set_mtu(priv, mtu);
607 	if (err) {
608 		PMD_DRV_LOG(ERR, "Failed to set mtu as %u err = %d", mtu, err);
609 		return err;
610 	}
611 
612 	return 0;
613 }
614 
615 #define TX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_tx_stats, x)
616 #define RX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_rx_stats, x)
617 
618 static const struct gve_xstats_name_offset tx_xstats_name_offset[] = {
619 	{ "packets", TX_QUEUE_STATS_OFFSET(packets) },
620 	{ "bytes",   TX_QUEUE_STATS_OFFSET(bytes) },
621 	{ "errors",  TX_QUEUE_STATS_OFFSET(errors) },
622 };
623 
624 static const struct gve_xstats_name_offset rx_xstats_name_offset[] = {
625 	{ "packets",                RX_QUEUE_STATS_OFFSET(packets) },
626 	{ "bytes",                  RX_QUEUE_STATS_OFFSET(bytes) },
627 	{ "errors",                 RX_QUEUE_STATS_OFFSET(errors) },
628 	{ "mbuf_alloc_errors",      RX_QUEUE_STATS_OFFSET(no_mbufs) },
629 	{ "mbuf_alloc_errors_bulk", RX_QUEUE_STATS_OFFSET(no_mbufs_bulk) },
630 	{ "imissed",                RX_QUEUE_STATS_OFFSET(imissed) },
631 };
632 
633 static int
634 gve_xstats_count(struct rte_eth_dev *dev)
635 {
636 	uint16_t i, count = 0;
637 
638 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
639 		if (dev->data->tx_queues[i])
640 			count += RTE_DIM(tx_xstats_name_offset);
641 	}
642 
643 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
644 		if (dev->data->rx_queues[i])
645 			count += RTE_DIM(rx_xstats_name_offset);
646 	}
647 
648 	return count;
649 }
650 
651 static int
652 gve_xstats_get(struct rte_eth_dev *dev,
653 			struct rte_eth_xstat *xstats,
654 			unsigned int size)
655 {
656 	uint16_t i, j, count = gve_xstats_count(dev);
657 	const char *stats;
658 
659 	if (gve_is_gqi(dev->data->dev_private))
660 		gve_get_imissed_from_nic(dev);
661 
662 	if (xstats == NULL || size < count)
663 		return count;
664 
665 	count = 0;
666 
667 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
668 		const struct gve_tx_queue *txq = dev->data->tx_queues[i];
669 		if (txq == NULL)
670 			continue;
671 
672 		stats = (const char *)&txq->stats;
673 		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++, count++) {
674 			xstats[count].id = count;
675 			xstats[count].value = *(const uint64_t *)
676 				(stats + tx_xstats_name_offset[j].offset);
677 		}
678 	}
679 
680 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
681 		const struct gve_rx_queue *rxq = dev->data->rx_queues[i];
682 		if (rxq == NULL)
683 			continue;
684 
685 		stats = (const char *)&rxq->stats;
686 		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++, count++) {
687 			xstats[count].id = count;
688 			xstats[count].value = *(const uint64_t *)
689 				(stats + rx_xstats_name_offset[j].offset);
690 		}
691 	}
692 
693 	return count;
694 }
695 
696 static int
697 gve_xstats_get_names(struct rte_eth_dev *dev,
698 			struct rte_eth_xstat_name *xstats_names,
699 			unsigned int size)
700 {
701 	uint16_t i, j, count = gve_xstats_count(dev);
702 
703 	if (xstats_names == NULL || size < count)
704 		return count;
705 
706 	count = 0;
707 
708 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
709 		if (dev->data->tx_queues[i] == NULL)
710 			continue;
711 
712 		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++)
713 			snprintf(xstats_names[count++].name,
714 				 RTE_ETH_XSTATS_NAME_SIZE,
715 				 "tx_q%u_%s", i, tx_xstats_name_offset[j].name);
716 	}
717 
718 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
719 		if (dev->data->rx_queues[i] == NULL)
720 			continue;
721 
722 		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++)
723 			snprintf(xstats_names[count++].name,
724 				 RTE_ETH_XSTATS_NAME_SIZE,
725 				 "rx_q%u_%s", i, rx_xstats_name_offset[j].name);
726 	}
727 
728 	return count;
729 }
730 
731 
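/* Update the RSS hash configuration: validate the requested hash types,
 * algorithm (Toeplitz only) and key length, build a scratch RSS config,
 * reuse the existing redirection table if one is present (otherwise generate
 * the default one) and push the result to the device.
 */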
732 static int
733 gve_rss_hash_update(struct rte_eth_dev *dev,
734 			struct rte_eth_rss_conf *rss_conf)
735 {
736 	struct gve_priv *priv = dev->data->dev_private;
737 	struct gve_rss_config gve_rss_conf;
738 	int rss_reta_size;
739 	int err;
740 
741 	if (gve_validate_rss_hf(rss_conf->rss_hf)) {
742 		PMD_DRV_LOG(ERR, "Unsupported RSS hash types requested.");
743 		return -EINVAL;
744 	}
745 
746 	if (rss_conf->algorithm != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
747 		rss_conf->algorithm != RTE_ETH_HASH_FUNCTION_DEFAULT) {
748 		PMD_DRV_LOG(ERR, "Device only supports Toeplitz algorithm.");
749 		return -EINVAL;
750 	}
751 
752 	if (rss_conf->rss_key_len) {
753 		if (rss_conf->rss_key_len != GVE_RSS_HASH_KEY_SIZE) {
754 			PMD_DRV_LOG(ERR,
755 				"Invalid hash key size. Only RSS hash key size "
756 				"of %u supported", GVE_RSS_HASH_KEY_SIZE);
757 			return -EINVAL;
758 		}
759 
760 		if (!rss_conf->rss_key) {
761 			PMD_DRV_LOG(ERR, "RSS key must be non-null.");
762 			return -EINVAL;
763 		}
764 	} else {
765 		if (!priv->rss_config.key_size) {
766 			PMD_DRV_LOG(ERR, "RSS key must be initialized before "
767 				"any other configuration.");
768 			return -EINVAL;
769 		}
770 		rss_conf->rss_key_len = priv->rss_config.key_size;
771 	}
772 
773 	rss_reta_size = priv->rss_config.indir ?
774 			priv->rss_config.indir_size :
775 			GVE_RSS_INDIR_SIZE;
776 	err = gve_init_rss_config(&gve_rss_conf, rss_conf->rss_key_len,
777 		rss_reta_size);
778 	if (err)
779 		return err;
780 
781 	gve_rss_conf.alg = GVE_RSS_HASH_TOEPLITZ;
782 	err = gve_update_rss_hash_types(priv, &gve_rss_conf, rss_conf);
783 	if (err)
784 		goto err;
785 	err = gve_update_rss_key(priv, &gve_rss_conf, rss_conf);
786 	if (err)
787 		goto err;
788 
789 	/* Set redirection table to default or preexisting. */
790 	if (!priv->rss_config.indir)
791 		gve_generate_rss_reta(dev, &gve_rss_conf);
792 	else
793 		memcpy(gve_rss_conf.indir, priv->rss_config.indir,
794 			gve_rss_conf.indir_size * sizeof(*priv->rss_config.indir));
795 
796 	err = gve_adminq_configure_rss(priv, &gve_rss_conf);
797 	if (!err)
798 		gve_update_priv_rss_config(priv, &gve_rss_conf);
799 
800 err:
801 	gve_free_rss_config(&gve_rss_conf);
802 	return err;
803 }
804 
805 static int
806 gve_rss_hash_conf_get(struct rte_eth_dev *dev,
807 			struct rte_eth_rss_conf *rss_conf)
808 {
809 	struct gve_priv *priv = dev->data->dev_private;
810 
811 	if (!(dev->data->dev_conf.rxmode.offloads &
812 			RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
813 		PMD_DRV_LOG(ERR, "RSS not configured.");
814 		return -ENOTSUP;
815 	}
816 
817 
818 	gve_to_rte_rss_hf(priv->rss_config.hash_types, rss_conf);
819 	rss_conf->rss_key_len = priv->rss_config.key_size;
820 	if (rss_conf->rss_key) {
821 		if (!priv->rss_config.key) {
822 			PMD_DRV_LOG(ERR, "Unable to retrieve default RSS hash key.");
823 			return -ENOTSUP;
824 		}
825 		memcpy(rss_conf->rss_key, priv->rss_config.key,
826 			rss_conf->rss_key_len * sizeof(*rss_conf->rss_key));
827 	}
828 
829 	return 0;
830 }
831 
832 static int
833 gve_rss_reta_update(struct rte_eth_dev *dev,
834 	struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
835 {
836 	struct gve_priv *priv = dev->data->dev_private;
837 	struct gve_rss_config gve_rss_conf;
838 	int table_id;
839 	int err;
840 	int i;
841 
842 	/* RSS key must be set before the redirection table can be set. */
843 	if (!priv->rss_config.key || priv->rss_config.key_size == 0) {
844 		PMD_DRV_LOG(ERR, "RSS hash key must be set before the "
845 			"redirection table can be updated.");
846 		return -ENOTSUP;
847 	}
848 
849 	if (reta_size != GVE_RSS_INDIR_SIZE) {
850 		PMD_DRV_LOG(ERR, "Redirection table must have %hu elements",
851 			(uint16_t)GVE_RSS_INDIR_SIZE);
852 		return -EINVAL;
853 	}
854 
855 	err = gve_init_rss_config_from_priv(priv, &gve_rss_conf);
856 	if (err) {
857 		PMD_DRV_LOG(ERR, "Error allocating new RSS config.");
858 		return err;
859 	}
860 
861 	table_id = 0;
862 	for (i = 0; i < priv->rss_config.indir_size; i++) {
863 		int table_entry = i % RTE_ETH_RETA_GROUP_SIZE;
864 		if (reta_conf[table_id].mask & (1ULL << table_entry))
865 			gve_rss_conf.indir[i] =
866 				reta_conf[table_id].reta[table_entry];
867 
868 		if (table_entry == RTE_ETH_RETA_GROUP_SIZE - 1)
869 			table_id++;
870 	}
871 
872 	err = gve_adminq_configure_rss(priv, &gve_rss_conf);
873 	if (err)
874 		PMD_DRV_LOG(ERR, "Problem configuring RSS with device.");
875 	else
876 		gve_update_priv_rss_config(priv, &gve_rss_conf);
877 
878 	gve_free_rss_config(&gve_rss_conf);
879 	return err;
880 }
881 
882 static int
883 gve_rss_reta_query(struct rte_eth_dev *dev,
884 	struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
885 {
886 	struct gve_priv *priv = dev->data->dev_private;
887 	int table_id;
888 	int i;
889 
890 	if (!(dev->data->dev_conf.rxmode.offloads &
891 		RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
892 		PMD_DRV_LOG(ERR, "RSS not configured.");
893 		return -ENOTSUP;
894 	}
895 
896 	/* RSS key must be set before the redirection table can be queried. */
897 	if (!priv->rss_config.key) {
898 		PMD_DRV_LOG(ERR, "RSS hash key must be set before the "
899 			"redirection table can be queried.");
900 		return -ENOTSUP;
901 	}
902 
903 	if (reta_size != priv->rss_config.indir_size) {
904 		PMD_DRV_LOG(ERR, "RSS redirection table must have %d entries.",
905 			priv->rss_config.indir_size);
906 		return -EINVAL;
907 	}
908 
909 	table_id = 0;
910 	for (i = 0; i < priv->rss_config.indir_size; i++) {
911 		int table_entry = i % RTE_ETH_RETA_GROUP_SIZE;
912 		if (reta_conf[table_id].mask & (1ULL << table_entry))
913 			reta_conf[table_id].reta[table_entry] =
914 				priv->rss_config.indir[i];
915 
916 		if (table_entry == RTE_ETH_RETA_GROUP_SIZE - 1)
917 			table_id++;
918 	}
919 
920 	return 0;
921 }
922 
923 static const struct eth_dev_ops gve_eth_dev_ops = {
924 	.dev_configure        = gve_dev_configure,
925 	.dev_start            = gve_dev_start,
926 	.dev_stop             = gve_dev_stop,
927 	.dev_close            = gve_dev_close,
928 	.dev_infos_get        = gve_dev_info_get,
929 	.rx_queue_setup       = gve_rx_queue_setup,
930 	.tx_queue_setup       = gve_tx_queue_setup,
931 	.rx_queue_release     = gve_rx_queue_release,
932 	.tx_queue_release     = gve_tx_queue_release,
933 	.rx_queue_start       = gve_rx_queue_start,
934 	.tx_queue_start       = gve_tx_queue_start,
935 	.rx_queue_stop        = gve_rx_queue_stop,
936 	.tx_queue_stop        = gve_tx_queue_stop,
937 	.link_update          = gve_link_update,
938 	.stats_get            = gve_dev_stats_get,
939 	.stats_reset          = gve_dev_stats_reset,
940 	.mtu_set              = gve_dev_mtu_set,
941 	.xstats_get           = gve_xstats_get,
942 	.xstats_get_names     = gve_xstats_get_names,
943 	.rss_hash_update      = gve_rss_hash_update,
944 	.rss_hash_conf_get    = gve_rss_hash_conf_get,
945 	.reta_update          = gve_rss_reta_update,
946 	.reta_query           = gve_rss_reta_query,
947 };
948 
949 static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
950 	.dev_configure        = gve_dev_configure,
951 	.dev_start            = gve_dev_start,
952 	.dev_stop             = gve_dev_stop,
953 	.dev_close            = gve_dev_close,
954 	.dev_infos_get        = gve_dev_info_get,
955 	.rx_queue_setup       = gve_rx_queue_setup_dqo,
956 	.tx_queue_setup       = gve_tx_queue_setup_dqo,
957 	.rx_queue_release     = gve_rx_queue_release_dqo,
958 	.tx_queue_release     = gve_tx_queue_release_dqo,
959 	.rx_queue_start       = gve_rx_queue_start_dqo,
960 	.tx_queue_start       = gve_tx_queue_start_dqo,
961 	.rx_queue_stop        = gve_rx_queue_stop_dqo,
962 	.tx_queue_stop        = gve_tx_queue_stop_dqo,
963 	.link_update          = gve_link_update,
964 	.stats_get            = gve_dev_stats_get,
965 	.stats_reset          = gve_dev_stats_reset,
966 	.mtu_set              = gve_dev_mtu_set,
967 	.xstats_get           = gve_xstats_get,
968 	.xstats_get_names     = gve_xstats_get_names,
969 	.rss_hash_update      = gve_rss_hash_update,
970 	.rss_hash_conf_get    = gve_rss_hash_conf_get,
971 	.reta_update          = gve_rss_reta_update,
972 	.reta_query           = gve_rss_reta_query,
973 };
974 
975 static void
976 gve_free_counter_array(struct gve_priv *priv)
977 {
978 	rte_memzone_free(priv->cnt_array_mz);
979 	priv->cnt_array = NULL;
980 }
981 
982 static void
983 gve_free_irq_db(struct gve_priv *priv)
984 {
985 	rte_memzone_free(priv->irq_dbs_mz);
986 	priv->irq_dbs = NULL;
987 }
988 
989 static void
990 gve_teardown_device_resources(struct gve_priv *priv)
991 {
992 	int err;
993 
994 	/* Tell device its resources are being freed */
995 	if (gve_get_device_resources_ok(priv)) {
996 		err = gve_adminq_deconfigure_device_resources(priv);
997 		if (err)
998 			PMD_DRV_LOG(ERR, "Could not deconfigure device resources: err=%d", err);
999 	}
1000 
1001 	if (!gve_is_gqi(priv)) {
1002 		rte_free(priv->ptype_lut_dqo);
1003 		priv->ptype_lut_dqo = NULL;
1004 	}
1005 	gve_free_counter_array(priv);
1006 	gve_free_irq_db(priv);
1007 	gve_clear_device_resources_ok(priv);
1008 }
1009 
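/* Return the MSI-X table size advertised by the PCI MSI-X capability,
 * or 0 if the capability is absent or cannot be read.
 */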
1010 static int
1011 pci_dev_msix_vec_count(struct rte_pci_device *pdev)
1012 {
1013 	off_t msix_pos = rte_pci_find_capability(pdev, RTE_PCI_CAP_ID_MSIX);
1014 	uint16_t control;
1015 
1016 	if (msix_pos > 0 && rte_pci_read_config(pdev, &control, sizeof(control),
1017 			msix_pos + RTE_PCI_MSIX_FLAGS) == sizeof(control))
1018 		return (control & RTE_PCI_MSIX_FLAGS_QSIZE) + 1;
1019 
1020 	return 0;
1021 }
1022 
1023 static int
1024 gve_setup_device_resources(struct gve_priv *priv)
1025 {
1026 	char z_name[RTE_MEMZONE_NAMESIZE];
1027 	const struct rte_memzone *mz;
1028 	int err = 0;
1029 
1030 	snprintf(z_name, sizeof(z_name), "gve_%s_cnt_arr", priv->pci_dev->device.name);
1031 	mz = rte_memzone_reserve_aligned(z_name,
1032 					 priv->num_event_counters * sizeof(*priv->cnt_array),
1033 					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
1034 					 PAGE_SIZE);
1035 	if (mz == NULL) {
1036 		PMD_DRV_LOG(ERR, "Could not alloc memzone for count array");
1037 		return -ENOMEM;
1038 	}
1039 	priv->cnt_array = (rte_be32_t *)mz->addr;
1040 	priv->cnt_array_mz = mz;
1041 
1042 	snprintf(z_name, sizeof(z_name), "gve_%s_irqmz", priv->pci_dev->device.name);
1043 	mz = rte_memzone_reserve_aligned(z_name,
1044 					 sizeof(*priv->irq_dbs) * (priv->num_ntfy_blks),
1045 					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
1046 					 PAGE_SIZE);
1047 	if (mz == NULL) {
1048 		PMD_DRV_LOG(ERR, "Could not alloc memzone for irq_dbs");
1049 		err = -ENOMEM;
1050 		goto free_cnt_array;
1051 	}
1052 	priv->irq_dbs = (struct gve_irq_db *)mz->addr;
1053 	priv->irq_dbs_mz = mz;
1054 
1055 	err = gve_adminq_configure_device_resources(priv,
1056 						    priv->cnt_array_mz->iova,
1057 						    priv->num_event_counters,
1058 						    priv->irq_dbs_mz->iova,
1059 						    priv->num_ntfy_blks);
1060 	if (unlikely(err)) {
1061 		PMD_DRV_LOG(ERR, "Could not config device resources: err=%d", err);
1062 		goto free_irq_dbs;
1063 	}
1064 	if (!gve_is_gqi(priv)) {
1065 		priv->ptype_lut_dqo = rte_zmalloc("gve_ptype_lut_dqo",
1066 			sizeof(struct gve_ptype_lut), 0);
1067 		if (priv->ptype_lut_dqo == NULL) {
1068 			PMD_DRV_LOG(ERR, "Failed to alloc ptype lut.");
1069 			err = -ENOMEM;
1070 			goto free_irq_dbs;
1071 		}
1072 		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
1073 		if (unlikely(err)) {
1074 			PMD_DRV_LOG(ERR, "Failed to get ptype map: err=%d", err);
1075 			goto free_ptype_lut;
1076 		}
1077 	}
1078 
1079 	return 0;
1080 free_ptype_lut:
1081 	rte_free(priv->ptype_lut_dqo);
1082 	priv->ptype_lut_dqo = NULL;
1083 free_irq_dbs:
1084 	gve_free_irq_db(priv);
1085 free_cnt_array:
1086 	gve_free_counter_array(priv);
1087 
1088 	return err;
1089 }
1090 
1091 static void
1092 gve_set_default_ring_size_bounds(struct gve_priv *priv)
1093 {
1094 	priv->max_tx_desc_cnt = GVE_DEFAULT_MAX_RING_SIZE;
1095 	priv->max_rx_desc_cnt = GVE_DEFAULT_MAX_RING_SIZE;
1096 	priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
1097 	priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
1098 }
1099 
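/* Initialize the device-private structure: bring up the admin queue, verify
 * driver compatibility, optionally query the device description, derive the
 * notification block and queue limits from the MSI-X vector count and set up
 * the shared device resources.
 */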
1100 static int
1101 gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1102 {
1103 	int num_ntfy;
1104 	int err;
1105 
1106 	/* Set up the adminq */
1107 	err = gve_adminq_alloc(priv);
1108 	if (err) {
1109 		PMD_DRV_LOG(ERR, "Failed to alloc admin queue: err=%d", err);
1110 		return err;
1111 	}
1112 	err = gve_verify_driver_compatibility(priv);
1113 	if (err) {
1114 		PMD_DRV_LOG(ERR, "Could not verify driver compatibility: err=%d", err);
1115 		goto free_adminq;
1116 	}
1117 
1118 	/* Set default descriptor counts */
1119 	gve_set_default_ring_size_bounds(priv);
1120 
1121 	if (skip_describe_device)
1122 		goto setup_device;
1123 
1124 	/* Get the initial information we need from the device */
1125 	err = gve_adminq_describe_device(priv);
1126 	if (err) {
1127 		PMD_DRV_LOG(ERR, "Could not get device information: err=%d", err);
1128 		goto free_adminq;
1129 	}
1130 
1131 	num_ntfy = pci_dev_msix_vec_count(priv->pci_dev);
1132 	if (num_ntfy <= 0) {
1133 		PMD_DRV_LOG(ERR, "Could not count MSI-x vectors");
1134 		err = -EIO;
1135 		goto free_adminq;
1136 	} else if (num_ntfy < GVE_MIN_MSIX) {
1137 		PMD_DRV_LOG(ERR, "GVE needs at least %d MSI-x vectors, but only has %d",
1138 			    GVE_MIN_MSIX, num_ntfy);
1139 		err = -EINVAL;
1140 		goto free_adminq;
1141 	}
1142 
1143 	priv->num_registered_pages = 0;
1144 
1145 	/* gvnic has one Notification Block per MSI-x vector, except for the
1146 	 * management vector
1147 	 */
1148 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1149 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
1150 
1151 	priv->max_nb_txq = RTE_MIN(priv->max_nb_txq, priv->num_ntfy_blks / 2);
1152 	priv->max_nb_rxq = RTE_MIN(priv->max_nb_rxq, priv->num_ntfy_blks / 2);
1153 
1154 	if (priv->default_num_queues > 0) {
1155 		priv->max_nb_txq = RTE_MIN(priv->default_num_queues, priv->max_nb_txq);
1156 		priv->max_nb_rxq = RTE_MIN(priv->default_num_queues, priv->max_nb_rxq);
1157 	}
1158 
1159 	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
1160 		    priv->max_nb_txq, priv->max_nb_rxq);
1161 
1162 setup_device:
1163 	err = gve_setup_device_resources(priv);
1164 	if (!err)
1165 		return 0;
1166 free_adminq:
1167 	gve_adminq_free(priv);
1168 	return err;
1169 }
1170 
1171 static void
1172 gve_teardown_priv_resources(struct gve_priv *priv)
1173 {
1174 	gve_teardown_device_resources(priv);
1175 	gve_adminq_free(priv);
1176 }
1177 
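/* ethdev init. Secondary processes only attach the datapath functions; the
 * primary process maps the register and doorbell BARs, reads the queue
 * limits advertised by the device, initializes the private data and selects
 * the GQI or DQO ops table.
 */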
1178 static int
1179 gve_dev_init(struct rte_eth_dev *eth_dev)
1180 {
1181 	struct gve_priv *priv = eth_dev->data->dev_private;
1182 	int max_tx_queues, max_rx_queues;
1183 	struct rte_pci_device *pci_dev;
1184 	struct gve_registers *reg_bar;
1185 	rte_be32_t *db_bar;
1186 	int err;
1187 
1188 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1189 		if (gve_is_gqi(priv)) {
1190 			gve_set_rx_function(eth_dev);
1191 			gve_set_tx_function(eth_dev);
1192 			eth_dev->dev_ops = &gve_eth_dev_ops;
1193 		} else {
1194 			gve_set_rx_function_dqo(eth_dev);
1195 			gve_set_tx_function_dqo(eth_dev);
1196 			eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
1197 		}
1198 		return 0;
1199 	}
1200 
1201 	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1202 
1203 	reg_bar = pci_dev->mem_resource[GVE_REG_BAR].addr;
1204 	if (!reg_bar) {
1205 		PMD_DRV_LOG(ERR, "Failed to map pci bar!");
1206 		return -ENOMEM;
1207 	}
1208 
1209 	db_bar = pci_dev->mem_resource[GVE_DB_BAR].addr;
1210 	if (!db_bar) {
1211 		PMD_DRV_LOG(ERR, "Failed to map doorbell bar!");
1212 		return -ENOMEM;
1213 	}
1214 
1215 	gve_write_version(&reg_bar->driver_version);
1216 	/* Get max queues to alloc etherdev */
1217 	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1218 	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1219 
1220 	priv->reg_bar0 = reg_bar;
1221 	priv->db_bar2 = db_bar;
1222 	priv->pci_dev = pci_dev;
1223 	priv->state_flags = 0x0;
1224 
1225 	priv->max_nb_txq = max_tx_queues;
1226 	priv->max_nb_rxq = max_rx_queues;
1227 
1228 	err = gve_init_priv(priv, false);
1229 	if (err)
1230 		return err;
1231 
1232 	if (gve_is_gqi(priv)) {
1233 		eth_dev->dev_ops = &gve_eth_dev_ops;
1234 		gve_set_rx_function(eth_dev);
1235 		gve_set_tx_function(eth_dev);
1236 	} else {
1237 		eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
1238 		gve_set_rx_function_dqo(eth_dev);
1239 		gve_set_tx_function_dqo(eth_dev);
1240 	}
1241 
1242 	eth_dev->data->mac_addrs = &priv->dev_addr;
1243 
1244 	return 0;
1245 }
1246 
1247 static int
1248 gve_dev_uninit(struct rte_eth_dev *eth_dev)
1249 {
1250 	struct gve_priv *priv = eth_dev->data->dev_private;
1251 
1252 	gve_teardown_priv_resources(priv);
1253 
1254 	eth_dev->data->mac_addrs = NULL;
1255 
1256 	return 0;
1257 }
1258 
1259 static int
1260 gve_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
1261 	      struct rte_pci_device *pci_dev)
1262 {
1263 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct gve_priv), gve_dev_init);
1264 }
1265 
1266 static int
1267 gve_pci_remove(struct rte_pci_device *pci_dev)
1268 {
1269 	return rte_eth_dev_pci_generic_remove(pci_dev, gve_dev_uninit);
1270 }
1271 
1272 static const struct rte_pci_id pci_id_gve_map[] = {
1273 	{ RTE_PCI_DEVICE(GOOGLE_VENDOR_ID, GVE_DEV_ID) },
1274 	{ .device_id = 0 },
1275 };
1276 
1277 static struct rte_pci_driver rte_gve_pmd = {
1278 	.id_table = pci_id_gve_map,
1279 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1280 	.probe = gve_pci_probe,
1281 	.remove = gve_pci_remove,
1282 };
1283 
1284 RTE_PMD_REGISTER_PCI(net_gve, rte_gve_pmd);
1285 RTE_PMD_REGISTER_PCI_TABLE(net_gve, pci_id_gve_map);
1286 RTE_PMD_REGISTER_KMOD_DEP(net_gve, "* igb_uio | vfio-pci");
1287 RTE_LOG_REGISTER_SUFFIX(gve_logtype_driver, driver, NOTICE);
1288