xref: /dpdk/drivers/net/gve/gve_ethdev.c (revision cfa443351ef581b7189467842ca102ab710cb7d2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2022-2023 Intel Corporation
3  * Copyright(C) 2023 Google LLC
4  */
5 
6 #include "gve_ethdev.h"
7 #include "base/gve_adminq.h"
8 #include "base/gve_register.h"
9 #include "base/gve_osdep.h"
10 #include "gve_version.h"
11 #include <rte_ether.h>
12 #include "gve_rss.h"
13 
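/* Write the driver version string, one byte at a time, to the device's
 * driver version register, terminated by a newline.
 */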
14 static void
15 gve_write_version(uint8_t *driver_version_register)
16 {
17 	const char *c = gve_version_string();
18 	while (*c) {
19 		writeb(*c, driver_version_register);
20 		c++;
21 	}
22 	writeb('\n', driver_version_register);
23 }
24 
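/* Reserve an IOVA-contiguous memzone for queue page list 'id' and record
 * the bus address of each page, enforcing the device's registered-pages limit.
 */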
25 static int
26 gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
27 {
28 	char z_name[RTE_MEMZONE_NAMESIZE];
29 	struct gve_queue_page_list *qpl;
30 	const struct rte_memzone *mz;
31 	dma_addr_t page_bus;
32 	uint32_t i;
33 
34 	if (priv->num_registered_pages + pages >
35 	    priv->max_registered_pages) {
36 		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
37 			    priv->num_registered_pages + pages,
38 			    priv->max_registered_pages);
39 		return -EINVAL;
40 	}
41 	qpl = &priv->qpl[id];
42 	snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
43 	mz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,
44 					 rte_socket_id(),
45 					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
46 	if (mz == NULL) {
47 		PMD_DRV_LOG(ERR, "Failed to alloc %s.", z_name);
48 		return -ENOMEM;
49 	}
50 	qpl->page_buses = rte_zmalloc("qpl page buses", pages * sizeof(dma_addr_t), 0);
51 	if (qpl->page_buses == NULL) {
52 		PMD_DRV_LOG(ERR, "Failed to alloc qpl %u page buses", id);
		rte_memzone_free(mz); /* avoid leaking the memzone on failure */
53 		return -ENOMEM;
54 	}
55 	page_bus = mz->iova;
56 	for (i = 0; i < pages; i++) {
57 		qpl->page_buses[i] = page_bus;
58 		page_bus += PAGE_SIZE;
59 	}
60 	qpl->id = id;
61 	qpl->mz = mz;
62 	qpl->num_entries = pages;
63 
64 	priv->num_registered_pages += pages;
65 
66 	return 0;
67 }
68 
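/* Free the memzone and page-bus array of every queue page list.
 * Only meaningful for the GQI_QPL queue format.
 */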
69 static void
70 gve_free_qpls(struct gve_priv *priv)
71 {
72 	uint16_t nb_txqs = priv->max_nb_txq;
73 	uint16_t nb_rxqs = priv->max_nb_rxq;
74 	uint32_t i;
75 
76 	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
77 		return;
78 
79 	for (i = 0; i < nb_txqs + nb_rxqs; i++) {
80 		rte_memzone_free(priv->qpl[i].mz);
81 		rte_free(priv->qpl[i].page_buses);
82 	}
83 
84 	rte_free(priv->qpl);
85 }
86 
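/* Apply the ethdev configuration: enable the RSS hash offload and Toeplitz
 * hashing when RSS multi-queue is requested, enable RSC for LRO, and rebuild
 * the RSS redirection table if one was already programmed.
 */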
87 static int
88 gve_dev_configure(struct rte_eth_dev *dev)
89 {
90 	struct gve_priv *priv = dev->data->dev_private;
91 
92 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
93 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
94 		priv->rss_config.alg = GVE_RSS_HASH_TOEPLITZ;
95 	}
96 
97 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
98 		priv->enable_rsc = 1;
99 
100 	/* Reset RSS RETA in case the number of queues changed. */
101 	if (priv->rss_config.indir) {
102 		struct gve_rss_config update_reta_config;
103 		gve_init_rss_config_from_priv(priv, &update_reta_config);
104 		gve_generate_rss_reta(dev, &update_reta_config);
105 
106 		int err = gve_adminq_configure_rss(priv, &update_reta_config);
107 		if (err)
108 			PMD_DRV_LOG(ERR,
109 				"Could not reconfigure RSS redirection table.");
110 		else
111 			gve_update_priv_rss_config(priv, &update_reta_config);
112 
113 		gve_free_rss_config(&update_reta_config);
114 		return err;
115 	}
116 
117 	return 0;
118 }
119 
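/* Report link status to the ethdev layer; the link speed is queried from
 * the device only while the port is started.
 */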
120 static int
121 gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
122 {
123 	struct gve_priv *priv = dev->data->dev_private;
124 	struct rte_eth_link link;
125 	int err;
126 
127 	memset(&link, 0, sizeof(link));
128 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
129 	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
130 
131 	if (!dev->data->dev_started) {
132 		link.link_status = RTE_ETH_LINK_DOWN;
133 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
134 	} else {
135 		link.link_status = RTE_ETH_LINK_UP;
136 		PMD_DRV_LOG(DEBUG, "Get link status from hw");
137 		err = gve_adminq_report_link_speed(priv);
138 		if (err) {
139 			PMD_DRV_LOG(ERR, "Failed to get link speed.");
140 			priv->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
141 		}
142 		link.link_speed = priv->link_speed;
143 	}
144 
145 	return rte_eth_linkstatus_set(dev, &link);
146 }
147 
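/* Reserve the stats report region shared with the device, sized for both
 * driver-written and NIC-written per-queue stats, and remember the index
 * range of the NIC-written entries.
 */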
148 static int
149 gve_alloc_stats_report(struct gve_priv *priv,
150 		uint16_t nb_tx_queues, uint16_t nb_rx_queues)
151 {
152 	char z_name[RTE_MEMZONE_NAMESIZE];
153 	int tx_stats_cnt;
154 	int rx_stats_cnt;
155 
156 	tx_stats_cnt = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
157 		nb_tx_queues;
158 	rx_stats_cnt = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
159 		nb_rx_queues;
160 	priv->stats_report_len = sizeof(struct gve_stats_report) +
161 		sizeof(struct stats) * (tx_stats_cnt + rx_stats_cnt);
162 
163 	snprintf(z_name, sizeof(z_name), "gve_stats_report_%s",
164 			priv->pci_dev->device.name);
165 	priv->stats_report_mem = rte_memzone_reserve_aligned(z_name,
166 			priv->stats_report_len,
167 			rte_socket_id(),
168 			RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
169 
170 	if (!priv->stats_report_mem)
171 		return -ENOMEM;
172 
173 	/* offset by skipping stats written by gve. */
174 	priv->stats_start_idx = (GVE_TX_STATS_REPORT_NUM * nb_tx_queues) +
175 		(GVE_RX_STATS_REPORT_NUM * nb_rx_queues);
176 	priv->stats_end_idx = priv->stats_start_idx +
177 		(NIC_TX_STATS_REPORT_NUM * nb_tx_queues) +
178 		(NIC_RX_STATS_REPORT_NUM * nb_rx_queues) - 1;
179 
180 	return 0;
181 }
182 
183 static void
184 gve_free_stats_report(struct rte_eth_dev *dev)
185 {
186 	struct gve_priv *priv = dev->data->dev_private;
187 	rte_memzone_free(priv->stats_report_mem);
188 	priv->stats_report_mem = NULL;
189 }
190 
191 /* Read Rx NIC stats from shared region */
192 static void
193 gve_get_imissed_from_nic(struct rte_eth_dev *dev)
194 {
195 	struct gve_stats_report *stats_report;
196 	struct gve_rx_queue *rxq;
197 	struct gve_priv *priv;
198 	struct stats stat;
199 	int queue_id;
200 	int stat_id;
201 	int i;
202 
203 	priv = dev->data->dev_private;
204 	if (!priv->stats_report_mem)
205 		return;
206 	stats_report = (struct gve_stats_report *)
207 		priv->stats_report_mem->addr;
208 	for (i = priv->stats_start_idx; i <= priv->stats_end_idx; i++) {
209 		stat = stats_report->stats[i];
210 		queue_id = cpu_to_be32(stat.queue_id);
211 		rxq = dev->data->rx_queues[queue_id];
212 		if (rxq == NULL)
213 			continue;
214 		stat_id = cpu_to_be32(stat.stat_name);
215 		/* Update imissed. */
216 		if (stat_id == RX_NO_BUFFERS_POSTED)
217 			rxq->stats.imissed = cpu_to_be64(stat.value);
218 	}
219 }
220 
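/* Create all Tx and Rx queues on the device via the admin queue and start
 * them; on failure, stop whatever was already started.
 */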
221 static int
222 gve_start_queues(struct rte_eth_dev *dev)
223 {
224 	struct gve_priv *priv = dev->data->dev_private;
225 	uint16_t num_queues;
226 	uint16_t i;
227 	int ret;
228 
229 	num_queues = dev->data->nb_tx_queues;
230 	priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;
231 	ret = gve_adminq_create_tx_queues(priv, num_queues);
232 	if (ret != 0) {
233 		PMD_DRV_LOG(ERR, "Failed to create %u tx queues.", num_queues);
234 		return ret;
235 	}
236 	for (i = 0; i < num_queues; i++)
237 		if (gve_tx_queue_start(dev, i) != 0) {
238 			PMD_DRV_LOG(ERR, "Failed to start Tx queue %d", i);
239 			goto err_tx;
240 		}
241 
242 	num_queues = dev->data->nb_rx_queues;
243 	priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;
244 	ret = gve_adminq_create_rx_queues(priv, num_queues);
245 	if (ret != 0) {
246 		PMD_DRV_LOG(ERR, "Failed to create %u rx queues.", num_queues);
247 		goto err_tx;
248 	}
249 	for (i = 0; i < num_queues; i++) {
250 		if (gve_is_gqi(priv))
251 			ret = gve_rx_queue_start(dev, i);
252 		else
253 			ret = gve_rx_queue_start_dqo(dev, i);
254 		if (ret != 0) {
255 			PMD_DRV_LOG(ERR, "Failed to start Rx queue %d", i);
256 			goto err_rx;
257 		}
258 	}
259 
260 	return 0;
261 
262 err_rx:
263 	gve_stop_rx_queues(dev);
264 err_tx:
265 	gve_stop_tx_queues(dev);
266 	return ret;
267 }
268 
269 static int
270 gve_dev_start(struct rte_eth_dev *dev)
271 {
272 	struct gve_priv *priv;
273 	int ret;
274 
275 	ret = gve_start_queues(dev);
276 	if (ret != 0) {
277 		PMD_DRV_LOG(ERR, "Failed to start queues");
278 		return ret;
279 	}
280 
281 	dev->data->dev_started = 1;
282 	gve_link_update(dev, 0);
283 
284 	priv = dev->data->dev_private;
285 	/* No stats available yet for Dqo. */
286 	if (gve_is_gqi(priv)) {
287 		ret = gve_alloc_stats_report(priv,
288 				dev->data->nb_tx_queues,
289 				dev->data->nb_rx_queues);
290 		if (ret != 0) {
291 			PMD_DRV_LOG(ERR,
292 				"Failed to allocate region for stats reporting.");
293 			return ret;
294 		}
295 		ret = gve_adminq_report_stats(priv, priv->stats_report_len,
296 				priv->stats_report_mem->iova,
297 				GVE_STATS_REPORT_TIMER_PERIOD);
298 		if (ret != 0) {
299 			PMD_DRV_LOG(ERR, "gve_adminq_report_stats command failed.");
300 			return ret;
301 		}
302 	}
303 
304 	return 0;
305 }
306 
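/* Stop all queues and mark the link down; on GQI devices also release the
 * stats report region.
 */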
307 static int
308 gve_dev_stop(struct rte_eth_dev *dev)
309 {
310 	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
311 
312 	gve_stop_tx_queues(dev);
313 	gve_stop_rx_queues(dev);
314 
315 	dev->data->dev_started = 0;
316 
317 	if (gve_is_gqi(dev->data->dev_private))
318 		gve_free_stats_report(dev);
319 
320 	return 0;
321 }
322 
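/* Stop the port if needed, release every Tx/Rx queue (GQI or DQO variant),
 * then free the queue page lists and the admin queue.
 */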
323 static int
324 gve_dev_close(struct rte_eth_dev *dev)
325 {
326 	struct gve_priv *priv = dev->data->dev_private;
327 	int err = 0;
328 	uint16_t i;
329 
330 	if (dev->data->dev_started) {
331 		err = gve_dev_stop(dev);
332 		if (err != 0)
333 			PMD_DRV_LOG(ERR, "Failed to stop dev.");
334 	}
335 
336 	if (gve_is_gqi(priv)) {
337 		for (i = 0; i < dev->data->nb_tx_queues; i++)
338 			gve_tx_queue_release(dev, i);
339 
340 		for (i = 0; i < dev->data->nb_rx_queues; i++)
341 			gve_rx_queue_release(dev, i);
342 	} else {
343 		for (i = 0; i < dev->data->nb_tx_queues; i++)
344 			gve_tx_queue_release_dqo(dev, i);
345 
346 		for (i = 0; i < dev->data->nb_rx_queues; i++)
347 			gve_rx_queue_release_dqo(dev, i);
348 	}
349 
350 	gve_free_qpls(priv);
351 	rte_free(priv->adminq);
352 
353 	dev->data->mac_addrs = NULL;
354 
355 	return err;
356 }
357 
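/* Share driver and OS version information plus capability flags with the
 * device through a DMA region; devices that do not support the command are
 * tolerated.
 */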
358 static int
359 gve_verify_driver_compatibility(struct gve_priv *priv)
360 {
361 	const struct rte_memzone *driver_info_mem;
362 	struct gve_driver_info *driver_info;
363 	int err;
364 
365 	driver_info_mem = rte_memzone_reserve_aligned("verify_driver_compatibility",
366 			sizeof(struct gve_driver_info),
367 			rte_socket_id(),
368 			RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
369 
370 	if (driver_info_mem == NULL) {
371 		PMD_DRV_LOG(ERR,
372 		    "Could not alloc memzone for driver compatibility");
373 		return -ENOMEM;
374 	}
375 	driver_info = (struct gve_driver_info *)driver_info_mem->addr;
376 
377 	*driver_info = (struct gve_driver_info) {
378 		.os_type = 5, /* DPDK */
379 		.driver_major = GVE_VERSION_MAJOR,
380 		.driver_minor = GVE_VERSION_MINOR,
381 		.driver_sub = GVE_VERSION_SUB,
382 		.os_version_major = cpu_to_be32(DPDK_VERSION_MAJOR),
383 		.os_version_minor = cpu_to_be32(DPDK_VERSION_MINOR),
384 		.os_version_sub = cpu_to_be32(DPDK_VERSION_SUB),
385 		.driver_capability_flags = {
386 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
387 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
388 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
389 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
390 		},
391 	};
392 
393 	populate_driver_version_strings((char *)driver_info->os_version_str1,
394 			(char *)driver_info->os_version_str2);
395 
396 	err = gve_adminq_verify_driver_compatibility(priv,
397 		sizeof(struct gve_driver_info),
398 		(dma_addr_t)driver_info_mem->iova);
399 	/* It's ok if the device doesn't support this */
400 	if (err == -EOPNOTSUPP)
401 		err = 0;
402 
403 	rte_memzone_free(driver_info_mem);
404 	return err;
405 }
406 
407 static int
408 gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
409 {
410 	struct gve_priv *priv = dev->data->dev_private;
411 
412 	dev_info->device = dev->device;
413 	dev_info->max_mac_addrs = 1;
414 	dev_info->max_rx_queues = priv->max_nb_rxq;
415 	dev_info->max_tx_queues = priv->max_nb_txq;
416 	if (gve_is_gqi(priv)) {
417 		dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_GQI;
418 		dev_info->max_rx_bufsize = GVE_RX_MAX_BUF_SIZE_GQI;
419 	} else {
420 		dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_DQO;
421 		dev_info->max_rx_bufsize = GVE_RX_MAX_BUF_SIZE_DQO;
422 	}
423 
424 	dev_info->max_rx_pktlen = priv->max_mtu + RTE_ETHER_HDR_LEN;
425 	dev_info->max_mtu = priv->max_mtu;
426 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
427 
428 	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_RSS_HASH;
429 	dev_info->tx_offload_capa =
430 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS	|
431 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
432 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
433 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	|
434 		RTE_ETH_TX_OFFLOAD_TCP_TSO;
435 
436 	if (!gve_is_gqi(priv)) {
437 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
438 		dev_info->rx_offload_capa |=
439 				RTE_ETH_RX_OFFLOAD_IPV4_CKSUM   |
440 				RTE_ETH_RX_OFFLOAD_UDP_CKSUM	|
441 				RTE_ETH_RX_OFFLOAD_TCP_CKSUM	|
442 				RTE_ETH_RX_OFFLOAD_TCP_LRO;
443 	}
444 
445 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
446 		.rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH,
447 		.rx_drop_en = 0,
448 		.offloads = 0,
449 	};
450 
451 	dev_info->default_txconf = (struct rte_eth_txconf) {
452 		.tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH,
453 		.tx_rs_thresh = GVE_DEFAULT_TX_RS_THRESH,
454 		.offloads = 0,
455 	};
456 
457 	dev_info->default_rxportconf.ring_size = priv->rx_desc_cnt;
458 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
459 		.nb_max = gve_is_gqi(priv) ? priv->rx_desc_cnt : GVE_MAX_QUEUE_SIZE_DQO,
460 		.nb_min = priv->rx_desc_cnt,
461 		.nb_align = 1,
462 	};
463 
464 	dev_info->default_txportconf.ring_size = priv->tx_desc_cnt;
465 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
466 		.nb_max = gve_is_gqi(priv) ? priv->tx_desc_cnt : GVE_MAX_QUEUE_SIZE_DQO,
467 		.nb_min = priv->tx_desc_cnt,
468 		.nb_align = 1,
469 	};
470 
471 	dev_info->flow_type_rss_offloads = GVE_RTE_RSS_OFFLOAD_ALL;
472 	dev_info->hash_key_size = GVE_RSS_HASH_KEY_SIZE;
473 	dev_info->reta_size = GVE_RSS_INDIR_SIZE;
474 
475 	return 0;
476 }
477 
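/* Aggregate the per-queue software counters (plus NIC-reported imissed on
 * GQI) into the ethdev basic stats.
 */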
478 static int
479 gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
480 {
481 	uint16_t i;
482 	if (gve_is_gqi(dev->data->dev_private))
483 		gve_get_imissed_from_nic(dev);
484 
485 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
486 		struct gve_tx_queue *txq = dev->data->tx_queues[i];
487 		if (txq == NULL)
488 			continue;
489 
490 		stats->opackets += txq->stats.packets;
491 		stats->obytes += txq->stats.bytes;
492 		stats->oerrors += txq->stats.errors;
493 	}
494 
495 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
496 		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
497 		if (rxq == NULL)
498 			continue;
499 
500 		stats->ipackets += rxq->stats.packets;
501 		stats->ibytes += rxq->stats.bytes;
502 		stats->ierrors += rxq->stats.errors;
503 		stats->rx_nombuf += rxq->stats.no_mbufs;
504 		stats->imissed += rxq->stats.imissed;
505 	}
506 
507 	return 0;
508 }
509 
510 static int
511 gve_dev_stats_reset(struct rte_eth_dev *dev)
512 {
513 	uint16_t i;
514 
515 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
516 		struct gve_tx_queue *txq = dev->data->tx_queues[i];
517 		if (txq == NULL)
518 			continue;
519 
520 		memset(&txq->stats, 0, sizeof(txq->stats));
521 	}
522 
523 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
524 		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
525 		if (rxq == NULL)
526 			continue;
527 
528 		memset(&rxq->stats, 0, sizeof(rxq->stats));
529 	}
530 
531 	return 0;
532 }
533 
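/* Program a new MTU through the admin queue; only valid within the device
 * limits and while the port is stopped.
 */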
534 static int
535 gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
536 {
537 	struct gve_priv *priv = dev->data->dev_private;
538 	int err;
539 
540 	if (mtu < RTE_ETHER_MIN_MTU || mtu > priv->max_mtu) {
541 		PMD_DRV_LOG(ERR, "MIN MTU is %u, MAX MTU is %u",
542 			    RTE_ETHER_MIN_MTU, priv->max_mtu);
543 		return -EINVAL;
544 	}
545 
546 	/* Changing the MTU is not allowed while the port is started. */
547 	if (dev->data->dev_started) {
548 		PMD_DRV_LOG(ERR, "Port must be stopped before configuration");
549 		return -EBUSY;
550 	}
551 
552 	err = gve_adminq_set_mtu(priv, mtu);
553 	if (err) {
554 		PMD_DRV_LOG(ERR, "Failed to set MTU to %u, err = %d", mtu, err);
555 		return err;
556 	}
557 
558 	return 0;
559 }
560 
561 #define TX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_tx_stats, x)
562 #define RX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_rx_stats, x)
563 
564 static const struct gve_xstats_name_offset tx_xstats_name_offset[] = {
565 	{ "packets", TX_QUEUE_STATS_OFFSET(packets) },
566 	{ "bytes",   TX_QUEUE_STATS_OFFSET(bytes) },
567 	{ "errors",  TX_QUEUE_STATS_OFFSET(errors) },
568 };
569 
570 static const struct gve_xstats_name_offset rx_xstats_name_offset[] = {
571 	{ "packets",                RX_QUEUE_STATS_OFFSET(packets) },
572 	{ "bytes",                  RX_QUEUE_STATS_OFFSET(bytes) },
573 	{ "errors",                 RX_QUEUE_STATS_OFFSET(errors) },
574 	{ "mbuf_alloc_errors",      RX_QUEUE_STATS_OFFSET(no_mbufs) },
575 	{ "mbuf_alloc_errors_bulk", RX_QUEUE_STATS_OFFSET(no_mbufs_bulk) },
576 	{ "imissed",                RX_QUEUE_STATS_OFFSET(imissed) },
577 };
578 
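/* Total number of extended stats: one entry per stat name for every
 * configured Tx and Rx queue.
 */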
579 static int
580 gve_xstats_count(struct rte_eth_dev *dev)
581 {
582 	uint16_t i, count = 0;
583 
584 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
585 		if (dev->data->tx_queues[i])
586 			count += RTE_DIM(tx_xstats_name_offset);
587 	}
588 
589 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
590 		if (dev->data->rx_queues[i])
591 			count += RTE_DIM(rx_xstats_name_offset);
592 	}
593 
594 	return count;
595 }
596 
597 static int
598 gve_xstats_get(struct rte_eth_dev *dev,
599 			struct rte_eth_xstat *xstats,
600 			unsigned int size)
601 {
602 	uint16_t i, j, count = gve_xstats_count(dev);
603 	const char *stats;
604 
605 	if (gve_is_gqi(dev->data->dev_private))
606 		gve_get_imissed_from_nic(dev);
607 
608 	if (xstats == NULL || size < count)
609 		return count;
610 
611 	count = 0;
612 
613 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
614 		const struct gve_tx_queue *txq = dev->data->tx_queues[i];
615 		if (txq == NULL)
616 			continue;
617 
618 		stats = (const char *)&txq->stats;
619 		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++, count++) {
620 			xstats[count].id = count;
621 			xstats[count].value = *(const uint64_t *)
622 				(stats + tx_xstats_name_offset[j].offset);
623 		}
624 	}
625 
626 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
627 		const struct gve_rx_queue *rxq = dev->data->rx_queues[i];
628 		if (rxq == NULL)
629 			continue;
630 
631 		stats = (const char *)&rxq->stats;
632 		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++, count++) {
633 			xstats[count].id = count;
634 			xstats[count].value = *(const uint64_t *)
635 				(stats + rx_xstats_name_offset[j].offset);
636 		}
637 	}
638 
639 	return count;
640 }
641 
642 static int
643 gve_xstats_get_names(struct rte_eth_dev *dev,
644 			struct rte_eth_xstat_name *xstats_names,
645 			unsigned int size)
646 {
647 	uint16_t i, j, count = gve_xstats_count(dev);
648 
649 	if (xstats_names == NULL || size < count)
650 		return count;
651 
652 	count = 0;
653 
654 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
655 		if (dev->data->tx_queues[i] == NULL)
656 			continue;
657 
658 		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++)
659 			snprintf(xstats_names[count++].name,
660 				 RTE_ETH_XSTATS_NAME_SIZE,
661 				 "tx_q%u_%s", i, tx_xstats_name_offset[j].name);
662 	}
663 
664 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
665 		if (dev->data->rx_queues[i] == NULL)
666 			continue;
667 
668 		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++)
669 			snprintf(xstats_names[count++].name,
670 				 RTE_ETH_XSTATS_NAME_SIZE,
671 				 "rx_q%u_%s", i, rx_xstats_name_offset[j].name);
672 	}
673 
674 	return count;
675 }
676 
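/* Validate and apply a new RSS hash configuration (hash types and key),
 * keeping any previously programmed redirection table.
 */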
678 static int
679 gve_rss_hash_update(struct rte_eth_dev *dev,
680 			struct rte_eth_rss_conf *rss_conf)
681 {
682 	struct gve_priv *priv = dev->data->dev_private;
683 	struct gve_rss_config gve_rss_conf;
684 	int rss_reta_size;
685 	int err;
686 
687 	if (gve_validate_rss_hf(rss_conf->rss_hf)) {
688 		PMD_DRV_LOG(ERR, "Unsupported hash function.");
689 		return -EINVAL;
690 	}
691 
692 	if (rss_conf->algorithm != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
693 		rss_conf->algorithm != RTE_ETH_HASH_FUNCTION_DEFAULT) {
694 		PMD_DRV_LOG(ERR, "Device only supports Toeplitz algorithm.");
695 		return -EINVAL;
696 	}
697 
698 	if (rss_conf->rss_key_len) {
699 		if (rss_conf->rss_key_len != GVE_RSS_HASH_KEY_SIZE) {
700 			PMD_DRV_LOG(ERR,
701 				"Invalid hash key size. Only RSS hash key size "
702 				"of %u supported", GVE_RSS_HASH_KEY_SIZE);
703 			return -EINVAL;
704 		}
705 
706 		if (!rss_conf->rss_key) {
707 			PMD_DRV_LOG(ERR, "RSS key must be non-null.");
708 			return -EINVAL;
709 		}
710 	} else {
711 		if (!priv->rss_config.key_size) {
712 			PMD_DRV_LOG(ERR, "RSS key must be initialized before "
713 				"any other configuration.");
714 			return -EINVAL;
715 		}
716 		rss_conf->rss_key_len = priv->rss_config.key_size;
717 	}
718 
719 	rss_reta_size = priv->rss_config.indir ?
720 			priv->rss_config.indir_size :
721 			GVE_RSS_INDIR_SIZE;
722 	err = gve_init_rss_config(&gve_rss_conf, rss_conf->rss_key_len,
723 		rss_reta_size);
724 	if (err)
725 		return err;
726 
727 	gve_rss_conf.alg = GVE_RSS_HASH_TOEPLITZ;
728 	err = gve_update_rss_hash_types(priv, &gve_rss_conf, rss_conf);
729 	if (err)
730 		goto err;
731 	err = gve_update_rss_key(priv, &gve_rss_conf, rss_conf);
732 	if (err)
733 		goto err;
734 
735 	/* Set redirection table to default or preexisting. */
736 	if (!priv->rss_config.indir)
737 		gve_generate_rss_reta(dev, &gve_rss_conf);
738 	else
739 		memcpy(gve_rss_conf.indir, priv->rss_config.indir,
740 			gve_rss_conf.indir_size * sizeof(*priv->rss_config.indir));
741 
742 	err = gve_adminq_configure_rss(priv, &gve_rss_conf);
743 	if (!err)
744 		gve_update_priv_rss_config(priv, &gve_rss_conf);
745 
746 err:
747 	gve_free_rss_config(&gve_rss_conf);
748 	return err;
749 }
750 
751 static int
752 gve_rss_hash_conf_get(struct rte_eth_dev *dev,
753 			struct rte_eth_rss_conf *rss_conf)
754 {
755 	struct gve_priv *priv = dev->data->dev_private;
756 
757 	if (!(dev->data->dev_conf.rxmode.offloads &
758 			RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
759 		PMD_DRV_LOG(ERR, "RSS not configured.");
760 		return -ENOTSUP;
761 	}
762 
764 	gve_to_rte_rss_hf(priv->rss_config.hash_types, rss_conf);
765 	rss_conf->rss_key_len = priv->rss_config.key_size;
766 	if (rss_conf->rss_key) {
767 		if (!priv->rss_config.key) {
768 			PMD_DRV_LOG(ERR, "Unable to retrieve default RSS hash key.");
769 			return -ENOTSUP;
770 		}
771 		memcpy(rss_conf->rss_key, priv->rss_config.key,
772 			rss_conf->rss_key_len * sizeof(*rss_conf->rss_key));
773 	}
774 
775 	return 0;
776 }
777 
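/* Update the RSS redirection table from the 64-entry groups supplied by the
 * application; requires the RSS hash key to be configured first.
 */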
778 static int
779 gve_rss_reta_update(struct rte_eth_dev *dev,
780 	struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
781 {
782 	struct gve_priv *priv = dev->data->dev_private;
783 	struct gve_rss_config gve_rss_conf;
784 	int table_id;
785 	int err;
786 	int i;
787 
788 	/* RSS key must be set before the redirection table can be set. */
789 	if (!priv->rss_config.key || priv->rss_config.key_size == 0) {
790 		PMD_DRV_LOG(ERR, "RSS hash key must be set before the "
791 			"redirection table can be updated.");
792 		return -ENOTSUP;
793 	}
794 
795 	if (reta_size != GVE_RSS_INDIR_SIZE) {
796 		PMD_DRV_LOG(ERR, "Redirection table must have %hu elements",
797 			(uint16_t)GVE_RSS_INDIR_SIZE);
798 		return -EINVAL;
799 	}
800 
801 	err = gve_init_rss_config_from_priv(priv, &gve_rss_conf);
802 	if (err) {
803 		PMD_DRV_LOG(ERR, "Error allocating new RSS config.");
804 		return err;
805 	}
806 
807 	table_id = 0;
808 	for (i = 0; i < priv->rss_config.indir_size; i++) {
809 		int table_entry = i % RTE_ETH_RETA_GROUP_SIZE;
810 		if (reta_conf[table_id].mask & (1ULL << table_entry))
811 			gve_rss_conf.indir[i] =
812 				reta_conf[table_id].reta[table_entry];
813 
814 		if (table_entry == RTE_ETH_RETA_GROUP_SIZE - 1)
815 			table_id++;
816 	}
817 
818 	err = gve_adminq_configure_rss(priv, &gve_rss_conf);
819 	if (err)
820 		PMD_DRV_LOG(ERR, "Problem configuring RSS with device.");
821 	else
822 		gve_update_priv_rss_config(priv, &gve_rss_conf);
823 
824 	gve_free_rss_config(&gve_rss_conf);
825 	return err;
826 }
827 
828 static int
829 gve_rss_reta_query(struct rte_eth_dev *dev,
830 	struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
831 {
832 	struct gve_priv *priv = dev->data->dev_private;
833 	int table_id;
834 	int i;
835 
836 	if (!(dev->data->dev_conf.rxmode.offloads &
837 		RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
838 		PMD_DRV_LOG(ERR, "RSS not configured.");
839 		return -ENOTSUP;
840 	}
841 
842 	/* RSS key must be set before the redirection table can be queried. */
843 	if (!priv->rss_config.key) {
844 		PMD_DRV_LOG(ERR, "RSS hash key must be set before the "
845 			"redirection table can be queried.");
846 		return -ENOTSUP;
847 	}
848 
849 	if (reta_size != priv->rss_config.indir_size) {
850 		PMD_DRV_LOG(ERR, "RSS redirection table must have %d entries.",
851 			priv->rss_config.indir_size);
852 		return -EINVAL;
853 	}
854 
855 	table_id = 0;
856 	for (i = 0; i < priv->rss_config.indir_size; i++) {
857 		int table_entry = i % RTE_ETH_RETA_GROUP_SIZE;
858 		if (reta_conf[table_id].mask & (1ULL << table_entry))
859 			reta_conf[table_id].reta[table_entry] =
860 				priv->rss_config.indir[i];
861 
862 		if (table_entry == RTE_ETH_RETA_GROUP_SIZE - 1)
863 			table_id++;
864 	}
865 
866 	return 0;
867 }
868 
869 static const struct eth_dev_ops gve_eth_dev_ops = {
870 	.dev_configure        = gve_dev_configure,
871 	.dev_start            = gve_dev_start,
872 	.dev_stop             = gve_dev_stop,
873 	.dev_close            = gve_dev_close,
874 	.dev_infos_get        = gve_dev_info_get,
875 	.rx_queue_setup       = gve_rx_queue_setup,
876 	.tx_queue_setup       = gve_tx_queue_setup,
877 	.rx_queue_release     = gve_rx_queue_release,
878 	.tx_queue_release     = gve_tx_queue_release,
879 	.rx_queue_start       = gve_rx_queue_start,
880 	.tx_queue_start       = gve_tx_queue_start,
881 	.rx_queue_stop        = gve_rx_queue_stop,
882 	.tx_queue_stop        = gve_tx_queue_stop,
883 	.link_update          = gve_link_update,
884 	.stats_get            = gve_dev_stats_get,
885 	.stats_reset          = gve_dev_stats_reset,
886 	.mtu_set              = gve_dev_mtu_set,
887 	.xstats_get           = gve_xstats_get,
888 	.xstats_get_names     = gve_xstats_get_names,
889 	.rss_hash_update      = gve_rss_hash_update,
890 	.rss_hash_conf_get    = gve_rss_hash_conf_get,
891 	.reta_update          = gve_rss_reta_update,
892 	.reta_query           = gve_rss_reta_query,
893 };
894 
895 static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
896 	.dev_configure        = gve_dev_configure,
897 	.dev_start            = gve_dev_start,
898 	.dev_stop             = gve_dev_stop,
899 	.dev_close            = gve_dev_close,
900 	.dev_infos_get        = gve_dev_info_get,
901 	.rx_queue_setup       = gve_rx_queue_setup_dqo,
902 	.tx_queue_setup       = gve_tx_queue_setup_dqo,
903 	.rx_queue_release     = gve_rx_queue_release_dqo,
904 	.tx_queue_release     = gve_tx_queue_release_dqo,
905 	.rx_queue_start       = gve_rx_queue_start_dqo,
906 	.tx_queue_start       = gve_tx_queue_start_dqo,
907 	.rx_queue_stop        = gve_rx_queue_stop_dqo,
908 	.tx_queue_stop        = gve_tx_queue_stop_dqo,
909 	.link_update          = gve_link_update,
910 	.stats_get            = gve_dev_stats_get,
911 	.stats_reset          = gve_dev_stats_reset,
912 	.mtu_set              = gve_dev_mtu_set,
913 	.xstats_get           = gve_xstats_get,
914 	.xstats_get_names     = gve_xstats_get_names,
915 	.rss_hash_update      = gve_rss_hash_update,
916 	.rss_hash_conf_get    = gve_rss_hash_conf_get,
917 	.reta_update          = gve_rss_reta_update,
918 	.reta_query           = gve_rss_reta_query,
919 };
920 
921 static void
922 gve_free_counter_array(struct gve_priv *priv)
923 {
924 	rte_memzone_free(priv->cnt_array_mz);
925 	priv->cnt_array = NULL;
926 }
927 
928 static void
929 gve_free_irq_db(struct gve_priv *priv)
930 {
931 	rte_memzone_free(priv->irq_dbs_mz);
932 	priv->irq_dbs = NULL;
933 }
934 
935 static void
936 gve_teardown_device_resources(struct gve_priv *priv)
937 {
938 	int err;
939 
940 	/* Tell device its resources are being freed */
941 	if (gve_get_device_resources_ok(priv)) {
942 		err = gve_adminq_deconfigure_device_resources(priv);
943 		if (err)
944 			PMD_DRV_LOG(ERR, "Could not deconfigure device resources: err=%d", err);
945 	}
946 
947 	if (!gve_is_gqi(priv)) {
948 		rte_free(priv->ptype_lut_dqo);
949 		priv->ptype_lut_dqo = NULL;
950 	}
951 	gve_free_counter_array(priv);
952 	gve_free_irq_db(priv);
953 	gve_clear_device_resources_ok(priv);
954 }
955 
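/* Return the number of MSI-X vectors supported by the device, read from its
 * MSI-X capability, or 0 if the capability is absent.
 */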
956 static int
957 pci_dev_msix_vec_count(struct rte_pci_device *pdev)
958 {
959 	off_t msix_pos = rte_pci_find_capability(pdev, RTE_PCI_CAP_ID_MSIX);
960 	uint16_t control;
961 
962 	if (msix_pos > 0 && rte_pci_read_config(pdev, &control, sizeof(control),
963 			msix_pos + RTE_PCI_MSIX_FLAGS) == sizeof(control))
964 		return (control & RTE_PCI_MSIX_FLAGS_QSIZE) + 1;
965 
966 	return 0;
967 }
968 
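/* Allocate the event counter array and IRQ doorbell regions, register them
 * with the device, and (for DQO) fetch the packet type lookup table.
 */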
969 static int
970 gve_setup_device_resources(struct gve_priv *priv)
971 {
972 	char z_name[RTE_MEMZONE_NAMESIZE];
973 	const struct rte_memzone *mz;
974 	int err = 0;
975 
976 	snprintf(z_name, sizeof(z_name), "gve_%s_cnt_arr", priv->pci_dev->device.name);
977 	mz = rte_memzone_reserve_aligned(z_name,
978 					 priv->num_event_counters * sizeof(*priv->cnt_array),
979 					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
980 					 PAGE_SIZE);
981 	if (mz == NULL) {
982 		PMD_DRV_LOG(ERR, "Could not alloc memzone for count array");
983 		return -ENOMEM;
984 	}
985 	priv->cnt_array = (rte_be32_t *)mz->addr;
986 	priv->cnt_array_mz = mz;
987 
988 	snprintf(z_name, sizeof(z_name), "gve_%s_irqmz", priv->pci_dev->device.name);
989 	mz = rte_memzone_reserve_aligned(z_name,
990 					 sizeof(*priv->irq_dbs) * (priv->num_ntfy_blks),
991 					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
992 					 PAGE_SIZE);
993 	if (mz == NULL) {
994 		PMD_DRV_LOG(ERR, "Could not alloc memzone for irq_dbs");
995 		err = -ENOMEM;
996 		goto free_cnt_array;
997 	}
998 	priv->irq_dbs = (struct gve_irq_db *)mz->addr;
999 	priv->irq_dbs_mz = mz;
1000 
1001 	err = gve_adminq_configure_device_resources(priv,
1002 						    priv->cnt_array_mz->iova,
1003 						    priv->num_event_counters,
1004 						    priv->irq_dbs_mz->iova,
1005 						    priv->num_ntfy_blks);
1006 	if (unlikely(err)) {
1007 		PMD_DRV_LOG(ERR, "Could not config device resources: err=%d", err);
1008 		goto free_irq_dbs;
1009 	}
1010 	if (!gve_is_gqi(priv)) {
1011 		priv->ptype_lut_dqo = rte_zmalloc("gve_ptype_lut_dqo",
1012 			sizeof(struct gve_ptype_lut), 0);
1013 		if (priv->ptype_lut_dqo == NULL) {
1014 			PMD_DRV_LOG(ERR, "Failed to alloc ptype lut.");
1015 			err = -ENOMEM;
1016 			goto free_irq_dbs;
1017 		}
1018 		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
1019 		if (unlikely(err)) {
1020 			PMD_DRV_LOG(ERR, "Failed to get ptype map: err=%d", err);
1021 			goto free_ptype_lut;
1022 		}
1023 	}
1024 
1025 	return 0;
1026 free_ptype_lut:
1027 	rte_free(priv->ptype_lut_dqo);
1028 	priv->ptype_lut_dqo = NULL;
1029 free_irq_dbs:
1030 	gve_free_irq_db(priv);
1031 free_cnt_array:
1032 	gve_free_counter_array(priv);
1033 
1034 	return err;
1035 }
1036 
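/* Bring up the admin queue, verify driver compatibility, discover device
 * limits, and pre-allocate the queue page lists required by the GQI_QPL
 * queue format before configuring device resources.
 */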
1037 static int
1038 gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1039 {
1040 	uint16_t pages;
1041 	int num_ntfy;
1042 	uint32_t i;
1043 	int err;
1044 
1045 	/* Set up the adminq */
1046 	err = gve_adminq_alloc(priv);
1047 	if (err) {
1048 		PMD_DRV_LOG(ERR, "Failed to alloc admin queue: err=%d", err);
1049 		return err;
1050 	}
1051 	err = gve_verify_driver_compatibility(priv);
1052 	if (err) {
1053 		PMD_DRV_LOG(ERR, "Could not verify driver compatibility: err=%d", err);
1054 		goto free_adminq;
1055 	}
1056 
1057 	if (skip_describe_device)
1058 		goto setup_device;
1059 
1060 	/* Get the initial information we need from the device */
1061 	err = gve_adminq_describe_device(priv);
1062 	if (err) {
1063 		PMD_DRV_LOG(ERR, "Could not get device information: err=%d", err);
1064 		goto free_adminq;
1065 	}
1066 
1067 	num_ntfy = pci_dev_msix_vec_count(priv->pci_dev);
1068 	if (num_ntfy <= 0) {
1069 		PMD_DRV_LOG(ERR, "Could not count MSI-x vectors");
1070 		err = -EIO;
1071 		goto free_adminq;
1072 	} else if (num_ntfy < GVE_MIN_MSIX) {
1073 		PMD_DRV_LOG(ERR, "GVE needs at least %d MSI-x vectors, but only has %d",
1074 			    GVE_MIN_MSIX, num_ntfy);
1075 		err = -EINVAL;
1076 		goto free_adminq;
1077 	}
1078 
1079 	priv->num_registered_pages = 0;
1080 
1081 	/* gvnic has one Notification Block per MSI-x vector, except for the
1082 	 * management vector
1083 	 */
1084 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1085 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
1086 
1087 	priv->max_nb_txq = RTE_MIN(priv->max_nb_txq, priv->num_ntfy_blks / 2);
1088 	priv->max_nb_rxq = RTE_MIN(priv->max_nb_rxq, priv->num_ntfy_blks / 2);
1089 
1090 	if (priv->default_num_queues > 0) {
1091 		priv->max_nb_txq = RTE_MIN(priv->default_num_queues, priv->max_nb_txq);
1092 		priv->max_nb_rxq = RTE_MIN(priv->default_num_queues, priv->max_nb_rxq);
1093 	}
1094 
1095 	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
1096 		    priv->max_nb_txq, priv->max_nb_rxq);
1097 
1098 	/* In the GQI_QPL queue format:
1099 	 * allocate queue page lists according to the max queue numbers.
1100 	 * TX QPL ids start from 0 while RX QPL ids start from
1101 	 * priv->max_nb_txq.
1102 	 */
1103 	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
1104 		priv->qpl = rte_zmalloc("gve_qpl",
1105 					(priv->max_nb_txq + priv->max_nb_rxq) *
1106 					sizeof(struct gve_queue_page_list), 0);
1107 		if (priv->qpl == NULL) {
1108 			PMD_DRV_LOG(ERR, "Failed to alloc qpl.");
1109 			err = -ENOMEM;
1110 			goto free_adminq;
1111 		}
1112 
1113 		for (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {
1114 			if (i < priv->max_nb_txq)
1115 				pages = priv->tx_pages_per_qpl;
1116 			else
1117 				pages = priv->rx_data_slot_cnt;
1118 			err = gve_alloc_queue_page_list(priv, i, pages);
1119 			if (err != 0) {
1120 				PMD_DRV_LOG(ERR, "Failed to alloc qpl %u.", i);
1121 				goto err_qpl;
1122 			}
1123 		}
1124 	}
1125 
1126 setup_device:
1127 	err = gve_setup_device_resources(priv);
1128 	if (!err)
1129 		return 0;
1130 err_qpl:
1131 	gve_free_qpls(priv);
1132 free_adminq:
1133 	gve_adminq_free(priv);
1134 	return err;
1135 }
1136 
1137 static void
1138 gve_teardown_priv_resources(struct gve_priv *priv)
1139 {
1140 	gve_teardown_device_resources(priv);
1141 	gve_adminq_free(priv);
1142 }
1143 
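/* Primary-process ethdev init: look up the register and doorbell BAR
 * mappings, write the driver version, read the device queue limits, and
 * select the GQI or DQO ops and burst functions.
 */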
1144 static int
1145 gve_dev_init(struct rte_eth_dev *eth_dev)
1146 {
1147 	struct gve_priv *priv = eth_dev->data->dev_private;
1148 	int max_tx_queues, max_rx_queues;
1149 	struct rte_pci_device *pci_dev;
1150 	struct gve_registers *reg_bar;
1151 	rte_be32_t *db_bar;
1152 	int err;
1153 
1154 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1155 		return 0;
1156 
1157 	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1158 
1159 	reg_bar = pci_dev->mem_resource[GVE_REG_BAR].addr;
1160 	if (!reg_bar) {
1161 		PMD_DRV_LOG(ERR, "Failed to map pci bar!");
1162 		return -ENOMEM;
1163 	}
1164 
1165 	db_bar = pci_dev->mem_resource[GVE_DB_BAR].addr;
1166 	if (!db_bar) {
1167 		PMD_DRV_LOG(ERR, "Failed to map doorbell bar!");
1168 		return -ENOMEM;
1169 	}
1170 
1171 	gve_write_version(&reg_bar->driver_version);
1172 	/* Get max queues to alloc etherdev */
1173 	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1174 	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1175 
1176 	priv->reg_bar0 = reg_bar;
1177 	priv->db_bar2 = db_bar;
1178 	priv->pci_dev = pci_dev;
1179 	priv->state_flags = 0x0;
1180 
1181 	priv->max_nb_txq = max_tx_queues;
1182 	priv->max_nb_rxq = max_rx_queues;
1183 
1184 	err = gve_init_priv(priv, false);
1185 	if (err)
1186 		return err;
1187 
1188 	if (gve_is_gqi(priv)) {
1189 		eth_dev->dev_ops = &gve_eth_dev_ops;
1190 		gve_set_rx_function(eth_dev);
1191 		gve_set_tx_function(eth_dev);
1192 	} else {
1193 		eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
1194 		gve_set_rx_function_dqo(eth_dev);
1195 		gve_set_tx_function_dqo(eth_dev);
1196 	}
1197 
1198 	eth_dev->data->mac_addrs = &priv->dev_addr;
1199 
1200 	return 0;
1201 }
1202 
1203 static int
1204 gve_dev_uninit(struct rte_eth_dev *eth_dev)
1205 {
1206 	struct gve_priv *priv = eth_dev->data->dev_private;
1207 
1208 	gve_teardown_priv_resources(priv);
1209 
1210 	eth_dev->data->mac_addrs = NULL;
1211 
1212 	return 0;
1213 }
1214 
1215 static int
1216 gve_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
1217 	      struct rte_pci_device *pci_dev)
1218 {
1219 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct gve_priv), gve_dev_init);
1220 }
1221 
1222 static int
1223 gve_pci_remove(struct rte_pci_device *pci_dev)
1224 {
1225 	return rte_eth_dev_pci_generic_remove(pci_dev, gve_dev_uninit);
1226 }
1227 
1228 static const struct rte_pci_id pci_id_gve_map[] = {
1229 	{ RTE_PCI_DEVICE(GOOGLE_VENDOR_ID, GVE_DEV_ID) },
1230 	{ .device_id = 0 },
1231 };
1232 
1233 static struct rte_pci_driver rte_gve_pmd = {
1234 	.id_table = pci_id_gve_map,
1235 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1236 	.probe = gve_pci_probe,
1237 	.remove = gve_pci_remove,
1238 };
1239 
1240 RTE_PMD_REGISTER_PCI(net_gve, rte_gve_pmd);
1241 RTE_PMD_REGISTER_PCI_TABLE(net_gve, pci_id_gve_map);
1242 RTE_PMD_REGISTER_KMOD_DEP(net_gve, "* igb_uio | vfio-pci");
1243 RTE_LOG_REGISTER_SUFFIX(gve_logtype_driver, driver, NOTICE);
1244