/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2022-2023 Intel Corporation
 * Copyright(C) 2023 Google LLC
 */

#include "gve_ethdev.h"
#include "base/gve_adminq.h"
#include "base/gve_register.h"
#include "base/gve_osdep.h"
#include "gve_version.h"
#include "rte_ether.h"
#include "gve_rss.h"

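/*
 * Report the PMD version string to the device by writing it one byte at
 * a time into the driver-version register; the string is terminated
 * with a trailing '\n'.
 */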
static void
gve_write_version(uint8_t *driver_version_register)
{
	const char *c = gve_version_string();
	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}
	writeb('\n', driver_version_register);
}

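/*
 * Allocate one queue page list (QPL): an IOVA-contiguous memzone of
 * 'pages' pages plus an array holding the bus address of each page, so
 * the pages can later be registered with the device.
 */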
static int
gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct gve_queue_page_list *qpl;
	const struct rte_memzone *mz;
	dma_addr_t page_bus;
	uint32_t i;

	if (priv->num_registered_pages + pages >
	    priv->max_registered_pages) {
		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
			    priv->num_registered_pages + pages,
			    priv->max_registered_pages);
		return -EINVAL;
	}
	qpl = &priv->qpl[id];
	snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
	mz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,
					 rte_socket_id(),
					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc %s.", z_name);
		return -ENOMEM;
	}
	qpl->page_buses = rte_zmalloc("qpl page buses", pages * sizeof(dma_addr_t), 0);
	if (qpl->page_buses == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc qpl %u page buses", id);
		/* Do not leak the page memzone on failure. */
		rte_memzone_free(mz);
		return -ENOMEM;
	}
	page_bus = mz->iova;
	for (i = 0; i < pages; i++) {
		qpl->page_buses[i] = page_bus;
		page_bus += PAGE_SIZE;
	}
	qpl->id = id;
	qpl->mz = mz;
	qpl->num_entries = pages;

	priv->num_registered_pages += pages;

	return 0;
}

static void
gve_free_qpls(struct gve_priv *priv)
{
	uint16_t nb_txqs = priv->max_nb_txq;
	uint16_t nb_rxqs = priv->max_nb_rxq;
	uint32_t i;

	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return;

	for (i = 0; i < nb_txqs + nb_rxqs; i++) {
		if (priv->qpl[i].mz != NULL)
			rte_memzone_free(priv->qpl[i].mz);
		rte_free(priv->qpl[i].page_buses);
	}

	rte_free(priv->qpl);
}

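/*
 * Apply the ethdev configuration: turn on the RSS hash offload when RSS
 * multi-queue mode is requested, enable RSC when LRO is requested, and
 * regenerate the RSS redirection table if one already exists (the queue
 * count may have changed since it was last programmed).
 */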
static int
gve_dev_configure(struct rte_eth_dev *dev)
{
	struct gve_priv *priv = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
		priv->rss_config.alg = GVE_RSS_HASH_TOEPLITZ;
	}

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
		priv->enable_rsc = 1;

	/* Reset RSS RETA in case number of queues changed. */
	if (priv->rss_config.indir) {
		struct gve_rss_config update_reta_config;
		gve_init_rss_config_from_priv(priv, &update_reta_config);
		gve_generate_rss_reta(dev, &update_reta_config);

		int err = gve_adminq_configure_rss(priv, &update_reta_config);
		if (err)
			PMD_DRV_LOG(ERR,
				"Could not reconfigure RSS redirection table.");
		else
			gve_update_priv_rss_config(priv, &update_reta_config);

		gve_free_rss_config(&update_reta_config);
		return err;
	}

	return 0;
}

static int
gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct gve_priv *priv = dev->data->dev_private;
	struct rte_eth_link link;
	int err;

	memset(&link, 0, sizeof(link));
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = RTE_ETH_LINK_AUTONEG;

	if (!dev->data->dev_started) {
		link.link_status = RTE_ETH_LINK_DOWN;
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	} else {
		link.link_status = RTE_ETH_LINK_UP;
		PMD_DRV_LOG(DEBUG, "Get link status from hw");
		err = gve_adminq_report_link_speed(priv);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to get link speed.");
			priv->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
		}
		link.link_speed = priv->link_speed;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

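/*
 * Reserve the DMA region shared with the device for stats reporting.
 * The region holds the driver-written entries first, followed by the
 * NIC-written entries; stats_start_idx/stats_end_idx bracket the
 * NIC-written range that gve_get_imissed_from_nic() scans.
 */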
static int
gve_alloc_stats_report(struct gve_priv *priv,
		uint16_t nb_tx_queues, uint16_t nb_rx_queues)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	int tx_stats_cnt;
	int rx_stats_cnt;

	tx_stats_cnt = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
		nb_tx_queues;
	rx_stats_cnt = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
		nb_rx_queues;
	priv->stats_report_len = sizeof(struct gve_stats_report) +
		sizeof(struct stats) * (tx_stats_cnt + rx_stats_cnt);

	snprintf(z_name, sizeof(z_name), "gve_stats_report_%s",
			priv->pci_dev->device.name);
	priv->stats_report_mem = rte_memzone_reserve_aligned(z_name,
			priv->stats_report_len,
			rte_socket_id(),
			RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);

	if (!priv->stats_report_mem)
		return -ENOMEM;

	/* offset by skipping stats written by gve. */
	priv->stats_start_idx = (GVE_TX_STATS_REPORT_NUM * nb_tx_queues) +
		(GVE_RX_STATS_REPORT_NUM * nb_rx_queues);
	priv->stats_end_idx = priv->stats_start_idx +
		(NIC_TX_STATS_REPORT_NUM * nb_tx_queues) +
		(NIC_RX_STATS_REPORT_NUM * nb_rx_queues) - 1;

	return 0;
}

static void
gve_free_stats_report(struct rte_eth_dev *dev)
{
	struct gve_priv *priv = dev->data->dev_private;
	rte_memzone_free(priv->stats_report_mem);
	priv->stats_report_mem = NULL;
}

/* Read Rx NIC stats from shared region */
static void
gve_get_imissed_from_nic(struct rte_eth_dev *dev)
{
	struct gve_stats_report *stats_report;
	struct gve_rx_queue *rxq;
	struct gve_priv *priv;
	struct stats stat;
	int queue_id;
	int stat_id;
	int i;

	priv = dev->data->dev_private;
	if (!priv->stats_report_mem)
		return;
	stats_report = (struct gve_stats_report *)
		priv->stats_report_mem->addr;
	for (i = priv->stats_start_idx; i <= priv->stats_end_idx; i++) {
		stat = stats_report->stats[i];
		queue_id = cpu_to_be32(stat.queue_id);
		rxq = dev->data->rx_queues[queue_id];
		if (rxq == NULL)
			continue;
		stat_id = cpu_to_be32(stat.stat_name);
		/* Update imissed. */
		if (stat_id == RX_NO_BUFFERS_POSTED)
			rxq->stats.imissed = cpu_to_be64(stat.value);
	}
}

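/*
 * Create the Tx/Rx queues on the device via the admin queue and start
 * each one; on failure, any queues already started are stopped again.
 */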
static int
gve_start_queues(struct rte_eth_dev *dev)
{
	struct gve_priv *priv = dev->data->dev_private;
	uint16_t num_queues;
	uint16_t i;
	int ret;

	num_queues = dev->data->nb_tx_queues;
	priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;
	ret = gve_adminq_create_tx_queues(priv, num_queues);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to create %u tx queues.", num_queues);
		return ret;
	}
	for (i = 0; i < num_queues; i++)
		if (gve_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Tx queue %d", i);
			goto err_tx;
		}

	num_queues = dev->data->nb_rx_queues;
	priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;
	ret = gve_adminq_create_rx_queues(priv, num_queues);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to create %u rx queues.", num_queues);
		goto err_tx;
	}
	for (i = 0; i < num_queues; i++) {
		if (gve_is_gqi(priv))
			ret = gve_rx_queue_start(dev, i);
		else
			ret = gve_rx_queue_start_dqo(dev, i);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Rx queue %d", i);
			goto err_rx;
		}
	}

	return 0;

err_rx:
	gve_stop_rx_queues(dev);
err_tx:
	gve_stop_tx_queues(dev);
	return ret;
}

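/*
 * Start the port: bring up the queues, refresh link status and, for the
 * GQI queue formats, register the stats-report region with the device so
 * it publishes NIC-side counters (e.g. RX_NO_BUFFERS_POSTED) on a timer.
 */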
static int
gve_dev_start(struct rte_eth_dev *dev)
{
	struct gve_priv *priv;
	int ret;

	ret = gve_start_queues(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to start queues");
		return ret;
	}

	dev->data->dev_started = 1;
	gve_link_update(dev, 0);

	priv = dev->data->dev_private;
	/* No stats available yet for Dqo. */
	if (gve_is_gqi(priv)) {
		ret = gve_alloc_stats_report(priv,
				dev->data->nb_tx_queues,
				dev->data->nb_rx_queues);
		if (ret != 0) {
			PMD_DRV_LOG(ERR,
				"Failed to allocate region for stats reporting.");
			return ret;
		}
		ret = gve_adminq_report_stats(priv, priv->stats_report_len,
				priv->stats_report_mem->iova,
				GVE_STATS_REPORT_TIMER_PERIOD);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "gve_adminq_report_stats command failed.");
			return ret;
		}
	}

	return 0;
}

static int
gve_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	gve_stop_tx_queues(dev);
	gve_stop_rx_queues(dev);

	dev->data->dev_started = 0;

	if (gve_is_gqi(dev->data->dev_private))
		gve_free_stats_report(dev);

	return 0;
}

static int
gve_dev_close(struct rte_eth_dev *dev)
{
	struct gve_priv *priv = dev->data->dev_private;
	int err = 0;
	uint16_t i;

	if (dev->data->dev_started) {
		err = gve_dev_stop(dev);
		if (err != 0)
			PMD_DRV_LOG(ERR, "Failed to stop dev.");
	}

	if (gve_is_gqi(priv)) {
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			gve_tx_queue_release(dev, i);

		for (i = 0; i < dev->data->nb_rx_queues; i++)
			gve_rx_queue_release(dev, i);
	} else {
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			gve_tx_queue_release_dqo(dev, i);

		for (i = 0; i < dev->data->nb_rx_queues; i++)
			gve_rx_queue_release_dqo(dev, i);
	}

	gve_free_qpls(priv);
	rte_free(priv->adminq);

	dev->data->mac_addrs = NULL;

	return err;
}

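/*
 * Hand the device a gve_driver_info block (OS type, driver and DPDK
 * versions, capability flags) so it can check driver compatibility.
 * Devices that do not implement this admin command return -EOPNOTSUPP,
 * which is not treated as an error.
 */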
static int
gve_verify_driver_compatibility(struct gve_priv *priv)
{
	const struct rte_memzone *driver_info_mem;
	struct gve_driver_info *driver_info;
	int err;

	driver_info_mem = rte_memzone_reserve_aligned("verify_driver_compatibility",
			sizeof(struct gve_driver_info),
			rte_socket_id(),
			RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);

	if (driver_info_mem == NULL) {
		PMD_DRV_LOG(ERR,
		    "Could not alloc memzone for driver compatibility");
		return -ENOMEM;
	}
	driver_info = (struct gve_driver_info *)driver_info_mem->addr;

	*driver_info = (struct gve_driver_info) {
		.os_type = 5, /* DPDK */
		.driver_major = GVE_VERSION_MAJOR,
		.driver_minor = GVE_VERSION_MINOR,
		.driver_sub = GVE_VERSION_SUB,
		.os_version_major = cpu_to_be32(DPDK_VERSION_MAJOR),
		.os_version_minor = cpu_to_be32(DPDK_VERSION_MINOR),
		.os_version_sub = cpu_to_be32(DPDK_VERSION_SUB),
		.driver_capability_flags = {
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
		},
	};

	populate_driver_version_strings((char *)driver_info->os_version_str1,
			(char *)driver_info->os_version_str2);

	err = gve_adminq_verify_driver_compatibility(priv,
		sizeof(struct gve_driver_info),
		(dma_addr_t)driver_info_mem->iova);
	/* It's ok if the device doesn't support this */
	if (err == -EOPNOTSUPP)
		err = 0;

	rte_memzone_free(driver_info_mem);
	return err;
}

static int
gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct gve_priv *priv = dev->data->dev_private;

	dev_info->device = dev->device;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_queues = priv->max_nb_rxq;
	dev_info->max_tx_queues = priv->max_nb_txq;
	if (gve_is_gqi(priv)) {
		dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_GQI;
		dev_info->max_rx_bufsize = GVE_RX_MAX_BUF_SIZE_GQI;
	} else {
		dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_DQO;
		dev_info->max_rx_bufsize = GVE_RX_MAX_BUF_SIZE_DQO;
	}

	dev_info->max_rx_pktlen = priv->max_mtu + RTE_ETHER_HDR_LEN;
	dev_info->max_mtu = priv->max_mtu;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;

	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS	|
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	|
		RTE_ETH_TX_OFFLOAD_TCP_TSO;

	if (priv->queue_format == GVE_DQO_RDA_FORMAT)
		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = GVE_DEFAULT_TX_RS_THRESH,
		.offloads = 0,
	};

	dev_info->default_rxportconf.ring_size = priv->rx_desc_cnt;
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = gve_is_gqi(priv) ? priv->rx_desc_cnt : GVE_MAX_QUEUE_SIZE_DQO,
		.nb_min = priv->rx_desc_cnt,
		.nb_align = 1,
	};

	dev_info->default_txportconf.ring_size = priv->tx_desc_cnt;
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = gve_is_gqi(priv) ? priv->tx_desc_cnt : GVE_MAX_QUEUE_SIZE_DQO,
		.nb_min = priv->tx_desc_cnt,
		.nb_align = 1,
	};

	dev_info->flow_type_rss_offloads = GVE_RTE_RSS_OFFLOAD_ALL;
	dev_info->hash_key_size = GVE_RSS_HASH_KEY_SIZE;
	dev_info->reta_size = GVE_RSS_INDIR_SIZE;

	return 0;
}

static int
gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t i;
	if (gve_is_gqi(dev->data->dev_private))
		gve_get_imissed_from_nic(dev);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct gve_tx_queue *txq = dev->data->tx_queues[i];
		if (txq == NULL)
			continue;

		stats->opackets += txq->stats.packets;
		stats->obytes += txq->stats.bytes;
		stats->oerrors += txq->stats.errors;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
		if (rxq == NULL)
			continue;

		stats->ipackets += rxq->stats.packets;
		stats->ibytes += rxq->stats.bytes;
		stats->ierrors += rxq->stats.errors;
		stats->rx_nombuf += rxq->stats.no_mbufs;
		stats->imissed += rxq->stats.imissed;
	}

	return 0;
}

static int
gve_dev_stats_reset(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct gve_tx_queue *txq = dev->data->tx_queues[i];
		if (txq == NULL)
			continue;

		memset(&txq->stats, 0, sizeof(txq->stats));
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
		if (rxq == NULL)
			continue;

		memset(&rxq->stats, 0, sizeof(rxq->stats));
	}

	return 0;
}

static int
gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct gve_priv *priv = dev->data->dev_private;
	int err;

	if (mtu < RTE_ETHER_MIN_MTU || mtu > priv->max_mtu) {
		PMD_DRV_LOG(ERR, "MIN MTU is %u, MAX MTU is %u",
			    RTE_ETHER_MIN_MTU, priv->max_mtu);
		return -EINVAL;
	}

	/* MTU setting is forbidden while the port is started. */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "Port must be stopped before configuration");
		return -EBUSY;
	}

	err = gve_adminq_set_mtu(priv, mtu);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to set MTU to %u, err = %d", mtu, err);
		return err;
	}

	return 0;
}

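/*
 * Per-queue extended stats are read generically: each table entry maps
 * an xstat name to the field offset inside gve_tx_stats/gve_rx_stats,
 * and gve_xstats_get() loads the value through that offset.
 */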
#define TX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_tx_stats, x)
#define RX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_rx_stats, x)

static const struct gve_xstats_name_offset tx_xstats_name_offset[] = {
	{ "packets", TX_QUEUE_STATS_OFFSET(packets) },
	{ "bytes",   TX_QUEUE_STATS_OFFSET(bytes) },
	{ "errors",  TX_QUEUE_STATS_OFFSET(errors) },
};

static const struct gve_xstats_name_offset rx_xstats_name_offset[] = {
	{ "packets",                RX_QUEUE_STATS_OFFSET(packets) },
	{ "bytes",                  RX_QUEUE_STATS_OFFSET(bytes) },
	{ "errors",                 RX_QUEUE_STATS_OFFSET(errors) },
	{ "mbuf_alloc_errors",      RX_QUEUE_STATS_OFFSET(no_mbufs) },
	{ "mbuf_alloc_errors_bulk", RX_QUEUE_STATS_OFFSET(no_mbufs_bulk) },
	{ "imissed",                RX_QUEUE_STATS_OFFSET(imissed) },
};

static int
gve_xstats_count(struct rte_eth_dev *dev)
{
	uint16_t i, count = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i])
			count += RTE_DIM(tx_xstats_name_offset);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i])
			count += RTE_DIM(rx_xstats_name_offset);
	}

	return count;
}

static int
gve_xstats_get(struct rte_eth_dev *dev,
			struct rte_eth_xstat *xstats,
			unsigned int size)
{
	uint16_t i, j, count = gve_xstats_count(dev);
	const char *stats;

	if (gve_is_gqi(dev->data->dev_private))
		gve_get_imissed_from_nic(dev);

	if (xstats == NULL || size < count)
		return count;

	count = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct gve_tx_queue *txq = dev->data->tx_queues[i];
		if (txq == NULL)
			continue;

		stats = (const char *)&txq->stats;
		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++, count++) {
			xstats[count].id = count;
			xstats[count].value = *(const uint64_t *)
				(stats + tx_xstats_name_offset[j].offset);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct gve_rx_queue *rxq = dev->data->rx_queues[i];
		if (rxq == NULL)
			continue;

		stats = (const char *)&rxq->stats;
		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++, count++) {
			xstats[count].id = count;
			xstats[count].value = *(const uint64_t *)
				(stats + rx_xstats_name_offset[j].offset);
		}
	}

	return count;
}

static int
gve_xstats_get_names(struct rte_eth_dev *dev,
			struct rte_eth_xstat_name *xstats_names,
			unsigned int size)
{
	uint16_t i, j, count = gve_xstats_count(dev);

	if (xstats_names == NULL || size < count)
		return count;

	count = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (dev->data->tx_queues[i] == NULL)
			continue;

		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "tx_q%u_%s", i, tx_xstats_name_offset[j].name);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (dev->data->rx_queues[i] == NULL)
			continue;

		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "rx_q%u_%s", i, rx_xstats_name_offset[j].name);
	}

	return count;
}

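/*
 * Update the RSS hash configuration. Only the Toeplitz hash is
 * supported, and the key must be exactly GVE_RSS_HASH_KEY_SIZE bytes;
 * an existing redirection table is preserved, otherwise a default
 * table is generated.
 */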
static int
gve_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct gve_priv *priv = dev->data->dev_private;
	struct gve_rss_config gve_rss_conf;
	int rss_reta_size;
	int err;

	if (gve_validate_rss_hf(rss_conf->rss_hf)) {
		PMD_DRV_LOG(ERR, "Unsupported hash function.");
		return -EINVAL;
	}

	if (rss_conf->algorithm != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
		rss_conf->algorithm != RTE_ETH_HASH_FUNCTION_DEFAULT) {
		PMD_DRV_LOG(ERR, "Device only supports Toeplitz algorithm.");
		return -EINVAL;
	}

	if (rss_conf->rss_key_len) {
		if (rss_conf->rss_key_len != GVE_RSS_HASH_KEY_SIZE) {
			PMD_DRV_LOG(ERR,
				"Invalid hash key size. Only RSS hash key size "
				"of %u supported", GVE_RSS_HASH_KEY_SIZE);
			return -EINVAL;
		}

		if (!rss_conf->rss_key) {
			PMD_DRV_LOG(ERR, "RSS key must be non-null.");
			return -EINVAL;
		}
	} else {
		if (!priv->rss_config.key_size) {
			PMD_DRV_LOG(ERR, "RSS key must be initialized before "
				"any other configuration.");
			return -EINVAL;
		}
		rss_conf->rss_key_len = priv->rss_config.key_size;
	}

	rss_reta_size = priv->rss_config.indir ?
			priv->rss_config.indir_size :
			GVE_RSS_INDIR_SIZE;
	err = gve_init_rss_config(&gve_rss_conf, rss_conf->rss_key_len,
		rss_reta_size);
	if (err)
		return err;

	gve_rss_conf.alg = GVE_RSS_HASH_TOEPLITZ;
	err = gve_update_rss_hash_types(priv, &gve_rss_conf, rss_conf);
	if (err)
		goto err;
	err = gve_update_rss_key(priv, &gve_rss_conf, rss_conf);
	if (err)
		goto err;

	/* Set redirection table to default or preexisting. */
	if (!priv->rss_config.indir)
		gve_generate_rss_reta(dev, &gve_rss_conf);
	else
		memcpy(gve_rss_conf.indir, priv->rss_config.indir,
			gve_rss_conf.indir_size * sizeof(*priv->rss_config.indir));

	err = gve_adminq_configure_rss(priv, &gve_rss_conf);
	if (!err)
		gve_update_priv_rss_config(priv, &gve_rss_conf);

err:
	gve_free_rss_config(&gve_rss_conf);
	return err;
}

static int
gve_rss_hash_conf_get(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct gve_priv *priv = dev->data->dev_private;

	if (!(dev->data->dev_conf.rxmode.offloads &
			RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		PMD_DRV_LOG(ERR, "RSS not configured.");
		return -ENOTSUP;
	}

	gve_to_rte_rss_hf(priv->rss_config.hash_types, rss_conf);
	rss_conf->rss_key_len = priv->rss_config.key_size;
	if (rss_conf->rss_key) {
		if (!priv->rss_config.key) {
			PMD_DRV_LOG(ERR, "Unable to retrieve default RSS hash key.");
			return -ENOTSUP;
		}
		memcpy(rss_conf->rss_key, priv->rss_config.key,
			rss_conf->rss_key_len * sizeof(*rss_conf->rss_key));
	}

	return 0;
}

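/*
 * Program the RSS redirection table. The RSS key must already be
 * configured and the caller must supply all GVE_RSS_INDIR_SIZE entries;
 * entries not selected by the mask keep their current value.
 */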
static int
gve_rss_reta_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	struct gve_priv *priv = dev->data->dev_private;
	struct gve_rss_config gve_rss_conf;
	int table_id;
	int err;
	int i;

	/* RSS key must be set before the redirection table can be set. */
	if (!priv->rss_config.key || priv->rss_config.key_size == 0) {
		PMD_DRV_LOG(ERR, "RSS hash key must be set before the "
			"redirection table can be updated.");
		return -ENOTSUP;
	}

	if (reta_size != GVE_RSS_INDIR_SIZE) {
		PMD_DRV_LOG(ERR, "Redirection table must have %hu elements",
			(uint16_t)GVE_RSS_INDIR_SIZE);
		return -EINVAL;
	}

	err = gve_init_rss_config_from_priv(priv, &gve_rss_conf);
	if (err) {
		PMD_DRV_LOG(ERR, "Error allocating new RSS config.");
		return err;
	}

	table_id = 0;
	for (i = 0; i < priv->rss_config.indir_size; i++) {
		int table_entry = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[table_id].mask & (1ULL << table_entry))
			gve_rss_conf.indir[i] =
				reta_conf[table_id].reta[table_entry];

		if (table_entry == RTE_ETH_RETA_GROUP_SIZE - 1)
			table_id++;
	}

	err = gve_adminq_configure_rss(priv, &gve_rss_conf);
	if (err)
		PMD_DRV_LOG(ERR, "Problem configuring RSS with device.");
	else
		gve_update_priv_rss_config(priv, &gve_rss_conf);

	gve_free_rss_config(&gve_rss_conf);
	return err;
}

static int
gve_rss_reta_query(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	struct gve_priv *priv = dev->data->dev_private;
	int table_id;
	int i;

	if (!(dev->data->dev_conf.rxmode.offloads &
		RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		PMD_DRV_LOG(ERR, "RSS not configured.");
		return -ENOTSUP;
	}

	/* RSS key must be set before the redirection table can be queried. */
	if (!priv->rss_config.key) {
		PMD_DRV_LOG(ERR, "RSS hash key must be set before the "
			"redirection table can be queried.");
		return -ENOTSUP;
	}

	if (reta_size != priv->rss_config.indir_size) {
		PMD_DRV_LOG(ERR, "RSS redirection table must have %d entries.",
			priv->rss_config.indir_size);
		return -EINVAL;
	}

	table_id = 0;
	for (i = 0; i < priv->rss_config.indir_size; i++) {
		int table_entry = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[table_id].mask & (1ULL << table_entry))
			reta_conf[table_id].reta[table_entry] =
				priv->rss_config.indir[i];

		if (table_entry == RTE_ETH_RETA_GROUP_SIZE - 1)
			table_id++;
	}

	return 0;
}

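/*
 * Two ops tables are registered depending on the negotiated queue
 * format: GQI devices use the callbacks below, DQO devices use the
 * *_dqo queue variants; all other callbacks are shared.
 */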
static const struct eth_dev_ops gve_eth_dev_ops = {
	.dev_configure        = gve_dev_configure,
	.dev_start            = gve_dev_start,
	.dev_stop             = gve_dev_stop,
	.dev_close            = gve_dev_close,
	.dev_infos_get        = gve_dev_info_get,
	.rx_queue_setup       = gve_rx_queue_setup,
	.tx_queue_setup       = gve_tx_queue_setup,
	.rx_queue_release     = gve_rx_queue_release,
	.tx_queue_release     = gve_tx_queue_release,
	.rx_queue_start       = gve_rx_queue_start,
	.tx_queue_start       = gve_tx_queue_start,
	.rx_queue_stop        = gve_rx_queue_stop,
	.tx_queue_stop        = gve_tx_queue_stop,
	.link_update          = gve_link_update,
	.stats_get            = gve_dev_stats_get,
	.stats_reset          = gve_dev_stats_reset,
	.mtu_set              = gve_dev_mtu_set,
	.xstats_get           = gve_xstats_get,
	.xstats_get_names     = gve_xstats_get_names,
	.rss_hash_update      = gve_rss_hash_update,
	.rss_hash_conf_get    = gve_rss_hash_conf_get,
	.reta_update          = gve_rss_reta_update,
	.reta_query           = gve_rss_reta_query,
};

static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
	.dev_configure        = gve_dev_configure,
	.dev_start            = gve_dev_start,
	.dev_stop             = gve_dev_stop,
	.dev_close            = gve_dev_close,
	.dev_infos_get        = gve_dev_info_get,
	.rx_queue_setup       = gve_rx_queue_setup_dqo,
	.tx_queue_setup       = gve_tx_queue_setup_dqo,
	.rx_queue_release     = gve_rx_queue_release_dqo,
	.tx_queue_release     = gve_tx_queue_release_dqo,
	.rx_queue_start       = gve_rx_queue_start_dqo,
	.tx_queue_start       = gve_tx_queue_start_dqo,
	.rx_queue_stop        = gve_rx_queue_stop_dqo,
	.tx_queue_stop        = gve_tx_queue_stop_dqo,
	.link_update          = gve_link_update,
	.stats_get            = gve_dev_stats_get,
	.stats_reset          = gve_dev_stats_reset,
	.mtu_set              = gve_dev_mtu_set,
	.xstats_get           = gve_xstats_get,
	.xstats_get_names     = gve_xstats_get_names,
	.rss_hash_update      = gve_rss_hash_update,
	.rss_hash_conf_get    = gve_rss_hash_conf_get,
	.reta_update          = gve_rss_reta_update,
	.reta_query           = gve_rss_reta_query,
};

static void
gve_free_counter_array(struct gve_priv *priv)
{
	rte_memzone_free(priv->cnt_array_mz);
	priv->cnt_array = NULL;
}

static void
gve_free_irq_db(struct gve_priv *priv)
{
	rte_memzone_free(priv->irq_dbs_mz);
	priv->irq_dbs = NULL;
}

static void
gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err)
			PMD_DRV_LOG(ERR, "Could not deconfigure device resources: err=%d", err);
	}
	gve_free_counter_array(priv);
	gve_free_irq_db(priv);
	gve_clear_device_resources_ok(priv);
}

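/*
 * Read the MSI-X table size from the MSI-X capability in PCI config
 * space; the QSIZE field is N-1, so add one. Returns 0 if the
 * capability is absent or cannot be read.
 */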
static int
pci_dev_msix_vec_count(struct rte_pci_device *pdev)
{
	off_t msix_pos = rte_pci_find_capability(pdev, RTE_PCI_CAP_ID_MSIX);
	uint16_t control;

	if (msix_pos > 0 && rte_pci_read_config(pdev, &control, sizeof(control),
			msix_pos + RTE_PCI_MSIX_FLAGS) == sizeof(control))
		return (control & RTE_PCI_MSIX_FLAGS_QSIZE) + 1;

	return 0;
}

static int
gve_setup_device_resources(struct gve_priv *priv)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int err = 0;

	snprintf(z_name, sizeof(z_name), "gve_%s_cnt_arr", priv->pci_dev->device.name);
	mz = rte_memzone_reserve_aligned(z_name,
					 priv->num_event_counters * sizeof(*priv->cnt_array),
					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
					 PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Could not alloc memzone for count array");
		return -ENOMEM;
	}
	priv->cnt_array = (rte_be32_t *)mz->addr;
	priv->cnt_array_mz = mz;

	snprintf(z_name, sizeof(z_name), "gve_%s_irqmz", priv->pci_dev->device.name);
	mz = rte_memzone_reserve_aligned(z_name,
					 sizeof(*priv->irq_dbs) * (priv->num_ntfy_blks),
					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
					 PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Could not alloc memzone for irq_dbs");
		err = -ENOMEM;
		goto free_cnt_array;
	}
	priv->irq_dbs = (struct gve_irq_db *)mz->addr;
	priv->irq_dbs_mz = mz;

	err = gve_adminq_configure_device_resources(priv,
						    priv->cnt_array_mz->iova,
						    priv->num_event_counters,
						    priv->irq_dbs_mz->iova,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		PMD_DRV_LOG(ERR, "Could not config device resources: err=%d", err);
		goto free_irq_dbs;
	}
	return 0;

free_irq_dbs:
	gve_free_irq_db(priv);
free_cnt_array:
	gve_free_counter_array(priv);

	return err;
}

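/*
 * One-time device bring-up: allocate the admin queue, verify driver
 * compatibility, query the device description, size the queue limits
 * from the available MSI-X vectors, allocate QPLs for the GQI_QPL
 * format, and finally configure the shared counter/IRQ-doorbell
 * resources.
 */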
static int
gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
	uint16_t pages;
	int num_ntfy;
	uint32_t i;
	int err;

	/* Set up the adminq */
	err = gve_adminq_alloc(priv);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to alloc admin queue: err=%d", err);
		return err;
	}
	err = gve_verify_driver_compatibility(priv);
	if (err) {
		PMD_DRV_LOG(ERR, "Could not verify driver compatibility: err=%d", err);
		goto free_adminq;
	}

	if (skip_describe_device)
		goto setup_device;

	/* Get the initial information we need from the device */
	err = gve_adminq_describe_device(priv);
	if (err) {
		PMD_DRV_LOG(ERR, "Could not get device information: err=%d", err);
		goto free_adminq;
	}

	num_ntfy = pci_dev_msix_vec_count(priv->pci_dev);
	if (num_ntfy <= 0) {
		PMD_DRV_LOG(ERR, "Could not count MSI-x vectors");
		err = -EIO;
		goto free_adminq;
	} else if (num_ntfy < GVE_MIN_MSIX) {
		PMD_DRV_LOG(ERR, "GVE needs at least %d MSI-x vectors, but only has %d",
			    GVE_MIN_MSIX, num_ntfy);
		err = -EINVAL;
		goto free_adminq;
	}

	priv->num_registered_pages = 0;

	/* gvnic has one Notification Block per MSI-x vector, except for the
	 * management vector
	 */
	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
	priv->mgmt_msix_idx = priv->num_ntfy_blks;

	priv->max_nb_txq = RTE_MIN(priv->max_nb_txq, priv->num_ntfy_blks / 2);
	priv->max_nb_rxq = RTE_MIN(priv->max_nb_rxq, priv->num_ntfy_blks / 2);

	if (priv->default_num_queues > 0) {
		priv->max_nb_txq = RTE_MIN(priv->default_num_queues, priv->max_nb_txq);
		priv->max_nb_rxq = RTE_MIN(priv->default_num_queues, priv->max_nb_rxq);
	}

	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
		    priv->max_nb_txq, priv->max_nb_rxq);

	/* In the GQI_QPL queue format, allocate queue page lists for the
	 * maximum number of queues. Tx QPL ids start at 0 and Rx QPL ids
	 * start at priv->max_nb_txq.
	 */
	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
		priv->qpl = rte_zmalloc("gve_qpl",
					(priv->max_nb_txq + priv->max_nb_rxq) *
					sizeof(struct gve_queue_page_list), 0);
		if (priv->qpl == NULL) {
			PMD_DRV_LOG(ERR, "Failed to alloc qpl.");
			err = -ENOMEM;
			goto free_adminq;
		}

		for (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {
			if (i < priv->max_nb_txq)
				pages = priv->tx_pages_per_qpl;
			else
				pages = priv->rx_data_slot_cnt;
			err = gve_alloc_queue_page_list(priv, i, pages);
			if (err != 0) {
				PMD_DRV_LOG(ERR, "Failed to alloc qpl %u.", i);
				goto err_qpl;
			}
		}
	}

setup_device:
	err = gve_setup_device_resources(priv);
	if (!err)
		return 0;
err_qpl:
	gve_free_qpls(priv);
free_adminq:
	gve_adminq_free(priv);
	return err;
}

static void
gve_teardown_priv_resources(struct gve_priv *priv)
{
	gve_teardown_device_resources(priv);
	gve_adminq_free(priv);
}

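/*
 * ethdev init: map the register and doorbell BARs, report the driver
 * version, read the advertised queue limits, initialize the private
 * data and hook up either the GQI or the DQO datapath functions.
 */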
static int
gve_dev_init(struct rte_eth_dev *eth_dev)
{
	struct gve_priv *priv = eth_dev->data->dev_private;
	int max_tx_queues, max_rx_queues;
	struct rte_pci_device *pci_dev;
	struct gve_registers *reg_bar;
	rte_be32_t *db_bar;
	int err;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);

	reg_bar = pci_dev->mem_resource[GVE_REG_BAR].addr;
	if (!reg_bar) {
		PMD_DRV_LOG(ERR, "Failed to map pci bar!");
		return -ENOMEM;
	}

	db_bar = pci_dev->mem_resource[GVE_DB_BAR].addr;
	if (!db_bar) {
		PMD_DRV_LOG(ERR, "Failed to map doorbell bar!");
		return -ENOMEM;
	}

	gve_write_version(&reg_bar->driver_version);
	/* Get max queues to alloc etherdev */
	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);

	priv->reg_bar0 = reg_bar;
	priv->db_bar2 = db_bar;
	priv->pci_dev = pci_dev;
	priv->state_flags = 0x0;

	priv->max_nb_txq = max_tx_queues;
	priv->max_nb_rxq = max_rx_queues;

	err = gve_init_priv(priv, false);
	if (err)
		return err;

	if (gve_is_gqi(priv)) {
		eth_dev->dev_ops = &gve_eth_dev_ops;
		gve_set_rx_function(eth_dev);
		gve_set_tx_function(eth_dev);
	} else {
		eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
		gve_set_rx_function_dqo(eth_dev);
		gve_set_tx_function_dqo(eth_dev);
	}

	eth_dev->data->mac_addrs = &priv->dev_addr;

	return 0;
}

static int
gve_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct gve_priv *priv = eth_dev->data->dev_private;

	gve_teardown_priv_resources(priv);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int
gve_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct gve_priv), gve_dev_init);
}

static int
gve_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, gve_dev_uninit);
}

static const struct rte_pci_id pci_id_gve_map[] = {
	{ RTE_PCI_DEVICE(GOOGLE_VENDOR_ID, GVE_DEV_ID) },
	{ .device_id = 0 },
};

static struct rte_pci_driver rte_gve_pmd = {
	.id_table = pci_id_gve_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = gve_pci_probe,
	.remove = gve_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_gve, rte_gve_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_gve, pci_id_gve_map);
RTE_PMD_REGISTER_KMOD_DEP(net_gve, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(gve_logtype_driver, driver, NOTICE);