/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2022 Intel Corporation
 */

#include "gve_ethdev.h"
#include "base/gve_adminq.h"
#include "base/gve_register.h"

const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;

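/* Write the driver version string to the device's version register one
 * byte at a time (prefix first), terminated by a newline, so the gVNIC
 * backend can identify the driver.
 */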
static void
gve_write_version(uint8_t *driver_version_register)
{
	const char *c = gve_version_prefix;

	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}

	c = gve_version_str;
	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}
	writeb('\n', driver_version_register);
}

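/* Reserve one IOVA-contiguous memzone of 'pages' pages for queue page
 * list 'id' and record the bus address of every page, so the pages can
 * later be registered with the device over the admin queue.
 */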
static int
gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct gve_queue_page_list *qpl;
	const struct rte_memzone *mz;
	dma_addr_t page_bus;
	uint32_t i;

	if (priv->num_registered_pages + pages >
	    priv->max_registered_pages) {
		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
			    priv->num_registered_pages + pages,
			    priv->max_registered_pages);
		return -EINVAL;
	}
	qpl = &priv->qpl[id];
	snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
	mz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,
					 rte_socket_id(),
					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc %s.", z_name);
		return -ENOMEM;
	}
	qpl->page_buses = rte_zmalloc("qpl page buses", pages * sizeof(dma_addr_t), 0);
	if (qpl->page_buses == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc qpl %u page buses", id);
		rte_memzone_free(mz);	/* don't leak the memzone on failure */
		return -ENOMEM;
	}
	page_bus = mz->iova;
	for (i = 0; i < pages; i++) {
		qpl->page_buses[i] = page_bus;
		page_bus += PAGE_SIZE;
	}
	qpl->id = id;
	qpl->mz = mz;
	qpl->num_entries = pages;

	priv->num_registered_pages += pages;

	return 0;
}

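/* Release all queue page lists: the backing memzones, the per-page
 * bus-address arrays, and the QPL array itself.
 */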
static void
gve_free_qpls(struct gve_priv *priv)
{
	uint16_t nb_txqs = priv->max_nb_txq;
	uint16_t nb_rxqs = priv->max_nb_rxq;
	uint32_t i;

	if (priv->qpl == NULL)	/* nothing allocated, e.g. non-QPL format */
		return;

	for (i = 0; i < nb_txqs + nb_rxqs; i++) {
		if (priv->qpl[i].mz != NULL)
			rte_memzone_free(priv->qpl[i].mz);
		rte_free(priv->qpl[i].page_buses);
	}

	rte_free(priv->qpl);
}

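/* Ethdev configure hook: force the RSS-hash offload on when RSS
 * multi-queue mode is requested, and record a requested LRO offload in
 * the RSC enable flag used by the Rx path.
 */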
static int
gve_dev_configure(struct rte_eth_dev *dev)
{
	struct gve_priv *priv = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
		priv->enable_rsc = 1;

	return 0;
}

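/* Populate the Rx data ring. For the GQI_QPL format the descriptors
 * reference fixed offsets inside the queue page list; otherwise they
 * carry the IOVA of freshly allocated mbufs. Falls back to one-by-one
 * allocation if the bulk mbuf allocation fails, then writes the tail
 * doorbell.
 */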
static int
gve_refill_pages(struct gve_rx_queue *rxq)
{
	struct rte_mbuf *nmb;
	uint16_t i;
	int diag;

	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
	if (diag < 0) {
		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
			nmb = rte_pktmbuf_alloc(rxq->mpool);
			if (!nmb)
				break;
			rxq->sw_ring[i] = nmb;
		}
		if (i < rxq->nb_rx_desc - 1)
			return -ENOMEM;
	}
	rxq->nb_avail = 0;
	rxq->next_avail = rxq->nb_rx_desc - 1;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->is_gqi_qpl) {
			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(i * PAGE_SIZE);
		} else {
			if (i == rxq->nb_rx_desc - 1)
				break;
			nmb = rxq->sw_ring[i];
			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
		}
	}

	rte_write32(rte_cpu_to_be_32(rxq->next_avail), rxq->qrx_tail);

	return 0;
}

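/* Ethdev link_update hook: the link is reported down until the port is
 * started; afterwards it is reported up, with the speed queried from
 * the device through the admin queue (unknown if the query fails).
 */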
static int
gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct gve_priv *priv = dev->data->dev_private;
	struct rte_eth_link link;
	int err;

	memset(&link, 0, sizeof(link));
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = RTE_ETH_LINK_AUTONEG;

	if (!dev->data->dev_started) {
		link.link_status = RTE_ETH_LINK_DOWN;
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	} else {
		link.link_status = RTE_ETH_LINK_UP;
		PMD_DRV_LOG(DEBUG, "Get link status from hw");
		err = gve_adminq_report_link_speed(priv);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to get link speed.");
			priv->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
		}
		link.link_speed = priv->link_speed;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

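/* Create the Tx and Rx queues on the device, hook up each queue's
 * doorbell and head pointers in BAR2 and the counter array, mask queue
 * interrupts, and pre-fill every Rx ring before declaring the port
 * started.
 */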
static int
gve_dev_start(struct rte_eth_dev *dev)
{
	uint16_t num_queues = dev->data->nb_tx_queues;
	struct gve_priv *priv = dev->data->dev_private;
	struct gve_tx_queue *txq;
	struct gve_rx_queue *rxq;
	uint16_t i;
	int err;

	priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;
	err = gve_adminq_create_tx_queues(priv, num_queues);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to create %u tx queues.", num_queues);
		return err;
	}
	for (i = 0; i < num_queues; i++) {
		txq = priv->txqs[i];
		txq->qtx_tail =
			&priv->db_bar2[rte_be_to_cpu_32(txq->qres->db_index)];
		txq->qtx_head =
			&priv->cnt_array[rte_be_to_cpu_32(txq->qres->counter_index)];

		rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);
	}

	num_queues = dev->data->nb_rx_queues;
	priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;
	err = gve_adminq_create_rx_queues(priv, num_queues);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to create %u rx queues.", num_queues);
		goto err_tx;
	}
	for (i = 0; i < num_queues; i++) {
		rxq = priv->rxqs[i];
		rxq->qrx_tail =
			&priv->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];

		rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);

		err = gve_refill_pages(rxq);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to refill for RX");
			goto err_rx;
		}
	}

	dev->data->dev_started = 1;
	gve_link_update(dev, 0);

	return 0;

err_rx:
	gve_stop_rx_queues(dev);
err_tx:
	gve_stop_tx_queues(dev);
	return err;
}

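/* Mark the link down and destroy the Tx/Rx queues on the device. */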
static int
gve_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	gve_stop_tx_queues(dev);
	gve_stop_rx_queues(dev);

	dev->data->dev_started = 0;

	return 0;
}

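/* Stop the port if it is still running, release all queue resources
 * and queue page lists, and free the admin queue. mac_addrs points
 * into priv, so it is only detached here, not freed.
 */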
static int
gve_dev_close(struct rte_eth_dev *dev)
{
	struct gve_priv *priv = dev->data->dev_private;
	int err = 0;
	uint16_t i;

	if (dev->data->dev_started) {
		err = gve_dev_stop(dev);
		if (err != 0)
			PMD_DRV_LOG(ERR, "Failed to stop dev.");
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		gve_tx_queue_release(dev, i);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		gve_rx_queue_release(dev, i);

	gve_free_qpls(priv);
	rte_free(priv->adminq);

	dev->data->mac_addrs = NULL;

	return err;
}

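/* Ethdev dev_infos_get hook: advertise queue limits, MTU range,
 * offload capabilities (Rx LRO only for the DQO_RDA format) and the
 * fixed descriptor counts dictated by the device.
 */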
static int
gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct gve_priv *priv = dev->data->dev_private;

	dev_info->device = dev->device;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_queues = priv->max_nb_rxq;
	dev_info->max_tx_queues = priv->max_nb_txq;
	dev_info->min_rx_bufsize = GVE_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = GVE_MAX_RX_PKTLEN;
	dev_info->max_mtu = GVE_MAX_MTU;
	dev_info->min_mtu = GVE_MIN_MTU;

	dev_info->rx_offload_capa = 0;
	dev_info->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS	|
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	|
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	|
		RTE_ETH_TX_OFFLOAD_TCP_TSO;

	if (priv->queue_format == GVE_DQO_RDA_FORMAT)
		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->default_rxportconf.ring_size = priv->rx_desc_cnt;
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = priv->rx_desc_cnt,
		.nb_min = priv->rx_desc_cnt,
		.nb_align = 1,
	};

	dev_info->default_txportconf.ring_size = priv->tx_desc_cnt;
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = priv->tx_desc_cnt,
		.nb_min = priv->tx_desc_cnt,
		.nb_align = 1,
	};

	return 0;
}

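/* Validate the requested MTU against the device limits and program it
 * through the admin queue; changing the MTU on a started port is
 * rejected.
 */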
static int
gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct gve_priv *priv = dev->data->dev_private;
	int err;

	if (mtu < RTE_ETHER_MIN_MTU || mtu > priv->max_mtu) {
		PMD_DRV_LOG(ERR, "MIN MTU is %u, MAX MTU is %u",
			    RTE_ETHER_MIN_MTU, priv->max_mtu);
		return -EINVAL;
	}

	/* Changing the MTU is forbidden while the port is started */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "Port must be stopped before configuration");
		return -EBUSY;
	}

	err = gve_adminq_set_mtu(priv, mtu);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to set mtu as %u err = %d", mtu, err);
		return err;
	}

	return 0;
}

static const struct eth_dev_ops gve_eth_dev_ops = {
	.dev_configure        = gve_dev_configure,
	.dev_start            = gve_dev_start,
	.dev_stop             = gve_dev_stop,
	.dev_close            = gve_dev_close,
	.dev_infos_get        = gve_dev_info_get,
	.rx_queue_setup       = gve_rx_queue_setup,
	.tx_queue_setup       = gve_tx_queue_setup,
	.rx_queue_release     = gve_rx_queue_release,
	.tx_queue_release     = gve_tx_queue_release,
	.link_update          = gve_link_update,
	.mtu_set              = gve_dev_mtu_set,
};

static void
gve_free_counter_array(struct gve_priv *priv)
{
	rte_memzone_free(priv->cnt_array_mz);
	priv->cnt_array = NULL;
}

static void
gve_free_irq_db(struct gve_priv *priv)
{
	rte_memzone_free(priv->irq_dbs_mz);
	priv->irq_dbs = NULL;
}

static void
gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err)
			PMD_DRV_LOG(ERR, "Could not deconfigure device resources: err=%d", err);
	}
	gve_free_counter_array(priv);
	gve_free_irq_db(priv);
	gve_clear_device_resources_ok(priv);
}

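/* Walk the PCI capability list and return the config-space offset of
 * capability 'cap', or 0 if it is not present. The loop bound guards
 * against malformed (circular) capability chains.
 */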
static uint8_t
pci_dev_find_capability(struct rte_pci_device *pdev, int cap)
{
	uint8_t pos, id;
	uint16_t ent;
	int loops;
	int ret;

	ret = rte_pci_read_config(pdev, &pos, sizeof(pos), PCI_CAPABILITY_LIST);
	if (ret != sizeof(pos))
		return 0;

	loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF;

	while (pos && loops--) {
		ret = rte_pci_read_config(pdev, &ent, sizeof(ent), pos);
		if (ret != sizeof(ent))
			return 0;

		id = ent & 0xff;
		if (id == 0xff)
			break;

		if (id == cap)
			return pos;

		pos = (ent >> 8);
	}

	return 0;
}

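/* Return the number of MSI-X vectors the device advertises in its
 * MSI-X capability, or 0 if the capability is absent or unreadable.
 */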
static int
pci_dev_msix_vec_count(struct rte_pci_device *pdev)
{
	uint8_t msix_cap = pci_dev_find_capability(pdev, PCI_CAP_ID_MSIX);
	uint16_t control;
	int ret;

	if (!msix_cap)
		return 0;

	ret = rte_pci_read_config(pdev, &control, sizeof(control), msix_cap + PCI_MSIX_FLAGS);
	if (ret != sizeof(control))
		return 0;

	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
}

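/* Allocate the event counter array and the interrupt doorbell block in
 * IOVA-contiguous memzones and hand their bus addresses to the device
 * through the admin queue.
 */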
static int
gve_setup_device_resources(struct gve_priv *priv)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int err = 0;

	snprintf(z_name, sizeof(z_name), "gve_%s_cnt_arr", priv->pci_dev->device.name);
	mz = rte_memzone_reserve_aligned(z_name,
					 priv->num_event_counters * sizeof(*priv->cnt_array),
					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
					 PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Could not alloc memzone for count array");
		return -ENOMEM;
	}
	priv->cnt_array = (rte_be32_t *)mz->addr;
	priv->cnt_array_mz = mz;

	snprintf(z_name, sizeof(z_name), "gve_%s_irqmz", priv->pci_dev->device.name);
	mz = rte_memzone_reserve_aligned(z_name,
					 sizeof(*priv->irq_dbs) * (priv->num_ntfy_blks),
					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
					 PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Could not alloc memzone for irq_dbs");
		err = -ENOMEM;
		goto free_cnt_array;
	}
	priv->irq_dbs = (struct gve_irq_db *)mz->addr;
	priv->irq_dbs_mz = mz;

	err = gve_adminq_configure_device_resources(priv,
						    priv->cnt_array_mz->iova,
						    priv->num_event_counters,
						    priv->irq_dbs_mz->iova,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		PMD_DRV_LOG(ERR, "Could not config device resources: err=%d", err);
		goto free_irq_dbs;
	}
	return 0;

free_irq_dbs:
	gve_free_irq_db(priv);
free_cnt_array:
	gve_free_counter_array(priv);

	return err;
}

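/* One-time device bring-up: allocate the admin queue, query the device
 * description, size the notification blocks from the MSI-X vector
 * count, cap the queue counts accordingly, allocate queue page lists
 * for the GQI_QPL format, and configure device resources.
 */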
static int
gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
	uint16_t pages;
	int num_ntfy;
	uint32_t i;
	int err;

	/* Set up the adminq */
	err = gve_adminq_alloc(priv);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to alloc admin queue: err=%d", err);
		return err;
	}

	if (skip_describe_device)
		goto setup_device;

	/* Get the initial information we need from the device */
	err = gve_adminq_describe_device(priv);
	if (err) {
		PMD_DRV_LOG(ERR, "Could not get device information: err=%d", err);
		goto free_adminq;
	}

	num_ntfy = pci_dev_msix_vec_count(priv->pci_dev);
	if (num_ntfy <= 0) {
		PMD_DRV_LOG(ERR, "Could not count MSI-x vectors");
		err = -EIO;
		goto free_adminq;
	} else if (num_ntfy < GVE_MIN_MSIX) {
		PMD_DRV_LOG(ERR, "GVE needs at least %d MSI-x vectors, but only has %d",
			    GVE_MIN_MSIX, num_ntfy);
		err = -EINVAL;
		goto free_adminq;
	}

	priv->num_registered_pages = 0;

	/* gvnic has one Notification Block per MSI-x vector, except for the
	 * management vector
	 */
	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
	priv->mgmt_msix_idx = priv->num_ntfy_blks;

	priv->max_nb_txq = RTE_MIN(priv->max_nb_txq, priv->num_ntfy_blks / 2);
	priv->max_nb_rxq = RTE_MIN(priv->max_nb_rxq, priv->num_ntfy_blks / 2);

	if (priv->default_num_queues > 0) {
		priv->max_nb_txq = RTE_MIN(priv->default_num_queues, priv->max_nb_txq);
		priv->max_nb_rxq = RTE_MIN(priv->default_num_queues, priv->max_nb_rxq);
	}

	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
		    priv->max_nb_txq, priv->max_nb_rxq);

	/* In the GQI_QPL queue format, allocate one queue page list per
	 * queue, up to the maximum queue counts: tx qpl ids run from 0
	 * and rx qpl ids start at priv->max_nb_txq.
	 */
	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
		priv->qpl = rte_zmalloc("gve_qpl",
					(priv->max_nb_txq + priv->max_nb_rxq) *
					sizeof(struct gve_queue_page_list), 0);
		if (priv->qpl == NULL) {
			PMD_DRV_LOG(ERR, "Failed to alloc qpl.");
			err = -ENOMEM;
			goto free_adminq;
		}

		for (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {
			if (i < priv->max_nb_txq)
				pages = priv->tx_pages_per_qpl;
			else
				pages = priv->rx_data_slot_cnt;
			err = gve_alloc_queue_page_list(priv, i, pages);
			if (err != 0) {
				PMD_DRV_LOG(ERR, "Failed to alloc qpl %u.", i);
				goto err_qpl;
			}
		}
	}

setup_device:
	err = gve_setup_device_resources(priv);
	if (!err)
		return 0;
err_qpl:
	gve_free_qpls(priv);
free_adminq:
	gve_adminq_free(priv);
	return err;
}

static void
gve_teardown_priv_resources(struct gve_priv *priv)
{
	gve_teardown_device_resources(priv);
	gve_adminq_free(priv);
}

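/* Ethdev init hook (primary process only): map the register and
 * doorbell BARs, report the driver version, read the advertised queue
 * limits, and initialize the private data and datapath callbacks.
 */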
static int
gve_dev_init(struct rte_eth_dev *eth_dev)
{
	struct gve_priv *priv = eth_dev->data->dev_private;
	int max_tx_queues, max_rx_queues;
	struct rte_pci_device *pci_dev;
	struct gve_registers *reg_bar;
	rte_be32_t *db_bar;
	int err;

	eth_dev->dev_ops = &gve_eth_dev_ops;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);

	reg_bar = pci_dev->mem_resource[GVE_REG_BAR].addr;
	if (!reg_bar) {
		PMD_DRV_LOG(ERR, "Failed to map pci bar!");
		return -ENOMEM;
	}

	db_bar = pci_dev->mem_resource[GVE_DB_BAR].addr;
	if (!db_bar) {
		PMD_DRV_LOG(ERR, "Failed to map doorbell bar!");
		return -ENOMEM;
	}

	gve_write_version(&reg_bar->driver_version);
	/* Get max queues to alloc etherdev */
	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);

	priv->reg_bar0 = reg_bar;
	priv->db_bar2 = db_bar;
	priv->pci_dev = pci_dev;
	priv->state_flags = 0x0;

	priv->max_nb_txq = max_tx_queues;
	priv->max_nb_rxq = max_rx_queues;

	err = gve_init_priv(priv, false);
	if (err)
		return err;

	if (gve_is_gqi(priv)) {
		eth_dev->rx_pkt_burst = gve_rx_burst;
		eth_dev->tx_pkt_burst = gve_tx_burst;
	} else {
		PMD_DRV_LOG(ERR, "DQO_RDA is not implemented and will be added in the future");
	}

	eth_dev->data->mac_addrs = &priv->dev_addr;

	return 0;
}

static int
gve_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct gve_priv *priv = eth_dev->data->dev_private;

	gve_teardown_priv_resources(priv);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int
gve_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct gve_priv), gve_dev_init);
}

static int
gve_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, gve_dev_uninit);
}

static const struct rte_pci_id pci_id_gve_map[] = {
	{ RTE_PCI_DEVICE(GOOGLE_VENDOR_ID, GVE_DEV_ID) },
	{ .device_id = 0 },
};

static struct rte_pci_driver rte_gve_pmd = {
	.id_table = pci_id_gve_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = gve_pci_probe,
	.remove = gve_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_gve, rte_gve_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_gve, pci_id_gve_map);
RTE_PMD_REGISTER_KMOD_DEP(net_gve, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(gve_logtype_driver, driver, NOTICE);