/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2022 Intel Corporation
 */

#include "gve_ethdev.h"
#include "base/gve_adminq.h"
#include "base/gve_register.h"

const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;

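/*
 * Write the driver version string into the device's version register.
 * Every character is written to the same register address with writeb(),
 * so the device presumably consumes the bytes as a stream; a trailing
 * newline terminates the string.
 */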
static void
gve_write_version(uint8_t *driver_version_register)
{
	const char *c = gve_version_prefix;

	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}

	c = gve_version_str;
	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}
	writeb('\n', driver_version_register);
}

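/*
 * Allocate one queue page list (QPL): an IOVA-contiguous memzone of
 * 'pages' pages plus an array of per-page bus addresses. The pages are
 * registered with the device later, so the running total is checked
 * against the device-reported limit up front.
 */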
static int
gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct gve_queue_page_list *qpl;
	const struct rte_memzone *mz;
	dma_addr_t page_bus;
	uint32_t i;

	if (priv->num_registered_pages + pages >
	    priv->max_registered_pages) {
		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
			    priv->num_registered_pages + pages,
			    priv->max_registered_pages);
		return -EINVAL;
	}
	qpl = &priv->qpl[id];
	snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
	mz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,
					 rte_socket_id(),
					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc %s.", z_name);
		return -ENOMEM;
	}
	qpl->page_buses = rte_zmalloc("qpl page buses", pages * sizeof(dma_addr_t), 0);
	if (qpl->page_buses == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc qpl %u page buses", id);
		rte_memzone_free(mz);
		return -ENOMEM;
	}
	page_bus = mz->iova;
	for (i = 0; i < pages; i++) {
		qpl->page_buses[i] = page_bus;
		page_bus += PAGE_SIZE;
	}
	qpl->id = id;
	qpl->mz = mz;
	qpl->num_entries = pages;

	priv->num_registered_pages += pages;

	return 0;
}

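/*
 * Release every QPL memzone and page-bus array, then the QPL array
 * itself. A NULL QPL array (non-QPL queue formats never allocate one)
 * is a no-op.
 */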
static void
gve_free_qpls(struct gve_priv *priv)
{
	uint16_t nb_txqs = priv->max_nb_txq;
	uint16_t nb_rxqs = priv->max_nb_rxq;
	uint32_t i;

	if (priv->qpl == NULL)
		return;

	for (i = 0; i < nb_txqs + nb_rxqs; i++) {
		if (priv->qpl[i].mz != NULL)
			rte_memzone_free(priv->qpl[i].mz);
		if (priv->qpl[i].page_buses != NULL)
			rte_free(priv->qpl[i].page_buses);
	}

	rte_free(priv->qpl);
}

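/*
 * Apply the ethdev configuration: RSS multi-queue mode implies the RSS
 * hash offload, and the LRO offload turns on receive segment coalescing.
 */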
static int
gve_dev_configure(struct rte_eth_dev *dev)
{
	struct gve_priv *priv = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
		priv->enable_rsc = 1;

	return 0;
}

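/*
 * Post the initial RX buffers. For the GQI_QPL format the device DMAs
 * into pre-registered QPL pages, so the data ring carries page offsets;
 * otherwise it carries mbuf bus addresses. One slot is left unposted,
 * presumably so a full ring can be told apart from an empty one.
 */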
static int
gve_refill_pages(struct gve_rx_queue *rxq)
{
	struct rte_mbuf *nmb;
	uint16_t i;
	int diag;

	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
	if (diag < 0) {
		/* Bulk allocation failed; fall back to one mbuf at a time
		 * and undo the partial fill if that fails as well.
		 */
		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
			nmb = rte_pktmbuf_alloc(rxq->mpool);
			if (!nmb)
				break;
			rxq->sw_ring[i] = nmb;
		}
		if (i < rxq->nb_rx_desc - 1) {
			while (i--) {
				rte_pktmbuf_free(rxq->sw_ring[i]);
				rxq->sw_ring[i] = NULL;
			}
			return -ENOMEM;
		}
	}
	rxq->nb_avail = 0;
	rxq->next_avail = rxq->nb_rx_desc - 1;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->is_gqi_qpl) {
			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(i * PAGE_SIZE);
		} else {
			if (i == rxq->nb_rx_desc - 1)
				break;
			nmb = rxq->sw_ring[i];
			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
		}
	}

	rte_write32(rte_cpu_to_be_32(rxq->next_avail), rxq->qrx_tail);

	return 0;
}

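/*
 * Report link status and speed. The speed is queried from the admin
 * queue only while the port is started; a stopped port reports link
 * down with no speed.
 */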
static int
gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct gve_priv *priv = dev->data->dev_private;
	struct rte_eth_link link;
	int err;

	memset(&link, 0, sizeof(link));
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = RTE_ETH_LINK_AUTONEG;

	if (!dev->data->dev_started) {
		link.link_status = RTE_ETH_LINK_DOWN;
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	} else {
		link.link_status = RTE_ETH_LINK_UP;
		PMD_DRV_LOG(DEBUG, "Get link status from hw");
		err = gve_adminq_report_link_speed(priv);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to get link speed.");
			priv->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
		}
		link.link_speed = priv->link_speed;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

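/*
 * Start the port: create the TX and RX queues through the admin queue,
 * resolve each queue's doorbell and head-counter pointers from the queue
 * resources the device returned, write GVE_IRQ_MASK to every notify
 * block (the PMD polls, so interrupts stay masked), and post the initial
 * RX buffers. On failure, any queues created so far are torn down.
 */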
static int
gve_dev_start(struct rte_eth_dev *dev)
{
	uint16_t num_queues = dev->data->nb_tx_queues;
	struct gve_priv *priv = dev->data->dev_private;
	struct gve_tx_queue *txq;
	struct gve_rx_queue *rxq;
	uint16_t i;
	int err;

	priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;
	err = gve_adminq_create_tx_queues(priv, num_queues);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to create %u TX queues.", num_queues);
		return err;
	}
	for (i = 0; i < num_queues; i++) {
		txq = priv->txqs[i];
		txq->qtx_tail =
			&priv->db_bar2[rte_be_to_cpu_32(txq->qres->db_index)];
		txq->qtx_head =
			&priv->cnt_array[rte_be_to_cpu_32(txq->qres->counter_index)];

		rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);
	}

	num_queues = dev->data->nb_rx_queues;
	priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;
	err = gve_adminq_create_rx_queues(priv, num_queues);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to create %u RX queues.", num_queues);
		goto err_tx;
	}
	for (i = 0; i < num_queues; i++) {
		rxq = priv->rxqs[i];
		rxq->qrx_tail =
			&priv->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];

		rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);

		err = gve_refill_pages(rxq);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to refill for RX");
			goto err_rx;
		}
	}

	dev->data->dev_started = 1;
	gve_link_update(dev, 0);

	return 0;

err_rx:
	gve_stop_rx_queues(dev);
err_tx:
	gve_stop_tx_queues(dev);
	return err;
}

static int
gve_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	gve_stop_tx_queues(dev);
	gve_stop_rx_queues(dev);

	dev->data->dev_started = 0;

	return 0;
}

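/*
 * Close the port: stop it if it is still running, release every queue,
 * then free the QPLs and the admin queue. dev->data->mac_addrs points
 * into priv (set in gve_dev_init), so it is detached rather than freed.
 */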
static int
gve_dev_close(struct rte_eth_dev *dev)
{
	struct gve_priv *priv = dev->data->dev_private;
	struct gve_tx_queue *txq;
	struct gve_rx_queue *rxq;
	int err = 0;
	uint16_t i;

	if (dev->data->dev_started) {
		err = gve_dev_stop(dev);
		if (err != 0)
			PMD_DRV_LOG(ERR, "Failed to stop dev.");
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		gve_tx_queue_release(txq);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		gve_rx_queue_release(rxq);
	}

	gve_free_qpls(priv);
	rte_free(priv->adminq);

	dev->data->mac_addrs = NULL;

	return err;
}

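/*
 * Report device capabilities. The descriptor ring sizes are fixed by the
 * device (nb_min == nb_max), and the LRO offload is only advertised for
 * the DQO_RDA queue format.
 */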
static int
gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct gve_priv *priv = dev->data->dev_private;

	dev_info->device = dev->device;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_queues = priv->max_nb_rxq;
	dev_info->max_tx_queues = priv->max_nb_txq;
	dev_info->min_rx_bufsize = GVE_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = GVE_MAX_RX_PKTLEN;
	dev_info->max_mtu = GVE_MAX_MTU;
	dev_info->min_mtu = GVE_MIN_MTU;

	dev_info->rx_offload_capa = 0;
	dev_info->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS	|
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	|
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	|
		RTE_ETH_TX_OFFLOAD_TCP_TSO;

	if (priv->queue_format == GVE_DQO_RDA_FORMAT)
		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->default_rxportconf.ring_size = priv->rx_desc_cnt;
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = priv->rx_desc_cnt,
		.nb_min = priv->rx_desc_cnt,
		.nb_align = 1,
	};

	dev_info->default_txportconf.ring_size = priv->tx_desc_cnt;
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = priv->tx_desc_cnt,
		.nb_min = priv->tx_desc_cnt,
		.nb_align = 1,
	};

	return 0;
}

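/*
 * Validate a new MTU against the device-reported maximum and program it
 * through the admin queue. The port must be stopped first.
 */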
static int
gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct gve_priv *priv = dev->data->dev_private;
	int err;

	if (mtu < RTE_ETHER_MIN_MTU || mtu > priv->max_mtu) {
		PMD_DRV_LOG(ERR, "MIN MTU is %u, MAX MTU is %u",
			    RTE_ETHER_MIN_MTU, priv->max_mtu);
		return -EINVAL;
	}

	/* Changing the MTU is forbidden while the port is started. */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "Port must be stopped before configuration");
		return -EBUSY;
	}

	err = gve_adminq_set_mtu(priv, mtu);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to set MTU to %u, err = %d", mtu, err);
		return err;
	}

	return 0;
}

static const struct eth_dev_ops gve_eth_dev_ops = {
	.dev_configure        = gve_dev_configure,
	.dev_start            = gve_dev_start,
	.dev_stop             = gve_dev_stop,
	.dev_close            = gve_dev_close,
	.dev_infos_get        = gve_dev_info_get,
	.rx_queue_setup       = gve_rx_queue_setup,
	.tx_queue_setup       = gve_tx_queue_setup,
	.link_update          = gve_link_update,
	.mtu_set              = gve_dev_mtu_set,
};

static void
gve_free_counter_array(struct gve_priv *priv)
{
	rte_memzone_free(priv->cnt_array_mz);
	priv->cnt_array = NULL;
}

static void
gve_free_irq_db(struct gve_priv *priv)
{
	rte_memzone_free(priv->irq_dbs_mz);
	priv->irq_dbs = NULL;
}

static void
gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err)
			PMD_DRV_LOG(ERR, "Could not deconfigure device resources: err=%d", err);
	}
	gve_free_counter_array(priv);
	gve_free_irq_db(priv);
	gve_clear_device_resources_ok(priv);
}

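/*
 * Walk the standard PCI capability list in config space and return the
 * offset of capability 'cap', or 0 if it is not present. The loop bound
 * guards against a malformed, looping capability chain.
 */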
static uint8_t
pci_dev_find_capability(struct rte_pci_device *pdev, int cap)
{
	uint8_t pos, id;
	uint16_t ent;
	int loops;
	int ret;

	ret = rte_pci_read_config(pdev, &pos, sizeof(pos), PCI_CAPABILITY_LIST);
	if (ret != sizeof(pos))
		return 0;

	loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF;

	while (pos && loops--) {
		ret = rte_pci_read_config(pdev, &ent, sizeof(ent), pos);
		if (ret != sizeof(ent))
			return 0;

		id = ent & 0xff;
		if (id == 0xff)
			break;

		if (id == cap)
			return pos;

		pos = (ent >> 8);
	}

	return 0;
}

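/*
 * Return the number of MSI-X vectors the device advertises (the table
 * size field of the MSI-X capability plus one), or 0 if MSI-X is absent
 * or config space cannot be read.
 */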
static int
pci_dev_msix_vec_count(struct rte_pci_device *pdev)
{
	uint8_t msix_cap = pci_dev_find_capability(pdev, PCI_CAP_ID_MSIX);
	uint16_t control;
	int ret;

	if (!msix_cap)
		return 0;

	ret = rte_pci_read_config(pdev, &control, sizeof(control), msix_cap + PCI_MSIX_FLAGS);
	if (ret != sizeof(control))
		return 0;

	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
}

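/*
 * Allocate the event-counter array and the interrupt-doorbell block in
 * page-aligned, IOVA-contiguous memzones, then hand their bus addresses
 * to the device via the admin queue.
 */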
static int
gve_setup_device_resources(struct gve_priv *priv)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int err = 0;

	snprintf(z_name, sizeof(z_name), "gve_%s_cnt_arr", priv->pci_dev->device.name);
	mz = rte_memzone_reserve_aligned(z_name,
					 priv->num_event_counters * sizeof(*priv->cnt_array),
					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
					 PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Could not alloc memzone for count array");
		return -ENOMEM;
	}
	priv->cnt_array = (rte_be32_t *)mz->addr;
	priv->cnt_array_mz = mz;

	snprintf(z_name, sizeof(z_name), "gve_%s_irqmz", priv->pci_dev->device.name);
	mz = rte_memzone_reserve_aligned(z_name,
					 sizeof(*priv->irq_dbs) * (priv->num_ntfy_blks),
					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
					 PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Could not alloc memzone for irq_dbs");
		err = -ENOMEM;
		goto free_cnt_array;
	}
	priv->irq_dbs = (struct gve_irq_db *)mz->addr;
	priv->irq_dbs_mz = mz;

	err = gve_adminq_configure_device_resources(priv,
						    priv->cnt_array_mz->iova,
						    priv->num_event_counters,
						    priv->irq_dbs_mz->iova,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		PMD_DRV_LOG(ERR, "Could not config device resources: err=%d", err);
		goto free_irq_dbs;
	}
	return 0;

free_irq_dbs:
	gve_free_irq_db(priv);
free_cnt_array:
	gve_free_counter_array(priv);

	return err;
}

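/*
 * Initialize device-private state: allocate the admin queue, query the
 * device description, derive the queue limits from the available MSI-X
 * vectors (one notify block per vector, reserving one for management),
 * allocate QPLs when the GQI_QPL format is in use, and configure the
 * shared device resources.
 */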
static int
gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
	uint16_t pages;
	int num_ntfy;
	uint32_t i;
	int err;

	/* Set up the adminq */
	err = gve_adminq_alloc(priv);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to alloc admin queue: err=%d", err);
		return err;
	}

	if (skip_describe_device)
		goto setup_device;

	/* Get the initial information we need from the device */
	err = gve_adminq_describe_device(priv);
	if (err) {
		PMD_DRV_LOG(ERR, "Could not get device information: err=%d", err);
		goto free_adminq;
	}

	num_ntfy = pci_dev_msix_vec_count(priv->pci_dev);
	if (num_ntfy <= 0) {
		PMD_DRV_LOG(ERR, "Could not count MSI-x vectors");
		err = -EIO;
		goto free_adminq;
	} else if (num_ntfy < GVE_MIN_MSIX) {
		PMD_DRV_LOG(ERR, "GVE needs at least %d MSI-x vectors, but only has %d",
			    GVE_MIN_MSIX, num_ntfy);
		err = -EINVAL;
		goto free_adminq;
	}

	priv->num_registered_pages = 0;

	/* gvnic has one Notification Block per MSI-x vector, except for the
	 * management vector
	 */
	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
	priv->mgmt_msix_idx = priv->num_ntfy_blks;

	priv->max_nb_txq = RTE_MIN(priv->max_nb_txq, priv->num_ntfy_blks / 2);
	priv->max_nb_rxq = RTE_MIN(priv->max_nb_rxq, priv->num_ntfy_blks / 2);

	if (priv->default_num_queues > 0) {
		priv->max_nb_txq = RTE_MIN(priv->default_num_queues, priv->max_nb_txq);
		priv->max_nb_rxq = RTE_MIN(priv->default_num_queues, priv->max_nb_rxq);
	}

	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
		    priv->max_nb_txq, priv->max_nb_rxq);

	/* In the GQI_QPL queue format, allocate one queue page list per
	 * queue, sized by the max queue numbers: TX QPL ids run from 0,
	 * RX QPL ids start at priv->max_nb_txq.
	 */
	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
		priv->qpl = rte_zmalloc("gve_qpl",
					(priv->max_nb_txq + priv->max_nb_rxq) *
					sizeof(struct gve_queue_page_list), 0);
		if (priv->qpl == NULL) {
			PMD_DRV_LOG(ERR, "Failed to alloc qpl.");
			err = -ENOMEM;
			goto free_adminq;
		}

		for (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {
			if (i < priv->max_nb_txq)
				pages = priv->tx_pages_per_qpl;
			else
				pages = priv->rx_data_slot_cnt;
			err = gve_alloc_queue_page_list(priv, i, pages);
			if (err != 0) {
				PMD_DRV_LOG(ERR, "Failed to alloc qpl %u.", i);
				goto err_qpl;
			}
		}
	}

setup_device:
	err = gve_setup_device_resources(priv);
	if (!err)
		return 0;
err_qpl:
	gve_free_qpls(priv);
free_adminq:
	gve_adminq_free(priv);
	return err;
}

static void
gve_teardown_priv_resources(struct gve_priv *priv)
{
	gve_teardown_device_resources(priv);
	gve_adminq_free(priv);
}

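/*
 * ethdev init hook: map the register and doorbell BARs, write the driver
 * version to the device, read the device's queue limits, and initialize
 * the private state. RX/TX burst handlers are only wired up for the GQI
 * queue formats; DQO_RDA is not implemented yet.
 */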
static int
gve_dev_init(struct rte_eth_dev *eth_dev)
{
	struct gve_priv *priv = eth_dev->data->dev_private;
	int max_tx_queues, max_rx_queues;
	struct rte_pci_device *pci_dev;
	struct gve_registers *reg_bar;
	rte_be32_t *db_bar;
	int err;

	eth_dev->dev_ops = &gve_eth_dev_ops;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);

	reg_bar = pci_dev->mem_resource[GVE_REG_BAR].addr;
	if (!reg_bar) {
		PMD_DRV_LOG(ERR, "Failed to map pci bar!");
		return -ENOMEM;
	}

	db_bar = pci_dev->mem_resource[GVE_DB_BAR].addr;
	if (!db_bar) {
		PMD_DRV_LOG(ERR, "Failed to map doorbell bar!");
		return -ENOMEM;
	}

	gve_write_version(&reg_bar->driver_version);
	/* Get max queues to alloc etherdev */
	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);

	priv->reg_bar0 = reg_bar;
	priv->db_bar2 = db_bar;
	priv->pci_dev = pci_dev;
	priv->state_flags = 0x0;

	priv->max_nb_txq = max_tx_queues;
	priv->max_nb_rxq = max_rx_queues;

	err = gve_init_priv(priv, false);
	if (err)
		return err;

	if (gve_is_gqi(priv)) {
		eth_dev->rx_pkt_burst = gve_rx_burst;
		eth_dev->tx_pkt_burst = gve_tx_burst;
	} else {
		PMD_DRV_LOG(ERR, "DQO_RDA is not implemented and will be added in the future");
	}

	eth_dev->data->mac_addrs = &priv->dev_addr;

	return 0;
}

static int
gve_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct gve_priv *priv = eth_dev->data->dev_private;

	gve_teardown_priv_resources(priv);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int
gve_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct gve_priv), gve_dev_init);
}

static int
gve_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, gve_dev_uninit);
}

static const struct rte_pci_id pci_id_gve_map[] = {
	{ RTE_PCI_DEVICE(GOOGLE_VENDOR_ID, GVE_DEV_ID) },
	{ .device_id = 0 },
};

static struct rte_pci_driver rte_gve_pmd = {
	.id_table = pci_id_gve_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = gve_pci_probe,
	.remove = gve_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_gve, rte_gve_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_gve, pci_id_gve_map);
RTE_PMD_REGISTER_KMOD_DEP(net_gve, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(gve_logtype_driver, driver, NOTICE);