xref: /dpdk/drivers/net/gve/gve_ethdev.c (revision 4aa10e5dc1b0fd6cc5b1b18770ac603e2c33a66c)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2022 Intel Corporation
 */

#include "gve_ethdev.h"
#include "base/gve_adminq.h"
#include "base/gve_register.h"

const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;

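/*
 * Write the driver version string to the device: the version prefix is
 * written first, byte by byte, followed by the version itself and a
 * terminating '\n'.
 */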
static void
gve_write_version(uint8_t *driver_version_register)
{
	const char *c = gve_version_prefix;

	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}

	c = gve_version_str;
	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}
	writeb('\n', driver_version_register);
}

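/*
 * Reserve one IOVA-contiguous memzone of 'pages' pages for queue page list
 * 'id' and record the bus address of every page so the list can later be
 * registered with the device.
 */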
static int
gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct gve_queue_page_list *qpl;
	const struct rte_memzone *mz;
	dma_addr_t page_bus;
	uint32_t i;

	if (priv->num_registered_pages + pages >
	    priv->max_registered_pages) {
		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
			    priv->num_registered_pages + pages,
			    priv->max_registered_pages);
		return -EINVAL;
	}
	qpl = &priv->qpl[id];
	snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
	mz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,
					 rte_socket_id(),
					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc %s.", z_name);
		return -ENOMEM;
	}
	qpl->page_buses = rte_zmalloc("qpl page buses", pages * sizeof(dma_addr_t), 0);
	if (qpl->page_buses == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc qpl %u page buses", id);
		rte_memzone_free(mz);
		return -ENOMEM;
	}
	page_bus = mz->iova;
	for (i = 0; i < pages; i++) {
		qpl->page_buses[i] = page_bus;
		page_bus += PAGE_SIZE;
	}
	qpl->id = id;
	qpl->mz = mz;
	qpl->num_entries = pages;

	priv->num_registered_pages += pages;

	return 0;
}

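/* Release the memzones and page-bus arrays of all queue page lists. */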
static void
gve_free_qpls(struct gve_priv *priv)
{
	uint16_t nb_txqs = priv->max_nb_txq;
	uint16_t nb_rxqs = priv->max_nb_rxq;
	uint32_t i;

	/* No QPLs are allocated for queue formats other than GQI_QPL. */
	if (priv->qpl == NULL)
		return;

	for (i = 0; i < nb_txqs + nb_rxqs; i++) {
		if (priv->qpl[i].mz != NULL)
			rte_memzone_free(priv->qpl[i].mz);
		rte_free(priv->qpl[i].page_buses);
	}

	rte_free(priv->qpl);
}

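/*
 * Apply the port configuration: deliver RSS hashes in the mbuf when RSS
 * multi-queue mode is requested, and enable receive segment coalescing
 * when the application asks for TCP LRO.
 */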
static int
gve_dev_configure(struct rte_eth_dev *dev)
{
	struct gve_priv *priv = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
		priv->enable_rsc = 1;

	return 0;
}

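/*
 * Populate the RX software ring with mbufs and write the matching buffer
 * addresses into the RX data ring. For the GQI_QPL format the hardware
 * uses fixed QPL page offsets instead of mbuf addresses. The last
 * descriptor is left unfilled (next_avail is set to nb_rx_desc - 1).
 */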
static int
gve_refill_pages(struct gve_rx_queue *rxq)
{
	struct rte_mbuf *nmb;
	uint16_t i;
	int diag;

	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
	if (diag < 0) {
		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
			nmb = rte_pktmbuf_alloc(rxq->mpool);
			if (!nmb)
				break;
			rxq->sw_ring[i] = nmb;
		}
		if (i < rxq->nb_rx_desc - 1)
			return -ENOMEM;
	}
	rxq->nb_avail = 0;
	rxq->next_avail = rxq->nb_rx_desc - 1;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->is_gqi_qpl) {
			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(i * PAGE_SIZE);
		} else {
			if (i == rxq->nb_rx_desc - 1)
				break;
			nmb = rxq->sw_ring[i];
			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
		}
	}

	rte_write32(rte_cpu_to_be_32(rxq->next_avail), rxq->qrx_tail);

	return 0;
}

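/*
 * Report the link as up once the port is started; the link speed is
 * queried from the device through the admin queue.
 */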
static int
gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct gve_priv *priv = dev->data->dev_private;
	struct rte_eth_link link;
	int err;

	memset(&link, 0, sizeof(link));
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = RTE_ETH_LINK_AUTONEG;

	if (!dev->data->dev_started) {
		link.link_status = RTE_ETH_LINK_DOWN;
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	} else {
		link.link_status = RTE_ETH_LINK_UP;
		PMD_DRV_LOG(DEBUG, "Get link status from hw");
		err = gve_adminq_report_link_speed(priv);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to get link speed.");
			priv->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
		}
		link.link_speed = priv->link_speed;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

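/*
 * Create the TX and RX queues on the device via the admin queue, map each
 * queue's doorbell and head-counter entries, write GVE_IRQ_MASK to the
 * per-queue notification doorbells and prefill the RX rings before marking
 * the port as started.
 */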
static int
gve_dev_start(struct rte_eth_dev *dev)
{
	uint16_t num_queues = dev->data->nb_tx_queues;
	struct gve_priv *priv = dev->data->dev_private;
	struct gve_tx_queue *txq;
	struct gve_rx_queue *rxq;
	uint16_t i;
	int err;

	priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;
	err = gve_adminq_create_tx_queues(priv, num_queues);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to create %u TX queues.", num_queues);
		return err;
	}
	for (i = 0; i < num_queues; i++) {
		txq = priv->txqs[i];
		txq->qtx_tail =
			&priv->db_bar2[rte_be_to_cpu_32(txq->qres->db_index)];
		txq->qtx_head =
			&priv->cnt_array[rte_be_to_cpu_32(txq->qres->counter_index)];

		rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);
	}

	num_queues = dev->data->nb_rx_queues;
	priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;
	err = gve_adminq_create_rx_queues(priv, num_queues);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to create %u RX queues.", num_queues);
		goto err_tx;
	}
	for (i = 0; i < num_queues; i++) {
		rxq = priv->rxqs[i];
		rxq->qrx_tail =
			&priv->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];

		rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);

		err = gve_refill_pages(rxq);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to refill pages for RX queue %u.", i);
			goto err_rx;
		}
	}

	dev->data->dev_started = 1;
	gve_link_update(dev, 0);

	return 0;

err_rx:
	gve_stop_rx_queues(dev);
err_tx:
	gve_stop_tx_queues(dev);
	return err;
}

static int
gve_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	gve_stop_tx_queues(dev);
	gve_stop_rx_queues(dev);

	dev->data->dev_started = 0;

	return 0;
}

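/*
 * Close the port: stop it if still running, release every TX and RX queue
 * and free the queue page lists and the admin queue. The MAC address array
 * points into the private data, so it is detached rather than freed.
 */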
static int
gve_dev_close(struct rte_eth_dev *dev)
{
	struct gve_priv *priv = dev->data->dev_private;
	int err = 0;
	uint16_t i;

	if (dev->data->dev_started) {
		err = gve_dev_stop(dev);
		if (err != 0)
			PMD_DRV_LOG(ERR, "Failed to stop dev.");
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		gve_tx_queue_release(dev, i);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		gve_rx_queue_release(dev, i);

	gve_free_qpls(priv);
	rte_free(priv->adminq);

	dev->data->mac_addrs = NULL;

	return err;
}

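/*
 * Report device capabilities. Descriptor counts are fixed by the device,
 * so the minimum and maximum ring sizes are both set to the value read at
 * initialization time.
 */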
static int
gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct gve_priv *priv = dev->data->dev_private;

	dev_info->device = dev->device;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_queues = priv->max_nb_rxq;
	dev_info->max_tx_queues = priv->max_nb_txq;
	dev_info->min_rx_bufsize = GVE_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = GVE_MAX_RX_PKTLEN;
	dev_info->max_mtu = GVE_MAX_MTU;
	dev_info->min_mtu = GVE_MIN_MTU;

	dev_info->rx_offload_capa = 0;
	dev_info->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS	|
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	|
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	|
		RTE_ETH_TX_OFFLOAD_TCP_TSO;

	if (priv->queue_format == GVE_DQO_RDA_FORMAT)
		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->default_rxportconf.ring_size = priv->rx_desc_cnt;
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = priv->rx_desc_cnt,
		.nb_min = priv->rx_desc_cnt,
		.nb_align = 1,
	};

	dev_info->default_txportconf.ring_size = priv->tx_desc_cnt;
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = priv->tx_desc_cnt,
		.nb_min = priv->tx_desc_cnt,
		.nb_align = 1,
	};

	return 0;
}

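/* Aggregate the per-queue counters into the port-level statistics. */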
static int
gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct gve_tx_queue *txq = dev->data->tx_queues[i];
		if (txq == NULL)
			continue;

		stats->opackets += txq->packets;
		stats->obytes += txq->bytes;
		stats->oerrors += txq->errors;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
		if (rxq == NULL)
			continue;

		stats->ipackets += rxq->packets;
		stats->ibytes += rxq->bytes;
		stats->ierrors += rxq->errors;
		stats->rx_nombuf += rxq->no_mbufs;
	}

	return 0;
}

static int
gve_dev_stats_reset(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct gve_tx_queue *txq = dev->data->tx_queues[i];
		if (txq == NULL)
			continue;

		txq->packets = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
		if (rxq == NULL)
			continue;

		rxq->packets = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
		rxq->no_mbufs = 0;
	}

	return 0;
}

static int
gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct gve_priv *priv = dev->data->dev_private;
	int err;

	if (mtu < RTE_ETHER_MIN_MTU || mtu > priv->max_mtu) {
		PMD_DRV_LOG(ERR, "MIN MTU is %u, MAX MTU is %u",
			    RTE_ETHER_MIN_MTU, priv->max_mtu);
		return -EINVAL;
	}

	/* The MTU can only be changed while the port is stopped. */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "Port must be stopped before configuration");
		return -EBUSY;
	}

	err = gve_adminq_set_mtu(priv, mtu);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to set MTU to %u, err = %d", mtu, err);
		return err;
	}

	return 0;
}

static const struct eth_dev_ops gve_eth_dev_ops = {
	.dev_configure        = gve_dev_configure,
	.dev_start            = gve_dev_start,
	.dev_stop             = gve_dev_stop,
	.dev_close            = gve_dev_close,
	.dev_infos_get        = gve_dev_info_get,
	.rx_queue_setup       = gve_rx_queue_setup,
	.tx_queue_setup       = gve_tx_queue_setup,
	.rx_queue_release     = gve_rx_queue_release,
	.tx_queue_release     = gve_tx_queue_release,
	.link_update          = gve_link_update,
	.stats_get            = gve_dev_stats_get,
	.stats_reset          = gve_dev_stats_reset,
	.mtu_set              = gve_dev_mtu_set,
};

static void
gve_free_counter_array(struct gve_priv *priv)
{
	rte_memzone_free(priv->cnt_array_mz);
	priv->cnt_array = NULL;
}

static void
gve_free_irq_db(struct gve_priv *priv)
{
	rte_memzone_free(priv->irq_dbs_mz);
	priv->irq_dbs = NULL;
}

static void
gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err)
			PMD_DRV_LOG(ERR, "Could not deconfigure device resources: err=%d", err);
	}
	gve_free_counter_array(priv);
	gve_free_irq_db(priv);
	gve_clear_device_resources_ok(priv);
}

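/*
 * Walk the PCI capability list of the device and return the config-space
 * offset of capability 'cap', or 0 if it is not present.
 */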
static uint8_t
pci_dev_find_capability(struct rte_pci_device *pdev, int cap)
{
	uint8_t pos, id;
	uint16_t ent;
	int loops;
	int ret;

	ret = rte_pci_read_config(pdev, &pos, sizeof(pos), PCI_CAPABILITY_LIST);
	if (ret != sizeof(pos))
		return 0;

	loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF;

	while (pos && loops--) {
		ret = rte_pci_read_config(pdev, &ent, sizeof(ent), pos);
		if (ret != sizeof(ent))
			return 0;

		id = ent & 0xff;
		if (id == 0xff)
			break;

		if (id == cap)
			return pos;

		pos = (ent >> 8);
	}

	return 0;
}

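/*
 * Return the number of MSI-X vectors the device supports, derived from the
 * table size field of the MSI-X capability, or 0 if MSI-X is unavailable.
 */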
static int
pci_dev_msix_vec_count(struct rte_pci_device *pdev)
{
	uint8_t msix_cap = pci_dev_find_capability(pdev, PCI_CAP_ID_MSIX);
	uint16_t control;
	int ret;

	if (!msix_cap)
		return 0;

	ret = rte_pci_read_config(pdev, &control, sizeof(control), msix_cap + PCI_MSIX_FLAGS);
	if (ret != sizeof(control))
		return 0;

	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
}

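/*
 * Allocate the event counter array and the interrupt doorbell array in
 * IOVA-contiguous memzones and hand their addresses to the device through
 * the admin queue.
 */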
static int
gve_setup_device_resources(struct gve_priv *priv)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int err = 0;

	snprintf(z_name, sizeof(z_name), "gve_%s_cnt_arr", priv->pci_dev->device.name);
	mz = rte_memzone_reserve_aligned(z_name,
					 priv->num_event_counters * sizeof(*priv->cnt_array),
					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
					 PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Could not alloc memzone for count array");
		return -ENOMEM;
	}
	priv->cnt_array = (rte_be32_t *)mz->addr;
	priv->cnt_array_mz = mz;

	snprintf(z_name, sizeof(z_name), "gve_%s_irqmz", priv->pci_dev->device.name);
	mz = rte_memzone_reserve_aligned(z_name,
					 sizeof(*priv->irq_dbs) * (priv->num_ntfy_blks),
					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
					 PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Could not alloc memzone for irq_dbs");
		err = -ENOMEM;
		goto free_cnt_array;
	}
	priv->irq_dbs = (struct gve_irq_db *)mz->addr;
	priv->irq_dbs_mz = mz;

	err = gve_adminq_configure_device_resources(priv,
						    priv->cnt_array_mz->iova,
						    priv->num_event_counters,
						    priv->irq_dbs_mz->iova,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		PMD_DRV_LOG(ERR, "Could not config device resources: err=%d", err);
		goto free_irq_dbs;
	}
	return 0;

free_irq_dbs:
	gve_free_irq_db(priv);
free_cnt_array:
	gve_free_counter_array(priv);

	return err;
}

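/*
 * Bring up the admin queue, query the device description and the available
 * MSI-X vectors, size the queue limits accordingly and, for the GQI_QPL
 * queue format, allocate one queue page list per queue. Finally set up the
 * shared device resources.
 */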
static int
gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
	uint16_t pages;
	int num_ntfy;
	uint32_t i;
	int err;

	/* Set up the adminq */
	err = gve_adminq_alloc(priv);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to alloc admin queue: err=%d", err);
		return err;
	}

	if (skip_describe_device)
		goto setup_device;

	/* Get the initial information we need from the device */
	err = gve_adminq_describe_device(priv);
	if (err) {
		PMD_DRV_LOG(ERR, "Could not get device information: err=%d", err);
		goto free_adminq;
	}

	num_ntfy = pci_dev_msix_vec_count(priv->pci_dev);
	if (num_ntfy <= 0) {
		PMD_DRV_LOG(ERR, "Could not count MSI-x vectors");
		err = -EIO;
		goto free_adminq;
	} else if (num_ntfy < GVE_MIN_MSIX) {
		PMD_DRV_LOG(ERR, "GVE needs at least %d MSI-x vectors, but only has %d",
			    GVE_MIN_MSIX, num_ntfy);
		err = -EINVAL;
		goto free_adminq;
	}

	priv->num_registered_pages = 0;

	/* gvnic has one Notification Block per MSI-x vector, except for the
	 * management vector
	 */
	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
	priv->mgmt_msix_idx = priv->num_ntfy_blks;

	priv->max_nb_txq = RTE_MIN(priv->max_nb_txq, priv->num_ntfy_blks / 2);
	priv->max_nb_rxq = RTE_MIN(priv->max_nb_rxq, priv->num_ntfy_blks / 2);

	if (priv->default_num_queues > 0) {
		priv->max_nb_txq = RTE_MIN(priv->default_num_queues, priv->max_nb_txq);
		priv->max_nb_rxq = RTE_MIN(priv->default_num_queues, priv->max_nb_rxq);
	}

	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
		    priv->max_nb_txq, priv->max_nb_rxq);

	/* In the GQI_QPL queue format, allocate queue page lists up to the
	 * maximum number of queues: TX QPL ids start at 0 and RX QPL ids
	 * start at priv->max_nb_txq.
	 */
	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
		priv->qpl = rte_zmalloc("gve_qpl",
					(priv->max_nb_txq + priv->max_nb_rxq) *
					sizeof(struct gve_queue_page_list), 0);
		if (priv->qpl == NULL) {
			PMD_DRV_LOG(ERR, "Failed to alloc qpl.");
			err = -ENOMEM;
			goto free_adminq;
		}

		for (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {
			if (i < priv->max_nb_txq)
				pages = priv->tx_pages_per_qpl;
			else
				pages = priv->rx_data_slot_cnt;
			err = gve_alloc_queue_page_list(priv, i, pages);
			if (err != 0) {
				PMD_DRV_LOG(ERR, "Failed to alloc qpl %u.", i);
				goto err_qpl;
			}
		}
	}

setup_device:
	err = gve_setup_device_resources(priv);
	if (!err)
		return 0;
err_qpl:
	gve_free_qpls(priv);
free_adminq:
	gve_adminq_free(priv);
	return err;
}

static void
gve_teardown_priv_resources(struct gve_priv *priv)
{
	gve_teardown_device_resources(priv);
	gve_adminq_free(priv);
}

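/*
 * Ethdev init hook: map the register and doorbell BARs, write the driver
 * version, read the maximum queue counts from the device and initialize
 * the private data. Burst functions are only set for the GQI queue
 * formats; DQO_RDA support is not implemented yet.
 */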
static int
gve_dev_init(struct rte_eth_dev *eth_dev)
{
	struct gve_priv *priv = eth_dev->data->dev_private;
	int max_tx_queues, max_rx_queues;
	struct rte_pci_device *pci_dev;
	struct gve_registers *reg_bar;
	rte_be32_t *db_bar;
	int err;

	eth_dev->dev_ops = &gve_eth_dev_ops;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);

	reg_bar = pci_dev->mem_resource[GVE_REG_BAR].addr;
	if (!reg_bar) {
		PMD_DRV_LOG(ERR, "Failed to map pci bar!");
		return -ENOMEM;
	}

	db_bar = pci_dev->mem_resource[GVE_DB_BAR].addr;
	if (!db_bar) {
		PMD_DRV_LOG(ERR, "Failed to map doorbell bar!");
		return -ENOMEM;
	}

	gve_write_version(&reg_bar->driver_version);
	/* Get max queues to alloc etherdev */
	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);

	priv->reg_bar0 = reg_bar;
	priv->db_bar2 = db_bar;
	priv->pci_dev = pci_dev;
	priv->state_flags = 0x0;

	priv->max_nb_txq = max_tx_queues;
	priv->max_nb_rxq = max_rx_queues;

	err = gve_init_priv(priv, false);
	if (err)
		return err;

	if (gve_is_gqi(priv)) {
		eth_dev->rx_pkt_burst = gve_rx_burst;
		eth_dev->tx_pkt_burst = gve_tx_burst;
	} else {
		PMD_DRV_LOG(ERR, "DQO_RDA is not implemented and will be added in the future");
	}

	eth_dev->data->mac_addrs = &priv->dev_addr;

	return 0;
}

static int
gve_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct gve_priv *priv = eth_dev->data->dev_private;

	gve_teardown_priv_resources(priv);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int
gve_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct gve_priv), gve_dev_init);
}

static int
gve_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, gve_dev_uninit);
}

static const struct rte_pci_id pci_id_gve_map[] = {
	{ RTE_PCI_DEVICE(GOOGLE_VENDOR_ID, GVE_DEV_ID) },
	{ .device_id = 0 },
};

static struct rte_pci_driver rte_gve_pmd = {
	.id_table = pci_id_gve_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = gve_pci_probe,
	.remove = gve_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_gve, rte_gve_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_gve, pci_id_gve_map);
RTE_PMD_REGISTER_KMOD_DEP(net_gve, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(gve_logtype_driver, driver, NOTICE);