/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2022 Intel Corporation
 */

#include "gve_ethdev.h"
#include "base/gve_adminq.h"
#include "base/gve_register.h"

const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;

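/*
 * Report the PMD version to the device: write the version prefix and the
 * version string byte by byte into the driver-version register, then
 * terminate with a newline.
 */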
static void
gve_write_version(uint8_t *driver_version_register)
{
	const char *c = gve_version_prefix;

	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}

	c = gve_version_str;
	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}
	writeb('\n', driver_version_register);
}

static int
gve_dev_configure(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

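/*
 * Report link status to the ethdev layer. While the port is started the
 * link is reported as up and the speed is queried from the device over the
 * admin queue; otherwise the link is reported as down with no speed.
 */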
static int
gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct gve_priv *priv = dev->data->dev_private;
	struct rte_eth_link link;
	int err;

	memset(&link, 0, sizeof(link));
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = RTE_ETH_LINK_AUTONEG;

	if (!dev->data->dev_started) {
		link.link_status = RTE_ETH_LINK_DOWN;
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	} else {
		link.link_status = RTE_ETH_LINK_UP;
		PMD_DRV_LOG(DEBUG, "Get link status from hw");
		err = gve_adminq_report_link_speed(priv);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to get link speed.");
			priv->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
		}
		link.link_speed = priv->link_speed;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
gve_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_started = 1;
	gve_link_update(dev, 0);

	return 0;
}

static int
gve_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	dev->data->dev_started = 0;

	return 0;
}

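/*
 * Stop the port if it is still running and detach the MAC address pointer.
 * mac_addrs points into the private data (priv->dev_addr), so it is cleared
 * here instead of being freed by the ethdev layer on release.
 */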
static int
gve_dev_close(struct rte_eth_dev *dev)
{
	int err = 0;

	if (dev->data->dev_started) {
		err = gve_dev_stop(dev);
		if (err != 0)
			PMD_DRV_LOG(ERR, "Failed to stop dev.");
	}

	dev->data->mac_addrs = NULL;

	return err;
}

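/*
 * Validate the requested MTU against the device limits and program it
 * through the admin queue. The port must be stopped before the change.
 */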
static int
gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct gve_priv *priv = dev->data->dev_private;
	int err;

	if (mtu < RTE_ETHER_MIN_MTU || mtu > priv->max_mtu) {
		PMD_DRV_LOG(ERR, "MIN MTU is %u, MAX MTU is %u",
			    RTE_ETHER_MIN_MTU, priv->max_mtu);
		return -EINVAL;
	}

	/* MTU setting is not allowed while the port is started */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "Port must be stopped before configuration");
		return -EBUSY;
	}

	err = gve_adminq_set_mtu(priv, mtu);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to set MTU to %u, err=%d", mtu, err);
		return err;
	}

	return 0;
}

static const struct eth_dev_ops gve_eth_dev_ops = {
	.dev_configure        = gve_dev_configure,
	.dev_start            = gve_dev_start,
	.dev_stop             = gve_dev_stop,
	.dev_close            = gve_dev_close,
	.link_update          = gve_link_update,
	.mtu_set              = gve_dev_mtu_set,
};

static void
gve_free_counter_array(struct gve_priv *priv)
{
	rte_memzone_free(priv->cnt_array_mz);
	priv->cnt_array = NULL;
}

static void
gve_free_irq_db(struct gve_priv *priv)
{
	rte_memzone_free(priv->irq_dbs_mz);
	priv->irq_dbs = NULL;
}

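/*
 * Ask the device to deconfigure its resources (if they were configured),
 * then free the event counter array and IRQ doorbell memzones and clear
 * the resources-ok state flag.
 */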
static void
gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err)
			PMD_DRV_LOG(ERR, "Could not deconfigure device resources: err=%d", err);
	}
	gve_free_counter_array(priv);
	gve_free_irq_db(priv);
	gve_clear_device_resources_ok(priv);
}

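/*
 * Walk the PCI capability list in config space and return the offset of the
 * requested capability, or 0 if it is not present or cannot be read. The
 * loop count is bounded so a malformed list cannot spin forever.
 */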
static uint8_t
pci_dev_find_capability(struct rte_pci_device *pdev, int cap)
{
	uint8_t pos, id;
	uint16_t ent;
	int loops;
	int ret;

	ret = rte_pci_read_config(pdev, &pos, sizeof(pos), PCI_CAPABILITY_LIST);
	if (ret != sizeof(pos))
		return 0;

	loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF;

	while (pos && loops--) {
		ret = rte_pci_read_config(pdev, &ent, sizeof(ent), pos);
		if (ret != sizeof(ent))
			return 0;

		id = ent & 0xff;
		if (id == 0xff)
			break;

		if (id == cap)
			return pos;

		pos = (ent >> 8);
	}

	return 0;
}

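/*
 * Return the number of MSI-X vectors the device advertises, read from the
 * table-size field of the MSI-X Message Control register (encoded as N-1),
 * or 0 if the capability is missing or cannot be read.
 */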
static int
pci_dev_msix_vec_count(struct rte_pci_device *pdev)
{
	uint8_t msix_cap = pci_dev_find_capability(pdev, PCI_CAP_ID_MSIX);
	uint16_t control;
	int ret;

	if (!msix_cap)
		return 0;

	ret = rte_pci_read_config(pdev, &control, sizeof(control), msix_cap + PCI_MSIX_FLAGS);
	if (ret != sizeof(control))
		return 0;

	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
}

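/*
 * Allocate IOVA-contiguous, page-aligned memzones for the event counter
 * array and the interrupt doorbells, then register their IOVAs with the
 * device over the admin queue. On failure the memzones are released in
 * reverse order.
 */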
static int
gve_setup_device_resources(struct gve_priv *priv)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int err = 0;

	snprintf(z_name, sizeof(z_name), "gve_%s_cnt_arr", priv->pci_dev->device.name);
	mz = rte_memzone_reserve_aligned(z_name,
					 priv->num_event_counters * sizeof(*priv->cnt_array),
					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
					 PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Could not alloc memzone for count array");
		return -ENOMEM;
	}
	priv->cnt_array = (rte_be32_t *)mz->addr;
	priv->cnt_array_mz = mz;

	snprintf(z_name, sizeof(z_name), "gve_%s_irqmz", priv->pci_dev->device.name);
	mz = rte_memzone_reserve_aligned(z_name,
					 sizeof(*priv->irq_dbs) * (priv->num_ntfy_blks),
					 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
					 PAGE_SIZE);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Could not alloc memzone for irq_dbs");
		err = -ENOMEM;
		goto free_cnt_array;
	}
	priv->irq_dbs = (struct gve_irq_db *)mz->addr;
	priv->irq_dbs_mz = mz;

	err = gve_adminq_configure_device_resources(priv,
						    priv->cnt_array_mz->iova,
						    priv->num_event_counters,
						    priv->irq_dbs_mz->iova,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		PMD_DRV_LOG(ERR, "Could not config device resources: err=%d", err);
		goto free_irq_dbs;
	}
	return 0;

free_irq_dbs:
	gve_free_irq_db(priv);
free_cnt_array:
	gve_free_counter_array(priv);

	return err;
}

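/*
 * Bring up the admin queue, query the device description (unless asked to
 * skip it), size the notification blocks from the available MSI-X vectors,
 * derive the maximum TX/RX queue counts, and set up the shared device
 * resources.
 */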
static int
gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
	int num_ntfy;
	int err;

	/* Set up the adminq */
	err = gve_adminq_alloc(priv);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to alloc admin queue: err=%d", err);
		return err;
	}

	if (skip_describe_device)
		goto setup_device;

	/* Get the initial information we need from the device */
	err = gve_adminq_describe_device(priv);
	if (err) {
		PMD_DRV_LOG(ERR, "Could not get device information: err=%d", err);
		goto free_adminq;
	}

	num_ntfy = pci_dev_msix_vec_count(priv->pci_dev);
	if (num_ntfy <= 0) {
		PMD_DRV_LOG(ERR, "Could not count MSI-x vectors");
		err = -EIO;
		goto free_adminq;
	} else if (num_ntfy < GVE_MIN_MSIX) {
		PMD_DRV_LOG(ERR, "GVE needs at least %d MSI-x vectors, but only has %d",
			    GVE_MIN_MSIX, num_ntfy);
		err = -EINVAL;
		goto free_adminq;
	}

	priv->num_registered_pages = 0;

	/* gvnic has one Notification Block per MSI-x vector, except for the
	 * management vector
	 */
	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
	priv->mgmt_msix_idx = priv->num_ntfy_blks;

	priv->max_nb_txq = RTE_MIN(priv->max_nb_txq, priv->num_ntfy_blks / 2);
	priv->max_nb_rxq = RTE_MIN(priv->max_nb_rxq, priv->num_ntfy_blks / 2);

	if (priv->default_num_queues > 0) {
		priv->max_nb_txq = RTE_MIN(priv->default_num_queues, priv->max_nb_txq);
		priv->max_nb_rxq = RTE_MIN(priv->default_num_queues, priv->max_nb_rxq);
	}

	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
		    priv->max_nb_txq, priv->max_nb_rxq);

setup_device:
	err = gve_setup_device_resources(priv);
	if (!err)
		return 0;
free_adminq:
	gve_adminq_free(priv);
	return err;
}

static void
gve_teardown_priv_resources(struct gve_priv *priv)
{
	gve_teardown_device_resources(priv);
	gve_adminq_free(priv);
}

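/*
 * ethdev init callback: map the register and doorbell BARs, report the
 * driver version to the device, read the maximum queue counts from the
 * registers, and initialize the private data via gve_init_priv().
 */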
static int
gve_dev_init(struct rte_eth_dev *eth_dev)
{
	struct gve_priv *priv = eth_dev->data->dev_private;
	int max_tx_queues, max_rx_queues;
	struct rte_pci_device *pci_dev;
	struct gve_registers *reg_bar;
	rte_be32_t *db_bar;
	int err;

	eth_dev->dev_ops = &gve_eth_dev_ops;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);

	reg_bar = pci_dev->mem_resource[GVE_REG_BAR].addr;
	if (!reg_bar) {
		PMD_DRV_LOG(ERR, "Failed to map pci bar!");
		return -ENOMEM;
	}

	db_bar = pci_dev->mem_resource[GVE_DB_BAR].addr;
	if (!db_bar) {
		PMD_DRV_LOG(ERR, "Failed to map doorbell bar!");
		return -ENOMEM;
	}

	gve_write_version(&reg_bar->driver_version);
	/* Get max queues to alloc etherdev */
	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);

	priv->reg_bar0 = reg_bar;
	priv->db_bar2 = db_bar;
	priv->pci_dev = pci_dev;
	priv->state_flags = 0x0;

	priv->max_nb_txq = max_tx_queues;
	priv->max_nb_rxq = max_rx_queues;

	err = gve_init_priv(priv, false);
	if (err)
		return err;

	eth_dev->data->mac_addrs = &priv->dev_addr;

	return 0;
}

static int
gve_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct gve_priv *priv = eth_dev->data->dev_private;

	gve_teardown_priv_resources(priv);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int
gve_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct gve_priv), gve_dev_init);
}

static int
gve_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, gve_dev_uninit);
}

static const struct rte_pci_id pci_id_gve_map[] = {
	{ RTE_PCI_DEVICE(GOOGLE_VENDOR_ID, GVE_DEV_ID) },
	{ .device_id = 0 },
};

static struct rte_pci_driver rte_gve_pmd = {
	.id_table = pci_id_gve_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = gve_pci_probe,
	.remove = gve_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_gve, rte_gve_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_gve, pci_id_gve_map);
RTE_PMD_REGISTER_KMOD_DEP(net_gve, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(gve_logtype_driver, driver, NOTICE);