/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct ether_addr *vf_mac = vf_dev->data->mac_addrs;

		if (vf_dev == dev)
			continue;

		if (is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}

/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int ret;

	if (hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return -EEXIST;
	}

	ret = rte_eth_dev_owner_get(port_id, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port_id, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port_id, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port_id);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
	hv->vf_port = port_id;
	rte_smp_wmb();

	return 0;
}

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int port, err;

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "No matching MAC found");
		return port;
	}

	rte_spinlock_lock(&hv->vf_lock);
	err = hn_vf_attach(hv, port);

	if (err == 0) {
		dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
		hv->vf_intr = (struct rte_intr_handle) {
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		dev->intr_handle = &hv->vf_intr;
		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	}
	rte_spinlock_unlock(&hv->vf_lock);

	return err;
}

/* Remove VF device */
static void hn_vf_remove(struct hn_data *hv)
{
	rte_spinlock_lock(&hv->vf_lock);

	if (!hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF path not active");
	} else {
		uint16_t port = hv->vf_port;

		/* Stop incoming packets from arriving on VF */
		hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);

		/* Stop transmission over VF */
		hv->vf_port = HN_INVALID_PORT;
		rte_smp_wmb();

		/* Give back ownership, using the saved port id since
		 * hv->vf_port was invalidated above.
		 */
		rte_eth_dev_owner_unset(port, hv->owner.id);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	hv->vf_present = vf_assoc->allocated;

	if (dev->state != RTE_ETH_DEV_ATTACHED)
		return;

	if (vf_assoc->allocated)
		hn_vf_add(dev, hv);
	else
		hn_vf_remove(hv);
}

/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF, the minimum number of queues,
 * the intersection of the offload capabilities, and the larger of
 * the buffer size and packet length limits.
 */
static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			     struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;

	rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
				       info->max_rx_pktlen);
}

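/* Merge the VF device info (if one is attached) into the synthetic device info */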
void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		hn_vf_info_merge(vf_dev, info);
	rte_spinlock_unlock(&hv->vf_lock);
}

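/* Query link state from the VF, if one is attached */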
int hn_vf_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->link_update)
		ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}

/* called when VF has link state interrupts enabled */
static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
			   enum rte_eth_event_type event,
			   void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *dev = cb_arg;

	if (event != RTE_ETH_EVENT_INTR_LSC)
		return 0;

	/* If link state has changed, pass the event on */
	if (hn_dev_link_update(dev, 0) == 0)
		return 0; /* no change */

	return _rte_eth_dev_callback_process(dev,
					     RTE_ETH_EVENT_INTR_LSC,
					     NULL);
}

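/* Configure the VF with the same queue counts as the synthetic device;
 * enable LSC interrupts on the VF only when both devices support them,
 * and register the LSC event callback in that case.
 */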
static int _hn_vf_configure(struct rte_eth_dev *dev,
			    uint16_t vf_port,
			    const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_conf vf_conf = *dev_conf;
	struct rte_eth_dev *vf_dev;
	int ret;

	vf_dev = &rte_eth_devices[vf_port];
	if (dev_conf->intr_conf.lsc &&
	    (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 1;
	} else {
		PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 0;
	}

	ret = rte_eth_dev_configure(vf_port,
				    dev->data->nb_rx_queues,
				    dev->data->nb_tx_queues,
				    &vf_conf);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "VF configuration failed: %d", ret);
	} else if (vf_conf.intr_conf.lsc) {
		ret = rte_eth_dev_callback_register(vf_port,
						    RTE_ETH_EVENT_INTR_LSC,
						    hn_vf_lsc_event, dev);
		if (ret)
			PMD_DRV_LOG(ERR,
				    "Failed to register LSC callback for VF %u",
				    vf_port);
	}
	return ret;
}

/*
 * Configure VF if present.
 * Force VF to have same number of queues as synthetic device
 */
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	if (hv->vf_port != HN_INVALID_PORT)
		ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

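/* Report the packet types supported by the VF, if one is attached */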
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_spinlock_unlock(&hv->vf_lock);

	return ptypes;
}

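/* Start the VF device, if one is attached */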
int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

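/* Stop the VF device, if one is attached */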
void hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		rte_eth_dev_stop(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_spinlock_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_spinlock_unlock(&hv->vf_lock);		\
	}

void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

void hn_vf_close(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_close);
}

void hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_stats_reset);
}

void hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_enable);
}

void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
}

void hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable);
}

void hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_disable);
}

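/* Pass the multicast address list down to the VF, if one is attached */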
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
			struct ether_addr *mc_addr_set,
			uint32_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

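/* Set up the corresponding transmit queue on the VF, if one is attached */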
int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

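/* Release the corresponding transmit queue on the VF, if one is attached */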
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}

	rte_spinlock_unlock(&hv->vf_lock);
}

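/* Set up the corresponding receive queue on the VF, if one is attached */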
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

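/* Release the corresponding receive queue on the VF, if one is attached */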
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}

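/* Get basic statistics from the VF, if one is attached */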
int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

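/* Get extended statistics names from the VF and prefix them with "vf_" */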
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;
	char tmp[RTE_ETH_XSTATS_NAME_SIZE];

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->xstats_get_names)
		count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
	rte_spinlock_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
	if (names) {
		for (i = 0; i < count; i++) {
			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}

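/* Get extended statistics from the VF, if one is attached */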
int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int count = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->xstats_get)
		count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
	rte_spinlock_unlock(&hv->vf_lock);

	return count;
}

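/* Reset extended statistics on the VF, if one is attached */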
void hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->xstats_reset)
		vf_dev->dev_ops->xstats_reset(vf_dev);
	rte_spinlock_unlock(&hv->vf_lock);
}
547