/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <bus_vmbus_driver.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

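/*
 * Azure/Hyper-V accelerated networking pairs the synthetic netvsc
 * device with an SR-IOV VF that carries the fast data path.  The host
 * exposes the VF with the same MAC address as the synthetic NIC, which
 * is how the two are matched below.  This file manages that pairing:
 * attaching and removing the VF, mirroring configuration onto it, and
 * switching the NVS data path between the two devices.
 */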
/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct rte_ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

		if (vf_dev == dev)
			continue;

		if (rte_is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}

/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct rte_eth_dev *dev, struct hn_data *hv)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int port, ret;

	if (hv->vf_ctx.vf_attached) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return 0;
	}

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "Couldn't find port for VF");
		return port;
	}

	PMD_DRV_LOG(NOTICE, "found matching VF port %d", port);
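	/*
	 * Take ethdev ownership of the VF port so applications do not
	 * use it directly; ports with an owner are skipped by
	 * RTE_ETH_FOREACH_DEV, so the VF stays hidden behind the
	 * synthetic device.
	 */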
	ret = rte_eth_dev_owner_get(port, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Cannot get owner for port %d", port);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Cannot set owner for port %d", port);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port);
	hv->vf_ctx.vf_attached = true;
	hv->vf_ctx.vf_port = port;
	return 0;
}

static void hn_vf_remove(struct hn_data *hv);

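/*
 * Deferred VF teardown, run from an EAL alarm.  The stop/close/detach
 * sequence cannot safely run inside the RTE_ETH_EVENT_INTR_RMV
 * callback itself, since that callback executes in the context of the
 * device being removed.
 */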
static void hn_remove_delayed(void *args)
{
	struct hn_data *hv = args;
	uint16_t port_id = hv->vf_ctx.vf_port;
	struct rte_device *dev = rte_eth_devices[port_id].device;
	int ret;

	/* Tell VSP to switch data path to synthetic */
	hn_vf_remove(hv);

	PMD_DRV_LOG(NOTICE, "Starting removal of port %d", port_id);
	rte_rwlock_write_lock(&hv->vf_lock);

	/* Give back ownership */
	ret = rte_eth_dev_owner_unset(port_id, hv->owner.id);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_eth_dev_owner_unset failed ret=%d",
			    ret);
	hv->vf_ctx.vf_attached = false;

	ret = rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_INTR_RMV,
					      hn_eth_rmv_event_callback, hv);
	if (ret)
		PMD_DRV_LOG(ERR,
			    "rte_eth_dev_callback_unregister failed ret=%d",
			    ret);

	/* Detach and release port_id from system */
	ret = rte_eth_dev_stop(port_id);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_eth_dev_stop failed port_id=%u ret=%d",
			    port_id, ret);

	/* Record the device parameters for possible hotplug events */
	if (dev->devargs && dev->devargs->args)
		hv->vf_devargs = strdup(dev->devargs->args);

	ret = rte_eth_dev_close(port_id);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_eth_dev_close failed port_id=%u ret=%d",
			    port_id, ret);

	ret = rte_dev_remove(dev);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_dev_remove failed port_id=%u ret=%d",
			    port_id, ret);
	hv->vf_ctx.vf_state = vf_removed;

	rte_rwlock_write_unlock(&hv->vf_lock);
}

int hn_eth_rmv_event_callback(uint16_t port_id,
			      enum rte_eth_event_type event __rte_unused,
			      void *cb_arg, void *out __rte_unused)
{
	struct hn_data *hv = cb_arg;

	PMD_DRV_LOG(NOTICE, "Removing VF port %d", port_id);
	rte_eal_alarm_set(1, hn_remove_delayed, hv);

	return 0;
}

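/*
 * Mirror the synthetic device's queue configuration onto the VF.
 * Used on hot add, when the VF appears after the synthetic device has
 * already been configured and started.
 */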
static int hn_setup_vf_queues(int port, struct rte_eth_dev *dev)
{
	struct hn_rx_queue *rx_queue;
	struct rte_eth_txq_info txinfo;
	struct rte_eth_rxq_info rxinfo;
	int i, ret = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		ret = rte_eth_tx_queue_info_get(dev->data->port_id, i, &txinfo);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_tx_queue_info_get failed ret=%d",
				    ret);
			return ret;
		}

		ret = rte_eth_tx_queue_setup(port, i, txinfo.nb_desc, 0,
					     &txinfo.conf);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_tx_queue_setup failed ret=%d",
				    ret);
			return ret;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		ret = rte_eth_rx_queue_info_get(dev->data->port_id, i, &rxinfo);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_rx_queue_info_get failed ret=%d",
				    ret);
			return ret;
		}

		rx_queue = dev->data->rx_queues[i];

		ret = rte_eth_rx_queue_setup(port, i, rxinfo.nb_desc, 0,
					     &rxinfo.conf, rx_queue->mb_pool);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_rx_queue_setup failed ret=%d",
				    ret);
			return ret;
		}
	}

	return ret;
}

int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);

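/*
 * Alarm callback used when RNDIS has reported a VF but the matching
 * PCI device has not been probed yet; retry the attach later.
 */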
static void hn_vf_add_retry(void *args)
{
	struct rte_eth_dev *dev = args;
	struct hn_data *hv = dev->data->dev_private;

	hn_vf_add(dev, hv);
}

int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf);

/*
 * Add a new VF device to the synthetic device and, once it is up,
 * switch the NVS data path over to it.
 */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int ret, port;

	if (!hv->vf_ctx.vf_vsp_reported || hv->vf_ctx.vf_vsc_switched)
		return 0;

	rte_rwlock_write_lock(&hv->vf_lock);

	ret = hn_vf_attach(dev, hv);
	if (ret) {
		PMD_DRV_LOG(NOTICE,
			    "RNDIS reports VF but device not found, retrying");
		rte_eal_alarm_set(1000000, hn_vf_add_retry, dev);
		goto exit;
	}

	port = hv->vf_ctx.vf_port;

	/* If the primary device has started, this is a VF hot add.
	 * Configure and start VF device.
	 */
	if (dev->data->dev_started) {
		if (rte_eth_devices[port].data->dev_started) {
			PMD_DRV_LOG(ERR, "VF already started on hot add");
			goto exit;
		}

		PMD_DRV_LOG(NOTICE, "configuring VF port %d", port);
		ret = hn_vf_configure(dev, &dev->data->dev_conf);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to configure VF port %d",
				    port);
			goto exit;
		}

		ret = hn_setup_vf_queues(port, dev);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to configure VF queues port %d",
				    port);
			goto exit;
		}

		ret = rte_eth_dev_set_mtu(port, dev->data->mtu);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to set VF MTU");
			goto exit;
		}

		PMD_DRV_LOG(NOTICE, "Starting VF port %d", port);
		ret = rte_eth_dev_start(port);
		if (ret) {
			PMD_DRV_LOG(ERR, "rte_eth_dev_start failed ret=%d",
				    ret);
			goto exit;
		}
		hv->vf_ctx.vf_state = vf_started;
	}

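	/* Ask the host (VSP) to steer incoming traffic to the VF */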
	ret = hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	if (ret == 0)
		hv->vf_ctx.vf_vsc_switched = true;

exit:
	rte_rwlock_write_unlock(&hv->vf_lock);
	return ret;
}

/* Switch the data path back to the synthetic device */
static void hn_vf_remove(struct hn_data *hv)
{
	int ret;

	if (!hv->vf_ctx.vf_vsc_switched) {
		PMD_DRV_LOG(ERR, "VF path not active");
		return;
	}

	rte_rwlock_write_lock(&hv->vf_lock);
	if (!hv->vf_ctx.vf_vsc_switched) {
		PMD_DRV_LOG(ERR, "VF path not active");
	} else {
		/* Stop incoming packets from arriving on VF */
		ret = hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
		if (ret == 0)
			hv->vf_ctx.vf_vsc_switched = false;
	}
	rte_rwlock_write_unlock(&hv->vf_lock);
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid VF association NVS message");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	hv->vf_ctx.vf_vsp_reported = vf_assoc->allocated;

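	/*
	 * Only act on the event once the synthetic device is fully
	 * attached; the flag recorded above lets a later hn_vf_add()
	 * pick the VF up.
	 */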
	if (dev->state == RTE_ETH_DEV_ATTACHED) {
		if (vf_assoc->allocated)
			hn_vf_add(dev, hv);
		else
			hn_vf_remove(hv);
	}
}

static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
		     const struct rte_eth_desc_lim *vf_lim)
{
	lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
	lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
	lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
	lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
	lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max,
				      lim->nb_mtu_seg_max);
}

/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF
 * and the minimum number of queues and buffer sizes.
 */
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			    struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;
	int ret;

	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
	if (ret != 0)
		return ret;

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
	hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
				       info->max_rx_pktlen);
	hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

	return 0;
}

int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = hn_vf_info_merge(vf_dev, info);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_conf vf_conf = *dev_conf;
	int ret = 0;

	/* Link state interrupt does not matter here. */
	vf_conf.intr_conf.lsc = 0;

	/* Need to monitor removal events */
	vf_conf.intr_conf.rmv = 1;

	if (hv->vf_ctx.vf_attached) {
		ret = rte_eth_dev_callback_register(hv->vf_ctx.vf_port,
						    RTE_ETH_EVENT_INTR_RMV,
						    hn_eth_rmv_event_callback,
						    hv);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Registering callback failed for VF port %d ret %d",
				    hv->vf_ctx.vf_port, ret);
			return ret;
		}

		ret = rte_eth_dev_configure(hv->vf_ctx.vf_port,
					    dev->data->nb_rx_queues,
					    dev->data->nb_tx_queues,
					    &vf_conf);
		if (ret) {
			PMD_DRV_LOG(ERR, "VF configuration failed: %d", ret);

			rte_eth_dev_callback_unregister(hv->vf_ctx.vf_port,
							RTE_ETH_EVENT_INTR_RMV,
							hn_eth_rmv_event_callback,
							hv);

			return ret;
		}

		hv->vf_ctx.vf_state = vf_configured;
	}

	return ret;
}

/* Configure the VF if present.
 * The VF device will have the same number of queues as the synthetic device.
 */
int hn_vf_configure_locked(struct rte_eth_dev *dev,
			   const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	int ret = 0;

	rte_rwlock_write_lock(&hv->vf_lock);
	ret = hn_vf_configure(dev, dev_conf);
	rte_rwlock_write_unlock(&hv->vf_lock);

	return ret;
}

const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev,
				       size_t *no_of_elements)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev,
							      no_of_elements);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ptypes;
}

int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev) {
		ret = rte_eth_dev_stop(vf_dev->data->port_id);
		if (ret != 0)
			PMD_DRV_LOG(ERR, "Failed to stop device on port %u",
				    vf_dev->data->port_id);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

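/*
 * Locking model for the cascade helpers below: each caller holds the
 * vf_lock read lock for the duration of the forwarded call, which
 * keeps hn_remove_delayed() (which takes the write lock) from
 * detaching the VF underneath it.
 */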
/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
	}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)			\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		int ret = 0;					\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			ret = func(vf_dev->data->port_id);	\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
		return ret;					\
	}

int hn_vf_close(struct rte_eth_dev *dev)
{
	int ret = 0;
	struct hn_data *hv = dev->data->dev_private;

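	/* Cancel any pending hot-add retry before tearing the VF down */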
	rte_eal_alarm_cancel(hn_vf_add_retry, dev);

	rte_rwlock_read_lock(&hv->vf_lock);
	if (hv->vf_ctx.vf_attached) {
		rte_eth_dev_callback_unregister(hv->vf_ctx.vf_port,
						RTE_ETH_EVENT_INTR_RMV,
						hn_eth_rmv_event_callback,
						hv);
		rte_eal_alarm_cancel(hn_remove_delayed, hv);
		ret = rte_eth_dev_close(hv->vf_ctx.vf_port);
		hv->vf_ctx.vf_attached = false;
	}
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}

int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
		       struct rte_ether_addr *mc_addr_set,
		       uint32_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release)
		(*vf_dev->dev_ops->tx_queue_release)(vf_dev, queue_id);

	rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release)
		(*vf_dev->dev_ops->rx_queue_release)(vf_dev, queue_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
						 names, n);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* Add a "vf_" prefix to the xstat names so they can be told
	 * apart from the synthetic-path stats.
	 */
	if (names) {
		for (i = 0; i < count; i++) {
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}

int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int offset,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get(vf_dev->data->port_id,
					   xstats + offset, n - offset);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* Offset the IDs so the VF xstats follow the synthetic ones */
	if (count > 0) {
		for (i = 0; i < count; i++)
			xstats[i + offset].id += offset;
	}

	return count;
}

int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
	else
		ret = -EINVAL;
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->reta_update)
		ret = vf_dev->dev_ops->reta_update(vf_dev,
						   reta_conf, reta_size);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
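	/* Only propagate the MTU once the data path has switched to the VF */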
	if (hv->vf_ctx.vf_vsc_switched && vf_dev)
		ret = rte_eth_dev_set_mtu(vf_dev->data->port_id, mtu);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}