/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct rte_ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

		if (vf_dev == dev)
			continue;

		if (rte_is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}


/*
 * Attach new PCI VF device and store its port id in hv
 */
static int hn_vf_attach(struct rte_eth_dev *dev, struct hn_data *hv)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int port, ret;

	if (hv->vf_ctx.vf_attached) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return 0;
	}

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "Couldn't find port for VF");
		return port;
	}

	PMD_DRV_LOG(NOTICE, "found matching VF port %d\n", port);
	ret = rte_eth_dev_owner_get(port, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port);
	hv->vf_ctx.vf_attached = true;
	hv->vf_ctx.vf_port = port;
	return 0;
}

static void hn_vf_remove(struct hn_data *hv);

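/*
 * VF removal is deferred: hn_eth_rmv_event_callback() below only schedules
 * this handler via rte_eal_alarm_set(), so the actual stop/close/remove of
 * the VF port runs later from the alarm (interrupt) thread rather than from
 * inside the RMV event callback of the port being removed.
 */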
static void hn_remove_delayed(void *args)
{
	struct hn_data *hv = args;
	uint16_t port_id = hv->vf_ctx.vf_port;
	struct rte_device *dev = rte_eth_devices[port_id].device;
	int ret;

	/* Tell VSP to switch data path to synthetic */
	hn_vf_remove(hv);

	PMD_DRV_LOG(NOTICE, "Start to remove port %d\n", port_id);
	rte_rwlock_write_lock(&hv->vf_lock);

	/* Give back ownership */
	ret = rte_eth_dev_owner_unset(port_id, hv->owner.id);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_eth_dev_owner_unset failed ret=%d\n",
			    ret);
	hv->vf_ctx.vf_attached = false;

	ret = rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_INTR_RMV,
					      hn_eth_rmv_event_callback, hv);
	if (ret)
		PMD_DRV_LOG(ERR,
			    "rte_eth_dev_callback_unregister failed ret=%d\n",
			    ret);

	/* Detach and release port_id from system */
	ret = rte_eth_dev_stop(port_id);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_eth_dev_stop failed port_id=%u ret=%d\n",
			    port_id, ret);

	ret = rte_eth_dev_close(port_id);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_eth_dev_close failed port_id=%u ret=%d\n",
			    port_id, ret);

	ret = rte_dev_remove(dev);
	hv->vf_ctx.vf_state = vf_removed;

	rte_rwlock_write_unlock(&hv->vf_lock);
}

int hn_eth_rmv_event_callback(uint16_t port_id,
			      enum rte_eth_event_type event __rte_unused,
			      void *cb_arg, void *out __rte_unused)
{
	struct hn_data *hv = cb_arg;

	PMD_DRV_LOG(NOTICE, "Removing VF portid %d\n", port_id);
	rte_eal_alarm_set(1, hn_remove_delayed, hv);

	return 0;
}

static int hn_setup_vf_queues(int port, struct rte_eth_dev *dev)
{
	struct hn_rx_queue *rx_queue;
	struct rte_eth_txq_info txinfo;
	struct rte_eth_rxq_info rxinfo;
	int i, ret = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		ret = rte_eth_tx_queue_info_get(dev->data->port_id, i, &txinfo);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_tx_queue_info_get failed ret=%d\n",
				    ret);
			return ret;
		}

		ret = rte_eth_tx_queue_setup(port, i, txinfo.nb_desc, 0,
					     &txinfo.conf);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_tx_queue_setup failed ret=%d\n",
				    ret);
			return ret;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		ret = rte_eth_rx_queue_info_get(dev->data->port_id, i, &rxinfo);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_rx_queue_info_get failed ret=%d\n",
				    ret);
			return ret;
		}

		rx_queue = dev->data->rx_queues[i];

		ret = rte_eth_rx_queue_setup(port, i, rxinfo.nb_desc, 0,
					     &rxinfo.conf, rx_queue->mb_pool);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_rx_queue_setup failed ret=%d\n",
				    ret);
			return ret;
		}
	}

	return ret;
}

int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);

static void hn_vf_add_retry(void *args)
{
	struct rte_eth_dev *dev = args;
	struct hn_data *hv = dev->data->dev_private;

	hn_vf_add(dev, hv);
}

int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf);

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int ret, port;

	if (!hv->vf_ctx.vf_vsp_reported || hv->vf_ctx.vf_vsc_switched)
		return 0;

	rte_rwlock_write_lock(&hv->vf_lock);

	ret = hn_vf_attach(dev, hv);
	if (ret) {
		PMD_DRV_LOG(NOTICE,
			    "RNDIS reports VF but device not found, retrying");
		rte_eal_alarm_set(1000000, hn_vf_add_retry, dev);
		goto exit;
	}

	port = hv->vf_ctx.vf_port;

	/* If the primary device has started, this is a VF hot add.
	 * Configure and start VF device.
	 */
	if (dev->data->dev_started) {
		if (rte_eth_devices[port].data->dev_started) {
			PMD_DRV_LOG(ERR, "VF already started on hot add");
			goto exit;
		}

		PMD_DRV_LOG(NOTICE, "configuring VF port %d\n", port);
		ret = hn_vf_configure(dev, &dev->data->dev_conf);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to configure VF port %d\n",
				    port);
			goto exit;
		}

		ret = hn_setup_vf_queues(port, dev);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to configure VF queues port %d\n",
				    port);
			goto exit;
		}

		PMD_DRV_LOG(NOTICE, "Starting VF port %d\n", port);
		ret = rte_eth_dev_start(port);
		if (ret) {
			PMD_DRV_LOG(ERR, "rte_eth_dev_start failed ret=%d\n",
				    ret);
			goto exit;
		}
		hv->vf_ctx.vf_state = vf_started;
	}

	ret = hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	if (ret == 0)
		hv->vf_ctx.vf_vsc_switched = true;

exit:
	rte_rwlock_write_unlock(&hv->vf_lock);
	return ret;
}

/* Switch data path back to the synthetic device */
static void hn_vf_remove(struct hn_data *hv)
{
	int ret;

	if (!hv->vf_ctx.vf_vsc_switched) {
		PMD_DRV_LOG(ERR, "VF path not active");
		return;
	}

	rte_rwlock_write_lock(&hv->vf_lock);
	if (!hv->vf_ctx.vf_vsc_switched) {
		PMD_DRV_LOG(ERR, "VF path not active");
	} else {
		/* Stop incoming packets from arriving on VF */
		ret = hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
		if (ret == 0)
			hv->vf_ctx.vf_vsc_switched = false;
	}
	rte_rwlock_write_unlock(&hv->vf_lock);
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	hv->vf_ctx.vf_vsp_reported = vf_assoc->allocated;

	if (dev->state == RTE_ETH_DEV_ATTACHED) {
		if (vf_assoc->allocated)
			hn_vf_add(dev, hv);
		else
			hn_vf_remove(hv);
	}
}

static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
		     const struct rte_eth_desc_lim *vf_lim)
{
	lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
	lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
	lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
	lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
	lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max,
				      lim->nb_mtu_seg_max);
}

/*
 * Merge the info from the VF and synthetic paths:
 * use the default config of the VF
 * and the more restrictive of the two devices' limits
 * (queue counts, offloads, descriptor limits and buffer sizes).
 */
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			     struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;
	int ret;

	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
	if (ret != 0)
		return ret;

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
	hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
				       info->max_rx_pktlen);
	hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

	return 0;
}

int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = hn_vf_info_merge(vf_dev, info);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_conf vf_conf = *dev_conf;
	int ret = 0;

	/* link state interrupt does not matter here. */
	vf_conf.intr_conf.lsc = 0;

	/* need to monitor removal event */
	vf_conf.intr_conf.rmv = 1;

	if (hv->vf_ctx.vf_attached) {
		ret = rte_eth_dev_callback_register(hv->vf_ctx.vf_port,
						    RTE_ETH_EVENT_INTR_RMV,
						    hn_eth_rmv_event_callback,
						    hv);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Registering callback failed for "
				    "vf port %d ret %d\n",
				    hv->vf_ctx.vf_port, ret);
			return ret;
		}

		ret = rte_eth_dev_configure(hv->vf_ctx.vf_port,
					    dev->data->nb_rx_queues,
					    dev->data->nb_tx_queues,
					    &vf_conf);
		if (ret) {
			PMD_DRV_LOG(ERR, "VF configuration failed: %d", ret);

			rte_eth_dev_callback_unregister(hv->vf_ctx.vf_port,
							RTE_ETH_EVENT_INTR_RMV,
							hn_eth_rmv_event_callback,
							hv);

			return ret;
		}

		hv->vf_ctx.vf_state = vf_configured;
	}

	return ret;
}

/* Configure VF if present.
 * VF device will have the same number of queues as the synthetic device
 */
int hn_vf_configure_locked(struct rte_eth_dev *dev,
			   const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	int ret = 0;

	rte_rwlock_write_lock(&hv->vf_lock);
	ret = hn_vf_configure(dev, dev_conf);
	rte_rwlock_write_unlock(&hv->vf_lock);

	return ret;
}

const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ptypes;
}

int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev) {
		ret = rte_eth_dev_stop(vf_dev->data->port_id);
		if (ret != 0)
			PMD_DRV_LOG(ERR, "Failed to stop device on port %u",
				    vf_dev->data->port_id);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
	}

/* Same as above, but the cascaded function's return status is propagated */
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)			\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		int ret = 0;					\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			ret = func(vf_dev->data->port_id);	\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
		return ret;					\
	}

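/*
 * For illustration only (not part of the driver): a hand-expanded sketch of
 * what VF_ETHDEV_FUNC_RET_STATUS() produces for a wrapper such as
 * hn_vf_stats_reset() below. The function name and the guard macro are
 * hypothetical, so the block is never compiled.
 */
#ifdef HN_VF_MACRO_EXPANSION_EXAMPLE
static int hn_vf_stats_reset_expanded(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	/* take the VF lock and forward the call to the VF port, if any */
	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_reset(vf_dev->data->port_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}
#endif
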
void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

int hn_vf_close(struct rte_eth_dev *dev)
{
	int ret = 0;
	struct hn_data *hv = dev->data->dev_private;

	rte_eal_alarm_cancel(hn_vf_add_retry, dev);

	rte_rwlock_read_lock(&hv->vf_lock);
	if (hv->vf_ctx.vf_attached) {
		rte_eth_dev_callback_unregister(hv->vf_ctx.vf_port,
						RTE_ETH_EVENT_INTR_RMV,
						hn_eth_rmv_event_callback,
						hv);
		rte_eal_alarm_cancel(hn_remove_delayed, hv);
		ret = rte_eth_dev_close(hv->vf_ctx.vf_port);
		hv->vf_ctx.vf_attached = false;
	}
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}

int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addr_set,
			uint32_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}

	rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
						 names, n);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
	if (names) {
		for (i = 0; i < count; i++) {
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}

int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int offset,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get(vf_dev->data->port_id,
					   xstats + offset, n - offset);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* Offset ids for VF stats */
	if (count > 0) {
		for (i = 0; i < count; i++)
			xstats[i + offset].id += offset;
	}

	return count;
}

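/*
 * Usage sketch for the offset handling above; this is an assumption for
 * illustration, not the actual caller in hn_ethdev.c, and the function and
 * guard names are hypothetical. The idea: the synthetic device fills the
 * first 'nb_dev' slots with its own counters, then the VF counters are
 * appended starting at that offset so their ids continue the sequence.
 */
#ifdef HN_VF_XSTATS_USAGE_EXAMPLE
static int hn_xstats_get_sketch(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned int n)
{
	int nb_dev = 0;

	/* ... fill xstats[0 .. nb_dev - 1] with synthetic device counters ... */

	/* VF counters go after the synthetic ones */
	return nb_dev + hn_vf_xstats_get(dev, xstats, nb_dev, n);
}
#endif
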
int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
	else
		ret = -EINVAL;
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->reta_update)
		ret = vf_dev->dev_ops->reta_update(vf_dev,
						   reta_conf, reta_size);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}