/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Microsoft Corporation
 * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
 * All rights reserved.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_devargs.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_bus_vmbus.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_rndis.h"
#include "hn_nvs.h"
#include "ndis.h"

#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
			    DEV_TX_OFFLOAD_TCP_CKSUM  | \
			    DEV_TX_OFFLOAD_UDP_CKSUM  | \
			    DEV_TX_OFFLOAD_TCP_TSO    | \
			    DEV_TX_OFFLOAD_MULTI_SEGS | \
			    DEV_TX_OFFLOAD_VLAN_INSERT)

#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
			    DEV_RX_OFFLOAD_VLAN_STRIP)

int hn_logtype_init;
int hn_logtype_driver;

struct hn_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct hn_xstats_name_off hn_stat_strings[] = {
	{ "good_packets",           offsetof(struct hn_stats, packets) },
	{ "good_bytes",             offsetof(struct hn_stats, bytes) },
	{ "errors",                 offsetof(struct hn_stats, errors) },
	{ "ring_full",              offsetof(struct hn_stats, ring_full) },
	{ "multicast_packets",      offsetof(struct hn_stats, multicast) },
	{ "broadcast_packets",      offsetof(struct hn_stats, broadcast) },
	{ "undersize_packets",      offsetof(struct hn_stats, size_bins[0]) },
	{ "size_64_packets",        offsetof(struct hn_stats, size_bins[1]) },
	{ "size_65_127_packets",    offsetof(struct hn_stats, size_bins[2]) },
	{ "size_128_255_packets",   offsetof(struct hn_stats, size_bins[3]) },
	{ "size_256_511_packets",   offsetof(struct hn_stats, size_bins[4]) },
	{ "size_512_1023_packets",  offsetof(struct hn_stats, size_bins[5]) },
	{ "size_1024_1518_packets", offsetof(struct hn_stats, size_bins[6]) },
	{ "size_1519_max_packets",  offsetof(struct hn_stats, size_bins[7]) },
};

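/*
 * Allocate an ethdev for the given VMBus device.
 * The primary process allocates the port and its private data area;
 * a secondary process attaches to the port created by the primary.
 */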
static struct rte_eth_dev *
eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
{
	struct rte_eth_dev *eth_dev;
	const char *name;

	if (!dev)
		return NULL;

	name = dev->device.name;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev) {
			PMD_DRV_LOG(NOTICE, "can not allocate rte ethdev");
			return NULL;
		}

		if (private_data_size) {
			eth_dev->data->dev_private =
				rte_zmalloc_socket(name, private_data_size,
						   RTE_CACHE_LINE_SIZE,
						   dev->device.numa_node);
			if (!eth_dev->data->dev_private) {
				PMD_DRV_LOG(NOTICE, "can not allocate driver data");
				rte_eth_dev_release_port(eth_dev);
				return NULL;
			}
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_DRV_LOG(NOTICE, "can not attach secondary");
			return NULL;
		}
	}

	eth_dev->device = &dev->device;

	/* interrupt is simulated */
	dev->intr_handle.type = RTE_INTR_HANDLE_EXT;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	eth_dev->intr_handle = &dev->intr_handle;

	return eth_dev;
}

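/* Release a port previously set up by eth_dev_vmbus_allocate(). */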
static void
eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
{
	/* mac_addrs must not be freed alone because it is part of dev_private */
	eth_dev->data->mac_addrs = NULL;
	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	eth_dev->device = NULL;
	eth_dev->intr_handle = NULL;
}

/* handle "latency=X" from devargs */
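/* The value is in microseconds, e.g. "latency=20" is stored as 20000 ns. */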
static int hn_set_latency(const char *key, const char *value, void *opaque)
{
	struct hn_data *hv = opaque;
	char *endp = NULL;
	unsigned long lat;

	errno = 0;
	lat = strtoul(value, &endp, 0);

	if (*value == '\0' || *endp != '\0') {
		PMD_DRV_LOG(ERR, "invalid parameter %s=%s", key, value);
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "set latency %lu usec", lat);

	hv->latency = lat * 1000;	/* usec to nsec */
	return 0;
}

/* Parse device arguments */
static int hn_parse_args(const struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_devargs *devargs = dev->device->devargs;
	static const char * const valid_keys[] = {
		"latency",
		NULL
	};
	struct rte_kvargs *kvlist;
	int ret;

	if (!devargs)
		return 0;

	PMD_INIT_LOG(DEBUG, "device args %s %s",
		     devargs->name, devargs->args);

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (!kvlist) {
		PMD_DRV_LOG(NOTICE, "invalid parameters");
		return -EINVAL;
	}

	ret = rte_kvargs_process(kvlist, "latency", hn_set_latency, hv);
	if (ret)
		PMD_DRV_LOG(ERR, "Unable to process latency arg");

	rte_kvargs_free(kvlist);
	return ret;
}

/* Update link status.
 * Note: the DPDK definition of "wait_to_complete"
 *   means block this call until the link is up,
 *   which is not worth supporting here.
 */
int
hn_dev_link_update(struct rte_eth_dev *dev,
		   int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_link link, old;
	int error;

	old = dev->data->dev_link;

	error = hn_rndis_get_linkstatus(hv);
	if (error)
		return error;

	hn_rndis_get_linkspeed(hv);

	hn_vf_link_update(dev, wait_to_complete);

	link = (struct rte_eth_link) {
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_autoneg = ETH_LINK_FIXED,
		.link_speed = hv->link_speed / 10000,
	};

	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	if (old.link_status == link.link_status)
		return 0;

	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
		     (link.link_status == ETH_LINK_UP) ? "up" : "down");

	return rte_eth_linkstatus_set(dev, &link);
}

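/* Report device limits for the synthetic path, the RNDIS offload
 * capabilities, and whatever an attached VF adds on top.
 */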
static void hn_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info)
{
	struct hn_data *hv = dev->data->dev_private;

	dev_info->speed_capa = ETH_LINK_SPEED_10G;
	dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen  = HN_MAX_XFER_LEN;
	dev_info->max_mac_addrs  = 1;

	dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
	dev_info->flow_type_rss_offloads =
		ETH_RSS_IPV4 | ETH_RSS_IPV6 | ETH_RSS_TCP | ETH_RSS_UDP;

	dev_info->max_rx_queues = hv->max_queues;
	dev_info->max_tx_queues = hv->max_queues;

	hn_rndis_get_offload(hv, dev_info);
	hn_vf_info_get(hv, dev_info);
}

static void
hn_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);
	hn_vf_promiscuous_enable(dev);
}

static void
hn_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint32_t filter;

	filter = NDIS_PACKET_TYPE_DIRECTED | NDIS_PACKET_TYPE_BROADCAST;
	if (dev->data->all_multicast)
		filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
	hn_rndis_set_rxfilter(hv, filter);
	hn_vf_promiscuous_disable(dev);
}

static void
hn_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
			      NDIS_PACKET_TYPE_ALL_MULTICAST |
			      NDIS_PACKET_TYPE_BROADCAST);
	hn_vf_allmulticast_enable(dev);
}

static void
hn_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
			      NDIS_PACKET_TYPE_BROADCAST);
	hn_vf_allmulticast_disable(dev);
}

static int
hn_dev_mc_addr_list(struct rte_eth_dev *dev,
		    struct ether_addr *mc_addr_set,
		    uint32_t nb_mc_addr)
{
	/* No filtering on the synthetic path, but can do it on VF */
	return hn_vf_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}

/* Request and open the VMBus sub-channels used by the additional queues */
static int hn_subchan_configure(struct hn_data *hv,
				uint32_t subchan)
{
	struct vmbus_channel *primary = hn_primary_chan(hv);
	int err;
	unsigned int retry = 0;

	PMD_DRV_LOG(DEBUG,
		    "open %u subchannels", subchan);

	/* Send create sub channels command */
	err = hn_nvs_alloc_subchans(hv, &subchan);
	if (err)
		return err;

	while (subchan > 0) {
		struct vmbus_channel *new_sc;
		uint16_t chn_index;

		err = rte_vmbus_subchan_open(primary, &new_sc);
		if (err == -ENOENT && ++retry < 1000) {
			/* This can happen if not ready yet */
			rte_delay_ms(10);
			continue;
		}

		if (err) {
			PMD_DRV_LOG(ERR,
				    "open subchannel failed: %d", err);
			return err;
		}

		rte_vmbus_set_latency(hv->vmbus, new_sc, hv->latency);

		retry = 0;
		chn_index = rte_vmbus_sub_channel_index(new_sc);
		if (chn_index == 0 || chn_index > hv->max_queues) {
			PMD_DRV_LOG(ERR,
				    "Invalid subchannel offermsg channel %u",
				    chn_index);
			return -EIO;
		}

		PMD_DRV_LOG(DEBUG, "new sub channel %u", chn_index);
		hv->channels[chn_index] = new_sc;
		--subchan;
	}

	return err;
}

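/* Validate the requested offloads, program RNDIS offload and RSS
 * settings, and open one VMBus sub-channel per additional queue.
 */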
static int hn_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
	const struct rte_eth_txmode *txmode = &dev_conf->txmode;

	const struct rte_eth_rss_conf *rss_conf =
		&dev_conf->rx_adv_conf.rss_conf;
	struct hn_data *hv = dev->data->dev_private;
	uint64_t unsupported;
	int err, subchan;

	PMD_INIT_FUNC_TRACE();

	unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
	if (unsupported) {
		PMD_DRV_LOG(NOTICE,
			    "unsupported TX offload: %#" PRIx64,
			    unsupported);
		return -EINVAL;
	}

	unsupported = rxmode->offloads & ~HN_RX_OFFLOAD_CAPS;
	if (unsupported) {
		PMD_DRV_LOG(NOTICE,
			    "unsupported RX offload: %#" PRIx64,
			    unsupported);
		return -EINVAL;
	}

	err = hn_rndis_conf_offload(hv, txmode->offloads,
				    rxmode->offloads);
	if (err) {
		PMD_DRV_LOG(NOTICE,
			    "offload configure failed");
		return err;
	}

	hv->num_queues = RTE_MAX(dev->data->nb_rx_queues,
				 dev->data->nb_tx_queues);
	subchan = hv->num_queues - 1;
	if (subchan > 0) {
		err = hn_subchan_configure(hv, subchan);
		if (err) {
			PMD_DRV_LOG(NOTICE,
				    "subchannel configuration failed");
			return err;
		}

		err = hn_rndis_conf_rss(hv, rss_conf);
		if (err) {
			PMD_DRV_LOG(NOTICE,
				    "rss configuration failed");
			return err;
		}
	}

	return hn_vf_configure(dev, dev_conf);
}

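/* Aggregate basic statistics from the per-queue software counters
 * (and from the VF, if one is attached).
 */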
static int hn_dev_stats_get(struct rte_eth_dev *dev,
			    struct rte_eth_stats *stats)
{
	unsigned int i;

	hn_vf_stats_get(dev, stats);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		stats->opackets += txq->stats.packets;
		stats->obytes += txq->stats.bytes;
		stats->oerrors += txq->stats.errors;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_opackets[i] = txq->stats.packets;
			stats->q_obytes[i] = txq->stats.bytes;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		stats->ipackets += rxq->stats.packets;
		stats->ibytes += rxq->stats.bytes;
		stats->ierrors += rxq->stats.errors;
		stats->imissed += rxq->stats.ring_full;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_ipackets[i] = rxq->stats.packets;
			stats->q_ibytes[i] = rxq->stats.bytes;
		}
	}

	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return 0;
}

static void
hn_dev_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;
		memset(&txq->stats, 0, sizeof(struct hn_stats));
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		memset(&rxq->stats, 0, sizeof(struct hn_stats));
	}
}

static void
hn_dev_xstats_reset(struct rte_eth_dev *dev)
{
	hn_dev_stats_reset(dev);
	hn_vf_xstats_reset(dev);
}

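/* Total number of extended statistics: one entry per hn_stat_strings
 * counter per queue, plus the xstats reported by the VF.
 */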
static int
hn_dev_xstats_count(struct rte_eth_dev *dev)
{
	int ret, count;

	count = dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings);
	count += dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);

	ret = hn_vf_xstats_get_names(dev, NULL, 0);
	if (ret < 0)
		return ret;

	return count + ret;
}

static int
hn_dev_xstats_get_names(struct rte_eth_dev *dev,
			struct rte_eth_xstat_name *xstats_names,
			unsigned int limit)
{
	unsigned int i, t, count = 0;
	int ret;

	if (!xstats_names)
		return hn_dev_xstats_count(dev);

	/* Note: limit checked in rte_eth_xstats_get_names() */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		if (count >= limit)
			break;

		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "tx_q%u_%s", i, hn_stat_strings[t].name);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		if (count >= limit)
			break;

		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "rx_q%u_%s", i,
				 hn_stat_strings[t].name);
	}

	ret = hn_vf_xstats_get_names(dev, xstats_names + count,
				     limit - count);
	if (ret < 0)
		return ret;

	return count + ret;
}

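/* Fill the extended statistics by reading each per-queue counter
 * through the hn_stat_strings offset table, then append the VF xstats.
 */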
static int
hn_dev_xstats_get(struct rte_eth_dev *dev,
		  struct rte_eth_xstat *xstats,
		  unsigned int n)
{
	unsigned int i, t, count = 0;
	const unsigned int nstats = hn_dev_xstats_count(dev);
	const char *stats;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		stats = (const char *)&txq->stats;
		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			xstats[count++].value = *(const uint64_t *)
				(stats + hn_stat_strings[t].offset);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		stats = (const char *)&rxq->stats;
		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			xstats[count++].value = *(const uint64_t *)
				(stats + hn_stat_strings[t].offset);
	}

	ret = hn_vf_xstats_get(dev, xstats + count, n - count);
	if (ret < 0)
		return ret;

	return count + ret;
}

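/* Start traffic: program the RNDIS receive filter for directed,
 * multicast and broadcast packets, then start the VF if present.
 */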
static int
hn_dev_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	int error;

	PMD_INIT_FUNC_TRACE();

	error = hn_rndis_set_rxfilter(hv,
				      NDIS_PACKET_TYPE_BROADCAST |
				      NDIS_PACKET_TYPE_ALL_MULTICAST |
				      NDIS_PACKET_TYPE_DIRECTED);
	if (error)
		return error;

	error = hn_vf_start(dev);
	if (error)
		hn_rndis_set_rxfilter(hv, 0);

	return error;
}

static void
hn_dev_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	hn_rndis_set_rxfilter(hv, 0);
	hn_vf_stop(dev);
}

static void
hn_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_LOG(DEBUG, "close");

	hn_vf_close(dev);
}

static const struct eth_dev_ops hn_eth_dev_ops = {
	.dev_configure		= hn_dev_configure,
	.dev_start		= hn_dev_start,
	.dev_stop		= hn_dev_stop,
	.dev_close		= hn_dev_close,
	.dev_infos_get		= hn_dev_info_get,
	.dev_supported_ptypes_get = hn_vf_supported_ptypes,
	.promiscuous_enable     = hn_dev_promiscuous_enable,
	.promiscuous_disable    = hn_dev_promiscuous_disable,
	.allmulticast_enable    = hn_dev_allmulticast_enable,
	.allmulticast_disable   = hn_dev_allmulticast_disable,
	.set_mc_addr_list	= hn_dev_mc_addr_list,
	.tx_queue_setup		= hn_dev_tx_queue_setup,
	.tx_queue_release	= hn_dev_tx_queue_release,
	.tx_done_cleanup        = hn_dev_tx_done_cleanup,
	.rx_queue_setup		= hn_dev_rx_queue_setup,
	.rx_queue_release	= hn_dev_rx_queue_release,
	.link_update		= hn_dev_link_update,
	.stats_get		= hn_dev_stats_get,
	.stats_reset            = hn_dev_stats_reset,
	.xstats_get		= hn_dev_xstats_get,
	.xstats_get_names	= hn_dev_xstats_get_names,
	.xstats_reset		= hn_dev_xstats_reset,
};

/*
 * Setup connection between PMD and kernel.
 */
static int
hn_attach(struct hn_data *hv, unsigned int mtu)
{
	int error;

	/* Attach NVS */
	error = hn_nvs_attach(hv, mtu);
	if (error)
		goto failed_nvs;

	/* Attach RNDIS */
	error = hn_rndis_attach(hv);
	if (error)
		goto failed_rndis;

	/*
	 * NOTE:
	 * Under certain conditions on certain versions of Hyper-V,
	 * the RNDIS rxfilter is _not_ zero on the hypervisor side
	 * after the successful RNDIS initialization.
	 */
	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_NONE);
	return 0;
failed_rndis:
	hn_nvs_detach(hv);
failed_nvs:
	return error;
}

static void
hn_detach(struct hn_data *hv)
{
	hn_nvs_detach(hv);
	hn_rndis_detach(hv);
}

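/*
 * Per-device initialization (primary process only): parse devargs,
 * open the primary VMBus channel, attach NVS and RNDIS, read the MAC
 * address, and size the queue count from the host RSS capabilities.
 */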
static int
eth_hn_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hn_data *hv = eth_dev->data->dev_private;
	struct rte_device *device = eth_dev->device;
	struct rte_vmbus_device *vmbus;
	unsigned int rxr_cnt;
	int err, max_chan;

	PMD_INIT_FUNC_TRACE();

	vmbus = container_of(device, struct rte_vmbus_device, device);
	eth_dev->dev_ops = &hn_eth_dev_ops;
	eth_dev->tx_pkt_burst = &hn_xmit_pkts;
	eth_dev->rx_pkt_burst = &hn_recv_pkts;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Since Hyper-V only supports one MAC address, just use local data */
	eth_dev->data->mac_addrs = &hv->mac_addr;

	hv->vmbus = vmbus;
	hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
	hv->chim_res  = &vmbus->resource[HV_SEND_BUF_MAP];
	hv->port_id = eth_dev->data->port_id;
	hv->latency = HN_CHAN_LATENCY_NS;
	hv->max_queues = 1;

	err = hn_parse_args(eth_dev);
	if (err)
		return err;

	strlcpy(hv->owner.name, eth_dev->device->name,
		RTE_ETH_MAX_OWNER_NAME_LEN);
	err = rte_eth_dev_owner_new(&hv->owner.id);
	if (err) {
		PMD_INIT_LOG(ERR, "Can not get owner id");
		return err;
	}

	/* Initialize primary channel input for control operations */
	err = rte_vmbus_chan_open(vmbus, &hv->channels[0]);
	if (err)
		return err;

	rte_vmbus_set_latency(hv->vmbus, hv->channels[0], hv->latency);

	hv->primary = hn_rx_queue_alloc(hv, 0,
					eth_dev->device->numa_node);

	if (!hv->primary)
		return -ENOMEM;

	err = hn_attach(hv, ETHER_MTU);
	if (err)
		goto failed;

	err = hn_tx_pool_init(eth_dev);
	if (err)
		goto failed;

	err = hn_rndis_get_eaddr(hv, hv->mac_addr.addr_bytes);
	if (err)
		goto failed;

	/* Multi queue requires later versions of Windows Server */
	if (hv->nvs_ver < NVS_VERSION_5)
		return 0;

	max_chan = rte_vmbus_max_channels(vmbus);
	PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan);
	if (max_chan <= 0)
		goto failed;

	if (hn_rndis_query_rsscaps(hv, &rxr_cnt) != 0)
		rxr_cnt = 1;

	hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);

	/* If VF was reported but not added, do it now */
	if (hv->vf_present && !hv->vf_dev) {
		PMD_INIT_LOG(DEBUG, "Adding VF device");

		err = hn_vf_add(eth_dev, hv);
		if (err)
			goto failed;
	}

	return 0;

failed:
	PMD_INIT_LOG(NOTICE, "device init failed");

	hn_detach(hv);
	return err;
}

static int
eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hn_data *hv = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hn_dev_stop(eth_dev);
	hn_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->rx_pkt_burst = NULL;

	hn_detach(hv);
	rte_vmbus_chan_close(hv->primary->chan);
	rte_free(hv->primary);
	rte_eth_dev_owner_delete(hv->owner.id);

	return 0;
}

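/* VMBus probe entry point: allocate the ethdev and run device init. */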
static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused,
			struct rte_vmbus_device *dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev = eth_dev_vmbus_allocate(dev, sizeof(struct hn_data));
	if (!eth_dev)
		return -ENOMEM;

	ret = eth_hn_dev_init(eth_dev);
	if (ret)
		eth_dev_vmbus_release(eth_dev);
	else
		rte_eth_dev_probing_finish(eth_dev);

	return ret;
}

static int eth_hn_remove(struct rte_vmbus_device *dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev = rte_eth_dev_allocated(dev->device.name);
	if (!eth_dev)
		return -ENODEV;

	ret = eth_hn_dev_uninit(eth_dev);
	if (ret)
		return ret;

	eth_dev_vmbus_release(eth_dev);
	return 0;
}

/* Network device GUID */
static const rte_uuid_t hn_net_ids[] = {
	/* f8615163-df3e-46c5-913f-f2d2f965ed0e */
	RTE_UUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x913f, 0xf2d2f965ed0eULL),
	{ 0 }
};

static struct rte_vmbus_driver rte_netvsc_pmd = {
	.id_table = hn_net_ids,
	.probe = eth_hn_probe,
	.remove = eth_hn_remove,
};

RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");

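/* Register the driver log types; both default to NOTICE level. */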
RTE_INIT(hn_init_log)
{
	hn_logtype_init = rte_log_register("pmd.net.netvsc.init");
	if (hn_logtype_init >= 0)
		rte_log_set_level(hn_logtype_init, RTE_LOG_NOTICE);
	hn_logtype_driver = rte_log_register("pmd.net.netvsc.driver");
	if (hn_logtype_driver >= 0)
		rte_log_set_level(hn_logtype_driver, RTE_LOG_NOTICE);
}