xref: /netbsd-src/sys/dev/hyperv/if_hvn.c (revision 7330f729ccf0bd976a06f95fad452fe774fc7fd1)
1 /*	$NetBSD: if_hvn.c,v 1.5 2019/10/01 18:00:08 chs Exp $	*/
2 /*	$OpenBSD: if_hvn.c,v 1.39 2018/03/11 14:31:34 mikeb Exp $	*/
3 
4 /*-
5  * Copyright (c) 2009-2012,2016 Microsoft Corp.
6  * Copyright (c) 2010-2012 Citrix Inc.
7  * Copyright (c) 2012 NetApp Inc.
8  * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice unmodified, this list of conditions, and the following
16  *    disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * The OpenBSD port was done under funding by Esdenera Networks GmbH.
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_hvn.c,v 1.5 2019/10/01 18:00:08 chs Exp $");
39 
40 #ifdef _KERNEL_OPT
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_net_mpsafe.h"
44 #endif
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/atomic.h>
51 #include <sys/bus.h>
52 #include <sys/intr.h>
53 #include <sys/kmem.h>
54 
55 #include <net/if.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58 
59 #include <net/bpf.h>
60 
61 #include <dev/ic/ndisreg.h>
62 #include <dev/ic/rndisreg.h>
63 
64 #include <dev/hyperv/vmbusvar.h>
65 #include <dev/hyperv/if_hvnreg.h>
66 
67 #ifndef EVL_PRIO_BITS
68 #define EVL_PRIO_BITS	13
69 #endif
70 
71 #define HVN_NVS_MSGSIZE			32
72 #define HVN_NVS_BUFSIZE			PAGE_SIZE
73 
74 /*
75  * RNDIS control interface
76  */
77 #define HVN_RNDIS_CTLREQS		4
78 #define HVN_RNDIS_BUFSIZE		512
79 
80 struct rndis_cmd {
81 	uint32_t			rc_id;
82 	struct hvn_nvs_rndis		rc_msg;
83 	void				*rc_req;
84 	bus_dmamap_t			rc_dmap;
85 	bus_dma_segment_t		rc_segs;
86 	int				rc_nsegs;
87 	uint64_t			rc_gpa;
88 	struct rndis_packet_msg		rc_cmp;
89 	uint32_t			rc_cmplen;
90 	uint8_t				rc_cmpbuf[HVN_RNDIS_BUFSIZE];
91 	int				rc_done;
92 	TAILQ_ENTRY(rndis_cmd)		rc_entry;
93 };
94 TAILQ_HEAD(rndis_queue, rndis_cmd);
95 
96 #define HVN_MAXMTU			(9 * 1024)
97 
98 #define HVN_RNDIS_XFER_SIZE		2048
99 
100 /*
101  * Tx ring
102  */
103 #define HVN_TX_DESC			256
104 #define HVN_TX_FRAGS			15		/* 31 is the max */
105 #define HVN_TX_FRAG_SIZE		PAGE_SIZE
106 #define HVN_TX_PKT_SIZE			16384
107 
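/*
 * Worst-case size of the per-descriptor RNDIS header: the packet
 * message plus per-packet info blocks for VLAN and Tx checksum offload.
 */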
108 #define HVN_RNDIS_PKT_LEN					\
109 	(sizeof(struct rndis_packet_msg) +			\
110 	 sizeof(struct rndis_pktinfo) + NDIS_VLAN_INFO_SIZE +	\
111 	 sizeof(struct rndis_pktinfo) + NDIS_TXCSUM_INFO_SIZE)
112 
113 struct hvn_tx_desc {
114 	uint32_t			txd_id;
115 	int				txd_ready;
116 	struct vmbus_gpa		txd_sgl[HVN_TX_FRAGS + 1];
117 	int				txd_nsge;
118 	struct mbuf			*txd_buf;
119 	bus_dmamap_t			txd_dmap;
120 	struct vmbus_gpa		txd_gpa;
121 	struct rndis_packet_msg		*txd_req;
122 };
123 
124 struct hvn_softc {
125 	device_t			sc_dev;
126 
127 	struct vmbus_softc		*sc_vmbus;
128 	struct vmbus_channel		*sc_chan;
129 	bus_dma_tag_t			sc_dmat;
130 
131 	struct ethercom			sc_ec;
132 	struct ifmedia			sc_media;
133 	struct if_percpuq		*sc_ipq;
134 	int				sc_link_state;
135 	int				sc_promisc;
136 
137 	uint32_t			sc_flags;
138 #define	HVN_SCF_ATTACHED	__BIT(0)
139 
140 	/* NVS protocol */
141 	int				sc_proto;
142 	uint32_t			sc_nvstid;
143 	uint8_t				sc_nvsrsp[HVN_NVS_MSGSIZE];
144 	uint8_t				*sc_nvsbuf;
145 	int				sc_nvsdone;
146 
147 	/* RNDIS protocol */
148 	int				sc_ndisver;
149 	uint32_t			sc_rndisrid;
150 	struct rndis_queue		sc_cntl_sq; /* submission queue */
151 	kmutex_t			sc_cntl_sqlck;
152 	struct rndis_queue		sc_cntl_cq; /* completion queue */
153 	kmutex_t			sc_cntl_cqlck;
154 	struct rndis_queue		sc_cntl_fq; /* free queue */
155 	kmutex_t			sc_cntl_fqlck;
156 	struct rndis_cmd		sc_cntl_msgs[HVN_RNDIS_CTLREQS];
157 	struct hvn_nvs_rndis		sc_data_msg;
158 
159 	/* Rx ring */
160 	uint8_t				*sc_rx_ring;
161 	int				sc_rx_size;
162 	uint32_t			sc_rx_hndl;
163 	struct hyperv_dma		sc_rx_dma;
164 
165 	/* Tx ring */
166 	uint32_t			sc_tx_next;
167 	uint32_t			sc_tx_avail;
168 	struct hvn_tx_desc		sc_tx_desc[HVN_TX_DESC];
169 	bus_dmamap_t			sc_tx_rmap;
170 	uint8_t				*sc_tx_msgs;
171 	bus_dma_segment_t		sc_tx_mseg;
172 };
173 
174 #define SC2IFP(_sc_)	(&(_sc_)->sc_ec.ec_if)
175 #define IFP2SC(_ifp_)	((_ifp_)->if_softc)
176 
177 
178 static int	hvn_match(device_t, cfdata_t, void *);
179 static void	hvn_attach(device_t, device_t, void *);
180 static int	hvn_detach(device_t, int);
181 
182 CFATTACH_DECL_NEW(hvn, sizeof(struct hvn_softc),
183     hvn_match, hvn_attach, hvn_detach, NULL);
184 
185 static int	hvn_ioctl(struct ifnet *, u_long, void *);
186 static int	hvn_media_change(struct ifnet *);
187 static void	hvn_media_status(struct ifnet *, struct ifmediareq *);
188 static int	hvn_iff(struct hvn_softc *);
189 static int	hvn_init(struct ifnet *);
190 static void	hvn_stop(struct ifnet *, int);
191 static void	hvn_start(struct ifnet *);
192 static int	hvn_encap(struct hvn_softc *, struct mbuf *,
193 		    struct hvn_tx_desc **);
194 static void	hvn_decap(struct hvn_softc *, struct hvn_tx_desc *);
195 static void	hvn_txeof(struct hvn_softc *, uint64_t);
196 static int	hvn_rx_ring_create(struct hvn_softc *);
197 static int	hvn_rx_ring_destroy(struct hvn_softc *);
198 static int	hvn_tx_ring_create(struct hvn_softc *);
199 static void	hvn_tx_ring_destroy(struct hvn_softc *);
200 static int	hvn_set_capabilities(struct hvn_softc *);
201 static int	hvn_get_lladdr(struct hvn_softc *, uint8_t *);
202 static void	hvn_get_link_status(struct hvn_softc *);
203 
204 /* NVSP */
205 static int	hvn_nvs_attach(struct hvn_softc *);
206 static void	hvn_nvs_intr(void *);
207 static int	hvn_nvs_cmd(struct hvn_softc *, void *, size_t, uint64_t, int);
208 static int	hvn_nvs_ack(struct hvn_softc *, uint64_t);
209 static void	hvn_nvs_detach(struct hvn_softc *);
210 
211 /* RNDIS */
212 static int	hvn_rndis_attach(struct hvn_softc *);
213 static int	hvn_rndis_cmd(struct hvn_softc *, struct rndis_cmd *, int);
214 static void	hvn_rndis_input(struct hvn_softc *, uint64_t, void *);
215 static void	hvn_rxeof(struct hvn_softc *, uint8_t *, uint32_t);
216 static void	hvn_rndis_complete(struct hvn_softc *, uint8_t *, uint32_t);
217 static int	hvn_rndis_output(struct hvn_softc *, struct hvn_tx_desc *);
218 static void	hvn_rndis_status(struct hvn_softc *, uint8_t *, uint32_t);
219 static int	hvn_rndis_query(struct hvn_softc *, uint32_t, void *, size_t *);
220 static int	hvn_rndis_set(struct hvn_softc *, uint32_t, void *, size_t);
221 static int	hvn_rndis_open(struct hvn_softc *);
222 static int	hvn_rndis_close(struct hvn_softc *);
223 static void	hvn_rndis_detach(struct hvn_softc *);
224 
225 static int
226 hvn_match(device_t parent, cfdata_t match, void *aux)
227 {
228 	struct vmbus_attach_args *aa = aux;
229 
230 	if (memcmp(aa->aa_type, &hyperv_guid_network, sizeof(*aa->aa_type)))
231 		return 0;
232 	return 1;
233 }
234 
235 static void
236 hvn_attach(device_t parent, device_t self, void *aux)
237 {
238 	struct hvn_softc *sc = device_private(self);
239 	struct vmbus_attach_args *aa = aux;
240 	struct ifnet *ifp = SC2IFP(sc);
241 	uint8_t enaddr[ETHER_ADDR_LEN];
242 	int error;
243 
244 	sc->sc_dev = self;
245 	sc->sc_vmbus = (struct vmbus_softc *)device_private(parent);
246 	sc->sc_chan = aa->aa_chan;
247 	sc->sc_dmat = sc->sc_vmbus->sc_dmat;
248 
249 	aprint_naive("\n");
250 	aprint_normal(": Hyper-V NetVSC\n");
251 
252 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
253 
254 	if (hvn_nvs_attach(sc)) {
255 		aprint_error_dev(self, "failed to init NVSP\n");
256 		return;
257 	}
258 
259 	if (hvn_rx_ring_create(sc)) {
260 		aprint_error_dev(self, "failed to create Rx ring\n");
261 		goto fail1;
262 	}
263 
264 	if (hvn_tx_ring_create(sc)) {
265 		aprint_error_dev(self, "failed to create Tx ring\n");
266 		goto fail1;
267 	}
268 
269 	ifp->if_softc = sc;
270 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
271 	ifp->if_ioctl = hvn_ioctl;
272 	ifp->if_start = hvn_start;
273 	ifp->if_init = hvn_init;
274 	ifp->if_stop = hvn_stop;
275 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
276 	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
277 	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx;
278 	if (sc->sc_ndisver > NDIS_VERSION_6_30) {
279 		ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
280 		ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
281 		ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
282 		ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
283 	}
284 	if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
285 		sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
286 		sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
287 		sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
288 	}
289 
290 	IFQ_SET_MAXLEN(&ifp->if_snd, HVN_TX_DESC - 1);
291 	IFQ_SET_READY(&ifp->if_snd);
292 
293 	/* Initialize ifmedia structures. */
294 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
295 	ifmedia_init(&sc->sc_media, IFM_IMASK, hvn_media_change,
296 	    hvn_media_status);
297 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
298 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
299 
300 	error = if_initialize(ifp);
301 	if (error) {
302 		aprint_error_dev(self, "if_initialize failed(%d)\n", error);
303 		goto fail2;
304 	}
305 	sc->sc_ipq = if_percpuq_create(ifp);
306 	if_deferred_start_init(ifp, NULL);
307 
308 	if (hvn_rndis_attach(sc)) {
309 		aprint_error_dev(self, "failed to init RNDIS\n");
310 		goto fail1;
311 	}
312 
313 	aprint_normal_dev(self, "NVS %d.%d NDIS %d.%d\n",
314 	    sc->sc_proto >> 16, sc->sc_proto & 0xffff,
315 	    sc->sc_ndisver >> 16, sc->sc_ndisver & 0xffff);
316 
317 	if (hvn_set_capabilities(sc)) {
318 		aprint_error_dev(self, "failed to setup offloading\n");
319 		goto fail2;
320 	}
321 
322 	if (hvn_get_lladdr(sc, enaddr)) {
323 		aprint_error_dev(self,
324 		    "failed to obtain an ethernet address\n");
325 		goto fail2;
326 	}
327 	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));
328 
329 	ether_ifattach(ifp, enaddr);
330 	if_register(ifp);
331 
332 	if (pmf_device_register(self, NULL, NULL))
333 		pmf_class_network_register(self, ifp);
334 	else
335 		aprint_error_dev(self, "couldn't establish power handler\n");
336 
337 	SET(sc->sc_flags, HVN_SCF_ATTACHED);
338 	return;
339 
340 fail2:	hvn_rndis_detach(sc);
341 fail1:	hvn_rx_ring_destroy(sc);
342 	hvn_tx_ring_destroy(sc);
343 	hvn_nvs_detach(sc);
344 }
345 
346 static int
347 hvn_detach(device_t self, int flags)
348 {
349 	struct hvn_softc *sc = device_private(self);
350 	struct ifnet *ifp = SC2IFP(sc);
351 
352 	if (!ISSET(sc->sc_flags, HVN_SCF_ATTACHED))
353 		return 0;
354 
355 	hvn_stop(ifp, 1);
356 
357 	pmf_device_deregister(self);
358 
359 	ether_ifdetach(ifp);
360 	if_detach(ifp);
361 	if_percpuq_destroy(sc->sc_ipq);
362 
363 	hvn_rndis_detach(sc);
364 	hvn_rx_ring_destroy(sc);
365 	hvn_tx_ring_destroy(sc);
366 	hvn_nvs_detach(sc);
367 
368 	return 0;
369 }
370 
371 static int
372 hvn_ioctl(struct ifnet *ifp, u_long command, void * data)
373 {
374 	struct hvn_softc *sc = IFP2SC(ifp);
375 	int s, error = 0;
376 
377 	s = splnet();
378 
379 	switch (command) {
380 	case SIOCSIFFLAGS:
381 		if (ifp->if_flags & IFF_UP) {
382 			if (ifp->if_flags & IFF_RUNNING)
383 				error = ENETRESET;
384 			else {
385 				error = hvn_init(ifp);
386 				if (error)
387 					ifp->if_flags &= ~IFF_UP;
388 			}
389 		} else {
390 			if (ifp->if_flags & IFF_RUNNING)
391 				hvn_stop(ifp, 1);
392 		}
393 		break;
394 	default:
395 		error = ether_ioctl(ifp, command, data);
396 		break;
397 	}
398 
399 	if (error == ENETRESET) {
400 		if (ifp->if_flags & IFF_RUNNING)
401 			hvn_iff(sc);
402 		error = 0;
403 	}
404 
405 	splx(s);
406 
407 	return error;
408 }
409 
410 static int
411 hvn_media_change(struct ifnet *ifp)
412 {
413 
414 	return 0;
415 }
416 
417 static void
418 hvn_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
419 {
420 	struct hvn_softc *sc = IFP2SC(ifp);
421 	int link_state;
422 
423 	link_state = sc->sc_link_state;
424 	hvn_get_link_status(sc);
425 	if (link_state != sc->sc_link_state)
426 		if_link_state_change(ifp, sc->sc_link_state);
427 
428 	ifmr->ifm_status = IFM_AVALID;
429 	ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
430 	if (sc->sc_link_state == LINK_STATE_UP)
431 		ifmr->ifm_status |= IFM_ACTIVE;
432 }
433 
434 static int
435 hvn_iff(struct hvn_softc *sc)
436 {
437 
438 	/* XXX */
439 	sc->sc_promisc = 0;
440 
441 	return 0;
442 }
443 
444 static int
445 hvn_init(struct ifnet *ifp)
446 {
447 	struct hvn_softc *sc = IFP2SC(ifp);
448 	int error;
449 
450 	hvn_stop(ifp, 0);
451 
452 	error = hvn_iff(sc);
453 	if (error)
454 		return error;
455 
456 	error = hvn_rndis_open(sc);
457 	if (error == 0) {
458 		ifp->if_flags |= IFF_RUNNING;
459 		ifp->if_flags &= ~IFF_OACTIVE;
460 	}
461 	return error;
462 }
463 
464 static void
465 hvn_stop(struct ifnet *ifp, int disable)
466 {
467 	struct hvn_softc *sc = IFP2SC(ifp);
468 
469 	hvn_rndis_close(sc);
470 
471 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
472 }
473 
474 static void
475 hvn_start(struct ifnet *ifp)
476 {
477 	struct hvn_softc *sc = IFP2SC(ifp);
478 	struct hvn_tx_desc *txd;
479 	struct mbuf *m;
480 
481 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
482 		return;
483 
484 	for (;;) {
485 		if (!sc->sc_tx_avail) {
486 			/* transient */
487 			ifp->if_flags |= IFF_OACTIVE;
488 			break;
489 		}
490 
491 		IFQ_DEQUEUE(&ifp->if_snd, m);
492 		if (m == NULL)
493 			break;
494 
495 		if (hvn_encap(sc, m, &txd)) {
496 			/* the chain is too large */
497 			ifp->if_oerrors++;
498 			m_freem(m);
499 			continue;
500 		}
501 
502 		bpf_mtap(ifp, m, BPF_D_OUT);
503 
504 		if (hvn_rndis_output(sc, txd)) {
505 			hvn_decap(sc, txd);
506 			ifp->if_oerrors++;
507 			m_freem(m);
508 			continue;
509 		}
510 
511 		sc->sc_tx_next++;
512 	}
513 }
514 
515 static inline char *
516 hvn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize,
517     size_t datalen, uint32_t type)
518 {
519 	struct rndis_pktinfo *pi;
520 	size_t pi_size = sizeof(*pi) + datalen;
521 	char *cp;
522 
523 	KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <=
524 	    pktsize);
525 
526 	cp = (char *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
527 	pi = (struct rndis_pktinfo *)cp;
528 	pi->rm_size = pi_size;
529 	pi->rm_type = type;
530 	pi->rm_pktinfooffset = sizeof(*pi);
531 	pkt->rm_pktinfolen += pi_size;
532 	pkt->rm_dataoffset += pi_size;
533 	pkt->rm_len += pi_size;
534 
535 	return (char *)pi->rm_data;
536 }
537 
538 static int
539 hvn_encap(struct hvn_softc *sc, struct mbuf *m, struct hvn_tx_desc **txd0)
540 {
541 	struct hvn_tx_desc *txd;
542 	struct rndis_packet_msg *pkt;
543 	bus_dma_segment_t *seg;
544 	size_t pktlen;
545 	int i, rv;
546 
547 	do {
548 		txd = &sc->sc_tx_desc[sc->sc_tx_next % HVN_TX_DESC];
549 		sc->sc_tx_next++;
550 	} while (!txd->txd_ready);
551 	txd->txd_ready = 0;
552 
553 	pkt = txd->txd_req;
554 	memset(pkt, 0, HVN_RNDIS_PKT_LEN);
555 	pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
556 	pkt->rm_len = sizeof(*pkt) + m->m_pkthdr.len;
557 	pkt->rm_dataoffset = RNDIS_DATA_OFFSET;
558 	pkt->rm_datalen = m->m_pkthdr.len;
559 	pkt->rm_pktinfooffset = sizeof(*pkt); /* adjusted below */
560 	pkt->rm_pktinfolen = 0;
561 
562 	rv = bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m, BUS_DMA_READ |
563 	    BUS_DMA_NOWAIT);
564 	switch (rv) {
565 	case 0:
566 		break;
567 	case EFBIG:
568 		if (m_defrag(m, M_NOWAIT) == 0 &&
569 		    bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m,
570 		      BUS_DMA_READ | BUS_DMA_NOWAIT) == 0)
571 			break;
572 		/* FALLTHROUGH */
573 	default:
574 		DPRINTF("%s: failed to load mbuf\n", device_xname(sc->sc_dev));
575 		return -1;
576 	}
577 	txd->txd_buf = m;
578 
579 	if (m->m_flags & M_VLANTAG) {
580 		uint32_t vlan;
581 		char *cp;
582 
583 		vlan = NDIS_VLAN_INFO_MAKE(
584 		    EVL_VLANOFTAG(m->m_pkthdr.ether_vtag),
585 		    EVL_PRIOFTAG(m->m_pkthdr.ether_vtag), 0);
586 		cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
587 		    NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
588 		memcpy(cp, &vlan, NDIS_VLAN_INFO_SIZE);
589 	}
590 
591 	if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_UDPv4 |
592 	    M_CSUM_TCPv4)) {
593 		uint32_t csum = NDIS_TXCSUM_INFO_IPV4;
594 		char *cp;
595 
596 		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
597 			csum |= NDIS_TXCSUM_INFO_IPCS;
598 		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
599 			csum |= NDIS_TXCSUM_INFO_TCPCS;
600 		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
601 			csum |= NDIS_TXCSUM_INFO_UDPCS;
602 		cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
603 		    NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
604 		memcpy(cp, &csum, NDIS_TXCSUM_INFO_SIZE);
605 	}
606 
607 	pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
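	/* On the wire, pktinfo offsets are relative to the end of the RNDIS header. */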
608 	pkt->rm_pktinfooffset -= RNDIS_HEADER_OFFSET;
609 
610 	/* Attach an RNDIS message to the first slot */
611 	txd->txd_sgl[0].gpa_page = txd->txd_gpa.gpa_page;
612 	txd->txd_sgl[0].gpa_ofs = txd->txd_gpa.gpa_ofs;
613 	txd->txd_sgl[0].gpa_len = pktlen;
614 	txd->txd_nsge = txd->txd_dmap->dm_nsegs + 1;
615 
616 	for (i = 0; i < txd->txd_dmap->dm_nsegs; i++) {
617 		seg = &txd->txd_dmap->dm_segs[i];
618 		txd->txd_sgl[1 + i].gpa_page = atop(seg->ds_addr);
619 		txd->txd_sgl[1 + i].gpa_ofs = seg->ds_addr & PAGE_MASK;
620 		txd->txd_sgl[1 + i].gpa_len = seg->ds_len;
621 	}
622 
623 	*txd0 = txd;
624 
625 	atomic_dec_uint(&sc->sc_tx_avail);
626 
627 	return 0;
628 }
629 
630 static void
631 hvn_decap(struct hvn_softc *sc, struct hvn_tx_desc *txd)
632 {
633 	struct ifnet *ifp = SC2IFP(sc);
634 
635 	bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap, 0, 0,
636 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
637 	bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
638 	txd->txd_buf = NULL;
639 	txd->txd_nsge = 0;
640 	txd->txd_ready = 1;
641 	atomic_inc_uint(&sc->sc_tx_avail);
642 	ifp->if_flags &= ~IFF_OACTIVE;
643 }
644 
645 static void
646 hvn_txeof(struct hvn_softc *sc, uint64_t tid)
647 {
648 	struct ifnet *ifp = SC2IFP(sc);
649 	struct hvn_tx_desc *txd;
650 	struct mbuf *m;
651 	uint32_t id = tid >> 32;
652 
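	/*
	 * hvn_rndis_output() puts the descriptor id in the upper 32 bits
	 * of the transaction id and leaves the lower 32 bits zero.
	 */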
653 	if ((tid & 0xffffffffU) != 0)
654 		return;
655 
656 	id -= HVN_NVS_CHIM_SIG;
657 	if (id >= HVN_TX_DESC) {
658 		device_printf(sc->sc_dev, "tx packet index too large: %u\n", id);
659 		return;
660 	}
661 
662 	txd = &sc->sc_tx_desc[id];
663 
664 	if ((m = txd->txd_buf) == NULL) {
665 		device_printf(sc->sc_dev, "no mbuf @%u\n", id);
666 		return;
667 	}
668 	txd->txd_buf = NULL;
669 
670 	bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap, 0, 0,
671 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
672 	bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
673 	m_freem(m);
674 	ifp->if_opackets++;
675 
676 	txd->txd_ready = 1;
677 
678 	atomic_inc_uint(&sc->sc_tx_avail);
679 	ifp->if_flags &= ~IFF_OACTIVE;
680 }
681 
682 static int
683 hvn_rx_ring_create(struct hvn_softc *sc)
684 {
685 	struct hvn_nvs_rxbuf_conn cmd;
686 	struct hvn_nvs_rxbuf_conn_resp *rsp;
687 	uint64_t tid;
688 
689 	if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_2)
690 		sc->sc_rx_size = 15 * 1024 * 1024;	/* 15MB */
691 	else
692 		sc->sc_rx_size = 16 * 1024 * 1024; 	/* 16MB */
693 	sc->sc_rx_ring = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_rx_dma,
694 	    sc->sc_rx_size, PAGE_SIZE, PAGE_SIZE, sc->sc_rx_size / PAGE_SIZE);
695 	if (sc->sc_rx_ring == NULL) {
696 		DPRINTF("%s: failed to allocate Rx ring buffer\n",
697 		    device_xname(sc->sc_dev));
698 		return -1;
699 	}
700 	if (vmbus_handle_alloc(sc->sc_chan, &sc->sc_rx_dma, sc->sc_rx_size,
701 	    &sc->sc_rx_hndl)) {
702 		DPRINTF("%s: failed to obtain a PA handle\n",
703 		    device_xname(sc->sc_dev));
704 		goto errout;
705 	}
706 
707 	memset(&cmd, 0, sizeof(cmd));
708 	cmd.nvs_type = HVN_NVS_TYPE_RXBUF_CONN;
709 	cmd.nvs_gpadl = sc->sc_rx_hndl;
710 	cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
711 
712 	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
713 	if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
714 		goto errout;
715 
716 	rsp = (struct hvn_nvs_rxbuf_conn_resp *)&sc->sc_nvsrsp;
717 	if (rsp->nvs_status != HVN_NVS_STATUS_OK) {
718 		DPRINTF("%s: failed to set up the Rx ring\n",
719 		    device_xname(sc->sc_dev));
720 		goto errout;
721 	}
722 	if (rsp->nvs_nsect > 1) {
723 		DPRINTF("%s: invalid number of Rx ring sections: %u\n",
724 		    device_xname(sc->sc_dev), rsp->nvs_nsect);
725 		hvn_rx_ring_destroy(sc);
726 		return -1;
727 	}
728 	return 0;
729 
730  errout:
731 	if (sc->sc_rx_hndl) {
732 		vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
733 		sc->sc_rx_hndl = 0;
734 	}
735 	if (sc->sc_rx_ring) {
736 		kmem_free(sc->sc_rx_ring, sc->sc_rx_size);
737 		sc->sc_rx_ring = NULL;
738 	}
739 	return -1;
740 }
741 
742 static int
743 hvn_rx_ring_destroy(struct hvn_softc *sc)
744 {
745 	struct hvn_nvs_rxbuf_disconn cmd;
746 	uint64_t tid;
747 
748 	if (sc->sc_rx_ring == NULL)
749 		return 0;
750 
751 	memset(&cmd, 0, sizeof(cmd));
752 	cmd.nvs_type = HVN_NVS_TYPE_RXBUF_DISCONN;
753 	cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
754 
755 	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
756 	if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 0))
757 		return -1;
758 
759 	delay(100);
760 
761 	vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
762 
763 	sc->sc_rx_hndl = 0;
764 
765 	kmem_free(sc->sc_rx_ring, sc->sc_rx_size);
766 	sc->sc_rx_ring = NULL;
767 
768 	return 0;
769 }
770 
771 static int
772 hvn_tx_ring_create(struct hvn_softc *sc)
773 {
774 	const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
775 	struct hvn_tx_desc *txd;
776 	bus_dma_segment_t *seg;
777 	size_t msgsize;
778 	int i, rsegs;
779 	paddr_t pa;
780 
781 	msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
782 
783 	/* Allocate memory to store RNDIS messages */
784 	if (bus_dmamem_alloc(sc->sc_dmat, msgsize * HVN_TX_DESC, PAGE_SIZE, 0,
785 	    &sc->sc_tx_mseg, 1, &rsegs, dmaflags)) {
786 		DPRINTF("%s: failed to allocate memory for RNDIS messages\n",
787 		    device_xname(sc->sc_dev));
788 		goto errout;
789 	}
790 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_mseg, 1, msgsize *
791 	    HVN_TX_DESC, (void **)&sc->sc_tx_msgs, dmaflags)) {
792 		DPRINTF("%s: failed to establish mapping for RNDIS messages\n",
793 		    device_xname(sc->sc_dev));
794 		goto errout;
795 	}
796 	memset(sc->sc_tx_msgs, 0, msgsize * HVN_TX_DESC);
797 	if (bus_dmamap_create(sc->sc_dmat, msgsize * HVN_TX_DESC, 1,
798 	    msgsize * HVN_TX_DESC, 0, dmaflags, &sc->sc_tx_rmap)) {
799 		DPRINTF("%s: failed to create map for RNDIS messages\n",
800 		    device_xname(sc->sc_dev));
801 		goto errout;
802 	}
803 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_msgs,
804 	    msgsize * HVN_TX_DESC, NULL, dmaflags)) {
805 		DPRINTF("%s: failed to load map for RNDIS messages\n",
806 		    device_xname(sc->sc_dev));
807 		goto errout;
808 	}
809 
810 	for (i = 0; i < HVN_TX_DESC; i++) {
811 		txd = &sc->sc_tx_desc[i];
812 		if (bus_dmamap_create(sc->sc_dmat, HVN_TX_PKT_SIZE,
813 		    HVN_TX_FRAGS, HVN_TX_FRAG_SIZE, PAGE_SIZE, dmaflags,
814 		    &txd->txd_dmap)) {
815 			DPRINTF("%s: failed to create map for TX descriptors\n",
816 			    device_xname(sc->sc_dev));
817 			goto errout;
818 		}
819 		seg = &sc->sc_tx_rmap->dm_segs[0];
820 		pa = seg->ds_addr + (msgsize * i);
821 		txd->txd_gpa.gpa_page = atop(pa);
822 		txd->txd_gpa.gpa_ofs = pa & PAGE_MASK;
823 		txd->txd_gpa.gpa_len = msgsize;
824 		txd->txd_req = (void *)(sc->sc_tx_msgs + (msgsize * i));
825 		txd->txd_id = i + HVN_NVS_CHIM_SIG;
826 		txd->txd_ready = 1;
827 	}
828 	sc->sc_tx_avail = HVN_TX_DESC;
829 
830 	return 0;
831 
832  errout:
833 	hvn_tx_ring_destroy(sc);
834 	return -1;
835 }
836 
837 static void
838 hvn_tx_ring_destroy(struct hvn_softc *sc)
839 {
840 	struct hvn_tx_desc *txd;
841 	int i;
842 
843 	for (i = 0; i < HVN_TX_DESC; i++) {
844 		txd = &sc->sc_tx_desc[i];
845 		if (txd->txd_dmap == NULL)
846 			continue;
847 		bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap, 0, 0,
848 		    BUS_DMASYNC_POSTWRITE);
849 		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
850 		bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmap);
851 		txd->txd_dmap = NULL;
852 		if (txd->txd_buf == NULL)
853 			continue;
854 		m_free(txd->txd_buf);
855 		txd->txd_buf = NULL;
856 	}
857 	if (sc->sc_tx_rmap) {
858 		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
859 		    BUS_DMASYNC_POSTWRITE);
860 		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
861 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
862 	}
863 	if (sc->sc_tx_msgs) {
864 		size_t msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
865 
866 		bus_dmamem_unmap(sc->sc_dmat, sc->sc_tx_msgs,
867 		    msgsize * HVN_TX_DESC);
868 		bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_mseg, 1);
869 	}
870 	sc->sc_tx_rmap = NULL;
871 	sc->sc_tx_msgs = NULL;
872 }
873 
874 static int
875 hvn_get_lladdr(struct hvn_softc *sc, uint8_t *enaddr)
876 {
877 	size_t addrlen = ETHER_ADDR_LEN;
878 	int rv;
879 
880 	rv = hvn_rndis_query(sc, OID_802_3_PERMANENT_ADDRESS, enaddr, &addrlen);
881 	if (rv == 0 && addrlen != ETHER_ADDR_LEN)
882 		rv = -1;
883 	return rv;
884 }
885 
886 static void
887 hvn_get_link_status(struct hvn_softc *sc)
888 {
889 	uint32_t state;
890 	size_t len = sizeof(state);
891 
892 	if (hvn_rndis_query(sc, OID_GEN_MEDIA_CONNECT_STATUS,
893 	    &state, &len) == 0)
894 		sc->sc_link_state = (state == NDIS_MEDIA_STATE_CONNECTED) ?
895 		    LINK_STATE_UP : LINK_STATE_DOWN;
896 }
897 
898 static int
899 hvn_nvs_attach(struct hvn_softc *sc)
900 {
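	/* NVS protocol versions to negotiate, tried newest first. */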
901 	static const uint32_t protos[] = {
902 		HVN_NVS_PROTO_VERSION_5,
903 		HVN_NVS_PROTO_VERSION_4,
904 		HVN_NVS_PROTO_VERSION_2,
905 		HVN_NVS_PROTO_VERSION_1
906 	};
907 	struct hvn_nvs_init cmd;
908 	struct hvn_nvs_init_resp *rsp;
909 	struct hvn_nvs_ndis_init ncmd;
910 	struct hvn_nvs_ndis_conf ccmd;
911 	uint32_t ndisver, ringsize;
912 	uint64_t tid;
913 	int i;
914 
915 	sc->sc_nvsbuf = kmem_zalloc(HVN_NVS_BUFSIZE, KM_SLEEP);
916 
917 	/* We need to be able to fit all RNDIS control and data messages */
918 	ringsize = HVN_RNDIS_CTLREQS *
919 	    (sizeof(struct hvn_nvs_rndis) + sizeof(struct vmbus_gpa)) +
920 	    HVN_TX_DESC * (sizeof(struct hvn_nvs_rndis) +
921 	    (HVN_TX_FRAGS + 1) * sizeof(struct vmbus_gpa));
922 
923 	sc->sc_chan->ch_flags &= ~CHF_BATCHED;
924 
925 	if (vmbus_channel_setdeferred(sc->sc_chan, device_xname(sc->sc_dev))) {
926 		aprint_error_dev(sc->sc_dev,
927 		    "failed to create the interrupt thread\n");
928 		return -1;
929 	}
930 
931 	/* Associate our interrupt handler with the channel */
932 	if (vmbus_channel_open(sc->sc_chan, ringsize, NULL, 0,
933 	    hvn_nvs_intr, sc)) {
934 		DPRINTF("%s: failed to open channel\n",
935 		    device_xname(sc->sc_dev));
936 		kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
937 		return -1;
938 	}
939 
940 	memset(&cmd, 0, sizeof(cmd));
941 	cmd.nvs_type = HVN_NVS_TYPE_INIT;
942 	for (i = 0; i < __arraycount(protos); i++) {
943 		cmd.nvs_ver_min = cmd.nvs_ver_max = protos[i];
944 		tid = atomic_inc_uint_nv(&sc->sc_nvstid);
945 		if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
946 			return -1;
947 
948 		rsp = (struct hvn_nvs_init_resp *)&sc->sc_nvsrsp;
949 		if (rsp->nvs_status == HVN_NVS_STATUS_OK) {
950 			sc->sc_proto = protos[i];
951 			break;
952 		}
953 	}
954 	if (i == __arraycount(protos)) {
955 		DPRINTF("%s: failed to negotiate NVSP version\n",
956 		    device_xname(sc->sc_dev));
957 		return -1;
958 	}
959 
960 	if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
961 		memset(&ccmd, 0, sizeof(ccmd));
962 		ccmd.nvs_type = HVN_NVS_TYPE_NDIS_CONF;
963 		ccmd.nvs_mtu = HVN_MAXMTU;
964 		ccmd.nvs_caps = HVN_NVS_NDIS_CONF_VLAN;
965 
966 		tid = atomic_inc_uint_nv(&sc->sc_nvstid);
967 		if (hvn_nvs_cmd(sc, &ccmd, sizeof(ccmd), tid, 100))
968 			return -1;
969 	}
970 
971 	memset(&ncmd, 0, sizeof(ncmd));
972 	ncmd.nvs_type = HVN_NVS_TYPE_NDIS_INIT;
973 	if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_4)
974 		ndisver = NDIS_VERSION_6_1;
975 	else
976 		ndisver = NDIS_VERSION_6_30;
977 	ncmd.nvs_ndis_major = (ndisver & 0xffff0000) >> 16;
978 	ncmd.nvs_ndis_minor = ndisver & 0x0000ffff;
979 
980 	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
981 	if (hvn_nvs_cmd(sc, &ncmd, sizeof(ncmd), tid, 100))
982 		return -1;
983 
984 	sc->sc_ndisver = ndisver;
985 
986 	return 0;
987 }
988 
989 static void
990 hvn_nvs_intr(void *arg)
991 {
992 	struct hvn_softc *sc = arg;
993 	struct ifnet *ifp = SC2IFP(sc);
994 	struct vmbus_chanpkt_hdr *cph;
995 	const struct hvn_nvs_hdr *nvs;
996 	uint64_t rid;
997 	uint32_t rlen;
998 	int rv;
999 	bool dotx = false;
1000 
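	/*
	 * Besides running as the channel interrupt handler, this is also
	 * called directly by hvn_nvs_cmd() and hvn_rndis_cmd() to poll
	 * for completions.
	 */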
1001 	for (;;) {
1002 		rv = vmbus_channel_recv(sc->sc_chan, sc->sc_nvsbuf,
1003 		    HVN_NVS_BUFSIZE, &rlen, &rid, 1);
1004 		if (rv != 0 || rlen == 0) {
1005 			if (rv != EAGAIN)
1006 				device_printf(sc->sc_dev,
1007 				    "failed to receive an NVSP packet\n");
1008 			break;
1009 		}
1010 		cph = (struct vmbus_chanpkt_hdr *)sc->sc_nvsbuf;
1011 		nvs = (const struct hvn_nvs_hdr *)VMBUS_CHANPKT_CONST_DATA(cph);
1012 
1013 		if (cph->cph_type == VMBUS_CHANPKT_TYPE_COMP) {
1014 			switch (nvs->nvs_type) {
1015 			case HVN_NVS_TYPE_INIT_RESP:
1016 			case HVN_NVS_TYPE_RXBUF_CONNRESP:
1017 			case HVN_NVS_TYPE_CHIM_CONNRESP:
1018 			case HVN_NVS_TYPE_SUBCH_RESP:
1019 				/* copy the response back */
1020 				memcpy(&sc->sc_nvsrsp, nvs, HVN_NVS_MSGSIZE);
1021 				sc->sc_nvsdone = 1;
1022 				wakeup(&sc->sc_nvsrsp);
1023 				break;
1024 			case HVN_NVS_TYPE_RNDIS_ACK:
1025 				dotx = true;
1026 				hvn_txeof(sc, cph->cph_tid);
1027 				break;
1028 			default:
1029 				device_printf(sc->sc_dev,
1030 				    "unhandled NVSP packet type %u "
1031 				    "on completion\n", nvs->nvs_type);
1032 				break;
1033 			}
1034 		} else if (cph->cph_type == VMBUS_CHANPKT_TYPE_RXBUF) {
1035 			switch (nvs->nvs_type) {
1036 			case HVN_NVS_TYPE_RNDIS:
1037 				hvn_rndis_input(sc, cph->cph_tid, cph);
1038 				break;
1039 			default:
1040 				device_printf(sc->sc_dev,
1041 				    "unhandled NVSP packet type %u "
1042 				    "on receive\n", nvs->nvs_type);
1043 				break;
1044 			}
1045 		} else
1046 			device_printf(sc->sc_dev,
1047 			    "unknown NVSP packet type %u\n", cph->cph_type);
1048 	}
1049 
1050 	if (dotx)
1051 		if_schedule_deferred_start(ifp);
1052 }
1053 
1054 static int
1055 hvn_nvs_cmd(struct hvn_softc *sc, void *cmd, size_t cmdsize, uint64_t tid,
1056     int timo)
1057 {
1058 	struct hvn_nvs_hdr *hdr = cmd;
1059 	int tries = 10;
1060 	int rv, s;
1061 
1062 	sc->sc_nvsdone = 0;
1063 
1064 	do {
1065 		rv = vmbus_channel_send(sc->sc_chan, cmd, cmdsize,
1066 		    tid, VMBUS_CHANPKT_TYPE_INBAND,
1067 		    timo ? VMBUS_CHANPKT_FLAG_RC : 0);
1068 		if (rv == EAGAIN) {
1069 			if (cold)
1070 				delay(1000);
1071 			else
1072 				tsleep(cmd, PRIBIO, "nvsout", 1);
1073 		} else if (rv) {
1074 			DPRINTF("%s: NVSP operation %u send error %d\n",
1075 			    device_xname(sc->sc_dev), hdr->nvs_type, rv);
1076 			return rv;
1077 		}
1078 	} while (rv != 0 && --tries > 0);
1079 
1080 	if (tries == 0 && rv != 0) {
1081 		device_printf(sc->sc_dev,
1082 		    "NVSP operation %u send error %d\n", hdr->nvs_type, rv);
1083 		return rv;
1084 	}
1085 
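	/* A zero timeout means fire-and-forget: don't wait for a completion. */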
1086 	if (timo == 0)
1087 		return 0;
1088 
1089 	do {
1090 		if (cold)
1091 			delay(1000);
1092 		else
1093 			tsleep(sc, PRIBIO | PCATCH, "nvscmd", 1);
1094 		s = splnet();
1095 		hvn_nvs_intr(sc);
1096 		splx(s);
1097 	} while (--timo > 0 && sc->sc_nvsdone != 1);
1098 
1099 	if (timo == 0 && sc->sc_nvsdone != 1) {
1100 		device_printf(sc->sc_dev, "NVSP operation %u timed out\n",
1101 		    hdr->nvs_type);
1102 		return ETIMEDOUT;
1103 	}
1104 	return 0;
1105 }
1106 
1107 static int
1108 hvn_nvs_ack(struct hvn_softc *sc, uint64_t tid)
1109 {
1110 	struct hvn_nvs_rndis_ack cmd;
1111 	int tries = 5;
1112 	int rv;
1113 
1114 	cmd.nvs_type = HVN_NVS_TYPE_RNDIS_ACK;
1115 	cmd.nvs_status = HVN_NVS_STATUS_OK;
1116 	do {
1117 		rv = vmbus_channel_send(sc->sc_chan, &cmd, sizeof(cmd),
1118 		    tid, VMBUS_CHANPKT_TYPE_COMP, 0);
1119 		if (rv == EAGAIN)
1120 			delay(10);
1121 		else if (rv) {
1122 			DPRINTF("%s: NVSP acknowledgement error %d\n",
1123 			    device_xname(sc->sc_dev), rv);
1124 			return rv;
1125 		}
1126 	} while (rv != 0 && --tries > 0);
1127 	return rv;
1128 }
1129 
1130 static void
1131 hvn_nvs_detach(struct hvn_softc *sc)
1132 {
1133 
1134 	if (vmbus_channel_close(sc->sc_chan) == 0) {
1135 		kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
1136 		sc->sc_nvsbuf = NULL;
1137 	}
1138 }
1139 
1140 static inline struct rndis_cmd *
1141 hvn_alloc_cmd(struct hvn_softc *sc)
1142 {
1143 	struct rndis_cmd *rc;
1144 
1145 	mutex_enter(&sc->sc_cntl_fqlck);
1146 	while ((rc = TAILQ_FIRST(&sc->sc_cntl_fq)) == NULL)
1147 		/* XXX use condvar(9) instead of mtsleep */
1148 		mtsleep(&sc->sc_cntl_fq, PRIBIO, "nvsalloc", 1,
1149 		    &sc->sc_cntl_fqlck);
1150 	TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1151 	mutex_exit(&sc->sc_cntl_fqlck);
1152 	return rc;
1153 }
1154 
1155 static inline void
1156 hvn_submit_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1157 {
1158 
1159 	mutex_enter(&sc->sc_cntl_sqlck);
1160 	TAILQ_INSERT_TAIL(&sc->sc_cntl_sq, rc, rc_entry);
1161 	mutex_exit(&sc->sc_cntl_sqlck);
1162 }
1163 
1164 static inline struct rndis_cmd *
1165 hvn_complete_cmd(struct hvn_softc *sc, uint32_t id)
1166 {
1167 	struct rndis_cmd *rc;
1168 
1169 	mutex_enter(&sc->sc_cntl_sqlck);
1170 	TAILQ_FOREACH(rc, &sc->sc_cntl_sq, rc_entry) {
1171 		if (rc->rc_id == id) {
1172 			TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1173 			break;
1174 		}
1175 	}
1176 	mutex_exit(&sc->sc_cntl_sqlck);
1177 	if (rc != NULL) {
1178 		mutex_enter(&sc->sc_cntl_cqlck);
1179 		TAILQ_INSERT_TAIL(&sc->sc_cntl_cq, rc, rc_entry);
1180 		mutex_exit(&sc->sc_cntl_cqlck);
1181 	}
1182 	return rc;
1183 }
1184 
1185 static inline void
1186 hvn_release_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1187 {
1188 
1189 	mutex_enter(&sc->sc_cntl_cqlck);
1190 	TAILQ_REMOVE(&sc->sc_cntl_cq, rc, rc_entry);
1191 	mutex_exit(&sc->sc_cntl_cqlck);
1192 }
1193 
1194 static inline int
1195 hvn_rollback_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1196 {
1197 	struct rndis_cmd *rn;
1198 
1199 	mutex_enter(&sc->sc_cntl_sqlck);
1200 	TAILQ_FOREACH(rn, &sc->sc_cntl_sq, rc_entry) {
1201 		if (rn == rc) {
1202 			TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1203 			mutex_exit(&sc->sc_cntl_sqlck);
1204 			return 0;
1205 		}
1206 	}
1207 	mutex_exit(&sc->sc_cntl_sqlck);
1208 	return -1;
1209 }
1210 
1211 static inline void
1212 hvn_free_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1213 {
1214 
1215 	memset(rc->rc_req, 0, sizeof(struct rndis_packet_msg));
1216 	memset(&rc->rc_cmp, 0, sizeof(rc->rc_cmp));
1217 	memset(&rc->rc_msg, 0, sizeof(rc->rc_msg));
1218 	mutex_enter(&sc->sc_cntl_fqlck);
1219 	TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1220 	mutex_exit(&sc->sc_cntl_fqlck);
1221 	wakeup(&sc->sc_cntl_fq);
1222 }
1223 
1224 static int
1225 hvn_rndis_attach(struct hvn_softc *sc)
1226 {
1227 	const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
1228 	struct rndis_init_req *req;
1229 	struct rndis_init_comp *cmp;
1230 	struct rndis_cmd *rc;
1231 	int i, rv;
1232 
1233 	/* RNDIS control message queues */
1234 	TAILQ_INIT(&sc->sc_cntl_sq);
1235 	TAILQ_INIT(&sc->sc_cntl_cq);
1236 	TAILQ_INIT(&sc->sc_cntl_fq);
1237 	mutex_init(&sc->sc_cntl_sqlck, MUTEX_DEFAULT, IPL_NET);
1238 	mutex_init(&sc->sc_cntl_cqlck, MUTEX_DEFAULT, IPL_NET);
1239 	mutex_init(&sc->sc_cntl_fqlck, MUTEX_DEFAULT, IPL_NET);
1240 
1241 	for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1242 		rc = &sc->sc_cntl_msgs[i];
1243 		if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1244 		    dmaflags, &rc->rc_dmap)) {
1245 			DPRINTF("%s: failed to create RNDIS command map\n",
1246 			    device_xname(sc->sc_dev));
1247 			goto errout;
1248 		}
1249 		if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1250 		    0, &rc->rc_segs, 1, &rc->rc_nsegs, dmaflags)) {
1251 			DPRINTF("%s: failed to allocate RNDIS command\n",
1252 			    device_xname(sc->sc_dev));
1253 			bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1254 			goto errout;
1255 		}
1256 		if (bus_dmamem_map(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs,
1257 		    PAGE_SIZE, (void **)&rc->rc_req, dmaflags)) {
1258 			DPRINTF("%s: failed to map RNDIS command\n",
1259 			    device_xname(sc->sc_dev));
1260 			bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1261 			    rc->rc_nsegs);
1262 			bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1263 			goto errout;
1264 		}
1265 		memset(rc->rc_req, 0, PAGE_SIZE);
1266 		if (bus_dmamap_load(sc->sc_dmat, rc->rc_dmap, rc->rc_req,
1267 		    PAGE_SIZE, NULL, dmaflags)) {
1268 			DPRINTF("%s: failed to load RNDIS command map\n",
1269 			    device_xname(sc->sc_dev));
1270 			bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1271 			    rc->rc_nsegs);
1272 			bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1273 			goto errout;
1274 		}
1275 		rc->rc_gpa = atop(rc->rc_dmap->dm_segs[0].ds_addr);
1276 		TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1277 	}
1278 
1279 	rc = hvn_alloc_cmd(sc);
1280 
1281 	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1282 	    BUS_DMASYNC_PREREAD);
1283 
1284 	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1285 
1286 	req = rc->rc_req;
1287 	req->rm_type = REMOTE_NDIS_INITIALIZE_MSG;
1288 	req->rm_len = sizeof(*req);
1289 	req->rm_rid = rc->rc_id;
1290 	req->rm_ver_major = RNDIS_VERSION_MAJOR;
1291 	req->rm_ver_minor = RNDIS_VERSION_MINOR;
1292 	req->rm_max_xfersz = HVN_RNDIS_XFER_SIZE;
1293 
1294 	rc->rc_cmplen = sizeof(*cmp);
1295 
1296 	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1297 	    BUS_DMASYNC_PREWRITE);
1298 
1299 	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1300 		DPRINTF("%s: INITIALIZE_MSG failed, error %d\n",
1301 		    device_xname(sc->sc_dev), rv);
1302 		hvn_free_cmd(sc, rc);
1303 		goto errout;
1304 	}
1305 	cmp = (struct rndis_init_comp *)&rc->rc_cmp;
1306 	if (cmp->rm_status != RNDIS_STATUS_SUCCESS) {
1307 		DPRINTF("%s: failed to init RNDIS, error %#x\n",
1308 		    device_xname(sc->sc_dev), cmp->rm_status);
1309 		hvn_free_cmd(sc, rc);
1310 		goto errout;
1311 	}
1312 
1313 	hvn_free_cmd(sc, rc);
1314 
1315 	/* Initialize RNDIS Data command */
1316 	memset(&sc->sc_data_msg, 0, sizeof(sc->sc_data_msg));
1317 	sc->sc_data_msg.nvs_type = HVN_NVS_TYPE_RNDIS;
1318 	sc->sc_data_msg.nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_DATA;
1319 	sc->sc_data_msg.nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1320 
1321 	return 0;
1322 
1323 errout:
1324 	for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1325 		rc = &sc->sc_cntl_msgs[i];
1326 		if (rc->rc_req == NULL)
1327 			continue;
1328 		TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1329 		bus_dmamem_free(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs);
1330 		rc->rc_req = NULL;
1331 		bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1332 	}
1333 	return -1;
1334 }
1335 
1336 static int
1337 hvn_set_capabilities(struct hvn_softc *sc)
1338 {
1339 	struct ndis_offload_params params;
1340 	size_t len = sizeof(params);
1341 
1342 	memset(&params, 0, sizeof(params));
1343 
1344 	params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
1345 	if (sc->sc_ndisver < NDIS_VERSION_6_30) {
1346 		params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
1347 		len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
1348 	} else {
1349 		params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
1350 		len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
1351 	}
1352 
1353 	params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TXRX;
1354 	params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1355 	params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1356 	if (sc->sc_ndisver >= NDIS_VERSION_6_30) {
1357 		params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1358 		params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1359 	}
1360 
1361 	return hvn_rndis_set(sc, OID_TCP_OFFLOAD_PARAMETERS, &params, len);
1362 }
1363 
1364 static int
1365 hvn_rndis_cmd(struct hvn_softc *sc, struct rndis_cmd *rc, int timo)
1366 {
1367 	struct hvn_nvs_rndis *msg = &rc->rc_msg;
1368 	struct rndis_msghdr *hdr = rc->rc_req;
1369 	struct vmbus_gpa sgl[1];
1370 	int tries = 10;
1371 	int rv, s;
1372 
1373 	KASSERT(timo > 0);
1374 
1375 	msg->nvs_type = HVN_NVS_TYPE_RNDIS;
1376 	msg->nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_CTRL;
1377 	msg->nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1378 
1379 	sgl[0].gpa_page = rc->rc_gpa;
1380 	sgl[0].gpa_len = hdr->rm_len;
1381 	sgl[0].gpa_ofs = 0;
1382 
1383 	rc->rc_done = 0;
1384 
1385 	hvn_submit_cmd(sc, rc);
1386 
1387 	do {
1388 		rv = vmbus_channel_send_sgl(sc->sc_chan, sgl, 1, &rc->rc_msg,
1389 		    sizeof(*msg), rc->rc_id);
1390 		if (rv == EAGAIN) {
1391 			if (cold)
1392 				delay(1000);
1393 			else
1394 				tsleep(rc, PRIBIO, "rndisout", 1);
1395 		} else if (rv) {
1396 			DPRINTF("%s: RNDIS operation %u send error %d\n",
1397 			    device_xname(sc->sc_dev), hdr->rm_type, rv);
1398 			hvn_rollback_cmd(sc, rc);
1399 			return rv;
1400 		}
1401 	} while (rv != 0 && --tries > 0);
1402 
1403 	if (tries == 0 && rv != 0) {
1404 		device_printf(sc->sc_dev,
1405 		    "RNDIS operation %u send error %d\n", hdr->rm_type, rv);
1406 		return rv;
1407 	}
1408 
1409 	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1410 	    BUS_DMASYNC_POSTWRITE);
1411 
1412 	do {
1413 		if (cold)
1414 			delay(1000);
1415 		else
1416 			tsleep(rc, PRIBIO | PCATCH, "rndiscmd", 1);
1417 		s = splnet();
1418 		hvn_nvs_intr(sc);
1419 		splx(s);
1420 	} while (--timo > 0 && rc->rc_done != 1);
1421 
1422 	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1423 	    BUS_DMASYNC_POSTREAD);
1424 
1425 	if (rc->rc_done != 1) {
1426 		rv = timo == 0 ? ETIMEDOUT : EINTR;
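		/*
		 * If the request is no longer on the submission queue it
		 * completed while we were giving up; reclaim it from the
		 * completion queue and report success.
		 */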
1427 		if (hvn_rollback_cmd(sc, rc)) {
1428 			hvn_release_cmd(sc, rc);
1429 			rv = 0;
1430 		} else if (rv == ETIMEDOUT) {
1431 			device_printf(sc->sc_dev,
1432 			    "RNDIS operation %u timed out\n", hdr->rm_type);
1433 		}
1434 		return rv;
1435 	}
1436 
1437 	hvn_release_cmd(sc, rc);
1438 	return 0;
1439 }
1440 
1441 static void
1442 hvn_rndis_input(struct hvn_softc *sc, uint64_t tid, void *arg)
1443 {
1444 	struct vmbus_chanpkt_prplist *cp = arg;
1445 	uint32_t off, len, type;
1446 	int i;
1447 
1448 	if (sc->sc_rx_ring == NULL) {
1449 		DPRINTF("%s: invalid rx ring\n", device_xname(sc->sc_dev));
1450 		return;
1451 	}
1452 
1453 	for (i = 0; i < cp->cp_range_cnt; i++) {
1454 		off = cp->cp_range[i].gpa_ofs;
1455 		len = cp->cp_range[i].gpa_len;
1456 
1457 		KASSERT(off + len <= sc->sc_rx_size);
1458 		KASSERT(len >= RNDIS_HEADER_OFFSET + 4);
1459 
1460 		memcpy(&type, sc->sc_rx_ring + off, sizeof(type));
1461 		switch (type) {
1462 		/* data message */
1463 		case REMOTE_NDIS_PACKET_MSG:
1464 			hvn_rxeof(sc, sc->sc_rx_ring + off, len);
1465 			break;
1466 		/* completion messages */
1467 		case REMOTE_NDIS_INITIALIZE_CMPLT:
1468 		case REMOTE_NDIS_QUERY_CMPLT:
1469 		case REMOTE_NDIS_SET_CMPLT:
1470 		case REMOTE_NDIS_RESET_CMPLT:
1471 		case REMOTE_NDIS_KEEPALIVE_CMPLT:
1472 			hvn_rndis_complete(sc, sc->sc_rx_ring + off, len);
1473 			break;
1474 		/* notification message */
1475 		case REMOTE_NDIS_INDICATE_STATUS_MSG:
1476 			hvn_rndis_status(sc, sc->sc_rx_ring + off, len);
1477 			break;
1478 		default:
1479 			device_printf(sc->sc_dev,
1480 			    "unhandled RNDIS message type %u\n", type);
1481 			break;
1482 		}
1483 	}
1484 
1485 	hvn_nvs_ack(sc, tid);
1486 }
1487 
1488 static inline struct mbuf *
1489 hvn_devget(struct hvn_softc *sc, void *buf, uint32_t len)
1490 {
1491 	struct ifnet *ifp = SC2IFP(sc);
1492 	struct mbuf *m;
1493 	size_t size = len + ETHER_ALIGN;
1494 
1495 	MGETHDR(m, M_NOWAIT, MT_DATA);
1496 	if (m == NULL)
1497 		return NULL;
1498 
1499 	if (size > MHLEN) {
1500 		if (size <= MCLBYTES)
1501 			MCLGET(m, M_NOWAIT);
1502 		else
1503 			MEXTMALLOC(m, size, M_NOWAIT);
1504 		if ((m->m_flags & M_EXT) == 0) {
1505 			m_freem(m);
1506 			return NULL;
1507 		}
1508 	}
1509 
1510 	m->m_len = m->m_pkthdr.len = size;
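	/* Trim ETHER_ALIGN bytes off the front so the IP header is aligned. */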
1511 	m_adj(m, ETHER_ALIGN);
1512 	m_copyback(m, 0, len, buf);
1513 	m_set_rcvif(m, ifp);
1514 	return m;
1515 }
1516 
1517 static void
1518 hvn_rxeof(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1519 {
1520 	struct ifnet *ifp = SC2IFP(sc);
1521 	struct rndis_packet_msg *pkt;
1522 	struct rndis_pktinfo *pi;
1523 	uint32_t csum, vlan;
1524 	struct mbuf *m;
1525 
1526 	if (!(ifp->if_flags & IFF_RUNNING))
1527 		return;
1528 
1529 	if (len < sizeof(*pkt)) {
1530 		device_printf(sc->sc_dev, "data packet too short: %u\n",
1531 		    len);
1532 		return;
1533 	}
1534 
1535 	pkt = (struct rndis_packet_msg *)buf;
1536 	if (pkt->rm_dataoffset + pkt->rm_datalen > len) {
1537 		device_printf(sc->sc_dev,
1538 		    "data packet out of bounds: %u@%u\n", pkt->rm_datalen,
1539 		    pkt->rm_dataoffset);
1540 		return;
1541 	}
1542 
1543 	if ((m = hvn_devget(sc, buf + RNDIS_HEADER_OFFSET + pkt->rm_dataoffset,
1544 	    pkt->rm_datalen)) == NULL) {
1545 		ifp->if_ierrors++;
1546 		return;
1547 	}
1548 
1549 	if (pkt->rm_pktinfooffset + pkt->rm_pktinfolen > len) {
1550 		device_printf(sc->sc_dev,
1551 		    "pktinfo is out of bounds: %u@%u vs %u\n",
1552 		    pkt->rm_pktinfolen, pkt->rm_pktinfooffset, len);
1553 		goto done;
1554 	}
1555 
1556 	pi = (struct rndis_pktinfo *)(buf + RNDIS_HEADER_OFFSET +
1557 	    pkt->rm_pktinfooffset);
1558 	while (pkt->rm_pktinfolen > 0) {
1559 		if (pi->rm_size > pkt->rm_pktinfolen) {
1560 			device_printf(sc->sc_dev,
1561 			    "invalid pktinfo size: %u/%u\n", pi->rm_size,
1562 			    pkt->rm_pktinfolen);
1563 			break;
1564 		}
1565 
1566 		switch (pi->rm_type) {
1567 		case NDIS_PKTINFO_TYPE_CSUM:
1568 			memcpy(&csum, pi->rm_data, sizeof(csum));
1569 			if (csum & NDIS_RXCSUM_INFO_IPCS_OK)
1570 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1571 			if (csum & NDIS_RXCSUM_INFO_TCPCS_OK)
1572 				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1573 			if (csum & NDIS_RXCSUM_INFO_UDPCS_OK)
1574 				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1575 			break;
1576 		case NDIS_PKTINFO_TYPE_VLAN:
1577 			memcpy(&vlan, pi->rm_data, sizeof(vlan));
1578 			if (vlan != 0xffffffff) {
1579 				m->m_pkthdr.ether_vtag =
1580 				    NDIS_VLAN_INFO_ID(vlan) |
1581 				    (NDIS_VLAN_INFO_PRI(vlan) << EVL_PRIO_BITS);
1582 				m->m_flags |= M_VLANTAG;
1583 			}
1584 			break;
1585 		default:
1586 			DPRINTF("%s: unhandled pktinfo type %u\n",
1587 			    device_xname(sc->sc_dev), pi->rm_type);
1588 			break;
1589 		}
1590 
1591 		pkt->rm_pktinfolen -= pi->rm_size;
1592 		pi = (struct rndis_pktinfo *)((char *)pi + pi->rm_size);
1593 	}
1594 
1595  done:
1596 	if_percpuq_enqueue(sc->sc_ipq, m);
1597 }
1598 
1599 static void
1600 hvn_rndis_complete(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1601 {
1602 	struct rndis_cmd *rc;
1603 	uint32_t id;
1604 
1605 	memcpy(&id, buf + RNDIS_HEADER_OFFSET, sizeof(id));
1606 	if ((rc = hvn_complete_cmd(sc, id)) != NULL) {
1607 		if (len < rc->rc_cmplen)
1608 			device_printf(sc->sc_dev,
1609 			    "RNDIS response %u too short: %u\n", id, len);
1610 		else
1611 			memcpy(&rc->rc_cmp, buf, rc->rc_cmplen);
1612 		if (len > rc->rc_cmplen &&
1613 		    len - rc->rc_cmplen > HVN_RNDIS_BUFSIZE)
1614 			device_printf(sc->sc_dev,
1615 			    "RNDIS response %u too large: %u\n", id, len);
1616 		else if (len > rc->rc_cmplen)
1617 			memcpy(&rc->rc_cmpbuf, buf + rc->rc_cmplen,
1618 			    len - rc->rc_cmplen);
1619 		rc->rc_done = 1;
1620 		wakeup(rc);
1621 	} else {
1622 		DPRINTF("%s: failed to complete RNDIS request id %u\n",
1623 		    device_xname(sc->sc_dev), id);
1624 	}
1625 }
1626 
1627 static int
1628 hvn_rndis_output(struct hvn_softc *sc, struct hvn_tx_desc *txd)
1629 {
1630 	uint64_t rid = (uint64_t)txd->txd_id << 32;
1631 	int rv;
1632 
1633 	rv = vmbus_channel_send_sgl(sc->sc_chan, txd->txd_sgl, txd->txd_nsge,
1634 	    &sc->sc_data_msg, sizeof(sc->sc_data_msg), rid);
1635 	if (rv) {
1636 		DPRINTF("%s: RNDIS data send error %d\n",
1637 		    device_xname(sc->sc_dev), rv);
1638 		return rv;
1639 	}
1640 	return 0;
1641 }
1642 
1643 static void
1644 hvn_rndis_status(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1645 {
1646 	struct ifnet *ifp = SC2IFP(sc);
1647 	uint32_t status;
1648 	int link_state = sc->sc_link_state;
1649 
1650 	memcpy(&status, buf + RNDIS_HEADER_OFFSET, sizeof(status));
1651 	switch (status) {
1652 	case RNDIS_STATUS_MEDIA_CONNECT:
1653 		sc->sc_link_state = LINK_STATE_UP;
1654 		break;
1655 	case RNDIS_STATUS_MEDIA_DISCONNECT:
1656 		sc->sc_link_state = LINK_STATE_DOWN;
1657 		break;
1658 	/* Ignore these */
1659 	case RNDIS_STATUS_OFFLOAD_CURRENT_CONFIG:
1660 		return;
1661 	default:
1662 		DPRINTF("%s: unhandled status %#x\n", device_xname(sc->sc_dev),
1663 		    status);
1664 		return;
1665 	}
1666 	if (link_state != sc->sc_link_state)
1667 		if_link_state_change(ifp, sc->sc_link_state);
1668 }
1669 
1670 static int
1671 hvn_rndis_query(struct hvn_softc *sc, uint32_t oid, void *res, size_t *length)
1672 {
1673 	struct rndis_cmd *rc;
1674 	struct rndis_query_req *req;
1675 	struct rndis_query_comp *cmp;
1676 	size_t olength = *length;
1677 	int rv;
1678 
1679 	rc = hvn_alloc_cmd(sc);
1680 
1681 	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1682 	    BUS_DMASYNC_PREREAD);
1683 
1684 	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1685 
1686 	req = rc->rc_req;
1687 	req->rm_type = REMOTE_NDIS_QUERY_MSG;
1688 	req->rm_len = sizeof(*req);
1689 	req->rm_rid = rc->rc_id;
1690 	req->rm_oid = oid;
1691 	req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1692 
1693 	rc->rc_cmplen = sizeof(*cmp);
1694 
1695 	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1696 	    BUS_DMASYNC_PREWRITE);
1697 
1698 	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1699 		DPRINTF("%s: QUERY_MSG failed, error %d\n",
1700 		    device_xname(sc->sc_dev), rv);
1701 		hvn_free_cmd(sc, rc);
1702 		return rv;
1703 	}
1704 
1705 	cmp = (struct rndis_query_comp *)&rc->rc_cmp;
1706 	switch (cmp->rm_status) {
1707 	case RNDIS_STATUS_SUCCESS:
1708 		if (cmp->rm_infobuflen > olength) {
1709 			rv = EINVAL;
1710 			break;
1711 		}
1712 		memcpy(res, rc->rc_cmpbuf, cmp->rm_infobuflen);
1713 		*length = cmp->rm_infobuflen;
1714 		break;
1715 	default:
1716 		*length = 0;
1717 		rv = EIO;
1718 		break;
1719 	}
1720 
1721 	hvn_free_cmd(sc, rc);
1722 	return rv;
1723 }
1724 
1725 static int
1726 hvn_rndis_set(struct hvn_softc *sc, uint32_t oid, void *data, size_t length)
1727 {
1728 	struct rndis_cmd *rc;
1729 	struct rndis_set_req *req;
1730 	struct rndis_set_comp *cmp;
1731 	int rv;
1732 
1733 	rc = hvn_alloc_cmd(sc);
1734 
1735 	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1736 	    BUS_DMASYNC_PREREAD);
1737 
1738 	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1739 
1740 	req = rc->rc_req;
1741 	req->rm_type = REMOTE_NDIS_SET_MSG;
1742 	req->rm_len = sizeof(*req) + length;
1743 	req->rm_rid = rc->rc_id;
1744 	req->rm_oid = oid;
1745 	req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1746 
1747 	rc->rc_cmplen = sizeof(*cmp);
1748 
1749 	if (length > 0) {
1750 		KASSERT(sizeof(*req) + length < PAGE_SIZE);
1751 		req->rm_infobuflen = length;
1752 		memcpy(req + 1, data, length);
1753 	}
1754 
1755 	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1756 	    BUS_DMASYNC_PREWRITE);
1757 
1758 	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1759 		DPRINTF("%s: SET_MSG failed, error %d\n",
1760 		    device_xname(sc->sc_dev), rv);
1761 		hvn_free_cmd(sc, rc);
1762 		return rv;
1763 	}
1764 
1765 	cmp = (struct rndis_set_comp *)&rc->rc_cmp;
1766 	if (cmp->rm_status != RNDIS_STATUS_SUCCESS)
1767 		rv = EIO;
1768 
1769 	hvn_free_cmd(sc, rc);
1770 	return rv;
1771 }
1772 
1773 static int
1774 hvn_rndis_open(struct hvn_softc *sc)
1775 {
1776 	uint32_t filter;
1777 	int rv;
1778 
1779 	if (sc->sc_promisc)
1780 		filter = RNDIS_PACKET_TYPE_PROMISCUOUS;
1781 	else
1782 		filter = RNDIS_PACKET_TYPE_BROADCAST |
1783 		    RNDIS_PACKET_TYPE_ALL_MULTICAST |
1784 		    RNDIS_PACKET_TYPE_DIRECTED;
1785 
1786 	rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1787 	    &filter, sizeof(filter));
1788 	if (rv) {
1789 		DPRINTF("%s: failed to set RNDIS filter to %#x\n",
1790 		    device_xname(sc->sc_dev), filter);
1791 	}
1792 	return rv;
1793 }
1794 
1795 static int
1796 hvn_rndis_close(struct hvn_softc *sc)
1797 {
1798 	uint32_t filter = 0;
1799 	int rv;
1800 
1801 	rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1802 	    &filter, sizeof(filter));
1803 	if (rv) {
1804 		DPRINTF("%s: failed to clear RNDIS filter\n",
1805 		    device_xname(sc->sc_dev));
1806 	}
1807 	return rv;
1808 }
1809 
1810 static void
1811 hvn_rndis_detach(struct hvn_softc *sc)
1812 {
1813 	struct rndis_cmd *rc;
1814 	struct rndis_halt_req *req;
1815 	int rv;
1816 
1817 	rc = hvn_alloc_cmd(sc);
1818 
1819 	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1820 	    BUS_DMASYNC_PREREAD);
1821 
1822 	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1823 
1824 	req = rc->rc_req;
1825 	req->rm_type = REMOTE_NDIS_HALT_MSG;
1826 	req->rm_len = sizeof(*req);
1827 	req->rm_rid = rc->rc_id;
1828 
1829 	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1830 	    BUS_DMASYNC_PREWRITE);
1831 
1832 	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1833 		DPRINTF("%s: HALT_MSG failed, error %d\n",
1834 		    device_xname(sc->sc_dev), rv);
1835 	}
1836 	hvn_free_cmd(sc, rc);
1837 }
1838