/*-
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
 */

#include <sys/param.h>

/* Hyperv requires locked atomic operations */
#ifndef MULTIPROCESSOR
#define _HYPERVMPATOMICS
#define MULTIPROCESSOR
#endif
#include <sys/atomic.h>
#ifdef _HYPERVMPATOMICS
#undef MULTIPROCESSOR
#undef _HYPERVMPATOMICS
#endif
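
/*
 * MULTIPROCESSOR is defined above only so that <sys/atomic.h>
 * provides the lock-prefixed (MP) atomics: the SynIC pages and
 * channel rings are shared with the hypervisor, which may update
 * them concurrently even when the guest has a single CPU.
 */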

#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timetc.h>
#include <sys/task.h>
#include <sys/syslog.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>

#include <uvm/uvm_extern.h>

#include <machine/i82489var.h>

#include <dev/rndvar.h>

#include <dev/pv/pvvar.h>
#include <dev/pv/pvreg.h>
#include <dev/pv/hypervreg.h>
#include <dev/pv/hypervvar.h>

/* Command submission flags */
#define HCF_SLEEPOK	0x0001	/* M_WAITOK */
#define HCF_NOSLEEP	0x0002	/* M_NOWAIT */
#define HCF_NOREPLY	0x0004

struct hv_softc *hv_sc;

int	hv_match(struct device *, void *, void *);
void	hv_attach(struct device *, struct device *, void *);
void	hv_set_version(struct hv_softc *);
u_int	hv_gettime(struct timecounter *);
int	hv_init_hypercall(struct hv_softc *);
uint64_t hv_hypercall(struct hv_softc *, uint64_t, void *, void *);
int	hv_init_interrupts(struct hv_softc *);
int	hv_init_synic(struct hv_softc *);
int	hv_cmd(struct hv_softc *, void *, size_t, void *, size_t, int);
int	hv_start(struct hv_softc *, struct hv_msg *);
int	hv_reply(struct hv_softc *, struct hv_msg *);
void	hv_wait(struct hv_softc *, int (*done)(struct hv_softc *,
	    struct hv_msg *), struct hv_msg *, void *, const char *);
uint16_t hv_intr_signal(struct hv_softc *, void *);
void	hv_intr(void);
void	hv_event_intr(struct hv_softc *);
void	hv_message_intr(struct hv_softc *);
int	hv_vmbus_connect(struct hv_softc *);
void	hv_channel_response(struct hv_softc *, struct vmbus_chanmsg_hdr *);
void	hv_channel_offer(struct hv_softc *, struct vmbus_chanmsg_hdr *);
void	hv_channel_rescind(struct hv_softc *, struct vmbus_chanmsg_hdr *);
void	hv_channel_delivered(struct hv_softc *, struct vmbus_chanmsg_hdr *);
int	hv_channel_scan(struct hv_softc *);
void	hv_process_offer(struct hv_softc *, struct hv_offer *);
struct hv_channel *
	hv_channel_lookup(struct hv_softc *, uint32_t);
int	hv_channel_ring_create(struct hv_channel *, uint32_t);
void	hv_channel_ring_destroy(struct hv_channel *);
void	hv_channel_pause(struct hv_channel *);
uint	hv_channel_unpause(struct hv_channel *);
uint	hv_channel_ready(struct hv_channel *);
extern void hv_attach_icdevs(struct hv_softc *);
int	hv_attach_devices(struct hv_softc *);

struct {
	int		  hmd_response;
	int		  hmd_request;
	void		(*hmd_handler)(struct hv_softc *,
			    struct vmbus_chanmsg_hdr *);
} hv_msg_dispatch[] = {
	{ 0,					0, NULL },
	{ VMBUS_CHANMSG_CHOFFER,		0, hv_channel_offer },
	{ VMBUS_CHANMSG_CHRESCIND,		0, hv_channel_rescind },
	{ VMBUS_CHANMSG_CHREQUEST,		VMBUS_CHANMSG_CHOFFER,
	  NULL },
	{ VMBUS_CHANMSG_CHOFFER_DONE,		0,
	  hv_channel_delivered },
	{ VMBUS_CHANMSG_CHOPEN,			0, NULL },
	{ VMBUS_CHANMSG_CHOPEN_RESP,		VMBUS_CHANMSG_CHOPEN,
	  hv_channel_response },
	{ VMBUS_CHANMSG_CHCLOSE,		0, NULL },
	{ VMBUS_CHANMSG_GPADL_CONN,		0, NULL },
	{ VMBUS_CHANMSG_GPADL_SUBCONN,		0, NULL },
	{ VMBUS_CHANMSG_GPADL_CONNRESP,		VMBUS_CHANMSG_GPADL_CONN,
	  hv_channel_response },
	{ VMBUS_CHANMSG_GPADL_DISCONN,		0, NULL },
	{ VMBUS_CHANMSG_GPADL_DISCONNRESP,	VMBUS_CHANMSG_GPADL_DISCONN,
	  hv_channel_response },
	{ VMBUS_CHANMSG_CHFREE,			0, NULL },
	{ VMBUS_CHANMSG_CONNECT,		0, NULL },
	{ VMBUS_CHANMSG_CONNECT_RESP,		VMBUS_CHANMSG_CONNECT,
	  hv_channel_response },
	{ VMBUS_CHANMSG_DISCONNECT,		0, NULL },
};

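/*
 * The reference counter MSR ticks at 10MHz (one tick every 100ns);
 * the timecounter layer consumes its low 32 bits.
 */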
struct timecounter hv_timecounter = {
	hv_gettime, 0, 0xffffffff, 10000000, "hyperv", 9001
};

struct cfdriver hyperv_cd = {
	NULL, "hyperv", DV_DULL
};

const struct cfattach hyperv_ca = {
	sizeof(struct hv_softc), hv_match, hv_attach
};

const struct hv_guid hv_guid_network = {
	{ 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46,
	  0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e }
};

const struct hv_guid hv_guid_ide = {
	{ 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
	  0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 }
};

const struct hv_guid hv_guid_scsi = {
	{ 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
	  0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f }
};

const struct hv_guid hv_guid_shutdown = {
	{ 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49,
	  0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb }
};

const struct hv_guid hv_guid_timesync = {
	{ 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
	  0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf }
};

const struct hv_guid hv_guid_heartbeat = {
	{ 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
	  0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d }
};

const struct hv_guid hv_guid_kvp = {
	{ 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
	  0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6 }
};

#ifdef HYPERV_DEBUG
const struct hv_guid hv_guid_vss = {
	{ 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42,
	  0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 }
};

const struct hv_guid hv_guid_dynmem = {
	{ 0xdc, 0x74, 0x50, 0x52, 0x85, 0x89, 0xe2, 0x46,
	  0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 }
};

const struct hv_guid hv_guid_mouse = {
	{ 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c,
	  0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a }
};

const struct hv_guid hv_guid_kbd = {
	{ 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48,
	  0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 }
};

const struct hv_guid hv_guid_video = {
	{ 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a,
	  0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 }
};

const struct hv_guid hv_guid_fc = {
	{ 0x4a, 0xcc, 0x9b, 0x2f, 0x69, 0x00, 0xf3, 0x4a,
	  0xb7, 0x6b, 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda }
};

const struct hv_guid hv_guid_fcopy = {
	{ 0xe3, 0x4b, 0xd1, 0x34, 0xe4, 0xde, 0xc8, 0x41,
	  0x9a, 0xe7, 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92 }
};

const struct hv_guid hv_guid_pcie = {
	{ 0x1d, 0xf6, 0xc4, 0x44, 0x44, 0x44, 0x00, 0x44,
	  0x9d, 0x52, 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f }
};

const struct hv_guid hv_guid_netdir = {
	{ 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b,
	  0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 }
};

const struct hv_guid hv_guid_rdesktop = {
	{ 0xf4, 0xac, 0x6a, 0x27, 0x15, 0xac, 0x6c, 0x42,
	  0x98, 0xdd, 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe }
};

/* Automatic Virtual Machine Activation (AVMA) Services */
const struct hv_guid hv_guid_avma1 = {
	{ 0x55, 0xb2, 0x87, 0x44, 0x8c, 0xb8, 0x3f, 0x40,
	  0xbb, 0x51, 0xd1, 0xf6, 0x9c, 0xf1, 0x7f, 0x87 }
};

const struct hv_guid hv_guid_avma2 = {
	{ 0xf4, 0xba, 0x75, 0x33, 0x15, 0x9e, 0x30, 0x4b,
	  0xb7, 0x65, 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b }
};

const struct hv_guid hv_guid_avma3 = {
	{ 0xa0, 0x1f, 0x22, 0x99, 0xad, 0x24, 0xe2, 0x11,
	  0xbe, 0x98, 0x00, 0x1a, 0xa0, 0x1b, 0xbf, 0x6e }
};

const struct hv_guid hv_guid_avma4 = {
	{ 0x16, 0x57, 0xe6, 0xf8, 0xb3, 0x3c, 0x06, 0x4a,
	  0x9a, 0x60, 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5 }
};
#endif	/* HYPERV_DEBUG */

int
hv_match(struct device *parent, void *match, void *aux)
{
	struct pv_attach_args *pva = aux;
	struct pvbus_hv *hv = &pva->pva_hv[PVBUS_HYPERV];

	if ((hv->hv_major == 0 && hv->hv_minor == 0) || hv->hv_base == 0)
		return (0);

	return (1);
}

void
hv_attach(struct device *parent, struct device *self, void *aux)
{
	struct hv_softc *sc = (struct hv_softc *)self;
	struct pv_attach_args *pva = aux;
	struct pvbus_hv *hv = &pva->pva_hv[PVBUS_HYPERV];

	sc->sc_pvbus = hv;
	sc->sc_dmat = pva->pva_dmat;

	if (!(hv->hv_features & CPUID_HV_MSR_HYPERCALL) ||
	    !(hv->hv_features & CPUID_HV_MSR_SYNIC)) {
		printf(": not functional\n");
		return;
	}

	DPRINTF("\n");

	hv_set_version(sc);

	if (hv->hv_features & CPUID_HV_MSR_TIME_REFCNT)
		tc_init(&hv_timecounter);

	if (hv_init_hypercall(sc))
		return;

	/* Wire it up to the global */
	hv_sc = sc;

	if (hv_init_interrupts(sc))
		return;

	if (hv_vmbus_connect(sc))
		return;

	DPRINTF("%s", sc->sc_dev.dv_xname);
	printf(": protocol %d.%d, features %#x\n",
	    VMBUS_VERSION_MAJOR(sc->sc_proto),
	    VMBUS_VERSION_MINOR(sc->sc_proto),
	    hv->hv_features);

	if (hv_channel_scan(sc))
		return;

	/* Attach heartbeat, KVP and other "internal" services */
	hv_attach_icdevs(sc);

	/* Attach devices with external drivers */
	hv_attach_devices(sc);
}

void
hv_set_version(struct hv_softc *sc)
{
	uint64_t ver;

	/* OpenBSD build date */
	ver = MSR_HV_GUESTID_OSTYPE_OPENBSD;
	ver |= (uint64_t)OpenBSD << MSR_HV_GUESTID_VERSION_SHIFT;
	wrmsr(MSR_HV_GUEST_OS_ID, ver);
}

u_int
hv_gettime(struct timecounter *tc)
{
	u_int now = rdmsr(MSR_HV_TIME_REF_COUNT);

	return (now);
}

int
hv_init_hypercall(struct hv_softc *sc)
{
	extern void *hv_hypercall_page;
	uint64_t msr;
	paddr_t pa;

	sc->sc_hc = &hv_hypercall_page;

	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_hc, &pa)) {
		printf(": hypercall page PA extraction failed\n");
		return (-1);
	}

	msr = (atop(pa) << MSR_HV_HYPERCALL_PGSHIFT) | MSR_HV_HYPERCALL_ENABLE;
	wrmsr(MSR_HV_HYPERCALL, msr);

	if (!(rdmsr(MSR_HV_HYPERCALL) & MSR_HV_HYPERCALL_ENABLE)) {
		printf(": failed to set up a hypercall page\n");
		return (-1);
	}

	return (0);
}

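/*
 * Hypercall calling convention: on amd64 the control code is passed
 * in %rcx, the input PA in %rdx and the output PA in %r8, with the
 * status returned in %rax.  On i386 the 64-bit control and status
 * values are split across %edx:%eax, the input PA is passed in
 * %ebx:%ecx and the output PA in %edi:%esi.
 */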
uint64_t
hv_hypercall(struct hv_softc *sc, uint64_t control, void *input,
    void *output)
{
	paddr_t input_pa = 0, output_pa = 0;
	uint64_t status = 0;

	if (input != NULL &&
	    pmap_extract(pmap_kernel(), (vaddr_t)input, &input_pa) == 0) {
		printf("%s: hypercall input PA extraction failed\n",
		    sc->sc_dev.dv_xname);
		return (~HYPERCALL_STATUS_SUCCESS);
	}

	if (output != NULL &&
	    pmap_extract(pmap_kernel(), (vaddr_t)output, &output_pa) == 0) {
		printf("%s: hypercall output PA extraction failed\n",
		    sc->sc_dev.dv_xname);
		return (~HYPERCALL_STATUS_SUCCESS);
	}

#ifdef __amd64__
	__asm__ __volatile__ ("mov %0, %%r8" : : "r" (output_pa) : "r8");
	__asm__ __volatile__ ("call *%3" : "=a" (status) : "c" (control),
	    "d" (input_pa), "m" (sc->sc_hc));
#else  /* __i386__ */
	{
		uint32_t control_hi = control >> 32;
		uint32_t control_lo = control & 0xffffffff;
		uint32_t status_hi = 1;
		uint32_t status_lo = 1;

		__asm__ __volatile__ ("call *%8" :
		    "=d" (status_hi), "=a"(status_lo) :
		    "d" (control_hi), "a" (control_lo),
		    "b" (0), "c" (input_pa), "D" (0), "S" (output_pa),
		    "m" (sc->sc_hc));

		status = status_lo | ((uint64_t)status_hi << 32);
	}
#endif	/* __amd64__ */

	return (status);
}

int
hv_init_interrupts(struct hv_softc *sc)
{
	struct cpu_info *ci = curcpu();
	int cpu = CPU_INFO_UNIT(ci);

	sc->sc_idtvec = LAPIC_HYPERV_VECTOR;

	TAILQ_INIT(&sc->sc_reqs);
	mtx_init(&sc->sc_reqlck, IPL_NET);

	TAILQ_INIT(&sc->sc_rsps);
	mtx_init(&sc->sc_rsplck, IPL_NET);

	sc->sc_simp[cpu] = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_simp[cpu] == NULL) {
		printf(": failed to allocate SIMP\n");
		return (-1);
	}

	sc->sc_siep[cpu] = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_siep[cpu] == NULL) {
		printf(": failed to allocate SIEP\n");
		km_free(sc->sc_simp[cpu], PAGE_SIZE, &kv_any, &kp_zero);
		return (-1);
	}

	sc->sc_proto = VMBUS_VERSION_WS2008;

	return (hv_init_synic(sc));
}

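/*
 * The SynIC (synthetic interrupt controller) is programmed through
 * MSRs: a per-vCPU message page (SIMP) receives channel protocol
 * messages, a per-vCPU event flags page (SIEFP) receives channel
 * event bits and the VMBUS_SINT_MESSAGE interrupt source is routed
 * to our IDT vector with auto-EOI enabled.
 */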
int
hv_init_synic(struct hv_softc *sc)
{
	struct cpu_info *ci = curcpu();
	int cpu = CPU_INFO_UNIT(ci);
	uint64_t simp, siefp, sctrl, sint;
	paddr_t pa;

	/*
	 * Set up the SynIC message page
	 */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_simp[cpu], &pa)) {
		printf(": SIMP PA extraction failed\n");
		return (-1);
	}
	simp = rdmsr(MSR_HV_SIMP);
	simp &= (1 << MSR_HV_SIMP_PGSHIFT) - 1;
	simp |= (atop(pa) << MSR_HV_SIMP_PGSHIFT);
	simp |= MSR_HV_SIMP_ENABLE;
	wrmsr(MSR_HV_SIMP, simp);

	/*
	 * Set up the SynIC event flags page
	 */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_siep[cpu], &pa)) {
		printf(": SIEP PA extraction failed\n");
		return (-1);
	}
	siefp = rdmsr(MSR_HV_SIEFP);
	siefp &= (1 << MSR_HV_SIEFP_PGSHIFT) - 1;
	siefp |= (atop(pa) << MSR_HV_SIEFP_PGSHIFT);
	siefp |= MSR_HV_SIEFP_ENABLE;
	wrmsr(MSR_HV_SIEFP, siefp);

	/*
	 * Configure and unmask SINT for message and event flags
	 */
	sint = rdmsr(MSR_HV_SINT0 + VMBUS_SINT_MESSAGE);
	sint = sc->sc_idtvec | MSR_HV_SINT_AUTOEOI |
	    (sint & MSR_HV_SINT_RSVD_MASK);
	wrmsr(MSR_HV_SINT0 + VMBUS_SINT_MESSAGE, sint);

	/* Enable the global synic bit */
	sctrl = rdmsr(MSR_HV_SCONTROL);
	sctrl |= MSR_HV_SCTRL_ENABLE;
	wrmsr(MSR_HV_SCONTROL, sctrl);

	sc->sc_vcpus[cpu] = rdmsr(MSR_HV_VP_INDEX);

	DPRINTF("vcpu%u: SIMP %#llx SIEFP %#llx SCTRL %#llx\n",
	    sc->sc_vcpus[cpu], simp, siefp, sctrl);

	return (0);
}

int
hv_cmd(struct hv_softc *sc, void *cmd, size_t cmdlen, void *rsp,
    size_t rsplen, int flags)
{
	struct hv_msg msg;
	int rv;

	if (cmdlen > VMBUS_MSG_DSIZE_MAX) {
		printf("%s: payload too large (%lu)\n", sc->sc_dev.dv_xname,
		    cmdlen);
		return (EMSGSIZE);
	}

	memset(&msg, 0, sizeof(msg));

	msg.msg_req.hc_dsize = cmdlen;
	memcpy(msg.msg_req.hc_data, cmd, cmdlen);

	if (!(flags & HCF_NOREPLY)) {
		msg.msg_rsp = rsp;
		msg.msg_rsplen = rsplen;
	} else
		msg.msg_flags |= MSGF_NOQUEUE;

	if (flags & HCF_NOSLEEP)
		msg.msg_flags |= MSGF_NOSLEEP;

	if ((rv = hv_start(sc, &msg)) != 0)
		return (rv);
	return (hv_reply(sc, &msg));
}

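/*
 * Requests are posted to the host with the HYPERCALL_POST_MESSAGE
 * hypercall and, unless MSGF_NOQUEUE is set, queued on sc_reqs.
 * Once the matching response arrives, hv_channel_response() copies
 * it out, moves the request onto sc_rsps and wakes up hv_reply().
 */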
int
hv_start(struct hv_softc *sc, struct hv_msg *msg)
{
	const int delays[] = { 100, 100, 100, 500, 500, 5000, 5000, 5000 };
	const char *wchan = "hvstart";
	uint16_t status;
	int i, s;

	msg->msg_req.hc_connid = VMBUS_CONNID_MESSAGE;
	msg->msg_req.hc_msgtype = 1;

	if (!(msg->msg_flags & MSGF_NOQUEUE)) {
		mtx_enter(&sc->sc_reqlck);
		TAILQ_INSERT_TAIL(&sc->sc_reqs, msg, msg_entry);
		mtx_leave(&sc->sc_reqlck);
	}

	for (i = 0; i < nitems(delays); i++) {
		status = hv_hypercall(sc, HYPERCALL_POST_MESSAGE,
		    &msg->msg_req, NULL);
		if (status == HYPERCALL_STATUS_SUCCESS)
			break;
		if (msg->msg_flags & MSGF_NOSLEEP) {
			delay(delays[i]);
			s = splnet();
			hv_intr();
			splx(s);
		} else
			tsleep(wchan, PRIBIO, wchan, 1);
	}
	if (status != 0) {
		printf("%s: posting vmbus message failed with %d\n",
		    sc->sc_dev.dv_xname, status);
		if (!(msg->msg_flags & MSGF_NOQUEUE)) {
			mtx_enter(&sc->sc_reqlck);
			TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry);
			mtx_leave(&sc->sc_reqlck);
		}
		return (EIO);
	}

	return (0);
}

static int
hv_reply_done(struct hv_softc *sc, struct hv_msg *msg)
{
	struct hv_msg *m;

	mtx_enter(&sc->sc_rsplck);
	TAILQ_FOREACH(m, &sc->sc_rsps, msg_entry) {
		if (m == msg) {
			mtx_leave(&sc->sc_rsplck);
			return (1);
		}
	}
	mtx_leave(&sc->sc_rsplck);
	return (0);
}

int
hv_reply(struct hv_softc *sc, struct hv_msg *msg)
{
	if (msg->msg_flags & MSGF_NOQUEUE)
		return (0);

	hv_wait(sc, hv_reply_done, msg, msg, "hvreply");

	mtx_enter(&sc->sc_rsplck);
	TAILQ_REMOVE(&sc->sc_rsps, msg, msg_entry);
	mtx_leave(&sc->sc_rsplck);

	return (0);
}

void
hv_wait(struct hv_softc *sc, int (*cond)(struct hv_softc *, struct hv_msg *),
    struct hv_msg *msg, void *wchan, const char *wmsg)
{
	int s;

	KASSERT(cold ? msg->msg_flags & MSGF_NOSLEEP : 1);

	while (!cond(sc, msg)) {
		if (msg->msg_flags & MSGF_NOSLEEP) {
			delay(1000);
			s = splnet();
			hv_intr();
			splx(s);
		} else
			tsleep(wchan, PRIBIO, wmsg ? wmsg : "hvwait", 1);
	}
}

uint16_t
hv_intr_signal(struct hv_softc *sc, void *con)
{
	uint64_t status;

	status = hv_hypercall(sc, HYPERCALL_SIGNAL_EVENT, con, NULL);
	return ((uint16_t)status);
}

void
hv_intr(void)
{
	struct hv_softc *sc = hv_sc;

	hv_event_intr(sc);
	hv_message_intr(sc);
}

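/*
 * Channel events arrive as a bitmap indexed by channel id: bit N of
 * row M marks channel M * LONG_BIT + N as pending.  Rows are fetched
 * and cleared atomically so that new events can accumulate while the
 * current batch is being dispatched.
 */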
void
hv_event_intr(struct hv_softc *sc)
{
	struct vmbus_evtflags *evt;
	struct cpu_info *ci = curcpu();
	int cpu = CPU_INFO_UNIT(ci);
	int bit, row, maxrow, chanid;
	struct hv_channel *ch;
	u_long *revents, pending;

	evt = (struct vmbus_evtflags *)sc->sc_siep[cpu] +
	    VMBUS_SINT_MESSAGE;
	if ((sc->sc_proto == VMBUS_VERSION_WS2008) ||
	    (sc->sc_proto == VMBUS_VERSION_WIN7)) {
		if (!test_bit(0, &evt->evt_flags[0]))
			return;
		clear_bit(0, &evt->evt_flags[0]);
		maxrow = VMBUS_CHAN_MAX_COMPAT / VMBUS_EVTFLAG_LEN;
		/*
		 * Pre-Win8 hosts deliver events on the receive half of
		 * the shared event page set up in hv_vmbus_connect().
		 */
		revents = sc->sc_revents;
	} else {
		maxrow = nitems(evt->evt_flags);
		/*
		 * On hosts running Win8 or newer, the event page can
		 * be checked directly to get the id of the channel
		 * that has the pending interrupt.
		 */
		revents = &evt->evt_flags[0];
	}

	for (row = 0; row < maxrow; row++) {
		if (revents[row] == 0)
			continue;
		pending = atomic_swap_ulong(&revents[row], 0);
		for (bit = 0; pending > 0; pending >>= 1, bit++) {
			if ((pending & 1) == 0)
				continue;
			chanid = (row * LONG_BIT) + bit;
			/* vmbus channel protocol message */
			if (chanid == 0)
				continue;
			ch = hv_channel_lookup(sc, chanid);
			if (ch == NULL) {
				printf("%s: unhandled event on %d\n",
				    sc->sc_dev.dv_xname, chanid);
				continue;
			}
			if (ch->ch_state != HV_CHANSTATE_OPENED) {
				printf("%s: channel %d is not active\n",
				    sc->sc_dev.dv_xname, chanid);
				continue;
			}
			ch->ch_evcnt.ec_count++;
			hv_channel_schedule(ch);
		}
	}
}

void
hv_message_intr(struct hv_softc *sc)
{
	struct vmbus_message *msg;
	struct vmbus_chanmsg_hdr *hdr;
	struct cpu_info *ci = curcpu();
	int cpu = CPU_INFO_UNIT(ci);

	for (;;) {
		msg = (struct vmbus_message *)sc->sc_simp[cpu] +
		    VMBUS_SINT_MESSAGE;
		if (msg->msg_type == VMBUS_MSGTYPE_NONE)
			break;

		hdr = (struct vmbus_chanmsg_hdr *)msg->msg_data;
		if (hdr->chm_type >= VMBUS_CHANMSG_COUNT) {
			printf("%s: unhandled message type %u flags %#x\n",
			    sc->sc_dev.dv_xname, hdr->chm_type,
			    msg->msg_flags);
			goto skip;
		}
		if (hv_msg_dispatch[hdr->chm_type].hmd_handler)
			hv_msg_dispatch[hdr->chm_type].hmd_handler(sc, hdr);
		else
			printf("%s: unhandled message type %u\n",
			    sc->sc_dev.dv_xname, hdr->chm_type);
 skip:
		msg->msg_type = VMBUS_MSGTYPE_NONE;
		virtio_membar_sync();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING)
			wrmsr(MSR_HV_EOM, 0);
	}
}

void
hv_channel_response(struct hv_softc *sc, struct vmbus_chanmsg_hdr *rsphdr)
{
	struct hv_msg *msg;
	struct vmbus_chanmsg_hdr *reqhdr;
	int req;

	req = hv_msg_dispatch[rsphdr->chm_type].hmd_request;
	mtx_enter(&sc->sc_reqlck);
	TAILQ_FOREACH(msg, &sc->sc_reqs, msg_entry) {
		reqhdr = (struct vmbus_chanmsg_hdr *)&msg->msg_req.hc_data;
		if (reqhdr->chm_type == req) {
			TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry);
			break;
		}
	}
	mtx_leave(&sc->sc_reqlck);
	if (msg != NULL) {
		memcpy(msg->msg_rsp, rsphdr, msg->msg_rsplen);
		mtx_enter(&sc->sc_rsplck);
		TAILQ_INSERT_TAIL(&sc->sc_rsps, msg, msg_entry);
		mtx_leave(&sc->sc_rsplck);
		wakeup(msg);
	}
}

void
hv_channel_offer(struct hv_softc *sc, struct vmbus_chanmsg_hdr *hdr)
{
	struct hv_offer *co;

	co = malloc(sizeof(*co), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (co == NULL) {
		printf("%s: failed to allocate an offer object\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	memcpy(&co->co_chan, hdr, sizeof(co->co_chan));

	mtx_enter(&sc->sc_offerlck);
	SIMPLEQ_INSERT_TAIL(&sc->sc_offers, co, co_entry);
	mtx_leave(&sc->sc_offerlck);
}

void
hv_channel_rescind(struct hv_softc *sc, struct vmbus_chanmsg_hdr *hdr)
{
	const struct vmbus_chanmsg_chrescind *cmd;

	cmd = (const struct vmbus_chanmsg_chrescind *)hdr;
	printf("%s: revoking channel %u\n", sc->sc_dev.dv_xname,
	    cmd->chm_chanid);
}

void
hv_channel_delivered(struct hv_softc *sc, struct vmbus_chanmsg_hdr *hdr)
{
	atomic_setbits_int(&sc->sc_flags, HSF_OFFERS_DELIVERED);
	wakeup(&sc->sc_offers);
}

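/*
 * Connecting to the VMBus involves handing the host the physical
 * addresses of the shared event page and of both monitor pages, and
 * then negotiating a protocol version by offering each version we
 * know, from newest to oldest, until the host accepts one.
 */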
int
hv_vmbus_connect(struct hv_softc *sc)
{
	const uint32_t versions[] = {
		VMBUS_VERSION_WIN8_1, VMBUS_VERSION_WIN8,
		VMBUS_VERSION_WIN7, VMBUS_VERSION_WS2008
	};
	struct vmbus_chanmsg_connect cmd;
	struct vmbus_chanmsg_connect_resp rsp;
	paddr_t epa, mpa1, mpa2;
	int i;

	sc->sc_events = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_events == NULL) {
		printf(": failed to allocate channel port events page\n");
		goto errout;
	}
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_events, &epa)) {
		printf(": channel port events page PA extraction failed\n");
		goto errout;
	}

	sc->sc_wevents = (u_long *)sc->sc_events;
	sc->sc_revents = (u_long *)((caddr_t)sc->sc_events + (PAGE_SIZE >> 1));

	sc->sc_monitor[0] = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_monitor[0] == NULL) {
		printf(": failed to allocate monitor page 1\n");
		goto errout;
	}
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_monitor[0], &mpa1)) {
		printf(": monitor page 1 PA extraction failed\n");
		goto errout;
	}

	sc->sc_monitor[1] = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_monitor[1] == NULL) {
		printf(": failed to allocate monitor page 2\n");
		goto errout;
	}
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_monitor[1], &mpa2)) {
		printf(": monitor page 2 PA extraction failed\n");
		goto errout;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CONNECT;
	cmd.chm_evtflags = (uint64_t)epa;
	cmd.chm_mnf1 = (uint64_t)mpa1;
	cmd.chm_mnf2 = (uint64_t)mpa2;

	memset(&rsp, 0, sizeof(rsp));

	for (i = 0; i < nitems(versions); i++) {
		cmd.chm_ver = versions[i];
		if (hv_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp),
		    HCF_NOSLEEP)) {
			DPRINTF("%s: CONNECT failed\n",
			    sc->sc_dev.dv_xname);
			goto errout;
		}
		if (rsp.chm_done) {
			sc->sc_flags |= HSF_CONNECTED;
			sc->sc_proto = versions[i];
			sc->sc_handle = VMBUS_GPADL_START;
			break;
		}
	}
	if (i == nitems(versions)) {
		printf("%s: failed to negotiate protocol version\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}

	return (0);

 errout:
	if (sc->sc_events) {
		km_free(sc->sc_events, PAGE_SIZE, &kv_any, &kp_zero);
		sc->sc_events = NULL;
		sc->sc_wevents = NULL;
		sc->sc_revents = NULL;
	}
	if (sc->sc_monitor[0]) {
		km_free(sc->sc_monitor[0], PAGE_SIZE, &kv_any, &kp_zero);
		sc->sc_monitor[0] = NULL;
	}
	if (sc->sc_monitor[1]) {
		km_free(sc->sc_monitor[1], PAGE_SIZE, &kv_any, &kp_zero);
		sc->sc_monitor[1] = NULL;
	}
	return (-1);
}

#ifdef HYPERV_DEBUG
static inline char *
guidprint(struct hv_guid *a)
{
	/* 3     0  5  4 7 6  8 9  10        15 */
	/* 33221100-5544-7766-9988-FFEEDDCCBBAA */
	static char buf[16 * 2 + 4 + 1];
	int i, j = 0;

	for (i = 3; i != -1; i -= 1, j += 2)
		snprintf(&buf[j], 3, "%02x", (uint8_t)a->data[i]);
	buf[j++] = '-';
	for (i = 5; i != 3; i -= 1, j += 2)
		snprintf(&buf[j], 3, "%02x", (uint8_t)a->data[i]);
	buf[j++] = '-';
	for (i = 7; i != 5; i -= 1, j += 2)
		snprintf(&buf[j], 3, "%02x", (uint8_t)a->data[i]);
	buf[j++] = '-';
	for (i = 8; i < 10; i += 1, j += 2)
		snprintf(&buf[j], 3, "%02x", (uint8_t)a->data[i]);
	buf[j++] = '-';
	for (i = 10; i < 16; i += 1, j += 2)
		snprintf(&buf[j], 3, "%02x", (uint8_t)a->data[i]);
	return (&buf[0]);
}
#endif	/* HYPERV_DEBUG */

void
hv_guid_sprint(struct hv_guid *guid, char *str, size_t size)
{
	const struct {
		const struct hv_guid	*guid;
		const char		*ident;
	} map[] = {
		{ &hv_guid_network,	"network" },
		{ &hv_guid_ide,		"ide" },
		{ &hv_guid_scsi,	"scsi" },
		{ &hv_guid_shutdown,	"shutdown" },
		{ &hv_guid_timesync,	"timesync" },
		{ &hv_guid_heartbeat,	"heartbeat" },
		{ &hv_guid_kvp,		"kvp" },
#ifdef HYPERV_DEBUG
		{ &hv_guid_vss,		"vss" },
		{ &hv_guid_dynmem,	"dynamic-memory" },
		{ &hv_guid_mouse,	"mouse" },
		{ &hv_guid_kbd,		"keyboard" },
		{ &hv_guid_video,	"video" },
		{ &hv_guid_fc,		"fiber-channel" },
		{ &hv_guid_fcopy,	"file-copy" },
		{ &hv_guid_pcie,	"pcie-passthrough" },
		{ &hv_guid_netdir,	"network-direct" },
		{ &hv_guid_rdesktop,	"remote-desktop" },
		{ &hv_guid_avma1,	"avma-1" },
		{ &hv_guid_avma2,	"avma-2" },
		{ &hv_guid_avma3,	"avma-3" },
		{ &hv_guid_avma4,	"avma-4" },
#endif
	};
	int i;

	for (i = 0; i < nitems(map); i++) {
		if (memcmp(guid, map[i].guid, sizeof(*guid)) == 0) {
			strlcpy(str, map[i].ident, size);
			return;
		}
	}
#ifdef HYPERV_DEBUG
	strlcpy(str, guidprint(guid), size);
#endif
}

static int
hv_channel_scan_done(struct hv_softc *sc, struct hv_msg *msg __unused)
{
	return (sc->sc_flags & HSF_OFFERS_DELIVERED);
}

int
hv_channel_scan(struct hv_softc *sc)
{
	struct vmbus_chanmsg_hdr hdr;
	struct vmbus_chanmsg_choffer rsp;
	struct hv_offer *co;

	SIMPLEQ_INIT(&sc->sc_offers);
	mtx_init(&sc->sc_offerlck, IPL_NET);

	memset(&hdr, 0, sizeof(hdr));
	hdr.chm_type = VMBUS_CHANMSG_CHREQUEST;

	if (hv_cmd(sc, &hdr, sizeof(hdr), &rsp, sizeof(rsp),
	    HCF_NOSLEEP | HCF_NOREPLY)) {
		DPRINTF("%s: CHREQUEST failed\n", sc->sc_dev.dv_xname);
		return (-1);
	}

	hv_wait(sc, hv_channel_scan_done, (struct hv_msg *)&hdr,
	    &sc->sc_offers, "hvscan");

	TAILQ_INIT(&sc->sc_channels);
	mtx_init(&sc->sc_channelck, IPL_NET);

	mtx_enter(&sc->sc_offerlck);
	while (!SIMPLEQ_EMPTY(&sc->sc_offers)) {
		co = SIMPLEQ_FIRST(&sc->sc_offers);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_offers, co_entry);
		mtx_leave(&sc->sc_offerlck);

		hv_process_offer(sc, co);
		free(co, M_DEVBUF, sizeof(*co));

		mtx_enter(&sc->sc_offerlck);
	}
	mtx_leave(&sc->sc_offerlck);

	return (0);
}

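/*
 * An offer whose type and instance GUIDs match an already known
 * channel is a subchannel offer (chm_subidx != 0); those are noted
 * and dropped since this driver only makes use of primary channels.
 */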
void
hv_process_offer(struct hv_softc *sc, struct hv_offer *co)
{
	struct hv_channel *ch, *nch;

	nch = malloc(sizeof(*nch), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (nch == NULL) {
		printf("%s: failed to allocate memory for the channel\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	nch->ch_sc = sc;
	hv_guid_sprint(&co->co_chan.chm_chtype, nch->ch_ident,
	    sizeof(nch->ch_ident));

	/*
	 * By default we set up state to enable batched reading.
	 * A specific service can choose to disable this prior
	 * to opening the channel.
	 */
	nch->ch_flags |= CHF_BATCHED;

	KASSERT((((vaddr_t)&nch->ch_monprm) & 0x7) == 0);
	memset(&nch->ch_monprm, 0, sizeof(nch->ch_monprm));
	nch->ch_monprm.mp_connid = VMBUS_CONNID_EVENT;

	if (sc->sc_proto != VMBUS_VERSION_WS2008)
		nch->ch_monprm.mp_connid = co->co_chan.chm_connid;

	if (co->co_chan.chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) {
		nch->ch_mgroup = co->co_chan.chm_montrig / VMBUS_MONTRIG_LEN;
		nch->ch_mindex = co->co_chan.chm_montrig % VMBUS_MONTRIG_LEN;
		nch->ch_flags |= CHF_MONITOR;
	}

	nch->ch_id = co->co_chan.chm_chanid;

	memcpy(&nch->ch_type, &co->co_chan.chm_chtype, sizeof(ch->ch_type));
	memcpy(&nch->ch_inst, &co->co_chan.chm_chinst, sizeof(ch->ch_inst));

	mtx_enter(&sc->sc_channelck);
	TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
		if (!memcmp(&ch->ch_type, &nch->ch_type, sizeof(ch->ch_type)) &&
		    !memcmp(&ch->ch_inst, &nch->ch_inst, sizeof(ch->ch_inst)))
			break;
	}
	if (ch != NULL) {
		if (co->co_chan.chm_subidx == 0) {
			printf("%s: unknown offer \"%s\"\n",
			    sc->sc_dev.dv_xname, nch->ch_ident);
			mtx_leave(&sc->sc_channelck);
			free(nch, M_DEVBUF, sizeof(*nch));
			return;
		}
#ifdef HYPERV_DEBUG
		printf("%s: subchannel %u for \"%s\"\n", sc->sc_dev.dv_xname,
		    co->co_chan.chm_subidx, ch->ch_ident);
#endif
		mtx_leave(&sc->sc_channelck);
		free(nch, M_DEVBUF, sizeof(*nch));
		return;
	}

	nch->ch_state = HV_CHANSTATE_OFFERED;

	TAILQ_INSERT_TAIL(&sc->sc_channels, nch, ch_entry);
	mtx_leave(&sc->sc_channelck);

#ifdef HYPERV_DEBUG
	printf("%s: channel %u: \"%s\"", sc->sc_dev.dv_xname, nch->ch_id,
	    nch->ch_ident);
	if (nch->ch_flags & CHF_MONITOR)
		printf(", monitor %u\n", co->co_chan.chm_montrig);
	else
		printf("\n");
#endif
}

struct hv_channel *
hv_channel_lookup(struct hv_softc *sc, uint32_t relid)
{
	struct hv_channel *ch;

	TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
		if (ch->ch_id == relid)
			return (ch);
	}
	return (NULL);
}

int
hv_channel_ring_create(struct hv_channel *ch, uint32_t buflen)
{
	struct hv_softc *sc = ch->ch_sc;

	buflen = roundup(buflen, PAGE_SIZE) + sizeof(struct vmbus_bufring);
	ch->ch_ring = km_alloc(2 * buflen, &kv_any, &kp_zero, cold ?
	    &kd_nowait : &kd_waitok);
	if (ch->ch_ring == NULL) {
		printf("%s: failed to allocate channel ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	ch->ch_ring_size = 2 * buflen;

	memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd));
	ch->ch_wrd.rd_ring = (struct vmbus_bufring *)ch->ch_ring;
	ch->ch_wrd.rd_size = buflen;
	ch->ch_wrd.rd_dsize = buflen - sizeof(struct vmbus_bufring);
	mtx_init(&ch->ch_wrd.rd_lock, IPL_NET);

	memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd));
	ch->ch_rrd.rd_ring = (struct vmbus_bufring *)((uint8_t *)ch->ch_ring +
	    buflen);
	ch->ch_rrd.rd_size = buflen;
	ch->ch_rrd.rd_dsize = buflen - sizeof(struct vmbus_bufring);
	mtx_init(&ch->ch_rrd.rd_lock, IPL_NET);

	if (hv_handle_alloc(ch, ch->ch_ring, 2 * buflen, &ch->ch_ring_gpadl)) {
		printf("%s: failed to obtain a PA handle for the ring\n",
		    sc->sc_dev.dv_xname);
		hv_channel_ring_destroy(ch);
		return (-1);
	}

	return (0);
}

void
hv_channel_ring_destroy(struct hv_channel *ch)
{
	km_free(ch->ch_ring, ch->ch_ring_size, &kv_any, &kp_zero);
	ch->ch_ring = NULL;
	hv_handle_free(ch, ch->ch_ring_gpadl);

	memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd));
	memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd));
}

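/*
 * A minimal consumer sketch (a hypothetical "xyz" child driver built
 * on the attach args handed out by hv_attach_devices()); the inband
 * packet type below is an assumption, individual services pick their
 * own packet types and flags:
 *
 *	struct hv_channel *ch = aa->aa_chan;
 *
 *	hv_channel_setdeferred(ch, sc->sc_dev.dv_xname);
 *	if (hv_channel_open(ch, PAGE_SIZE, NULL, 0, xyz_intr, sc))
 *		return;
 *	hv_evcount_attach(ch, sc->sc_dev.dv_xname);
 *	...
 *	hv_channel_send(ch, &req, sizeof(req), rid,
 *	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC);
 */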
int
hv_channel_open(struct hv_channel *ch, size_t buflen, void *udata,
    size_t udatalen, void (*handler)(void *), void *arg)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_chopen cmd;
	struct vmbus_chanmsg_chopen_resp rsp;
	int rv;

	if (ch->ch_ring == NULL &&
	    hv_channel_ring_create(ch, buflen)) {
		DPRINTF("%s: failed to create channel ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHOPEN;
	cmd.chm_openid = ch->ch_id;
	cmd.chm_chanid = ch->ch_id;
	cmd.chm_gpadl = ch->ch_ring_gpadl;
	cmd.chm_txbr_pgcnt = ch->ch_wrd.rd_size >> PAGE_SHIFT;
	cmd.chm_vcpuid = ch->ch_vcpu;

	if (udata && udatalen > 0)
		memcpy(cmd.chm_udata, udata, udatalen);

	memset(&rsp, 0, sizeof(rsp));

	ch->ch_handler = handler;
	ch->ch_ctx = arg;

	ch->ch_state = HV_CHANSTATE_OPENED;

	rv = hv_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp),
	    cold ? HCF_NOSLEEP : HCF_SLEEPOK);
	if (rv) {
		hv_channel_ring_destroy(ch);
		DPRINTF("%s: CHOPEN failed with %d\n",
		    sc->sc_dev.dv_xname, rv);
		ch->ch_handler = NULL;
		ch->ch_ctx = NULL;
		ch->ch_state = HV_CHANSTATE_OFFERED;
		return (-1);
	}

	return (0);
}

int
hv_channel_close(struct hv_channel *ch)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_chclose cmd;
	int rv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHCLOSE;
	cmd.chm_chanid = ch->ch_id;

	ch->ch_state = HV_CHANSTATE_CLOSING;
	rv = hv_cmd(sc, &cmd, sizeof(cmd), NULL, 0, HCF_NOREPLY);
	if (rv) {
		DPRINTF("%s: CHCLOSE failed with %d\n",
		    sc->sc_dev.dv_xname, rv);
		return (-1);
	}
	ch->ch_state = HV_CHANSTATE_CLOSED;
	hv_channel_ring_destroy(ch);
	return (0);
}

static inline void
hv_channel_setevent(struct hv_softc *sc, struct hv_channel *ch)
{
	struct vmbus_mon_trig *mtg;

	/* Each uint32_t represents 32 channels */
	set_bit(ch->ch_id, sc->sc_wevents);
	if (ch->ch_flags & CHF_MONITOR) {
		mtg = &sc->sc_monitor[1]->mnf_trigs[ch->ch_mgroup];
		set_bit(ch->ch_mindex, &mtg->mt_pending);
	} else
		hv_intr_signal(sc, &ch->ch_monprm);
}

void
hv_channel_intr(void *arg)
{
	struct hv_channel *ch = arg;

	if (hv_channel_ready(ch))
		ch->ch_handler(ch->ch_ctx);

	if (hv_channel_unpause(ch) == 0)
		return;

	hv_channel_pause(ch);
	hv_channel_schedule(ch);
}

int
hv_channel_setdeferred(struct hv_channel *ch, const char *name)
{
	ch->ch_taskq = taskq_create(name, 1, IPL_NET, TASKQ_MPSAFE);
	if (ch->ch_taskq == NULL)
		return (-1);
	task_set(&ch->ch_task, hv_channel_intr, ch);
	return (0);
}

void
hv_channel_schedule(struct hv_channel *ch)
{
	if (ch->ch_handler) {
		if (!cold && (ch->ch_flags & CHF_BATCHED)) {
			hv_channel_pause(ch);
			task_add(ch->ch_taskq, &ch->ch_task);
		} else
			ch->ch_handler(ch->ch_ctx);
	}
}

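/*
 * Each ring half is a vmbus_bufring: a producer index (br_windex),
 * a consumer index (br_rindex) and an interrupt mask, followed by
 * rd_dsize bytes of data.  Both indices are byte offsets into the
 * data area and wrap around at rd_dsize.
 */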
static inline void
hv_ring_put(struct hv_ring_data *wrd, uint8_t *data, uint32_t datalen)
{
	int left = MIN(datalen, wrd->rd_dsize - wrd->rd_prod);

	memcpy(&wrd->rd_ring->br_data[wrd->rd_prod], data, left);
	memcpy(&wrd->rd_ring->br_data[0], data + left, datalen - left);
	wrd->rd_prod += datalen;
	if (wrd->rd_prod >= wrd->rd_dsize)
		wrd->rd_prod -= wrd->rd_dsize;
}

static inline void
hv_ring_get(struct hv_ring_data *rrd, uint8_t *data, uint32_t datalen,
    int peek)
{
	int left = MIN(datalen, rrd->rd_dsize - rrd->rd_cons);

	memcpy(data, &rrd->rd_ring->br_data[rrd->rd_cons], left);
	memcpy(data + left, &rrd->rd_ring->br_data[0], datalen - left);
	if (!peek) {
		rrd->rd_cons += datalen;
		if (rrd->rd_cons >= rrd->rd_dsize)
			rrd->rd_cons -= rrd->rd_dsize;
	}
}

static inline void
hv_ring_avail(struct hv_ring_data *rd, uint32_t *towrite, uint32_t *toread)
{
	uint32_t ridx = rd->rd_ring->br_rindex;
	uint32_t widx = rd->rd_ring->br_windex;
	uint32_t r, w;

	if (widx >= ridx)
		w = rd->rd_dsize - (widx - ridx);
	else
		w = ridx - widx;
	r = rd->rd_dsize - w;
	if (towrite)
		*towrite = w;
	if (toread)
		*toread = r;
}

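/*
 * A write transaction appends an 8 byte trailer recording the
 * producer index at which the transaction started.  Note that
 * hv_ring_avail() reports a completely full ring as empty, which
 * is why "avail <= datalen" fails the write: at least one byte
 * must always remain free.
 */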
int
hv_ring_write(struct hv_ring_data *wrd, struct iovec *iov, int iov_cnt,
    int *needsig)
{
	uint64_t indices = 0;
	uint32_t avail, oprod, datalen = sizeof(indices);
	int i;

	for (i = 0; i < iov_cnt; i++)
		datalen += iov[i].iov_len;

	KASSERT(datalen <= wrd->rd_dsize);

	hv_ring_avail(wrd, &avail, NULL);
	if (avail <= datalen) {
		DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen);
		return (EAGAIN);
	}

	oprod = wrd->rd_prod;

	for (i = 0; i < iov_cnt; i++)
		hv_ring_put(wrd, iov[i].iov_base, iov[i].iov_len);

	indices = (uint64_t)oprod << 32;
	hv_ring_put(wrd, (uint8_t *)&indices, sizeof(indices));

	virtio_membar_sync();
	wrd->rd_ring->br_windex = wrd->rd_prod;
	virtio_membar_sync();

	/* Signal when the ring transitions from being empty to non-empty */
	if (wrd->rd_ring->br_imask == 0 &&
	    wrd->rd_ring->br_rindex == oprod)
		*needsig = 1;
	else
		*needsig = 0;

	return (0);
}

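/*
 * Packets are framed with a channel packet header in front of the
 * payload; header and total lengths are encoded with the
 * VMBUS_CHANPKT_SETLEN() macro and the payload is padded out to an
 * 8 byte boundary with the zeropad iovec.
 */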
int
hv_channel_send(struct hv_channel *ch, void *data, uint32_t datalen,
    uint64_t rid, int type, uint32_t flags)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt cp;
	struct iovec iov[3];
	uint32_t pktlen, pktlen_aligned;
	uint64_t zeropad = 0;
	int rv, needsig = 0;

	pktlen = sizeof(cp) + datalen;
	pktlen_aligned = roundup(pktlen, sizeof(uint64_t));

	cp.cp_hdr.cph_type = type;
	cp.cp_hdr.cph_flags = flags;
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp));
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
	cp.cp_hdr.cph_tid = rid;

	iov[0].iov_base = &cp;
	iov[0].iov_len = sizeof(cp);

	iov[1].iov_base = data;
	iov[1].iov_len = datalen;

	iov[2].iov_base = &zeropad;
	iov[2].iov_len = pktlen_aligned - pktlen;

	mtx_enter(&ch->ch_wrd.rd_lock);
	rv = hv_ring_write(&ch->ch_wrd, iov, 3, &needsig);
	mtx_leave(&ch->ch_wrd.rd_lock);
	if (rv == 0 && needsig)
		hv_channel_setevent(sc, ch);

	return (rv);
}

int
hv_channel_send_sgl(struct hv_channel *ch, struct vmbus_gpa *sgl,
    uint32_t nsge, void *data, uint32_t datalen, uint64_t rid)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt_sglist cp;
	struct iovec iov[4];
	uint32_t buflen, pktlen, pktlen_aligned;
	uint64_t zeropad = 0;
	int rv, needsig = 0;

	buflen = sizeof(struct vmbus_gpa) * nsge;
	pktlen = sizeof(cp) + datalen + buflen;
	pktlen_aligned = roundup(pktlen, sizeof(uint64_t));

	cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
	cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen);
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
	cp.cp_hdr.cph_tid = rid;
	cp.cp_gpa_cnt = nsge;
	cp.cp_rsvd = 0;

	iov[0].iov_base = &cp;
	iov[0].iov_len = sizeof(cp);

	iov[1].iov_base = sgl;
	iov[1].iov_len = buflen;

	iov[2].iov_base = data;
	iov[2].iov_len = datalen;

	iov[3].iov_base = &zeropad;
	iov[3].iov_len = pktlen_aligned - pktlen;

	mtx_enter(&ch->ch_wrd.rd_lock);
	rv = hv_ring_write(&ch->ch_wrd, iov, 4, &needsig);
	mtx_leave(&ch->ch_wrd.rd_lock);
	if (rv == 0 && needsig)
		hv_channel_setevent(sc, ch);

	return (rv);
}

int
hv_channel_send_prpl(struct hv_channel *ch, struct vmbus_gpa_range *prpl,
    uint32_t nprp, void *data, uint32_t datalen, uint64_t rid)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt_prplist cp;
	struct iovec iov[4];
	uint32_t buflen, pktlen, pktlen_aligned;
	uint64_t zeropad = 0;
	int rv, needsig = 0;

	buflen = sizeof(struct vmbus_gpa_range) * (nprp + 1);
	pktlen = sizeof(cp) + datalen + buflen;
	pktlen_aligned = roundup(pktlen, sizeof(uint64_t));

	cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
	cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen);
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
	cp.cp_hdr.cph_tid = rid;
	cp.cp_range_cnt = 1;
	cp.cp_rsvd = 0;

	iov[0].iov_base = &cp;
	iov[0].iov_len = sizeof(cp);

	iov[1].iov_base = prpl;
	iov[1].iov_len = buflen;

	iov[2].iov_base = data;
	iov[2].iov_len = datalen;

	iov[3].iov_base = &zeropad;
	iov[3].iov_len = pktlen_aligned - pktlen;

	mtx_enter(&ch->ch_wrd.rd_lock);
	rv = hv_ring_write(&ch->ch_wrd, iov, 4, &needsig);
	mtx_leave(&ch->ch_wrd.rd_lock);
	if (rv == 0 && needsig)
		hv_channel_setevent(sc, ch);

	return (rv);
}

int
hv_ring_peek(struct hv_ring_data *rrd, void *data, uint32_t datalen)
{
	uint32_t avail;

	KASSERT(datalen <= rrd->rd_dsize);

	hv_ring_avail(rrd, NULL, &avail);
	if (avail < datalen)
		return (EAGAIN);

	hv_ring_get(rrd, (uint8_t *)data, datalen, 1);
	return (0);
}

int
hv_ring_read(struct hv_ring_data *rrd, void *data, uint32_t datalen,
    uint32_t offset)
{
	uint64_t indices;
	uint32_t avail;

	KASSERT(datalen <= rrd->rd_dsize);

	hv_ring_avail(rrd, NULL, &avail);
	if (avail < datalen) {
		DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen);
		return (EAGAIN);
	}

	if (offset) {
		rrd->rd_cons += offset;
		if (rrd->rd_cons >= rrd->rd_dsize)
			rrd->rd_cons -= rrd->rd_dsize;
	}

	hv_ring_get(rrd, (uint8_t *)data, datalen, 0);
	hv_ring_get(rrd, (uint8_t *)&indices, sizeof(indices), 0);

	virtio_membar_sync();
	rrd->rd_ring->br_rindex = rrd->rd_cons;

	return (0);
}

int
hv_channel_recv(struct hv_channel *ch, void *data, uint32_t datalen,
    uint32_t *rlen, uint64_t *rid, int raw)
{
	struct vmbus_chanpkt_hdr cph;
	uint32_t offset, pktlen;
	int rv;

	*rlen = 0;

	mtx_enter(&ch->ch_rrd.rd_lock);

	if ((rv = hv_ring_peek(&ch->ch_rrd, &cph, sizeof(cph))) != 0) {
		mtx_leave(&ch->ch_rrd.rd_lock);
		return (rv);
	}

	offset = raw ? 0 : VMBUS_CHANPKT_GETLEN(cph.cph_hlen);
	pktlen = VMBUS_CHANPKT_GETLEN(cph.cph_tlen) - offset;
	if (pktlen > datalen) {
		mtx_leave(&ch->ch_rrd.rd_lock);
		printf("%s: pktlen %u datalen %u\n", __func__, pktlen, datalen);
		return (EINVAL);
	}

	rv = hv_ring_read(&ch->ch_rrd, data, pktlen, offset);
	if (rv == 0) {
		*rlen = pktlen;
		*rid = cph.cph_tid;
	}

	mtx_leave(&ch->ch_rrd.rd_lock);

	return (rv);
}

static inline void
hv_ring_mask(struct hv_ring_data *rd)
{
	virtio_membar_sync();
	rd->rd_ring->br_imask = 1;
	virtio_membar_sync();
}

static inline void
hv_ring_unmask(struct hv_ring_data *rd)
{
	virtio_membar_sync();
	rd->rd_ring->br_imask = 0;
	virtio_membar_sync();
}

void
hv_channel_pause(struct hv_channel *ch)
{
	hv_ring_mask(&ch->ch_rrd);
}

uint
hv_channel_unpause(struct hv_channel *ch)
{
	uint32_t avail;

	hv_ring_unmask(&ch->ch_rrd);
	hv_ring_avail(&ch->ch_rrd, NULL, &avail);

	return (avail);
}

uint
hv_channel_ready(struct hv_channel *ch)
{
	uint32_t avail;

	hv_ring_avail(&ch->ch_rrd, NULL, &avail);

	return (avail);
}

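/*
 * A GPADL (guest physical address descriptor list) hands the host a
 * list of guest page frame numbers under a single 32-bit handle.  As
 * many PFNs as possible travel in the GPADL_CONN request itself and
 * the remainder is split across GPADL_SUBCONN messages; the host
 * acknowledges the whole list with GPADL_CONNRESP.
 */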
/* How many PFNs can be referenced by the header */
#define HV_NPFNHDR	((VMBUS_MSG_DSIZE_MAX -	\
	  sizeof(struct vmbus_chanmsg_gpadl_conn)) / sizeof(uint64_t))

/* How many PFNs can be referenced by the body */
#define HV_NPFNBODY	((VMBUS_MSG_DSIZE_MAX -	\
	  sizeof(struct vmbus_chanmsg_gpadl_subconn)) / sizeof(uint64_t))

int
hv_handle_alloc(struct hv_channel *ch, void *buffer, uint32_t buflen,
    uint32_t *handle)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_gpadl_conn *hdr;
	struct vmbus_chanmsg_gpadl_subconn *cmd;
	struct vmbus_chanmsg_gpadl_connresp rsp;
	struct hv_msg *msg;
	int i, j, last, left, rv;
	int bodylen = 0, ncmds = 0, pfn = 0;
	int waitflag = cold ? M_NOWAIT : M_WAITOK;
	uint64_t *frames;
	paddr_t pa;
	caddr_t body;
	/* Total number of pages to reference */
	int total = atop(buflen);
	/* Number of pages that will fit the header */
	int inhdr = MIN(total, HV_NPFNHDR);

	KASSERT((buflen & (PAGE_SIZE - 1)) == 0);

	if ((msg = malloc(sizeof(*msg), M_DEVBUF, M_ZERO | waitflag)) == NULL)
		return (ENOMEM);

	/* Prepare array of frame addresses */
	if ((frames = mallocarray(total, sizeof(*frames), M_DEVBUF, M_ZERO |
	    waitflag)) == NULL) {
		free(msg, M_DEVBUF, sizeof(*msg));
		return (ENOMEM);
	}
	for (i = 0; i < total; i++) {
		if (!pmap_extract(pmap_kernel(), (vaddr_t)buffer +
		    PAGE_SIZE * i, &pa)) {
			free(msg, M_DEVBUF, sizeof(*msg));
			free(frames, M_DEVBUF, total * sizeof(*frames));
			return (EFAULT);
		}
		frames[i] = atop(pa);
	}

	msg->msg_req.hc_dsize = sizeof(struct vmbus_chanmsg_gpadl_conn) +
	    inhdr * sizeof(uint64_t);
	hdr = (struct vmbus_chanmsg_gpadl_conn *)msg->msg_req.hc_data;
	msg->msg_rsp = &rsp;
	msg->msg_rsplen = sizeof(rsp);
	if (waitflag == M_NOWAIT)
		msg->msg_flags = MSGF_NOSLEEP;

	left = total - inhdr;

	/* Allocate additional gpadl_body structures if required */
	if (left > 0) {
		ncmds = MAX(1, left / HV_NPFNBODY + left % HV_NPFNBODY);
		bodylen = ncmds * VMBUS_MSG_DSIZE_MAX;
		body = malloc(bodylen, M_DEVBUF, M_ZERO | waitflag);
		if (body == NULL) {
			free(msg, M_DEVBUF, sizeof(*msg));
			free(frames, M_DEVBUF, atop(buflen) * sizeof(*frames));
			return (ENOMEM);
		}
	}

	*handle = atomic_inc_int_nv(&sc->sc_handle);

	hdr->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_CONN;
	hdr->chm_chanid = ch->ch_id;
	hdr->chm_gpadl = *handle;

	/* Single range for a contiguous buffer */
	hdr->chm_range_cnt = 1;
	hdr->chm_range_len = sizeof(struct vmbus_gpa_range) + total *
	    sizeof(uint64_t);
	hdr->chm_range.gpa_ofs = 0;
	hdr->chm_range.gpa_len = buflen;

	/* Fit as many pages as possible into the header */
	for (i = 0; i < inhdr; i++)
		hdr->chm_range.gpa_page[i] = frames[pfn++];

	for (i = 0; i < ncmds; i++) {
		cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body +
		    VMBUS_MSG_DSIZE_MAX * i);
		cmd->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_SUBCONN;
		cmd->chm_gpadl = *handle;
		last = MIN(left, HV_NPFNBODY);
		for (j = 0; j < last; j++)
			cmd->chm_gpa_page[j] = frames[pfn++];
		left -= last;
	}

	rv = hv_start(sc, msg);
	if (rv != 0) {
		DPRINTF("%s: GPADL_CONN failed\n", sc->sc_dev.dv_xname);
		goto out;
	}
	for (i = 0; i < ncmds; i++) {
		int cmdlen = sizeof(*cmd);
		cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body +
		    VMBUS_MSG_DSIZE_MAX * i);
		/* Last element can be short */
		if (i == ncmds - 1)
			cmdlen += last * sizeof(uint64_t);
		else
			cmdlen += HV_NPFNBODY * sizeof(uint64_t);
		rv = hv_cmd(sc, cmd, cmdlen, NULL, 0, waitflag | HCF_NOREPLY);
		if (rv != 0) {
			DPRINTF("%s: GPADL_SUBCONN (iteration %d/%d) failed "
			    "with %d\n", sc->sc_dev.dv_xname, i, ncmds, rv);
			goto out;
		}
	}
	rv = hv_reply(sc, msg);
	if (rv != 0)
		DPRINTF("%s: GPADL allocation failed with %d\n",
		    sc->sc_dev.dv_xname, rv);

 out:
	free(msg, M_DEVBUF, sizeof(*msg));
	free(frames, M_DEVBUF, total * sizeof(*frames));
	if (bodylen > 0)
		free(body, M_DEVBUF, bodylen);
	if (rv != 0)
		return (rv);

	KASSERT(*handle == rsp.chm_gpadl);

	return (0);
}

void
hv_handle_free(struct hv_channel *ch, uint32_t handle)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_gpadl_disconn cmd;
	struct vmbus_chanmsg_gpadl_disconn rsp;
	int rv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_DISCONN;
	cmd.chm_chanid = ch->ch_id;
	cmd.chm_gpadl = handle;

	rv = hv_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), cold ?
	    HCF_NOSLEEP : 0);
	if (rv)
		DPRINTF("%s: GPADL_DISCONN failed with %d\n",
		    sc->sc_dev.dv_xname, rv);
}

static int
hv_attach_print(void *aux, const char *name)
{
	struct hv_attach_args *aa = aux;

	if (name)
		printf("\"%s\" at %s", aa->aa_ident, name);

	return (UNCONF);
}

int
hv_attach_devices(struct hv_softc *sc)
{
	struct hv_dev *dv;
	struct hv_channel *ch;

	SLIST_INIT(&sc->sc_devs);
	mtx_init(&sc->sc_devlck, IPL_NET);

	TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
		if (ch->ch_state != HV_CHANSTATE_OFFERED)
			continue;
		if (!(ch->ch_flags & CHF_MONITOR))
			continue;
		dv = malloc(sizeof(*dv), M_DEVBUF, M_ZERO | M_NOWAIT);
		if (dv == NULL) {
			printf("%s: failed to allocate device object\n",
			    sc->sc_dev.dv_xname);
			return (-1);
		}
		dv->dv_aa.aa_parent = sc;
		dv->dv_aa.aa_type = &ch->ch_type;
		dv->dv_aa.aa_inst = &ch->ch_inst;
		dv->dv_aa.aa_ident = ch->ch_ident;
		dv->dv_aa.aa_chan = ch;
		dv->dv_aa.aa_dmat = sc->sc_dmat;
		mtx_enter(&sc->sc_devlck);
		SLIST_INSERT_HEAD(&sc->sc_devs, dv, dv_entry);
		mtx_leave(&sc->sc_devlck);
		config_found((struct device *)sc, &dv->dv_aa, hv_attach_print);
	}
	return (0);
}

void
hv_evcount_attach(struct hv_channel *ch, const char *name)
{
	struct hv_softc *sc = ch->ch_sc;

	evcount_attach(&ch->ch_evcnt, name, &sc->sc_idtvec);
}