/*-
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
 */

#include <sys/param.h>

/* Hyperv requires locked atomic operations */
#ifndef MULTIPROCESSOR
#define _HYPERVMPATOMICS
#define MULTIPROCESSOR
#endif
#include <sys/atomic.h>
#ifdef _HYPERVMPATOMICS
#undef MULTIPROCESSOR
#undef _HYPERVMPATOMICS
#endif

#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timetc.h>
#include <sys/task.h>
#include <sys/syslog.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>

#include <uvm/uvm_extern.h>

#include <machine/i82489var.h>

#include <dev/pv/pvvar.h>
#include <dev/pv/pvreg.h>
#include <dev/pv/hypervreg.h>
#include <dev/pv/hypervvar.h>

/* Command submission flags */
#define HCF_SLEEPOK	0x0001	/* M_WAITOK */
#define HCF_NOSLEEP	0x0002	/* M_NOWAIT */
#define HCF_NOREPLY	0x0004

struct hv_softc *hv_sc;

int	hv_match(struct device *, void *, void *);
void	hv_attach(struct device *, struct device *, void *);
void	hv_set_version(struct hv_softc *);
u_int	hv_gettime(struct timecounter *);
int	hv_init_hypercall(struct hv_softc *);
uint64_t hv_hypercall(struct hv_softc *, uint64_t, void *, void *);
int	hv_init_interrupts(struct hv_softc *);
int	hv_init_synic(struct hv_softc *);
int	hv_cmd(struct hv_softc *, void *, size_t, void *, size_t, int);
int	hv_start(struct hv_softc *, struct hv_msg *);
int	hv_reply(struct hv_softc *, struct hv_msg *);
void	hv_wait(struct hv_softc *, int (*done)(struct hv_softc *,
	    struct hv_msg *), struct hv_msg *, void *, const char *);
uint16_t hv_intr_signal(struct hv_softc *, void *);
void	hv_intr(void);
void	hv_event_intr(struct hv_softc *);
void	hv_message_intr(struct hv_softc *);
int	hv_vmbus_connect(struct hv_softc *);
void	hv_channel_response(struct hv_softc *, struct vmbus_chanmsg_hdr *);
void	hv_channel_offer(struct hv_softc *, struct vmbus_chanmsg_hdr *);
void	hv_channel_rescind(struct hv_softc *, struct vmbus_chanmsg_hdr *);
void	hv_channel_delivered(struct hv_softc *, struct vmbus_chanmsg_hdr *);
int	hv_channel_scan(struct hv_softc *);
void	hv_process_offer(struct hv_softc *, struct hv_offer *);
struct hv_channel *
	hv_channel_lookup(struct hv_softc *, uint32_t);
int	hv_channel_ring_create(struct hv_channel *, uint32_t);
void	hv_channel_ring_destroy(struct hv_channel *);
void	hv_channel_pause(struct hv_channel *);
uint	hv_channel_unpause(struct hv_channel *);
uint	hv_channel_ready(struct hv_channel *);
extern void hv_attach_icdevs(struct hv_softc *);
int	hv_attach_devices(struct hv_softc *);

struct {
	int		  hmd_response;
	int		  hmd_request;
	void		(*hmd_handler)(struct hv_softc *,
			    struct vmbus_chanmsg_hdr *);
} hv_msg_dispatch[] = {
	{ 0,					0, NULL },
	{ VMBUS_CHANMSG_CHOFFER,		0, hv_channel_offer },
	{ VMBUS_CHANMSG_CHRESCIND,		0, hv_channel_rescind },
	{ VMBUS_CHANMSG_CHREQUEST,		VMBUS_CHANMSG_CHOFFER,
	  NULL },
	{ VMBUS_CHANMSG_CHOFFER_DONE,		0,
	  hv_channel_delivered },
	{ VMBUS_CHANMSG_CHOPEN,			0, NULL },
	{ VMBUS_CHANMSG_CHOPEN_RESP,		VMBUS_CHANMSG_CHOPEN,
	  hv_channel_response },
	{ VMBUS_CHANMSG_CHCLOSE,		0, NULL },
	{ VMBUS_CHANMSG_GPADL_CONN,		0, NULL },
	{ VMBUS_CHANMSG_GPADL_SUBCONN,		0, NULL },
	{ VMBUS_CHANMSG_GPADL_CONNRESP,		VMBUS_CHANMSG_GPADL_CONN,
	  hv_channel_response },
	{ VMBUS_CHANMSG_GPADL_DISCONN,		0, NULL },
	{ VMBUS_CHANMSG_GPADL_DISCONNRESP,	VMBUS_CHANMSG_GPADL_DISCONN,
	  hv_channel_response },
	{ VMBUS_CHANMSG_CHFREE,			0, NULL },
	{ VMBUS_CHANMSG_CONNECT,		0, NULL },
	{ VMBUS_CHANMSG_CONNECT_RESP,		VMBUS_CHANMSG_CONNECT,
	  hv_channel_response },
	{ VMBUS_CHANMSG_DISCONNECT,		0, NULL },
};

struct timecounter hv_timecounter = {
	.tc_get_timecount = hv_gettime,
	.tc_counter_mask = 0xffffffff,
	.tc_frequency = 10000000,
	.tc_name = "hyperv",
	.tc_quality = 9001,
	.tc_priv = NULL,
	.tc_user = 0,
};

struct cfdriver hyperv_cd = {
	NULL, "hyperv", DV_DULL
};

const struct cfattach hyperv_ca = {
	sizeof(struct hv_softc), hv_match, hv_attach
};

const struct hv_guid hv_guid_network = {
	{ 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46,
	  0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e }
};

const struct hv_guid hv_guid_ide = {
	{ 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
	  0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 }
};

const struct hv_guid hv_guid_scsi = {
	{ 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
	  0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f }
};

const struct hv_guid hv_guid_shutdown = {
	{ 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49,
	  0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb }
};

const struct hv_guid hv_guid_timesync = {
	{ 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
	  0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf }
};

const struct hv_guid hv_guid_heartbeat = {
	{ 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
	  0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d }
};

const struct hv_guid hv_guid_kvp = {
	{ 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
	  0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6 }
};

#ifdef HYPERV_DEBUG
const struct hv_guid hv_guid_vss = {
	{ 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42,
	  0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 }
};

const struct hv_guid hv_guid_dynmem = {
	{ 0xdc, 0x74, 0x50, 0x52, 0x85, 0x89, 0xe2, 0x46,
	  0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 }
};

const struct hv_guid hv_guid_mouse = {
	{ 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c,
	  0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a }
};

const struct hv_guid hv_guid_kbd = {
	{ 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48,
	  0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 }
};

const struct hv_guid hv_guid_video = {
	{ 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a,
	  0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 }
};

const struct hv_guid hv_guid_fc = {
	{ 0x4a, 0xcc, 0x9b, 0x2f, 0x69, 0x00, 0xf3, 0x4a,
	  0xb7, 0x6b, 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda }
};

const struct hv_guid hv_guid_fcopy = {
	{ 0xe3, 0x4b, 0xd1, 0x34, 0xe4, 0xde, 0xc8, 0x41,
	  0x9a, 0xe7, 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92 }
};

const struct hv_guid hv_guid_pcie = {
	{ 0x1d, 0xf6, 0xc4, 0x44, 0x44, 0x44, 0x00, 0x44,
	  0x9d, 0x52, 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f }
};

const struct hv_guid hv_guid_netdir = {
	{ 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b,
	  0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 }
};

const struct hv_guid hv_guid_rdesktop = {
	{ 0xf4, 0xac, 0x6a, 0x27, 0x15, 0xac, 0x6c, 0x42,
	  0x98, 0xdd, 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe }
};

/* Automatic Virtual Machine Activation (AVMA) Services */
const struct hv_guid hv_guid_avma1 = {
	{ 0x55, 0xb2, 0x87, 0x44, 0x8c, 0xb8, 0x3f, 0x40,
	  0xbb, 0x51, 0xd1, 0xf6, 0x9c, 0xf1, 0x7f, 0x87 }
};

const struct hv_guid hv_guid_avma2 = {
	{ 0xf4, 0xba, 0x75, 0x33, 0x15, 0x9e, 0x30, 0x4b,
	  0xb7, 0x65, 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b }
};

const struct hv_guid hv_guid_avma3 = {
	{ 0xa0, 0x1f, 0x22, 0x99, 0xad, 0x24, 0xe2, 0x11,
	  0xbe, 0x98, 0x00, 0x1a, 0xa0, 0x1b, 0xbf, 0x6e }
};

const struct hv_guid hv_guid_avma4 = {
	{ 0x16, 0x57, 0xe6, 0xf8, 0xb3, 0x3c, 0x06, 0x4a,
	  0x9a, 0x60, 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5 }
};
#endif	/* HYPERV_DEBUG */

int
hv_match(struct device *parent, void *match, void *aux)
{
	struct pv_attach_args *pva = aux;
	struct pvbus_hv *hv = &pva->pva_hv[PVBUS_HYPERV];

	if ((hv->hv_major == 0 && hv->hv_minor == 0) || hv->hv_base == 0)
		return (0);

	return (1);
}

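/*
 * Attach sequence: publish the guest ID, set up the hypercall page,
 * bring up the SynIC on the boot CPU, negotiate a VMBus protocol
 * version with the host, scan the offered channels and finally attach
 * the in-kernel IC services and the devices with external drivers.
 */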
void
hv_attach(struct device *parent, struct device *self, void *aux)
{
	struct hv_softc *sc = (struct hv_softc *)self;
	struct pv_attach_args *pva = aux;
	struct pvbus_hv *hv = &pva->pva_hv[PVBUS_HYPERV];

	sc->sc_pvbus = hv;
	sc->sc_dmat = pva->pva_dmat;

	if (!(hv->hv_features & CPUID_HV_MSR_HYPERCALL) ||
	    !(hv->hv_features & CPUID_HV_MSR_SYNIC)) {
		printf(": not functional\n");
		return;
	}

	DPRINTF("\n");

	hv_set_version(sc);

	if (hv->hv_features & CPUID_HV_MSR_TIME_REFCNT)
		tc_init(&hv_timecounter);

	if (hv_init_hypercall(sc))
		return;

	/* Wire it up to the global */
	hv_sc = sc;

	if (hv_init_interrupts(sc))
		return;

	if (hv_vmbus_connect(sc))
		return;

	DPRINTF("%s", sc->sc_dev.dv_xname);
	printf(": protocol %d.%d, features %#x\n",
	    VMBUS_VERSION_MAJOR(sc->sc_proto),
	    VMBUS_VERSION_MINOR(sc->sc_proto),
	    hv->hv_features);

	if (hv_channel_scan(sc))
		return;

	/* Attach heartbeat, KVP and other "internal" services */
	hv_attach_icdevs(sc);

	/* Attach devices with external drivers */
	hv_attach_devices(sc);
}

void
hv_set_version(struct hv_softc *sc)
{
	uint64_t ver;

	/* OpenBSD build date */
	ver = MSR_HV_GUESTID_OSTYPE_OPENBSD;
	ver |= (uint64_t)OpenBSD << MSR_HV_GUESTID_VERSION_SHIFT;
	wrmsr(MSR_HV_GUEST_OS_ID, ver);
}

u_int
hv_gettime(struct timecounter *tc)
{
	u_int now = rdmsr(MSR_HV_TIME_REF_COUNT);

	return (now);
}

void
hv_delay(int usecs)
{
	uint64_t interval, start;

	/* 10 MHz fixed frequency */
	interval = (uint64_t)usecs * 10;
	start = rdmsr(MSR_HV_TIME_REF_COUNT);
	while (rdmsr(MSR_HV_TIME_REF_COUNT) - start < interval)
		CPU_BUSY_CYCLE();
}

int
hv_init_hypercall(struct hv_softc *sc)
{
	extern void *hv_hypercall_page;
	uint64_t msr;
	paddr_t pa;

	sc->sc_hc = &hv_hypercall_page;

	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_hc, &pa)) {
		printf(": hypercall page PA extraction failed\n");
		return (-1);
	}

	msr = (atop(pa) << MSR_HV_HYPERCALL_PGSHIFT) | MSR_HV_HYPERCALL_ENABLE;
	wrmsr(MSR_HV_HYPERCALL, msr);

	if (!(rdmsr(MSR_HV_HYPERCALL) & MSR_HV_HYPERCALL_ENABLE)) {
		printf(": failed to set up a hypercall page\n");
		return (-1);
	}

	return (0);
}

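/*
 * Hypercall ABI as used below: on amd64 the control word is passed in
 * %rcx, the input page PA in %rdx and the output page PA in %r8, with
 * the status returned in %rax.  On i386 the 64-bit control and status
 * values are split across %edx:%eax and the PAs are passed in %ecx
 * (input) and %esi (output).  Both buffers are passed by physical
 * address, hence the pmap_extract() calls.
 */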
uint64_t
hv_hypercall(struct hv_softc *sc, uint64_t control, void *input,
    void *output)
{
	paddr_t input_pa = 0, output_pa = 0;
	uint64_t status = 0;

	if (input != NULL &&
	    pmap_extract(pmap_kernel(), (vaddr_t)input, &input_pa) == 0) {
		printf("%s: hypercall input PA extraction failed\n",
		    sc->sc_dev.dv_xname);
		return (~HYPERCALL_STATUS_SUCCESS);
	}

	if (output != NULL &&
	    pmap_extract(pmap_kernel(), (vaddr_t)output, &output_pa) == 0) {
		printf("%s: hypercall output PA extraction failed\n",
		    sc->sc_dev.dv_xname);
		return (~HYPERCALL_STATUS_SUCCESS);
	}

#ifdef __amd64__
	__asm__ volatile ("mov %0, %%r8" : : "r" (output_pa) : "r8");
	__asm__ volatile ("call *%3" : "=a" (status) : "c" (control),
	    "d" (input_pa), "m" (sc->sc_hc));
#else  /* __i386__ */
	{
		uint32_t control_hi = control >> 32;
		uint32_t control_lo = control & 0xffffffff;
		uint32_t status_hi = 1;
		uint32_t status_lo = 1;

		__asm__ volatile ("call *%8" :
		    "=d" (status_hi), "=a"(status_lo) :
		    "d" (control_hi), "a" (control_lo),
		    "b" (0), "c" (input_pa), "D" (0), "S" (output_pa),
		    "m" (sc->sc_hc));

		status = status_lo | ((uint64_t)status_hi << 32);
	}
#endif	/* __amd64__ */

	return (status);
}

int
hv_init_interrupts(struct hv_softc *sc)
{
	struct cpu_info *ci = curcpu();
	int cpu = CPU_INFO_UNIT(ci);

	sc->sc_idtvec = LAPIC_HYPERV_VECTOR;

	TAILQ_INIT(&sc->sc_reqs);
	mtx_init(&sc->sc_reqlck, IPL_NET);

	TAILQ_INIT(&sc->sc_rsps);
	mtx_init(&sc->sc_rsplck, IPL_NET);

	sc->sc_simp[cpu] = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_simp[cpu] == NULL) {
		printf(": failed to allocate SIMP\n");
		return (-1);
	}

	sc->sc_siep[cpu] = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_siep[cpu] == NULL) {
		printf(": failed to allocate SIEP\n");
		km_free(sc->sc_simp[cpu], PAGE_SIZE, &kv_any, &kp_zero);
		return (-1);
	}

	sc->sc_proto = VMBUS_VERSION_WS2008;

	return (hv_init_synic(sc));
}

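/*
 * The SynIC (synthetic interrupt controller) uses two per-vCPU pages:
 * the SIMP, where the hypervisor deposits VMBus messages, and the
 * SIEFP, where it posts event flags.  Both are handed over by writing
 * their PFNs into the corresponding MSRs; the message SINT is then
 * routed to our IDT vector with auto-EOI and the global enable bit is
 * set in SCONTROL.
 */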
int
hv_init_synic(struct hv_softc *sc)
{
	struct cpu_info *ci = curcpu();
	int cpu = CPU_INFO_UNIT(ci);
	uint64_t simp, siefp, sctrl, sint;
	paddr_t pa;

	/*
	 * Setup the Synic's message page
	 */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_simp[cpu], &pa)) {
		printf(": SIMP PA extraction failed\n");
		return (-1);
	}
	simp = rdmsr(MSR_HV_SIMP);
	simp &= (1 << MSR_HV_SIMP_PGSHIFT) - 1;
	simp |= (atop(pa) << MSR_HV_SIMP_PGSHIFT);
	simp |= MSR_HV_SIMP_ENABLE;
	wrmsr(MSR_HV_SIMP, simp);

	/*
	 * Setup the Synic's event page
	 */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_siep[cpu], &pa)) {
		printf(": SIEP PA extraction failed\n");
		return (-1);
	}
	siefp = rdmsr(MSR_HV_SIEFP);
	siefp &= (1 << MSR_HV_SIEFP_PGSHIFT) - 1;
	siefp |= (atop(pa) << MSR_HV_SIEFP_PGSHIFT);
	siefp |= MSR_HV_SIEFP_ENABLE;
	wrmsr(MSR_HV_SIEFP, siefp);

	/*
	 * Configure and unmask SINT for message and event flags
	 */
	sint = rdmsr(MSR_HV_SINT0 + VMBUS_SINT_MESSAGE);
	sint = sc->sc_idtvec | MSR_HV_SINT_AUTOEOI |
	    (sint & MSR_HV_SINT_RSVD_MASK);
	wrmsr(MSR_HV_SINT0 + VMBUS_SINT_MESSAGE, sint);

	/* Enable the global synic bit */
	sctrl = rdmsr(MSR_HV_SCONTROL);
	sctrl |= MSR_HV_SCTRL_ENABLE;
	wrmsr(MSR_HV_SCONTROL, sctrl);

	sc->sc_vcpus[cpu] = rdmsr(MSR_HV_VP_INDEX);

	DPRINTF("vcpu%u: SIMP %#llx SIEFP %#llx SCTRL %#llx\n",
	    sc->sc_vcpus[cpu], simp, siefp, sctrl);

	return (0);
}

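/*
 * Submit a command to the host and, unless HCF_NOREPLY is given, wait
 * for the matching response.  The exchange is asynchronous: hv_start()
 * queues the request on sc_reqs and posts it with the POST_MESSAGE
 * hypercall, hv_channel_response() later matches the host's reply by
 * request type and moves it to sc_rsps, and hv_reply() collects it.
 */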
int
hv_cmd(struct hv_softc *sc, void *cmd, size_t cmdlen, void *rsp,
    size_t rsplen, int flags)
{
	struct hv_msg msg;
	int rv;

	if (cmdlen > VMBUS_MSG_DSIZE_MAX) {
		printf("%s: payload too large (%lu)\n", sc->sc_dev.dv_xname,
		    cmdlen);
		return (EMSGSIZE);
	}

	memset(&msg, 0, sizeof(msg));

	msg.msg_req.hc_dsize = cmdlen;
	memcpy(msg.msg_req.hc_data, cmd, cmdlen);

	if (!(flags & HCF_NOREPLY)) {
		msg.msg_rsp = rsp;
		msg.msg_rsplen = rsplen;
	} else
		msg.msg_flags |= MSGF_NOQUEUE;

	if (flags & HCF_NOSLEEP)
		msg.msg_flags |= MSGF_NOSLEEP;

	if ((rv = hv_start(sc, &msg)) != 0)
		return (rv);
	return (hv_reply(sc, &msg));
}

int
hv_start(struct hv_softc *sc, struct hv_msg *msg)
{
	const int delays[] = { 100, 100, 100, 500, 500, 5000, 5000, 5000 };
	const char *wchan = "hvstart";
	uint16_t status;
	int i, s;

	msg->msg_req.hc_connid = VMBUS_CONNID_MESSAGE;
	msg->msg_req.hc_msgtype = 1;

	if (!(msg->msg_flags & MSGF_NOQUEUE)) {
		mtx_enter(&sc->sc_reqlck);
		TAILQ_INSERT_TAIL(&sc->sc_reqs, msg, msg_entry);
		mtx_leave(&sc->sc_reqlck);
	}

	for (i = 0; i < nitems(delays); i++) {
		status = hv_hypercall(sc, HYPERCALL_POST_MESSAGE,
		    &msg->msg_req, NULL);
		if (status == HYPERCALL_STATUS_SUCCESS)
			break;
		if (msg->msg_flags & MSGF_NOSLEEP) {
			delay(delays[i]);
			s = splnet();
			hv_intr();
			splx(s);
		} else {
			tsleep_nsec(wchan, PRIBIO, wchan,
			    USEC_TO_NSEC(delays[i]));
		}
	}
	if (status != 0) {
		printf("%s: posting vmbus message failed with %d\n",
		    sc->sc_dev.dv_xname, status);
		if (!(msg->msg_flags & MSGF_NOQUEUE)) {
			mtx_enter(&sc->sc_reqlck);
			TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry);
			mtx_leave(&sc->sc_reqlck);
		}
		return (EIO);
	}

	return (0);
}

static int
hv_reply_done(struct hv_softc *sc, struct hv_msg *msg)
{
	struct hv_msg *m;

	mtx_enter(&sc->sc_rsplck);
	TAILQ_FOREACH(m, &sc->sc_rsps, msg_entry) {
		if (m == msg) {
			mtx_leave(&sc->sc_rsplck);
			return (1);
		}
	}
	mtx_leave(&sc->sc_rsplck);
	return (0);
}

int
hv_reply(struct hv_softc *sc, struct hv_msg *msg)
{
	if (msg->msg_flags & MSGF_NOQUEUE)
		return (0);

	hv_wait(sc, hv_reply_done, msg, msg, "hvreply");

	mtx_enter(&sc->sc_rsplck);
	TAILQ_REMOVE(&sc->sc_rsps, msg, msg_entry);
	mtx_leave(&sc->sc_rsplck);

	return (0);
}

void
hv_wait(struct hv_softc *sc, int (*cond)(struct hv_softc *, struct hv_msg *),
    struct hv_msg *msg, void *wchan, const char *wmsg)
{
	int s;

	KASSERT(cold ? msg->msg_flags & MSGF_NOSLEEP : 1);

	while (!cond(sc, msg)) {
		if (msg->msg_flags & MSGF_NOSLEEP) {
			delay(1000);
			s = splnet();
			hv_intr();
			splx(s);
		} else {
			tsleep_nsec(wchan, PRIBIO, wmsg ? wmsg : "hvwait",
			    USEC_TO_NSEC(1000));
		}
	}
}

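/*
 * Ring the host's doorbell: a SIGNAL_EVENT hypercall with the
 * channel's monitor parameters.
 */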
uint16_t
hv_intr_signal(struct hv_softc *sc, void *con)
{
	uint64_t status;

	status = hv_hypercall(sc, HYPERCALL_SIGNAL_EVENT, con, NULL);
	return ((uint16_t)status);
}

void
hv_intr(void)
{
	struct hv_softc *sc = hv_sc;

	hv_event_intr(sc);
	hv_message_intr(sc);
}

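/*
 * Scan for channels with pending events.  Hosts speaking the WS2008
 * or Win7 protocol only flag bit 0 and expect the guest to look at
 * its half of the shared event page, while newer hosts set the
 * channel id bit directly in the SynIC event flags.
 */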
void
hv_event_intr(struct hv_softc *sc)
{
	struct vmbus_evtflags *evt;
	struct cpu_info *ci = curcpu();
	int cpu = CPU_INFO_UNIT(ci);
	int bit, row, maxrow, chanid;
	struct hv_channel *ch;
	u_long *revents, pending;

	evt = (struct vmbus_evtflags *)sc->sc_siep[cpu] +
	    VMBUS_SINT_MESSAGE;
	if ((sc->sc_proto == VMBUS_VERSION_WS2008) ||
	    (sc->sc_proto == VMBUS_VERSION_WIN7)) {
		if (!test_bit(0, &evt->evt_flags[0]))
			return;
		clear_bit(0, &evt->evt_flags[0]);
		maxrow = VMBUS_CHAN_MAX_COMPAT / VMBUS_EVTFLAG_LEN;
		/*
		 * Legacy hosts only set bit 0; the per-channel flags
		 * live in the receive half of the shared event page.
		 */
		revents = sc->sc_revents;
	} else {
		maxrow = nitems(evt->evt_flags);
		/*
		 * On Host with Win8 or above, the event page can be
		 * checked directly to get the id of the channel
		 * that has the pending interrupt.
		 */
		revents = &evt->evt_flags[0];
	}

	for (row = 0; row < maxrow; row++) {
		if (revents[row] == 0)
			continue;
		pending = atomic_swap_ulong(&revents[row], 0);
		for (bit = 0; pending > 0; pending >>= 1, bit++) {
			if ((pending & 1) == 0)
				continue;
			chanid = (row * LONG_BIT) + bit;
			/* vmbus channel protocol message */
			if (chanid == 0)
				continue;
			ch = hv_channel_lookup(sc, chanid);
			if (ch == NULL) {
				printf("%s: unhandled event on %d\n",
				    sc->sc_dev.dv_xname, chanid);
				continue;
			}
			if (ch->ch_state != HV_CHANSTATE_OPENED) {
				printf("%s: channel %d is not active\n",
				    sc->sc_dev.dv_xname, chanid);
				continue;
			}
			ch->ch_evcnt.ec_count++;
			hv_channel_schedule(ch);
		}
	}
}

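/*
 * Drain the per-vCPU message slot.  Each message carries a channel
 * protocol header that is dispatched through hv_msg_dispatch[];
 * writing MSR_HV_EOM asks the hypervisor to deliver the next message
 * if it has flagged more as pending.
 */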
void
hv_message_intr(struct hv_softc *sc)
{
	struct vmbus_message *msg;
	struct vmbus_chanmsg_hdr *hdr;
	struct cpu_info *ci = curcpu();
	int cpu = CPU_INFO_UNIT(ci);

	for (;;) {
		msg = (struct vmbus_message *)sc->sc_simp[cpu] +
		    VMBUS_SINT_MESSAGE;
		if (msg->msg_type == VMBUS_MSGTYPE_NONE)
			break;

		hdr = (struct vmbus_chanmsg_hdr *)msg->msg_data;
		if (hdr->chm_type >= VMBUS_CHANMSG_COUNT) {
			printf("%s: unhandled message type %u flags %#x\n",
			    sc->sc_dev.dv_xname, hdr->chm_type,
			    msg->msg_flags);
			goto skip;
		}
		if (hv_msg_dispatch[hdr->chm_type].hmd_handler)
			hv_msg_dispatch[hdr->chm_type].hmd_handler(sc, hdr);
		else
			printf("%s: unhandled message type %u\n",
			    sc->sc_dev.dv_xname, hdr->chm_type);
 skip:
		msg->msg_type = VMBUS_MSGTYPE_NONE;
		virtio_membar_sync();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING)
			wrmsr(MSR_HV_EOM, 0);
	}
}

void
hv_channel_response(struct hv_softc *sc, struct vmbus_chanmsg_hdr *rsphdr)
{
	struct hv_msg *msg;
	struct vmbus_chanmsg_hdr *reqhdr;
	int req;

	req = hv_msg_dispatch[rsphdr->chm_type].hmd_request;
	mtx_enter(&sc->sc_reqlck);
	TAILQ_FOREACH(msg, &sc->sc_reqs, msg_entry) {
		reqhdr = (struct vmbus_chanmsg_hdr *)&msg->msg_req.hc_data;
		if (reqhdr->chm_type == req) {
			TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry);
			break;
		}
	}
	mtx_leave(&sc->sc_reqlck);
	if (msg != NULL) {
		memcpy(msg->msg_rsp, rsphdr, msg->msg_rsplen);
		mtx_enter(&sc->sc_rsplck);
		TAILQ_INSERT_TAIL(&sc->sc_rsps, msg, msg_entry);
		mtx_leave(&sc->sc_rsplck);
		wakeup(msg);
	}
}

void
hv_channel_offer(struct hv_softc *sc, struct vmbus_chanmsg_hdr *hdr)
{
	struct hv_offer *co;

	co = malloc(sizeof(*co), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (co == NULL) {
		printf("%s: failed to allocate an offer object\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	memcpy(&co->co_chan, hdr, sizeof(co->co_chan));

	mtx_enter(&sc->sc_offerlck);
	SIMPLEQ_INSERT_TAIL(&sc->sc_offers, co, co_entry);
	mtx_leave(&sc->sc_offerlck);
}

void
hv_channel_rescind(struct hv_softc *sc, struct vmbus_chanmsg_hdr *hdr)
{
	const struct vmbus_chanmsg_chrescind *cmd;

	cmd = (const struct vmbus_chanmsg_chrescind *)hdr;
	printf("%s: revoking channel %u\n", sc->sc_dev.dv_xname,
	    cmd->chm_chanid);
}

void
hv_channel_delivered(struct hv_softc *sc, struct vmbus_chanmsg_hdr *hdr)
{
	atomic_setbits_int(&sc->sc_flags, HSF_OFFERS_DELIVERED);
	wakeup(&sc->sc_offers);
}

int
hv_vmbus_connect(struct hv_softc *sc)
{
	const uint32_t versions[] = {
		VMBUS_VERSION_WIN10,
		VMBUS_VERSION_WIN8_1, VMBUS_VERSION_WIN8,
		VMBUS_VERSION_WIN7, VMBUS_VERSION_WS2008
	};
	struct vmbus_chanmsg_connect cmd;
	struct vmbus_chanmsg_connect_resp rsp;
	paddr_t epa, mpa1, mpa2;
	int i;

	sc->sc_events = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_events == NULL) {
		printf(": failed to allocate channel port events page\n");
		goto errout;
	}
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_events, &epa)) {
		printf(": channel port events page PA extraction failed\n");
		goto errout;
	}

	sc->sc_wevents = (u_long *)sc->sc_events;
	sc->sc_revents = (u_long *)((caddr_t)sc->sc_events + (PAGE_SIZE >> 1));

	sc->sc_monitor[0] = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_monitor[0] == NULL) {
		printf(": failed to allocate monitor page 1\n");
		goto errout;
	}
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_monitor[0], &mpa1)) {
		printf(": monitor page 1 PA extraction failed\n");
		goto errout;
	}

	sc->sc_monitor[1] = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_monitor[1] == NULL) {
		printf(": failed to allocate monitor page 2\n");
		goto errout;
	}
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_monitor[1], &mpa2)) {
		printf(": monitor page 2 PA extraction failed\n");
		goto errout;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CONNECT;
	cmd.chm_evtflags = (uint64_t)epa;
	cmd.chm_mnf1 = (uint64_t)mpa1;
	cmd.chm_mnf2 = (uint64_t)mpa2;

	memset(&rsp, 0, sizeof(rsp));

	for (i = 0; i < nitems(versions); i++) {
		cmd.chm_ver = versions[i];
		if (hv_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp),
		    HCF_NOSLEEP)) {
			DPRINTF("%s: CONNECT failed\n",
			    sc->sc_dev.dv_xname);
			goto errout;
		}
		if (rsp.chm_done) {
			sc->sc_flags |= HSF_CONNECTED;
			sc->sc_proto = versions[i];
			sc->sc_handle = VMBUS_GPADL_START;
			break;
		}
	}
	if (i == nitems(versions)) {
		printf("%s: failed to negotiate protocol version\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}

	return (0);

 errout:
	if (sc->sc_events) {
		km_free(sc->sc_events, PAGE_SIZE, &kv_any, &kp_zero);
		sc->sc_events = NULL;
		sc->sc_wevents = NULL;
		sc->sc_revents = NULL;
	}
	if (sc->sc_monitor[0]) {
		km_free(sc->sc_monitor[0], PAGE_SIZE, &kv_any, &kp_zero);
		sc->sc_monitor[0] = NULL;
	}
	if (sc->sc_monitor[1]) {
		km_free(sc->sc_monitor[1], PAGE_SIZE, &kv_any, &kp_zero);
		sc->sc_monitor[1] = NULL;
	}
	return (-1);
}

#ifdef HYPERV_DEBUG
static inline char *
guidprint(struct hv_guid *a)
{
	/* 3     0  5  4 7 6  8 9  10        15 */
	/* 33221100-5544-7766-9988-FFEEDDCCBBAA */
	static char buf[16 * 2 + 4 + 1];
	int i, j = 0;

	for (i = 3; i != -1; i -= 1, j += 2)
		snprintf(&buf[j], 3, "%02x", (uint8_t)a->data[i]);
	buf[j++] = '-';
	for (i = 5; i != 3; i -= 1, j += 2)
		snprintf(&buf[j], 3, "%02x", (uint8_t)a->data[i]);
	buf[j++] = '-';
	for (i = 7; i != 5; i -= 1, j += 2)
		snprintf(&buf[j], 3, "%02x", (uint8_t)a->data[i]);
	buf[j++] = '-';
	for (i = 8; i < 10; i += 1, j += 2)
		snprintf(&buf[j], 3, "%02x", (uint8_t)a->data[i]);
	buf[j++] = '-';
	for (i = 10; i < 16; i += 1, j += 2)
		snprintf(&buf[j], 3, "%02x", (uint8_t)a->data[i]);
	return (&buf[0]);
}
#endif	/* HYPERV_DEBUG */

void
hv_guid_sprint(struct hv_guid *guid, char *str, size_t size)
{
	const struct {
		const struct hv_guid	*guid;
		const char		*ident;
	} map[] = {
		{ &hv_guid_network,	"network" },
		{ &hv_guid_ide,		"ide" },
		{ &hv_guid_scsi,	"scsi" },
		{ &hv_guid_shutdown,	"shutdown" },
		{ &hv_guid_timesync,	"timesync" },
		{ &hv_guid_heartbeat,	"heartbeat" },
		{ &hv_guid_kvp,		"kvp" },
#ifdef HYPERV_DEBUG
		{ &hv_guid_vss,		"vss" },
		{ &hv_guid_dynmem,	"dynamic-memory" },
		{ &hv_guid_mouse,	"mouse" },
		{ &hv_guid_kbd,		"keyboard" },
		{ &hv_guid_video,	"video" },
		{ &hv_guid_fc,		"fiber-channel" },
		{ &hv_guid_fcopy,	"file-copy" },
		{ &hv_guid_pcie,	"pcie-passthrough" },
		{ &hv_guid_netdir,	"network-direct" },
		{ &hv_guid_rdesktop,	"remote-desktop" },
		{ &hv_guid_avma1,	"avma-1" },
		{ &hv_guid_avma2,	"avma-2" },
		{ &hv_guid_avma3,	"avma-3" },
		{ &hv_guid_avma4,	"avma-4" },
#endif
	};
	int i;

	for (i = 0; i < nitems(map); i++) {
		if (memcmp(guid, map[i].guid, sizeof(*guid)) == 0) {
			strlcpy(str, map[i].ident, size);
			return;
		}
	}
#ifdef HYPERV_DEBUG
	strlcpy(str, guidprint(guid), size);
#endif
}

static int
hv_channel_scan_done(struct hv_softc *sc, struct hv_msg *msg __unused)
{
	return (sc->sc_flags & HSF_OFFERS_DELIVERED);
}

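/*
 * Ask the host to replay all channel offers (CHREQUEST).  The offers
 * arrive as individual CHOFFER messages that hv_channel_offer()
 * queues on sc_offers; CHOFFER_DONE sets HSF_OFFERS_DELIVERED, after
 * which the accumulated queue is drained into the channel list.
 */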
int
hv_channel_scan(struct hv_softc *sc)
{
	struct vmbus_chanmsg_hdr hdr;
	struct vmbus_chanmsg_choffer rsp;
	struct hv_offer *co;

	SIMPLEQ_INIT(&sc->sc_offers);
	mtx_init(&sc->sc_offerlck, IPL_NET);

	memset(&hdr, 0, sizeof(hdr));
	hdr.chm_type = VMBUS_CHANMSG_CHREQUEST;

	if (hv_cmd(sc, &hdr, sizeof(hdr), &rsp, sizeof(rsp),
	    HCF_NOSLEEP | HCF_NOREPLY)) {
		DPRINTF("%s: CHREQUEST failed\n", sc->sc_dev.dv_xname);
		return (-1);
	}

	hv_wait(sc, hv_channel_scan_done, (struct hv_msg *)&hdr,
	    &sc->sc_offers, "hvscan");

	TAILQ_INIT(&sc->sc_channels);
	mtx_init(&sc->sc_channelck, IPL_NET);

	mtx_enter(&sc->sc_offerlck);
	while (!SIMPLEQ_EMPTY(&sc->sc_offers)) {
		co = SIMPLEQ_FIRST(&sc->sc_offers);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_offers, co_entry);
		mtx_leave(&sc->sc_offerlck);

		hv_process_offer(sc, co);
		free(co, M_DEVBUF, sizeof(*co));

		mtx_enter(&sc->sc_offerlck);
	}
	mtx_leave(&sc->sc_offerlck);

	return (0);
}

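/*
 * Turn an offer into a hv_channel object.  A primary channel is
 * identified by its type/instance GUID pair; an offer matching an
 * already known channel must be a subchannel (chm_subidx != 0),
 * which this driver takes note of but does not use.
 */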
void
hv_process_offer(struct hv_softc *sc, struct hv_offer *co)
{
	struct hv_channel *ch, *nch;

	nch = malloc(sizeof(*nch), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (nch == NULL) {
		printf("%s: failed to allocate memory for the channel\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	nch->ch_sc = sc;
	hv_guid_sprint(&co->co_chan.chm_chtype, nch->ch_ident,
	    sizeof(nch->ch_ident));

	/*
	 * By default we setup state to enable batched reading.
	 * A specific service can choose to disable this prior
	 * to opening the channel.
	 */
	nch->ch_flags |= CHF_BATCHED;

	KASSERT((((vaddr_t)&nch->ch_monprm) & 0x7) == 0);
	memset(&nch->ch_monprm, 0, sizeof(nch->ch_monprm));
	nch->ch_monprm.mp_connid = VMBUS_CONNID_EVENT;

	if (sc->sc_proto != VMBUS_VERSION_WS2008)
		nch->ch_monprm.mp_connid = co->co_chan.chm_connid;

	if (co->co_chan.chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) {
		nch->ch_mgroup = co->co_chan.chm_montrig / VMBUS_MONTRIG_LEN;
		nch->ch_mindex = co->co_chan.chm_montrig % VMBUS_MONTRIG_LEN;
		nch->ch_flags |= CHF_MONITOR;
	}

	nch->ch_id = co->co_chan.chm_chanid;

	memcpy(&nch->ch_type, &co->co_chan.chm_chtype, sizeof(ch->ch_type));
	memcpy(&nch->ch_inst, &co->co_chan.chm_chinst, sizeof(ch->ch_inst));

	mtx_enter(&sc->sc_channelck);
	TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
		if (!memcmp(&ch->ch_type, &nch->ch_type, sizeof(ch->ch_type)) &&
		    !memcmp(&ch->ch_inst, &nch->ch_inst, sizeof(ch->ch_inst)))
			break;
	}
	if (ch != NULL) {
		if (co->co_chan.chm_subidx == 0) {
			printf("%s: unknown offer \"%s\"\n",
			    sc->sc_dev.dv_xname, nch->ch_ident);
			mtx_leave(&sc->sc_channelck);
			free(nch, M_DEVBUF, sizeof(*nch));
			return;
		}
#ifdef HYPERV_DEBUG
		printf("%s: subchannel %u for \"%s\"\n", sc->sc_dev.dv_xname,
		    co->co_chan.chm_subidx, ch->ch_ident);
#endif
		mtx_leave(&sc->sc_channelck);
		free(nch, M_DEVBUF, sizeof(*nch));
		return;
	}

	nch->ch_state = HV_CHANSTATE_OFFERED;

	TAILQ_INSERT_TAIL(&sc->sc_channels, nch, ch_entry);
	mtx_leave(&sc->sc_channelck);

#ifdef HYPERV_DEBUG
	printf("%s: channel %u: \"%s\"", sc->sc_dev.dv_xname, nch->ch_id,
	    nch->ch_ident);
	if (nch->ch_flags & CHF_MONITOR)
		printf(", monitor %u\n", co->co_chan.chm_montrig);
	else
		printf("\n");
#endif
}

struct hv_channel *
hv_channel_lookup(struct hv_softc *sc, uint32_t relid)
{
	struct hv_channel *ch;

	TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
		if (ch->ch_id == relid)
			return (ch);
	}
	return (NULL);
}

int
hv_channel_ring_create(struct hv_channel *ch, uint32_t buflen)
{
	struct hv_softc *sc = ch->ch_sc;

	buflen = roundup(buflen, PAGE_SIZE) + sizeof(struct vmbus_bufring);
	ch->ch_ring = km_alloc(2 * buflen, &kv_any, &kp_zero, cold ?
	    &kd_nowait : &kd_waitok);
	if (ch->ch_ring == NULL) {
		printf("%s: failed to allocate channel ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	ch->ch_ring_size = 2 * buflen;

	memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd));
	ch->ch_wrd.rd_ring = (struct vmbus_bufring *)ch->ch_ring;
	ch->ch_wrd.rd_size = buflen;
	ch->ch_wrd.rd_dsize = buflen - sizeof(struct vmbus_bufring);
	mtx_init(&ch->ch_wrd.rd_lock, IPL_NET);

	memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd));
	ch->ch_rrd.rd_ring = (struct vmbus_bufring *)((uint8_t *)ch->ch_ring +
	    buflen);
	ch->ch_rrd.rd_size = buflen;
	ch->ch_rrd.rd_dsize = buflen - sizeof(struct vmbus_bufring);
	mtx_init(&ch->ch_rrd.rd_lock, IPL_NET);

	if (hv_handle_alloc(ch, ch->ch_ring, 2 * buflen, &ch->ch_ring_gpadl)) {
		printf("%s: failed to obtain a PA handle for the ring\n",
		    sc->sc_dev.dv_xname);
		hv_channel_ring_destroy(ch);
		return (-1);
	}

	return (0);
}

void
hv_channel_ring_destroy(struct hv_channel *ch)
{
	km_free(ch->ch_ring, ch->ch_ring_size, &kv_any, &kp_zero);
	ch->ch_ring = NULL;
	hv_handle_free(ch, ch->ch_ring_gpadl);

	memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd));
	memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd));
}

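/*
 * Open a channel: allocate the paired transmit/receive rings,
 * register them with the host through a GPADL handle and issue
 * CHOPEN with optional user data for the peer.  The state is set to
 * OPENED before the command is posted because the host may start
 * delivering events as soon as it processes it.
 *
 * An illustrative sketch (not lifted from an actual child driver) of
 * the expected call sequence from a child attach routine:
 *
 *	struct hv_attach_args *aa = aux;
 *	struct hv_channel *ch = aa->aa_chan;
 *
 *	if (hv_channel_setdeferred(ch, sc->sc_dev.dv_xname))
 *		return;
 *	if (hv_channel_open(ch, PAGE_SIZE * 4, NULL, 0, mydrv_intr, sc))
 *		return;
 *	hv_evcount_attach(ch, sc->sc_dev.dv_xname);
 */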
int
hv_channel_open(struct hv_channel *ch, size_t buflen, void *udata,
    size_t udatalen, void (*handler)(void *), void *arg)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_chopen cmd;
	struct vmbus_chanmsg_chopen_resp rsp;
	int rv;

	if (ch->ch_ring == NULL &&
	    hv_channel_ring_create(ch, buflen)) {
		DPRINTF("%s: failed to create channel ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHOPEN;
	cmd.chm_openid = ch->ch_id;
	cmd.chm_chanid = ch->ch_id;
	cmd.chm_gpadl = ch->ch_ring_gpadl;
	cmd.chm_txbr_pgcnt = ch->ch_wrd.rd_size >> PAGE_SHIFT;
	cmd.chm_vcpuid = ch->ch_vcpu;

	if (udata && udatalen > 0)
		memcpy(cmd.chm_udata, udata, udatalen);

	memset(&rsp, 0, sizeof(rsp));

	ch->ch_handler = handler;
	ch->ch_ctx = arg;

	ch->ch_state = HV_CHANSTATE_OPENED;

	rv = hv_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp),
	    cold ? HCF_NOSLEEP : HCF_SLEEPOK);
	if (rv) {
		hv_channel_ring_destroy(ch);
		DPRINTF("%s: CHOPEN failed with %d\n",
		    sc->sc_dev.dv_xname, rv);
		ch->ch_handler = NULL;
		ch->ch_ctx = NULL;
		ch->ch_state = HV_CHANSTATE_OFFERED;
		return (-1);
	}

	return (0);
}

int
hv_channel_close(struct hv_channel *ch)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_chclose cmd;
	int rv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHCLOSE;
	cmd.chm_chanid = ch->ch_id;

	ch->ch_state = HV_CHANSTATE_CLOSING;
	rv = hv_cmd(sc, &cmd, sizeof(cmd), NULL, 0, HCF_NOREPLY);
	if (rv) {
		DPRINTF("%s: CHCLOSE failed with %d\n",
		    sc->sc_dev.dv_xname, rv);
		return (-1);
	}
	ch->ch_state = HV_CHANSTATE_CLOSED;
	hv_channel_ring_destroy(ch);
	return (0);
}

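/*
 * Notify the host that a channel has pending ring data.  Monitored
 * channels only set a bit in the monitor trigger page and let the
 * hypervisor pick it up on its own schedule; the others pay for an
 * immediate SIGNAL_EVENT hypercall.
 */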
static inline void
hv_channel_setevent(struct hv_softc *sc, struct hv_channel *ch)
{
	struct vmbus_mon_trig *mtg;

	/* Each uint32_t represents 32 channels */
	set_bit(ch->ch_id, sc->sc_wevents);
	if (ch->ch_flags & CHF_MONITOR) {
		mtg = &sc->sc_monitor[1]->mnf_trigs[ch->ch_mgroup];
		set_bit(ch->ch_mindex, &mtg->mt_pending);
	} else
		hv_intr_signal(sc, &ch->ch_monprm);
}

void
hv_channel_intr(void *arg)
{
	struct hv_channel *ch = arg;

	if (hv_channel_ready(ch))
		ch->ch_handler(ch->ch_ctx);

	if (hv_channel_unpause(ch) == 0)
		return;

	hv_channel_pause(ch);
	hv_channel_schedule(ch);
}

int
hv_channel_setdeferred(struct hv_channel *ch, const char *name)
{
	ch->ch_taskq = taskq_create(name, 1, IPL_NET, TASKQ_MPSAFE);
	if (ch->ch_taskq == NULL)
		return (-1);
	task_set(&ch->ch_task, hv_channel_intr, ch);
	return (0);
}

void
hv_channel_schedule(struct hv_channel *ch)
{
	if (ch->ch_handler) {
		if (!cold && (ch->ch_flags & CHF_BATCHED)) {
			hv_channel_pause(ch);
			task_add(ch->ch_taskq, &ch->ch_task);
		} else
			ch->ch_handler(ch->ch_ctx);
	}
}

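/*
 * The ring buffers are single-producer/single-consumer byte rings
 * with read and write offsets kept in the shared vmbus_bufring
 * header.  Transfers may wrap around the end of the data area, hence
 * the split memcpys.  E.g. with rd_dsize 4096, rd_prod 4000 and a
 * 200 byte payload, the first 96 bytes land at offset 4000, the
 * remaining 104 at offset 0, and rd_prod ends up at 104.
 */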
static inline void
hv_ring_put(struct hv_ring_data *wrd, uint8_t *data, uint32_t datalen)
{
	int left = MIN(datalen, wrd->rd_dsize - wrd->rd_prod);

	memcpy(&wrd->rd_ring->br_data[wrd->rd_prod], data, left);
	memcpy(&wrd->rd_ring->br_data[0], data + left, datalen - left);
	wrd->rd_prod += datalen;
	if (wrd->rd_prod >= wrd->rd_dsize)
		wrd->rd_prod -= wrd->rd_dsize;
}

static inline void
hv_ring_get(struct hv_ring_data *rrd, uint8_t *data, uint32_t datalen,
    int peek)
{
	int left = MIN(datalen, rrd->rd_dsize - rrd->rd_cons);

	memcpy(data, &rrd->rd_ring->br_data[rrd->rd_cons], left);
	memcpy(data + left, &rrd->rd_ring->br_data[0], datalen - left);
	if (!peek) {
		rrd->rd_cons += datalen;
		if (rrd->rd_cons >= rrd->rd_dsize)
			rrd->rd_cons -= rrd->rd_dsize;
	}
}

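/*
 * Compute the readable and writable byte counts from the shared
 * indices.  E.g. with rd_dsize 4096, br_rindex 100 and br_windex 300
 * there are 200 bytes to read and 3896 bytes of write headroom.
 */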
static inline void
hv_ring_avail(struct hv_ring_data *rd, uint32_t *towrite, uint32_t *toread)
{
	uint32_t ridx = rd->rd_ring->br_rindex;
	uint32_t widx = rd->rd_ring->br_windex;
	uint32_t r, w;

	if (widx >= ridx)
		w = rd->rd_dsize - (widx - ridx);
	else
		w = ridx - widx;
	r = rd->rd_dsize - w;
	if (towrite)
		*towrite = w;
	if (toread)
		*toread = r;
}

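/*
 * Copy out an iovec followed by a 64-bit trailer whose upper half
 * records the producer offset at the start of the transaction.  The
 * write must leave at least one free byte: avail has to stay strictly
 * greater than datalen so that br_rindex == br_windex always means
 * "empty", never "full".
 */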
int
hv_ring_write(struct hv_ring_data *wrd, struct iovec *iov, int iov_cnt,
    int *needsig)
{
	uint64_t indices = 0;
	uint32_t avail, oprod, datalen = sizeof(indices);
	int i;

	for (i = 0; i < iov_cnt; i++)
		datalen += iov[i].iov_len;

	KASSERT(datalen <= wrd->rd_dsize);

	hv_ring_avail(wrd, &avail, NULL);
	if (avail <= datalen) {
		DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen);
		return (EAGAIN);
	}

	oprod = wrd->rd_prod;

	for (i = 0; i < iov_cnt; i++)
		hv_ring_put(wrd, iov[i].iov_base, iov[i].iov_len);

	indices = (uint64_t)oprod << 32;
	hv_ring_put(wrd, (uint8_t *)&indices, sizeof(indices));

	virtio_membar_sync();
	wrd->rd_ring->br_windex = wrd->rd_prod;
	virtio_membar_sync();

	/* Signal when the ring transitions from being empty to non-empty */
	if (wrd->rd_ring->br_imask == 0 &&
	    wrd->rd_ring->br_rindex == oprod)
		*needsig = 1;
	else
		*needsig = 0;

	return (0);
}

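/*
 * Send an inband packet: a vmbus_chanpkt header, the payload and up
 * to 7 bytes of zero padding, so that the total length is a multiple
 * of 8 bytes.  Header and total lengths are stored in the header via
 * VMBUS_CHANPKT_SETLEN().
 */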
int
hv_channel_send(struct hv_channel *ch, void *data, uint32_t datalen,
    uint64_t rid, int type, uint32_t flags)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt cp;
	struct iovec iov[3];
	uint32_t pktlen, pktlen_aligned;
	uint64_t zeropad = 0;
	int rv, needsig = 0;

	pktlen = sizeof(cp) + datalen;
	pktlen_aligned = roundup(pktlen, sizeof(uint64_t));

	cp.cp_hdr.cph_type = type;
	cp.cp_hdr.cph_flags = flags;
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp));
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
	cp.cp_hdr.cph_tid = rid;

	iov[0].iov_base = &cp;
	iov[0].iov_len = sizeof(cp);

	iov[1].iov_base = data;
	iov[1].iov_len = datalen;

	iov[2].iov_base = &zeropad;
	iov[2].iov_len = pktlen_aligned - pktlen;

	mtx_enter(&ch->ch_wrd.rd_lock);
	rv = hv_ring_write(&ch->ch_wrd, iov, 3, &needsig);
	mtx_leave(&ch->ch_wrd.rd_lock);
	if (rv == 0 && needsig)
		hv_channel_setevent(sc, ch);

	return (rv);
}

int
hv_channel_send_sgl(struct hv_channel *ch, struct vmbus_gpa *sgl,
    uint32_t nsge, void *data, uint32_t datalen, uint64_t rid)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt_sglist cp;
	struct iovec iov[4];
	uint32_t buflen, pktlen, pktlen_aligned;
	uint64_t zeropad = 0;
	int rv, needsig = 0;

	buflen = sizeof(struct vmbus_gpa) * nsge;
	pktlen = sizeof(cp) + datalen + buflen;
	pktlen_aligned = roundup(pktlen, sizeof(uint64_t));

	cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
	cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen);
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
	cp.cp_hdr.cph_tid = rid;
	cp.cp_gpa_cnt = nsge;
	cp.cp_rsvd = 0;

	iov[0].iov_base = &cp;
	iov[0].iov_len = sizeof(cp);

	iov[1].iov_base = sgl;
	iov[1].iov_len = buflen;

	iov[2].iov_base = data;
	iov[2].iov_len = datalen;

	iov[3].iov_base = &zeropad;
	iov[3].iov_len = pktlen_aligned - pktlen;

	mtx_enter(&ch->ch_wrd.rd_lock);
	rv = hv_ring_write(&ch->ch_wrd, iov, 4, &needsig);
	mtx_leave(&ch->ch_wrd.rd_lock);
	if (rv == 0 && needsig)
		hv_channel_setevent(sc, ch);

	return (rv);
}

int
hv_channel_send_prpl(struct hv_channel *ch, struct vmbus_gpa_range *prpl,
    uint32_t nprp, void *data, uint32_t datalen, uint64_t rid)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt_prplist cp;
	struct iovec iov[4];
	uint32_t buflen, pktlen, pktlen_aligned;
	uint64_t zeropad = 0;
	int rv, needsig = 0;

	buflen = sizeof(struct vmbus_gpa_range) * (nprp + 1);
	pktlen = sizeof(cp) + datalen + buflen;
	pktlen_aligned = roundup(pktlen, sizeof(uint64_t));

	cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
	cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen);
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
	cp.cp_hdr.cph_tid = rid;
	cp.cp_range_cnt = 1;
	cp.cp_rsvd = 0;

	iov[0].iov_base = &cp;
	iov[0].iov_len = sizeof(cp);

	iov[1].iov_base = prpl;
	iov[1].iov_len = buflen;

	iov[2].iov_base = data;
	iov[2].iov_len = datalen;

	iov[3].iov_base = &zeropad;
	iov[3].iov_len = pktlen_aligned - pktlen;

	mtx_enter(&ch->ch_wrd.rd_lock);
	rv = hv_ring_write(&ch->ch_wrd, iov, 4, &needsig);
	mtx_leave(&ch->ch_wrd.rd_lock);
	if (rv == 0 && needsig)
		hv_channel_setevent(sc, ch);

	return (rv);
}

int
hv_ring_peek(struct hv_ring_data *rrd, void *data, uint32_t datalen)
{
	uint32_t avail;

	KASSERT(datalen <= rrd->rd_dsize);

	hv_ring_avail(rrd, NULL, &avail);
	if (avail < datalen)
		return (EAGAIN);

	hv_ring_get(rrd, (uint8_t *)data, datalen, 1);
	return (0);
}

int
hv_ring_read(struct hv_ring_data *rrd, void *data, uint32_t datalen,
    uint32_t offset)
{
	uint64_t indices;
	uint32_t avail;

	KASSERT(datalen <= rrd->rd_dsize);

	hv_ring_avail(rrd, NULL, &avail);
	if (avail < datalen) {
		DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen);
		return (EAGAIN);
	}

	if (offset) {
		rrd->rd_cons += offset;
		if (rrd->rd_cons >= rrd->rd_dsize)
			rrd->rd_cons -= rrd->rd_dsize;
	}

	hv_ring_get(rrd, (uint8_t *)data, datalen, 0);
	hv_ring_get(rrd, (uint8_t *)&indices, sizeof(indices), 0);

	virtio_membar_sync();
	rrd->rd_ring->br_rindex = rrd->rd_cons;

	return (0);
}

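/*
 * Receive a single packet: peek at the header to learn the total
 * length, fail with EINVAL if the caller's buffer is too small, then
 * consume the packet, skipping the header unless a raw read was
 * requested.
 */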
int
hv_channel_recv(struct hv_channel *ch, void *data, uint32_t datalen,
    uint32_t *rlen, uint64_t *rid, int raw)
{
	struct vmbus_chanpkt_hdr cph;
	uint32_t offset, pktlen;
	int rv;

	*rlen = 0;

	mtx_enter(&ch->ch_rrd.rd_lock);

	if ((rv = hv_ring_peek(&ch->ch_rrd, &cph, sizeof(cph))) != 0) {
		mtx_leave(&ch->ch_rrd.rd_lock);
		return (rv);
	}

	offset = raw ? 0 : VMBUS_CHANPKT_GETLEN(cph.cph_hlen);
	pktlen = VMBUS_CHANPKT_GETLEN(cph.cph_tlen) - offset;
	if (pktlen > datalen) {
		mtx_leave(&ch->ch_rrd.rd_lock);
		printf("%s: pktlen %u datalen %u\n", __func__, pktlen, datalen);
		return (EINVAL);
	}

	rv = hv_ring_read(&ch->ch_rrd, data, pktlen, offset);
	if (rv == 0) {
		*rlen = pktlen;
		*rid = cph.cph_tid;
	}

	mtx_leave(&ch->ch_rrd.rd_lock);

	return (rv);
}

static inline void
hv_ring_mask(struct hv_ring_data *rd)
{
	virtio_membar_sync();
	rd->rd_ring->br_imask = 1;
	virtio_membar_sync();
}

static inline void
hv_ring_unmask(struct hv_ring_data *rd)
{
	virtio_membar_sync();
	rd->rd_ring->br_imask = 0;
	virtio_membar_sync();
}

void
hv_channel_pause(struct hv_channel *ch)
{
	hv_ring_mask(&ch->ch_rrd);
}

uint
hv_channel_unpause(struct hv_channel *ch)
{
	uint32_t avail;

	hv_ring_unmask(&ch->ch_rrd);
	hv_ring_avail(&ch->ch_rrd, NULL, &avail);

	return (avail);
}

uint
hv_channel_ready(struct hv_channel *ch)
{
	uint32_t avail;

	hv_ring_avail(&ch->ch_rrd, NULL, &avail);

	return (avail);
}

/* How many PFNs can be referenced by the header */
#define HV_NPFNHDR	((VMBUS_MSG_DSIZE_MAX -	\
	  sizeof(struct vmbus_chanmsg_gpadl_conn)) / sizeof(uint64_t))

/* How many PFNs can be referenced by the body */
#define HV_NPFNBODY	((VMBUS_MSG_DSIZE_MAX -	\
	  sizeof(struct vmbus_chanmsg_gpadl_subconn)) / sizeof(uint64_t))

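/*
 * A GPADL (guest physical address descriptor list) describes a guest
 * buffer to the host as a list of PFNs.  Since the list rarely fits
 * into a single VMBus message, the GPADL_CONN message carries the
 * range header plus as many PFNs as fit, GPADL_SUBCONN messages carry
 * the rest, and the host acknowledges the whole list with
 * GPADL_CONNRESP.
 */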
int
hv_handle_alloc(struct hv_channel *ch, void *buffer, uint32_t buflen,
    uint32_t *handle)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_gpadl_conn *hdr;
	struct vmbus_chanmsg_gpadl_subconn *cmd;
	struct vmbus_chanmsg_gpadl_connresp rsp;
	struct hv_msg *msg;
	int i, j, last, left, rv;
	int bodylen = 0, ncmds = 0, pfn = 0;
	int waitflag = cold ? M_NOWAIT : M_WAITOK;
	uint64_t *frames;
	paddr_t pa;
	caddr_t body;
	/* Total number of pages to reference */
	int total = atop(buflen);
	/* Number of pages that will fit the header */
	int inhdr = MIN(total, HV_NPFNHDR);

	KASSERT((buflen & (PAGE_SIZE - 1)) == 0);

	if ((msg = malloc(sizeof(*msg), M_DEVBUF, M_ZERO | waitflag)) == NULL)
		return (ENOMEM);

	/* Prepare array of frame addresses */
	if ((frames = mallocarray(total, sizeof(*frames), M_DEVBUF, M_ZERO |
	    waitflag)) == NULL) {
		free(msg, M_DEVBUF, sizeof(*msg));
		return (ENOMEM);
	}
	for (i = 0; i < total; i++) {
		if (!pmap_extract(pmap_kernel(), (vaddr_t)buffer +
		    PAGE_SIZE * i, &pa)) {
			free(msg, M_DEVBUF, sizeof(*msg));
			free(frames, M_DEVBUF, total * sizeof(*frames));
			return (EFAULT);
		}
		frames[i] = atop(pa);
	}

	msg->msg_req.hc_dsize = sizeof(struct vmbus_chanmsg_gpadl_conn) +
	    inhdr * sizeof(uint64_t);
	hdr = (struct vmbus_chanmsg_gpadl_conn *)msg->msg_req.hc_data;
	msg->msg_rsp = &rsp;
	msg->msg_rsplen = sizeof(rsp);
	if (waitflag == M_NOWAIT)
		msg->msg_flags = MSGF_NOSLEEP;

	left = total - inhdr;

	/* Allocate additional gpadl_body structures if required */
	if (left > 0) {
		ncmds = MAX(1, left / HV_NPFNBODY + left % HV_NPFNBODY);
		bodylen = ncmds * VMBUS_MSG_DSIZE_MAX;
		body = malloc(bodylen, M_DEVBUF, M_ZERO | waitflag);
		if (body == NULL) {
			free(msg, M_DEVBUF, sizeof(*msg));
			free(frames, M_DEVBUF, atop(buflen) * sizeof(*frames));
			return (ENOMEM);
		}
	}

	*handle = atomic_inc_int_nv(&sc->sc_handle);

	hdr->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_CONN;
	hdr->chm_chanid = ch->ch_id;
	hdr->chm_gpadl = *handle;

	/* Single range for a contiguous buffer */
	hdr->chm_range_cnt = 1;
	hdr->chm_range_len = sizeof(struct vmbus_gpa_range) + total *
	    sizeof(uint64_t);
	hdr->chm_range.gpa_ofs = 0;
	hdr->chm_range.gpa_len = buflen;

	/* Fit as many pages as possible into the header */
	for (i = 0; i < inhdr; i++)
		hdr->chm_range.gpa_page[i] = frames[pfn++];

	for (i = 0; i < ncmds; i++) {
		cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body +
		    VMBUS_MSG_DSIZE_MAX * i);
		cmd->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_SUBCONN;
		cmd->chm_gpadl = *handle;
		last = MIN(left, HV_NPFNBODY);
		for (j = 0; j < last; j++)
			cmd->chm_gpa_page[j] = frames[pfn++];
		left -= last;
	}

	rv = hv_start(sc, msg);
	if (rv != 0) {
		DPRINTF("%s: GPADL_CONN failed\n", sc->sc_dev.dv_xname);
		goto out;
	}
	for (i = 0; i < ncmds; i++) {
		int cmdlen = sizeof(*cmd);
		cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body +
		    VMBUS_MSG_DSIZE_MAX * i);
		/* Last element can be short */
		if (i == ncmds - 1)
			cmdlen += last * sizeof(uint64_t);
		else
			cmdlen += HV_NPFNBODY * sizeof(uint64_t);
		rv = hv_cmd(sc, cmd, cmdlen, NULL, 0, waitflag | HCF_NOREPLY);
		if (rv != 0) {
			DPRINTF("%s: GPADL_SUBCONN (iteration %d/%d) failed "
			    "with %d\n", sc->sc_dev.dv_xname, i, ncmds, rv);
			goto out;
		}
	}
	rv = hv_reply(sc, msg);
	if (rv != 0)
		DPRINTF("%s: GPADL allocation failed with %d\n",
		    sc->sc_dev.dv_xname, rv);

 out:
	free(msg, M_DEVBUF, sizeof(*msg));
	free(frames, M_DEVBUF, total * sizeof(*frames));
	if (bodylen > 0)
		free(body, M_DEVBUF, bodylen);
	if (rv != 0)
		return (rv);

	KASSERT(*handle == rsp.chm_gpadl);

	return (0);
}

void
hv_handle_free(struct hv_channel *ch, uint32_t handle)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_gpadl_disconn cmd;
	struct vmbus_chanmsg_gpadl_disconn rsp;
	int rv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_DISCONN;
	cmd.chm_chanid = ch->ch_id;
	cmd.chm_gpadl = handle;

	rv = hv_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), cold ?
	    HCF_NOSLEEP : 0);
	if (rv)
		DPRINTF("%s: GPADL_DISCONN failed with %d\n",
		    sc->sc_dev.dv_xname, rv);
}

static int
hv_attach_print(void *aux, const char *name)
{
	struct hv_attach_args *aa = aux;

	if (name)
		printf("\"%s\" at %s", aa->aa_ident, name);

	return (UNCONF);
}

int
hv_attach_devices(struct hv_softc *sc)
{
	struct hv_dev *dv;
	struct hv_channel *ch;

	SLIST_INIT(&sc->sc_devs);
	mtx_init(&sc->sc_devlck, IPL_NET);

	TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
		if (ch->ch_state != HV_CHANSTATE_OFFERED)
			continue;
		if (!(ch->ch_flags & CHF_MONITOR))
			continue;
		dv = malloc(sizeof(*dv), M_DEVBUF, M_ZERO | M_NOWAIT);
		if (dv == NULL) {
			printf("%s: failed to allocate device object\n",
			    sc->sc_dev.dv_xname);
			return (-1);
		}
		dv->dv_aa.aa_parent = sc;
		dv->dv_aa.aa_type = &ch->ch_type;
		dv->dv_aa.aa_inst = &ch->ch_inst;
		dv->dv_aa.aa_ident = ch->ch_ident;
		dv->dv_aa.aa_chan = ch;
		dv->dv_aa.aa_dmat = sc->sc_dmat;
		mtx_enter(&sc->sc_devlck);
		SLIST_INSERT_HEAD(&sc->sc_devs, dv, dv_entry);
		mtx_leave(&sc->sc_devlck);
		config_found((struct device *)sc, &dv->dv_aa, hv_attach_print);
	}
	return (0);
}

void
hv_evcount_attach(struct hv_channel *ch, const char *name)
{
	struct hv_softc *sc = ch->ch_sc;

	evcount_attach(&ch->ch_evcnt, name, &sc->sc_idtvec);
}
1845