1 /* $NetBSD: vmbus.c,v 1.15 2021/12/23 04:06:51 yamaguchi Exp $ */ 2 /* $OpenBSD: hyperv.c,v 1.43 2017/06/27 13:56:15 mikeb Exp $ */ 3 4 /*- 5 * Copyright (c) 2009-2012 Microsoft Corp. 6 * Copyright (c) 2012 NetApp Inc. 7 * Copyright (c) 2012 Citrix Inc. 8 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com> 9 * All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice unmodified, this list of conditions, and the following 16 * disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * The OpenBSD port was done under funding by Esdenera Networks GmbH. 35 */ 36 37 #include <sys/cdefs.h> 38 __KERNEL_RCSID(0, "$NetBSD: vmbus.c,v 1.15 2021/12/23 04:06:51 yamaguchi Exp $"); 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/device.h> 43 #include <sys/atomic.h> 44 #include <sys/bitops.h> 45 #include <sys/bus.h> 46 #include <sys/cpu.h> 47 #include <sys/intr.h> 48 #include <sys/kmem.h> 49 #include <sys/kthread.h> 50 #include <sys/module.h> 51 #include <sys/mutex.h> 52 #include <sys/xcall.h> 53 54 #include <uvm/uvm_extern.h> 55 56 #include <dev/hyperv/vmbusvar.h> 57 58 #define VMBUS_GPADL_START 0xffff /* 0x10000 effectively */ 59 60 /* Command submission flags */ 61 #define HCF_SLEEPOK 0x0000 62 #define HCF_NOSLEEP 0x0002 /* M_NOWAIT */ 63 #define HCF_NOREPLY 0x0004 64 65 static void vmbus_attach_deferred(device_t); 66 static int vmbus_attach_print(void *, const char *); 67 static int vmbus_alloc_dma(struct vmbus_softc *); 68 static void vmbus_free_dma(struct vmbus_softc *); 69 static int vmbus_init_interrupts(struct vmbus_softc *); 70 static void vmbus_deinit_interrupts(struct vmbus_softc *); 71 static void vmbus_init_interrupts_pcpu(void *, void *); 72 static void vmbus_deinit_interrupts_pcpu(void *, void *); 73 74 static int vmbus_connect(struct vmbus_softc *); 75 static int vmbus_cmd(struct vmbus_softc *, void *, size_t, void *, size_t, 76 int); 77 static int vmbus_start(struct vmbus_softc *, struct vmbus_msg *, paddr_t); 78 static int vmbus_reply(struct vmbus_softc *, struct vmbus_msg *); 79 static uint16_t vmbus_intr_signal(struct vmbus_softc *, paddr_t); 80 static void vmbus_event_proc(void *, struct cpu_info *); 81 static void vmbus_event_proc_compat(void *, struct cpu_info *); 82 static void vmbus_message_proc(void *, struct cpu_info *); 83 static void vmbus_message_softintr(void *); 84 static void vmbus_channel_response(struct vmbus_softc *, 85 struct vmbus_chanmsg_hdr *); 86 static void vmbus_channel_offer(struct vmbus_softc *, 87 struct vmbus_chanmsg_hdr *); 88 static void vmbus_channel_rescind(struct vmbus_softc *, 89 struct vmbus_chanmsg_hdr *); 90 static void vmbus_channel_delivered(struct vmbus_softc *, 91 struct vmbus_chanmsg_hdr *); 92 static int vmbus_channel_scan(struct vmbus_softc *); 93 static void vmbus_channel_cpu_default(struct vmbus_channel *); 94 static void vmbus_process_offer(struct vmbus_softc *, 95 struct vmbus_chanmsg_choffer *); 96 static void vmbus_process_rescind(struct vmbus_softc *, 97 struct vmbus_chanmsg_chrescind *); 98 static struct vmbus_channel * 99 vmbus_channel_lookup(struct vmbus_softc *, uint32_t); 100 static int vmbus_channel_ring_create(struct vmbus_channel *, uint32_t); 101 static void vmbus_channel_ring_destroy(struct vmbus_channel *); 102 static void vmbus_channel_detach(struct vmbus_channel *); 103 static void vmbus_channel_pause(struct vmbus_channel *); 104 static uint32_t vmbus_channel_unpause(struct vmbus_channel *); 105 static uint32_t vmbus_channel_ready(struct vmbus_channel *); 106 static void vmbus_chevq_enqueue(struct vmbus_softc *, int, void *); 107 static void vmbus_process_chevq(void *); 108 static void vmbus_chevq_thread(void *); 109 static void vmbus_devq_enqueue(struct vmbus_softc *, int, 110 struct vmbus_channel *); 111 static void vmbus_process_devq(void *); 112 static void vmbus_devq_thread(void *); 113 static void vmbus_subchannel_devq_thread(void *); 114 115 static struct vmbus_softc *vmbus_sc; 116 117 static const struct { 118 int hmd_response; 119 int hmd_request; 120 void (*hmd_handler)(struct vmbus_softc *, 121 struct vmbus_chanmsg_hdr *); 122 } vmbus_msg_dispatch[] = { 123 { 0, 0, NULL }, 124 { VMBUS_CHANMSG_CHOFFER, 0, vmbus_channel_offer }, 125 { VMBUS_CHANMSG_CHRESCIND, 0, vmbus_channel_rescind }, 126 { VMBUS_CHANMSG_CHREQUEST, VMBUS_CHANMSG_CHOFFER, NULL }, 127 { VMBUS_CHANMSG_CHOFFER_DONE, 0, vmbus_channel_delivered }, 128 { VMBUS_CHANMSG_CHOPEN, 0, NULL }, 129 { VMBUS_CHANMSG_CHOPEN_RESP, VMBUS_CHANMSG_CHOPEN, 130 vmbus_channel_response }, 131 { VMBUS_CHANMSG_CHCLOSE, 0, NULL }, 132 { VMBUS_CHANMSG_GPADL_CONN, 0, NULL }, 133 { VMBUS_CHANMSG_GPADL_SUBCONN, 0, NULL }, 134 { VMBUS_CHANMSG_GPADL_CONNRESP, VMBUS_CHANMSG_GPADL_CONN, 135 vmbus_channel_response }, 136 { VMBUS_CHANMSG_GPADL_DISCONN, 0, NULL }, 137 { VMBUS_CHANMSG_GPADL_DISCONNRESP, VMBUS_CHANMSG_GPADL_DISCONN, 138 vmbus_channel_response }, 139 { VMBUS_CHANMSG_CHFREE, 0, NULL }, 140 { VMBUS_CHANMSG_CONNECT, 0, NULL }, 141 { VMBUS_CHANMSG_CONNECT_RESP, VMBUS_CHANMSG_CONNECT, 142 vmbus_channel_response }, 143 { VMBUS_CHANMSG_DISCONNECT, 0, NULL }, 144 }; 145 146 const struct hyperv_guid hyperv_guid_network = { 147 { 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, 148 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e } 149 }; 150 151 const struct hyperv_guid hyperv_guid_ide = { 152 { 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, 153 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 } 154 }; 155 156 const struct hyperv_guid hyperv_guid_scsi = { 157 { 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, 158 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f } 159 }; 160 161 const struct hyperv_guid hyperv_guid_shutdown = { 162 { 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, 163 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb } 164 }; 165 166 const struct hyperv_guid hyperv_guid_timesync = { 167 { 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, 168 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf } 169 }; 170 171 const struct hyperv_guid hyperv_guid_heartbeat = { 172 { 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, 173 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d } 174 }; 175 176 const struct hyperv_guid hyperv_guid_kvp = { 177 { 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, 178 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6 } 179 }; 180 181 const struct hyperv_guid hyperv_guid_vss = { 182 { 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42, 183 0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 } 184 }; 185 186 const struct hyperv_guid hyperv_guid_dynmem = { 187 { 0xdc, 0x74, 0x50, 0x52, 0x85, 0x89, 0xe2, 0x46, 188 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 } 189 }; 190 191 const struct hyperv_guid hyperv_guid_mouse = { 192 { 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, 193 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a } 194 }; 195 196 const struct hyperv_guid hyperv_guid_kbd = { 197 { 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48, 198 0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 } 199 }; 200 201 const struct hyperv_guid hyperv_guid_video = { 202 { 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, 203 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 } 204 }; 205 206 const struct hyperv_guid hyperv_guid_fc = { 207 { 0x4a, 0xcc, 0x9b, 0x2f, 0x69, 0x00, 0xf3, 0x4a, 208 0xb7, 0x6b, 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda } 209 }; 210 211 const struct hyperv_guid hyperv_guid_fcopy = { 212 { 0xe3, 0x4b, 0xd1, 0x34, 0xe4, 0xde, 0xc8, 0x41, 213 0x9a, 0xe7, 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92 } 214 }; 215 216 const struct hyperv_guid hyperv_guid_pcie = { 217 { 0x1d, 0xf6, 0xc4, 0x44, 0x44, 0x44, 0x00, 0x44, 218 0x9d, 0x52, 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f } 219 }; 220 221 const struct hyperv_guid hyperv_guid_netdir = { 222 { 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, 223 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 } 224 }; 225 226 const struct hyperv_guid hyperv_guid_rdesktop = { 227 { 0xf4, 0xac, 0x6a, 0x27, 0x15, 0xac, 0x6c, 0x42, 228 0x98, 0xdd, 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe } 229 }; 230 231 /* Automatic Virtual Machine Activation (AVMA) Services */ 232 const struct hyperv_guid hyperv_guid_avma1 = { 233 { 0x55, 0xb2, 0x87, 0x44, 0x8c, 0xb8, 0x3f, 0x40, 234 0xbb, 0x51, 0xd1, 0xf6, 0x9c, 0xf1, 0x7f, 0x87 } 235 }; 236 237 const struct hyperv_guid hyperv_guid_avma2 = { 238 { 0xf4, 0xba, 0x75, 0x33, 0x15, 0x9e, 0x30, 0x4b, 239 0xb7, 0x65, 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b } 240 }; 241 242 const struct hyperv_guid hyperv_guid_avma3 = { 243 { 0xa0, 0x1f, 0x22, 0x99, 0xad, 0x24, 0xe2, 0x11, 244 0xbe, 0x98, 0x00, 0x1a, 0xa0, 0x1b, 0xbf, 0x6e } 245 }; 246 247 const struct hyperv_guid hyperv_guid_avma4 = { 248 { 0x16, 0x57, 0xe6, 0xf8, 0xb3, 0x3c, 0x06, 0x4a, 249 0x9a, 0x60, 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5 } 250 }; 251 252 int 253 vmbus_match(device_t parent, cfdata_t cf, void *aux) 254 { 255 256 if (cf->cf_unit != 0 || 257 !hyperv_hypercall_enabled() || 258 !hyperv_synic_supported()) 259 return 0; 260 261 return 1; 262 } 263 264 int 265 vmbus_attach(struct vmbus_softc *sc) 266 { 267 268 aprint_naive("\n"); 269 aprint_normal(": Hyper-V VMBus\n"); 270 271 vmbus_sc = sc; 272 273 sc->sc_msgpool = pool_cache_init(sizeof(struct vmbus_msg), 8, 0, 0, 274 "hvmsg", NULL, IPL_NET, NULL, NULL, NULL); 275 hyperv_set_message_proc(vmbus_message_proc, sc); 276 277 if (vmbus_alloc_dma(sc)) 278 goto cleanup; 279 280 if (vmbus_init_interrupts(sc)) 281 goto cleanup; 282 283 if (vmbus_connect(sc)) 284 goto cleanup; 285 286 aprint_normal_dev(sc->sc_dev, "protocol %d.%d\n", 287 VMBUS_VERSION_MAJOR(sc->sc_proto), 288 VMBUS_VERSION_MINOR(sc->sc_proto)); 289 290 if (sc->sc_proto == VMBUS_VERSION_WS2008 || 291 sc->sc_proto == VMBUS_VERSION_WIN7) { 292 hyperv_set_event_proc(vmbus_event_proc_compat, sc); 293 sc->sc_channel_max = VMBUS_CHAN_MAX_COMPAT; 294 } else { 295 hyperv_set_event_proc(vmbus_event_proc, sc); 296 sc->sc_channel_max = VMBUS_CHAN_MAX; 297 } 298 299 if (vmbus_channel_scan(sc)) 300 goto cleanup; 301 302 config_interrupts(sc->sc_dev, vmbus_attach_deferred); 303 304 return 0; 305 306 cleanup: 307 vmbus_deinit_interrupts(sc); 308 vmbus_free_dma(sc); 309 return -1; 310 } 311 312 static void 313 vmbus_attach_deferred(device_t self) 314 { 315 struct vmbus_softc *sc = device_private(self); 316 uint64_t xc; 317 318 xc = xc_broadcast(0, vmbus_init_interrupts_pcpu, 319 sc, NULL); 320 xc_wait(xc); 321 } 322 323 int 324 vmbus_detach(struct vmbus_softc *sc, int flags) 325 { 326 327 vmbus_deinit_interrupts(sc); 328 vmbus_free_dma(sc); 329 330 return 0; 331 } 332 333 static int 334 vmbus_alloc_dma(struct vmbus_softc *sc) 335 { 336 CPU_INFO_ITERATOR cii; 337 struct cpu_info *ci; 338 struct vmbus_percpu_data *pd; 339 int i; 340 341 /* 342 * Per-CPU messages and event flags. 343 */ 344 for (CPU_INFO_FOREACH(cii, ci)) { 345 pd = &sc->sc_percpu[cpu_index(ci)]; 346 347 pd->simp = hyperv_dma_alloc(sc->sc_dmat, &pd->simp_dma, 348 PAGE_SIZE, PAGE_SIZE, 0, 1, HYPERV_DMA_SLEEPOK); 349 if (pd->simp == NULL) 350 return ENOMEM; 351 352 pd->siep = hyperv_dma_alloc(sc->sc_dmat, &pd->siep_dma, 353 PAGE_SIZE, PAGE_SIZE, 0, 1, HYPERV_DMA_SLEEPOK); 354 if (pd->siep == NULL) 355 return ENOMEM; 356 } 357 358 sc->sc_events = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_events_dma, 359 PAGE_SIZE, PAGE_SIZE, 0, 1, HYPERV_DMA_SLEEPOK); 360 if (sc->sc_events == NULL) 361 return ENOMEM; 362 sc->sc_wevents = (u_long *)sc->sc_events; 363 sc->sc_revents = (u_long *)((uint8_t *)sc->sc_events + (PAGE_SIZE / 2)); 364 365 for (i = 0; i < __arraycount(sc->sc_monitor); i++) { 366 sc->sc_monitor[i] = hyperv_dma_alloc(sc->sc_dmat, 367 &sc->sc_monitor_dma[i], PAGE_SIZE, PAGE_SIZE, 0, 1, 368 HYPERV_DMA_SLEEPOK); 369 if (sc->sc_monitor[i] == NULL) 370 return ENOMEM; 371 } 372 373 return 0; 374 } 375 376 static void 377 vmbus_free_dma(struct vmbus_softc *sc) 378 { 379 CPU_INFO_ITERATOR cii; 380 struct cpu_info *ci; 381 int i; 382 383 if (sc->sc_events != NULL) { 384 sc->sc_events = sc->sc_wevents = sc->sc_revents = NULL; 385 hyperv_dma_free(sc->sc_dmat, &sc->sc_events_dma); 386 } 387 388 for (i = 0; i < __arraycount(sc->sc_monitor); i++) { 389 sc->sc_monitor[i] = NULL; 390 hyperv_dma_free(sc->sc_dmat, &sc->sc_monitor_dma[i]); 391 } 392 393 for (CPU_INFO_FOREACH(cii, ci)) { 394 struct vmbus_percpu_data *pd = &sc->sc_percpu[cpu_index(ci)]; 395 396 if (pd->simp != NULL) { 397 pd->simp = NULL; 398 hyperv_dma_free(sc->sc_dmat, &pd->simp_dma); 399 } 400 if (pd->siep != NULL) { 401 pd->siep = NULL; 402 hyperv_dma_free(sc->sc_dmat, &pd->siep_dma); 403 } 404 } 405 } 406 407 static int 408 vmbus_init_interrupts(struct vmbus_softc *sc) 409 { 410 uint64_t xc; 411 412 TAILQ_INIT(&sc->sc_reqs); 413 mutex_init(&sc->sc_req_lock, MUTEX_DEFAULT, IPL_NET); 414 415 TAILQ_INIT(&sc->sc_rsps); 416 mutex_init(&sc->sc_rsp_lock, MUTEX_DEFAULT, IPL_NET); 417 418 sc->sc_proto = VMBUS_VERSION_WS2008; 419 420 /* XXX event_tq */ 421 422 sc->sc_msg_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 423 vmbus_message_softintr, sc); 424 if (sc->sc_msg_sih == NULL) 425 return -1; 426 427 kcpuset_create(&sc->sc_intr_cpuset, true); 428 if (cold) { 429 /* Initialize other CPUs later. */ 430 vmbus_init_interrupts_pcpu(sc, NULL); 431 } else { 432 xc = xc_broadcast(0, vmbus_init_interrupts_pcpu, 433 sc, NULL); 434 xc_wait(xc); 435 } 436 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_SYNIC); 437 438 return 0; 439 } 440 441 static void 442 vmbus_deinit_interrupts(struct vmbus_softc *sc) 443 { 444 uint64_t xc; 445 446 if (cold) { 447 vmbus_deinit_interrupts_pcpu(sc, NULL); 448 } else { 449 xc = xc_broadcast(0, vmbus_deinit_interrupts_pcpu, 450 sc, NULL); 451 xc_wait(xc); 452 } 453 atomic_and_32(&sc->sc_flags, (uint32_t)~VMBUS_SCFLAG_SYNIC); 454 455 /* XXX event_tq */ 456 457 if (sc->sc_msg_sih != NULL) { 458 softint_disestablish(sc->sc_msg_sih); 459 sc->sc_msg_sih = NULL; 460 } 461 } 462 463 static void 464 vmbus_init_interrupts_pcpu(void *arg1, void *arg2 __unused) 465 { 466 struct vmbus_softc *sc = arg1; 467 cpuid_t cpu; 468 int s; 469 470 s = splhigh(); 471 472 cpu = cpu_index(curcpu()); 473 if (!kcpuset_isset(sc->sc_intr_cpuset, cpu)) { 474 kcpuset_atomic_set(sc->sc_intr_cpuset, cpu); 475 vmbus_init_interrupts_md(sc, cpu); 476 vmbus_init_synic_md(sc, cpu); 477 } 478 479 splx(s); 480 } 481 482 static void 483 vmbus_deinit_interrupts_pcpu(void *arg1, void *arg2 __unused) 484 { 485 struct vmbus_softc *sc = arg1; 486 cpuid_t cpu; 487 int s; 488 489 s = splhigh(); 490 491 cpu = cpu_index(curcpu()); 492 if (kcpuset_isset(sc->sc_intr_cpuset, cpu)) { 493 if (ISSET(sc->sc_flags, VMBUS_SCFLAG_SYNIC)) 494 vmbus_deinit_synic_md(sc, cpu); 495 vmbus_deinit_interrupts_md(sc, cpu); 496 kcpuset_atomic_clear(sc->sc_intr_cpuset, cpu); 497 } 498 499 splx(s); 500 } 501 502 static int 503 vmbus_connect(struct vmbus_softc *sc) 504 { 505 static const uint32_t versions[] = { 506 VMBUS_VERSION_WIN8_1, 507 VMBUS_VERSION_WIN8, 508 VMBUS_VERSION_WIN7, 509 VMBUS_VERSION_WS2008 510 }; 511 struct vmbus_chanmsg_connect cmd; 512 struct vmbus_chanmsg_connect_resp rsp; 513 int i, rv; 514 515 memset(&cmd, 0, sizeof(cmd)); 516 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CONNECT; 517 cmd.chm_evtflags = hyperv_dma_get_paddr(&sc->sc_events_dma); 518 cmd.chm_mnf1 = hyperv_dma_get_paddr(&sc->sc_monitor_dma[0]); 519 cmd.chm_mnf2 = hyperv_dma_get_paddr(&sc->sc_monitor_dma[1]); 520 521 memset(&rsp, 0, sizeof(rsp)); 522 523 for (i = 0; i < __arraycount(versions); i++) { 524 cmd.chm_ver = versions[i]; 525 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), 526 cold ? HCF_NOSLEEP : HCF_SLEEPOK); 527 if (rv) { 528 DPRINTF("%s: CONNECT failed\n", 529 device_xname(sc->sc_dev)); 530 return rv; 531 } 532 if (rsp.chm_done) { 533 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_CONNECTED); 534 sc->sc_proto = versions[i]; 535 sc->sc_handle = VMBUS_GPADL_START; 536 break; 537 } 538 } 539 if (i == __arraycount(versions)) { 540 device_printf(sc->sc_dev, 541 "failed to negotiate protocol version\n"); 542 return ENXIO; 543 } 544 545 return 0; 546 } 547 548 static int 549 vmbus_cmd(struct vmbus_softc *sc, void *cmd, size_t cmdlen, void *rsp, 550 size_t rsplen, int flags) 551 { 552 const int prflags = cold ? PR_NOWAIT : PR_WAITOK; 553 struct vmbus_msg *msg; 554 paddr_t pa; 555 int rv; 556 557 if (cmdlen > VMBUS_MSG_DSIZE_MAX) { 558 device_printf(sc->sc_dev, "payload too large (%zu)\n", 559 cmdlen); 560 return EMSGSIZE; 561 } 562 563 msg = pool_cache_get_paddr(sc->sc_msgpool, prflags, &pa); 564 if (msg == NULL) { 565 device_printf(sc->sc_dev, "couldn't get msgpool\n"); 566 return ENOMEM; 567 } 568 memset(msg, 0, sizeof(*msg)); 569 msg->msg_req.hc_dsize = cmdlen; 570 memcpy(msg->msg_req.hc_data, cmd, cmdlen); 571 572 if (!(flags & HCF_NOREPLY)) { 573 msg->msg_rsp = rsp; 574 msg->msg_rsplen = rsplen; 575 } else 576 msg->msg_flags |= MSGF_NOQUEUE; 577 578 if (flags & HCF_NOSLEEP) 579 msg->msg_flags |= MSGF_NOSLEEP; 580 581 rv = vmbus_start(sc, msg, pa); 582 if (rv == 0) 583 rv = vmbus_reply(sc, msg); 584 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 585 return rv; 586 } 587 588 static int 589 vmbus_start(struct vmbus_softc *sc, struct vmbus_msg *msg, paddr_t msg_pa) 590 { 591 static const int delays[] = { 592 100, 100, 100, 500, 500, 5000, 5000, 5000 593 }; 594 const char *wchan = "hvstart"; 595 uint16_t status; 596 int i, s; 597 598 msg->msg_req.hc_connid = VMBUS_CONNID_MESSAGE; 599 msg->msg_req.hc_msgtype = 1; 600 601 if (!(msg->msg_flags & MSGF_NOQUEUE)) { 602 mutex_enter(&sc->sc_req_lock); 603 TAILQ_INSERT_TAIL(&sc->sc_reqs, msg, msg_entry); 604 mutex_exit(&sc->sc_req_lock); 605 } 606 607 for (i = 0; i < __arraycount(delays); i++) { 608 status = hyperv_hypercall_post_message( 609 msg_pa + offsetof(struct vmbus_msg, msg_req)); 610 if (status == HYPERCALL_STATUS_SUCCESS) 611 break; 612 613 if (msg->msg_flags & MSGF_NOSLEEP) { 614 delay(delays[i]); 615 s = splnet(); 616 hyperv_intr(); 617 splx(s); 618 } else 619 tsleep(wchan, PRIBIO, wchan, 620 uimax(1, mstohz(delays[i] / 1000))); 621 } 622 if (status != HYPERCALL_STATUS_SUCCESS) { 623 device_printf(sc->sc_dev, 624 "posting vmbus message failed with %d\n", status); 625 if (!(msg->msg_flags & MSGF_NOQUEUE)) { 626 mutex_enter(&sc->sc_req_lock); 627 TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry); 628 mutex_exit(&sc->sc_req_lock); 629 } 630 return EIO; 631 } 632 633 return 0; 634 } 635 636 static int 637 vmbus_reply_done(struct vmbus_softc *sc, struct vmbus_msg *msg) 638 { 639 struct vmbus_msg *m; 640 641 mutex_enter(&sc->sc_rsp_lock); 642 TAILQ_FOREACH(m, &sc->sc_rsps, msg_entry) { 643 if (m == msg) { 644 mutex_exit(&sc->sc_rsp_lock); 645 return 1; 646 } 647 } 648 mutex_exit(&sc->sc_rsp_lock); 649 return 0; 650 } 651 652 static int 653 vmbus_reply(struct vmbus_softc *sc, struct vmbus_msg *msg) 654 { 655 int s; 656 657 if (msg->msg_flags & MSGF_NOQUEUE) 658 return 0; 659 660 while (!vmbus_reply_done(sc, msg)) { 661 if (msg->msg_flags & MSGF_NOSLEEP) { 662 delay(1000); 663 s = splnet(); 664 hyperv_intr(); 665 splx(s); 666 } else 667 tsleep(msg, PRIBIO, "hvreply", 1); 668 } 669 670 mutex_enter(&sc->sc_rsp_lock); 671 TAILQ_REMOVE(&sc->sc_rsps, msg, msg_entry); 672 mutex_exit(&sc->sc_rsp_lock); 673 674 return 0; 675 } 676 677 static uint16_t 678 vmbus_intr_signal(struct vmbus_softc *sc, paddr_t con_pa) 679 { 680 uint64_t status; 681 682 status = hyperv_hypercall_signal_event(con_pa); 683 return (uint16_t)status; 684 } 685 686 #if LONG_BIT == 64 687 #define ffsl(v) ffs64(v) 688 #elif LONG_BIT == 32 689 #define ffsl(v) ffs32(v) 690 #else 691 #error unsupport LONG_BIT 692 #endif /* LONG_BIT */ 693 694 static void 695 vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *revents, 696 int maxrow) 697 { 698 struct vmbus_channel *ch; 699 u_long pending; 700 uint32_t chanid, chanid_base; 701 int row, chanid_ofs; 702 703 for (row = 0; row < maxrow; row++) { 704 if (revents[row] == 0) 705 continue; 706 707 pending = atomic_swap_ulong(&revents[row], 0); 708 chanid_base = row * LONG_BIT; 709 710 while ((chanid_ofs = ffsl(pending)) != 0) { 711 chanid_ofs--; /* NOTE: ffs is 1-based */ 712 pending &= ~(1UL << chanid_ofs); 713 714 chanid = chanid_base + chanid_ofs; 715 /* vmbus channel protocol message */ 716 if (chanid == 0) 717 continue; 718 719 ch = vmbus_channel_lookup(sc, chanid); 720 if (ch == NULL) { 721 device_printf(sc->sc_dev, 722 "unhandled event on %d\n", chanid); 723 continue; 724 } 725 if (ch->ch_state != VMBUS_CHANSTATE_OPENED) { 726 device_printf(sc->sc_dev, 727 "channel %d is not active\n", chanid); 728 continue; 729 } 730 ch->ch_evcnt.ev_count++; 731 vmbus_channel_schedule(ch); 732 } 733 } 734 } 735 736 static void 737 vmbus_event_proc(void *arg, struct cpu_info *ci) 738 { 739 struct vmbus_softc *sc = arg; 740 struct vmbus_evtflags *evt; 741 742 /* 743 * On Host with Win8 or above, the event page can be 744 * checked directly to get the id of the channel 745 * that has the pending interrupt. 746 */ 747 evt = (struct vmbus_evtflags *)sc->sc_percpu[cpu_index(ci)].siep + 748 VMBUS_SINT_MESSAGE; 749 750 vmbus_event_flags_proc(sc, evt->evt_flags, 751 __arraycount(evt->evt_flags)); 752 } 753 754 static void 755 vmbus_event_proc_compat(void *arg, struct cpu_info *ci) 756 { 757 struct vmbus_softc *sc = arg; 758 struct vmbus_evtflags *evt; 759 760 evt = (struct vmbus_evtflags *)sc->sc_percpu[cpu_index(ci)].siep + 761 VMBUS_SINT_MESSAGE; 762 763 if (test_bit(0, &evt->evt_flags[0])) { 764 clear_bit(0, &evt->evt_flags[0]); 765 /* 766 * receive size is 1/2 page and divide that by 4 bytes 767 */ 768 vmbus_event_flags_proc(sc, sc->sc_revents, 769 VMBUS_CHAN_MAX_COMPAT / VMBUS_EVTFLAG_LEN); 770 } 771 } 772 773 static void 774 vmbus_message_proc(void *arg, struct cpu_info *ci) 775 { 776 struct vmbus_softc *sc = arg; 777 struct vmbus_message *msg; 778 779 msg = (struct vmbus_message *)sc->sc_percpu[cpu_index(ci)].simp + 780 VMBUS_SINT_MESSAGE; 781 if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) { 782 if (__predict_true(!cold)) 783 softint_schedule_cpu(sc->sc_msg_sih, ci); 784 else 785 vmbus_message_softintr(sc); 786 } 787 } 788 789 static void 790 vmbus_message_softintr(void *arg) 791 { 792 struct vmbus_softc *sc = arg; 793 struct vmbus_message *msg; 794 struct vmbus_chanmsg_hdr *hdr; 795 uint32_t type; 796 cpuid_t cpu; 797 798 cpu = cpu_index(curcpu()); 799 800 for (;;) { 801 msg = (struct vmbus_message *)sc->sc_percpu[cpu].simp + 802 VMBUS_SINT_MESSAGE; 803 if (msg->msg_type == HYPERV_MSGTYPE_NONE) 804 break; 805 806 hdr = (struct vmbus_chanmsg_hdr *)msg->msg_data; 807 type = hdr->chm_type; 808 if (type >= VMBUS_CHANMSG_COUNT) { 809 device_printf(sc->sc_dev, 810 "unhandled message type %u flags %#x\n", type, 811 msg->msg_flags); 812 } else { 813 if (vmbus_msg_dispatch[type].hmd_handler) { 814 vmbus_msg_dispatch[type].hmd_handler(sc, hdr); 815 } else { 816 device_printf(sc->sc_dev, 817 "unhandled message type %u\n", type); 818 } 819 } 820 821 msg->msg_type = HYPERV_MSGTYPE_NONE; 822 membar_sync(); 823 if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) 824 hyperv_send_eom(); 825 } 826 } 827 828 static void 829 vmbus_channel_response(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *rsphdr) 830 { 831 struct vmbus_msg *msg; 832 struct vmbus_chanmsg_hdr *reqhdr; 833 int req; 834 835 req = vmbus_msg_dispatch[rsphdr->chm_type].hmd_request; 836 mutex_enter(&sc->sc_req_lock); 837 TAILQ_FOREACH(msg, &sc->sc_reqs, msg_entry) { 838 reqhdr = (struct vmbus_chanmsg_hdr *)&msg->msg_req.hc_data; 839 if (reqhdr->chm_type == req) { 840 TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry); 841 break; 842 } 843 } 844 mutex_exit(&sc->sc_req_lock); 845 if (msg != NULL) { 846 memcpy(msg->msg_rsp, rsphdr, msg->msg_rsplen); 847 mutex_enter(&sc->sc_rsp_lock); 848 TAILQ_INSERT_TAIL(&sc->sc_rsps, msg, msg_entry); 849 mutex_exit(&sc->sc_rsp_lock); 850 wakeup(msg); 851 } 852 } 853 854 static void 855 vmbus_channel_offer(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr) 856 { 857 struct vmbus_chanmsg_choffer *co; 858 859 co = kmem_intr_alloc(sizeof(*co), KM_NOSLEEP); 860 if (co == NULL) { 861 device_printf(sc->sc_dev, 862 "failed to allocate an offer object\n"); 863 return; 864 } 865 866 memcpy(co, hdr, sizeof(*co)); 867 vmbus_chevq_enqueue(sc, VMBUS_CHEV_TYPE_OFFER, co); 868 } 869 870 static void 871 vmbus_channel_rescind(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr) 872 { 873 struct vmbus_chanmsg_chrescind *cr; 874 875 cr = kmem_intr_alloc(sizeof(*cr), KM_NOSLEEP); 876 if (cr == NULL) { 877 device_printf(sc->sc_dev, 878 "failed to allocate an rescind object\n"); 879 return; 880 } 881 882 memcpy(cr, hdr, sizeof(*cr)); 883 vmbus_chevq_enqueue(sc, VMBUS_CHEV_TYPE_RESCIND, cr); 884 } 885 886 static void 887 vmbus_channel_delivered(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr) 888 { 889 890 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_OFFERS_DELIVERED); 891 wakeup(&sc->sc_devq); 892 } 893 894 static void 895 hyperv_guid_sprint(struct hyperv_guid *guid, char *str, size_t size) 896 { 897 static const struct { 898 const struct hyperv_guid *guid; 899 const char *ident; 900 } map[] = { 901 { &hyperv_guid_network, "network" }, 902 { &hyperv_guid_ide, "ide" }, 903 { &hyperv_guid_scsi, "scsi" }, 904 { &hyperv_guid_shutdown, "shutdown" }, 905 { &hyperv_guid_timesync, "timesync" }, 906 { &hyperv_guid_heartbeat, "heartbeat" }, 907 { &hyperv_guid_kvp, "kvp" }, 908 { &hyperv_guid_vss, "vss" }, 909 { &hyperv_guid_dynmem, "dynamic-memory" }, 910 { &hyperv_guid_mouse, "mouse" }, 911 { &hyperv_guid_kbd, "keyboard" }, 912 { &hyperv_guid_video, "video" }, 913 { &hyperv_guid_fc, "fiber-channel" }, 914 { &hyperv_guid_fcopy, "file-copy" }, 915 { &hyperv_guid_pcie, "pcie-passthrough" }, 916 { &hyperv_guid_netdir, "network-direct" }, 917 { &hyperv_guid_rdesktop, "remote-desktop" }, 918 { &hyperv_guid_avma1, "avma-1" }, 919 { &hyperv_guid_avma2, "avma-2" }, 920 { &hyperv_guid_avma3, "avma-3" }, 921 { &hyperv_guid_avma4, "avma-4" }, 922 }; 923 int i; 924 925 for (i = 0; i < __arraycount(map); i++) { 926 if (memcmp(guid, map[i].guid, sizeof(*guid)) == 0) { 927 strlcpy(str, map[i].ident, size); 928 return; 929 } 930 } 931 hyperv_guid2str(guid, str, size); 932 } 933 934 static int 935 vmbus_channel_scan(struct vmbus_softc *sc) 936 { 937 struct vmbus_chanmsg_hdr hdr; 938 struct vmbus_chanmsg_choffer rsp; 939 940 TAILQ_INIT(&sc->sc_prichans); 941 mutex_init(&sc->sc_prichan_lock, MUTEX_DEFAULT, IPL_NET); 942 TAILQ_INIT(&sc->sc_channels); 943 mutex_init(&sc->sc_channel_lock, MUTEX_DEFAULT, IPL_NET); 944 945 /* 946 * This queue serializes vmbus channel offer and rescind messages. 947 */ 948 SIMPLEQ_INIT(&sc->sc_chevq); 949 mutex_init(&sc->sc_chevq_lock, MUTEX_DEFAULT, IPL_NET); 950 cv_init(&sc->sc_chevq_cv, "hvchevcv"); 951 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, 952 vmbus_chevq_thread, sc, NULL, "hvchevq") != 0) { 953 DPRINTF("%s: failed to create prich chevq thread\n", 954 device_xname(sc->sc_dev)); 955 return -1; 956 } 957 958 /* 959 * This queue serializes vmbus devices' attach and detach 960 * for channel offer and rescind messages. 961 */ 962 SIMPLEQ_INIT(&sc->sc_devq); 963 mutex_init(&sc->sc_devq_lock, MUTEX_DEFAULT, IPL_NET); 964 cv_init(&sc->sc_devq_cv, "hvdevqcv"); 965 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, 966 vmbus_devq_thread, sc, NULL, "hvdevq") != 0) { 967 DPRINTF("%s: failed to create prich devq thread\n", 968 device_xname(sc->sc_dev)); 969 return -1; 970 } 971 972 /* 973 * This queue handles sub-channel detach, so that vmbus 974 * device's detach running in sc_devq can drain its sub-channels. 975 */ 976 SIMPLEQ_INIT(&sc->sc_subch_devq); 977 mutex_init(&sc->sc_subch_devq_lock, MUTEX_DEFAULT, IPL_NET); 978 cv_init(&sc->sc_subch_devq_cv, "hvsdvqcv"); 979 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, 980 vmbus_subchannel_devq_thread, sc, NULL, "hvsdevq") != 0) { 981 DPRINTF("%s: failed to create subch devq thread\n", 982 device_xname(sc->sc_dev)); 983 return -1; 984 } 985 986 memset(&hdr, 0, sizeof(hdr)); 987 hdr.chm_type = VMBUS_CHANMSG_CHREQUEST; 988 989 if (vmbus_cmd(sc, &hdr, sizeof(hdr), &rsp, sizeof(rsp), 990 HCF_NOREPLY | (cold ? HCF_NOSLEEP : HCF_SLEEPOK))) { 991 DPRINTF("%s: CHREQUEST failed\n", device_xname(sc->sc_dev)); 992 return -1; 993 } 994 995 while (!ISSET(sc->sc_flags, VMBUS_SCFLAG_OFFERS_DELIVERED)) 996 tsleep(&sc->sc_devq, PRIBIO, "hvscan", 1); 997 998 mutex_enter(&sc->sc_chevq_lock); 999 vmbus_process_chevq(sc); 1000 mutex_exit(&sc->sc_chevq_lock); 1001 mutex_enter(&sc->sc_devq_lock); 1002 vmbus_process_devq(sc); 1003 mutex_exit(&sc->sc_devq_lock); 1004 1005 return 0; 1006 } 1007 1008 static struct vmbus_channel * 1009 vmbus_channel_alloc(struct vmbus_softc *sc) 1010 { 1011 struct vmbus_channel *ch; 1012 1013 ch = kmem_zalloc(sizeof(*ch), KM_SLEEP); 1014 1015 ch->ch_monprm = hyperv_dma_alloc(sc->sc_dmat, &ch->ch_monprm_dma, 1016 sizeof(*ch->ch_monprm), 8, 0, 1, HYPERV_DMA_SLEEPOK); 1017 if (ch->ch_monprm == NULL) { 1018 device_printf(sc->sc_dev, "monprm alloc failed\n"); 1019 kmem_free(ch, sizeof(*ch)); 1020 return NULL; 1021 } 1022 1023 ch->ch_refs = 1; 1024 ch->ch_sc = sc; 1025 mutex_init(&ch->ch_subchannel_lock, MUTEX_DEFAULT, IPL_NET); 1026 TAILQ_INIT(&ch->ch_subchannels); 1027 1028 ch->ch_state = VMBUS_CHANSTATE_CLOSED; 1029 1030 return ch; 1031 } 1032 1033 static void 1034 vmbus_channel_free(struct vmbus_channel *ch) 1035 { 1036 struct vmbus_softc *sc = ch->ch_sc; 1037 1038 KASSERTMSG(TAILQ_EMPTY(&ch->ch_subchannels) && 1039 ch->ch_subchannel_count == 0, "still owns sub-channels"); 1040 KASSERTMSG(ch->ch_state == 0 || ch->ch_state == VMBUS_CHANSTATE_CLOSED, 1041 "free busy channel"); 1042 KASSERTMSG(ch->ch_refs == 0, "channel %u: invalid refcnt %d", 1043 ch->ch_id, ch->ch_refs); 1044 1045 hyperv_dma_free(sc->sc_dmat, &ch->ch_monprm_dma); 1046 mutex_destroy(&ch->ch_subchannel_lock); 1047 /* XXX ch_evcnt */ 1048 if (ch->ch_taskq != NULL) 1049 softint_disestablish(ch->ch_taskq); 1050 kmem_free(ch, sizeof(*ch)); 1051 } 1052 1053 static int 1054 vmbus_channel_add(struct vmbus_channel *nch) 1055 { 1056 struct vmbus_softc *sc = nch->ch_sc; 1057 struct vmbus_channel *ch; 1058 u_int refs __diagused; 1059 1060 if (nch->ch_id == 0) { 1061 device_printf(sc->sc_dev, "got channel 0 offer, discard\n"); 1062 return EINVAL; 1063 } else if (nch->ch_id >= sc->sc_channel_max) { 1064 device_printf(sc->sc_dev, "invalid channel %u offer\n", 1065 nch->ch_id); 1066 return EINVAL; 1067 } 1068 1069 mutex_enter(&sc->sc_prichan_lock); 1070 TAILQ_FOREACH(ch, &sc->sc_prichans, ch_prientry) { 1071 if (!memcmp(&ch->ch_type, &nch->ch_type, sizeof(ch->ch_type)) && 1072 !memcmp(&ch->ch_inst, &nch->ch_inst, sizeof(ch->ch_inst))) 1073 break; 1074 } 1075 if (VMBUS_CHAN_ISPRIMARY(nch)) { 1076 if (ch == NULL) { 1077 TAILQ_INSERT_TAIL(&sc->sc_prichans, nch, ch_prientry); 1078 mutex_exit(&sc->sc_prichan_lock); 1079 goto done; 1080 } else { 1081 mutex_exit(&sc->sc_prichan_lock); 1082 device_printf(sc->sc_dev, 1083 "duplicated primary channel%u\n", nch->ch_id); 1084 return EINVAL; 1085 } 1086 } else { 1087 if (ch == NULL) { 1088 mutex_exit(&sc->sc_prichan_lock); 1089 device_printf(sc->sc_dev, "no primary channel%u\n", 1090 nch->ch_id); 1091 return EINVAL; 1092 } 1093 } 1094 mutex_exit(&sc->sc_prichan_lock); 1095 1096 KASSERT(!VMBUS_CHAN_ISPRIMARY(nch)); 1097 KASSERT(ch != NULL); 1098 1099 refs = atomic_inc_uint_nv(&nch->ch_refs); 1100 KASSERT(refs == 2); 1101 1102 nch->ch_primary_channel = ch; 1103 nch->ch_dev = ch->ch_dev; 1104 1105 mutex_enter(&ch->ch_subchannel_lock); 1106 TAILQ_INSERT_TAIL(&ch->ch_subchannels, nch, ch_subentry); 1107 ch->ch_subchannel_count++; 1108 mutex_exit(&ch->ch_subchannel_lock); 1109 wakeup(ch); 1110 1111 done: 1112 mutex_enter(&sc->sc_channel_lock); 1113 TAILQ_INSERT_TAIL(&sc->sc_channels, nch, ch_entry); 1114 mutex_exit(&sc->sc_channel_lock); 1115 1116 vmbus_channel_cpu_default(nch); 1117 1118 return 0; 1119 } 1120 1121 void 1122 vmbus_channel_cpu_set(struct vmbus_channel *ch, int cpu) 1123 { 1124 struct vmbus_softc *sc = ch->ch_sc; 1125 1126 KASSERTMSG(cpu >= 0 && cpu < ncpu, "invalid cpu %d", cpu); 1127 1128 if (sc->sc_proto == VMBUS_VERSION_WS2008 || 1129 sc->sc_proto == VMBUS_VERSION_WIN7) { 1130 /* Only cpu0 is supported */ 1131 cpu = 0; 1132 } 1133 1134 ch->ch_cpuid = cpu; 1135 ch->ch_vcpu = hyperv_get_vcpuid(cpu); 1136 } 1137 1138 void 1139 vmbus_channel_cpu_rr(struct vmbus_channel *ch) 1140 { 1141 static uint32_t vmbus_channel_nextcpu; 1142 int cpu; 1143 1144 cpu = atomic_inc_32_nv(&vmbus_channel_nextcpu) % ncpu; 1145 vmbus_channel_cpu_set(ch, cpu); 1146 } 1147 1148 static void 1149 vmbus_channel_cpu_default(struct vmbus_channel *ch) 1150 { 1151 1152 /* 1153 * By default, pin the channel to cpu0. Devices having 1154 * special channel-cpu mapping requirement should call 1155 * vmbus_channel_cpu_{set,rr}(). 1156 */ 1157 vmbus_channel_cpu_set(ch, 0); 1158 } 1159 1160 bool 1161 vmbus_channel_is_revoked(struct vmbus_channel *ch) 1162 { 1163 1164 return (ch->ch_flags & CHF_REVOKED) ? true : false; 1165 } 1166 1167 static void 1168 vmbus_process_offer(struct vmbus_softc *sc, struct vmbus_chanmsg_choffer *co) 1169 { 1170 struct vmbus_channel *ch; 1171 1172 ch = vmbus_channel_alloc(sc); 1173 if (ch == NULL) { 1174 device_printf(sc->sc_dev, "allocate channel %u failed\n", 1175 co->chm_chanid); 1176 return; 1177 } 1178 1179 /* 1180 * By default we setup state to enable batched reading. 1181 * A specific service can choose to disable this prior 1182 * to opening the channel. 1183 */ 1184 ch->ch_flags |= CHF_BATCHED; 1185 1186 hyperv_guid_sprint(&co->chm_chtype, ch->ch_ident, 1187 sizeof(ch->ch_ident)); 1188 1189 ch->ch_monprm->mp_connid = VMBUS_CONNID_EVENT; 1190 if (sc->sc_proto > VMBUS_VERSION_WS2008) 1191 ch->ch_monprm->mp_connid = co->chm_connid; 1192 1193 if (co->chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) { 1194 ch->ch_mgroup = co->chm_montrig / VMBUS_MONTRIG_LEN; 1195 ch->ch_mindex = co->chm_montrig % VMBUS_MONTRIG_LEN; 1196 ch->ch_flags |= CHF_MONITOR; 1197 } 1198 1199 ch->ch_id = co->chm_chanid; 1200 ch->ch_subidx = co->chm_subidx; 1201 1202 memcpy(&ch->ch_type, &co->chm_chtype, sizeof(ch->ch_type)); 1203 memcpy(&ch->ch_inst, &co->chm_chinst, sizeof(ch->ch_inst)); 1204 1205 if (vmbus_channel_add(ch) != 0) { 1206 atomic_dec_uint(&ch->ch_refs); 1207 vmbus_channel_free(ch); 1208 return; 1209 } 1210 1211 ch->ch_state = VMBUS_CHANSTATE_OFFERED; 1212 1213 vmbus_devq_enqueue(sc, VMBUS_DEV_TYPE_ATTACH, ch); 1214 1215 #ifdef HYPERV_DEBUG 1216 printf("%s: channel %u: \"%s\"", device_xname(sc->sc_dev), ch->ch_id, 1217 ch->ch_ident); 1218 if (ch->ch_flags & CHF_MONITOR) 1219 printf(", monitor %u\n", co->chm_montrig); 1220 else 1221 printf("\n"); 1222 #endif 1223 } 1224 1225 static void 1226 vmbus_process_rescind(struct vmbus_softc *sc, 1227 struct vmbus_chanmsg_chrescind *cr) 1228 { 1229 struct vmbus_channel *ch; 1230 1231 if (cr->chm_chanid > VMBUS_CHAN_MAX) { 1232 device_printf(sc->sc_dev, "invalid revoked channel%u\n", 1233 cr->chm_chanid); 1234 return; 1235 } 1236 1237 mutex_enter(&sc->sc_channel_lock); 1238 ch = vmbus_channel_lookup(sc, cr->chm_chanid); 1239 if (ch == NULL) { 1240 mutex_exit(&sc->sc_channel_lock); 1241 device_printf(sc->sc_dev, "channel%u is not offered\n", 1242 cr->chm_chanid); 1243 return; 1244 } 1245 TAILQ_REMOVE(&sc->sc_channels, ch, ch_entry); 1246 mutex_exit(&sc->sc_channel_lock); 1247 1248 if (VMBUS_CHAN_ISPRIMARY(ch)) { 1249 mutex_enter(&sc->sc_prichan_lock); 1250 TAILQ_REMOVE(&sc->sc_prichans, ch, ch_prientry); 1251 mutex_exit(&sc->sc_prichan_lock); 1252 } 1253 1254 KASSERTMSG(!(ch->ch_flags & CHF_REVOKED), 1255 "channel%u has already been revoked", ch->ch_id); 1256 atomic_or_uint(&ch->ch_flags, CHF_REVOKED); 1257 1258 vmbus_channel_detach(ch); 1259 } 1260 1261 static int 1262 vmbus_channel_release(struct vmbus_channel *ch) 1263 { 1264 struct vmbus_softc *sc = ch->ch_sc; 1265 struct vmbus_chanmsg_chfree cmd; 1266 int rv; 1267 1268 memset(&cmd, 0, sizeof(cmd)); 1269 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHFREE; 1270 cmd.chm_chanid = ch->ch_id; 1271 1272 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0, 1273 HCF_NOREPLY | (cold ? HCF_NOSLEEP : HCF_SLEEPOK)); 1274 if (rv) { 1275 DPRINTF("%s: CHFREE failed with %d\n", device_xname(sc->sc_dev), 1276 rv); 1277 } 1278 return rv; 1279 } 1280 1281 struct vmbus_channel ** 1282 vmbus_subchannel_get(struct vmbus_channel *prich, int cnt) 1283 { 1284 struct vmbus_softc *sc = prich->ch_sc; 1285 struct vmbus_channel **ret, *ch; 1286 int i, s; 1287 1288 KASSERTMSG(cnt > 0, "invalid sub-channel count %d", cnt); 1289 1290 ret = kmem_zalloc(sizeof(struct vmbus_channel *) * cnt, KM_SLEEP); 1291 1292 mutex_enter(&prich->ch_subchannel_lock); 1293 1294 while (prich->ch_subchannel_count < cnt) { 1295 if (cold) { 1296 mutex_exit(&prich->ch_subchannel_lock); 1297 delay(1000); 1298 s = splnet(); 1299 hyperv_intr(); 1300 splx(s); 1301 mutex_enter(&sc->sc_chevq_lock); 1302 vmbus_process_chevq(sc); 1303 mutex_exit(&sc->sc_chevq_lock); 1304 mutex_enter(&prich->ch_subchannel_lock); 1305 } else { 1306 mtsleep(prich, PRIBIO, "hvsubch", 1, 1307 &prich->ch_subchannel_lock); 1308 } 1309 } 1310 1311 i = 0; 1312 TAILQ_FOREACH(ch, &prich->ch_subchannels, ch_subentry) { 1313 ret[i] = ch; /* XXX inc refs */ 1314 1315 if (++i == cnt) 1316 break; 1317 } 1318 1319 KASSERTMSG(i == cnt, "invalid subchan count %d, should be %d", 1320 prich->ch_subchannel_count, cnt); 1321 1322 mutex_exit(&prich->ch_subchannel_lock); 1323 1324 return ret; 1325 } 1326 1327 void 1328 vmbus_subchannel_put(struct vmbus_channel **subch, int cnt) 1329 { 1330 1331 kmem_free(subch, sizeof(struct vmbus_channel *) * cnt); 1332 } 1333 1334 static struct vmbus_channel * 1335 vmbus_channel_lookup(struct vmbus_softc *sc, uint32_t relid) 1336 { 1337 struct vmbus_channel *ch; 1338 1339 TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) { 1340 if (ch->ch_id == relid) 1341 return ch; 1342 } 1343 return NULL; 1344 } 1345 1346 static int 1347 vmbus_channel_ring_create(struct vmbus_channel *ch, uint32_t buflen) 1348 { 1349 struct vmbus_softc *sc = ch->ch_sc; 1350 1351 buflen = roundup(buflen, PAGE_SIZE) + sizeof(struct vmbus_bufring); 1352 ch->ch_ring_size = 2 * buflen; 1353 /* page aligned memory */ 1354 ch->ch_ring = hyperv_dma_alloc(sc->sc_dmat, &ch->ch_ring_dma, 1355 ch->ch_ring_size, PAGE_SIZE, 0, 1, HYPERV_DMA_SLEEPOK); 1356 if (ch->ch_ring == NULL) { 1357 device_printf(sc->sc_dev, 1358 "failed to allocate channel ring\n"); 1359 return ENOMEM; 1360 } 1361 1362 memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd)); 1363 ch->ch_wrd.rd_ring = (struct vmbus_bufring *)ch->ch_ring; 1364 ch->ch_wrd.rd_size = buflen; 1365 ch->ch_wrd.rd_dsize = buflen - sizeof(struct vmbus_bufring); 1366 mutex_init(&ch->ch_wrd.rd_lock, MUTEX_DEFAULT, IPL_NET); 1367 1368 memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd)); 1369 ch->ch_rrd.rd_ring = (struct vmbus_bufring *)((uint8_t *)ch->ch_ring + 1370 buflen); 1371 ch->ch_rrd.rd_size = buflen; 1372 ch->ch_rrd.rd_dsize = buflen - sizeof(struct vmbus_bufring); 1373 mutex_init(&ch->ch_rrd.rd_lock, MUTEX_DEFAULT, IPL_NET); 1374 1375 if (vmbus_handle_alloc(ch, &ch->ch_ring_dma, ch->ch_ring_size, 1376 &ch->ch_ring_gpadl)) { 1377 device_printf(sc->sc_dev, 1378 "failed to obtain a PA handle for the ring\n"); 1379 vmbus_channel_ring_destroy(ch); 1380 return ENOMEM; 1381 } 1382 1383 return 0; 1384 } 1385 1386 static void 1387 vmbus_channel_ring_destroy(struct vmbus_channel *ch) 1388 { 1389 struct vmbus_softc *sc = ch->ch_sc; 1390 1391 hyperv_dma_free(sc->sc_dmat, &ch->ch_ring_dma); 1392 ch->ch_ring = NULL; 1393 vmbus_handle_free(ch, ch->ch_ring_gpadl); 1394 1395 mutex_destroy(&ch->ch_wrd.rd_lock); 1396 memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd)); 1397 mutex_destroy(&ch->ch_rrd.rd_lock); 1398 memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd)); 1399 } 1400 1401 int 1402 vmbus_channel_open(struct vmbus_channel *ch, size_t buflen, void *udata, 1403 size_t udatalen, void (*handler)(void *), void *arg) 1404 { 1405 struct vmbus_softc *sc = ch->ch_sc; 1406 struct vmbus_chanmsg_chopen cmd; 1407 struct vmbus_chanmsg_chopen_resp rsp; 1408 int rv = EINVAL; 1409 1410 if (ch->ch_ring == NULL && 1411 (rv = vmbus_channel_ring_create(ch, buflen))) { 1412 DPRINTF("%s: failed to create channel ring\n", 1413 device_xname(sc->sc_dev)); 1414 return rv; 1415 } 1416 1417 memset(&cmd, 0, sizeof(cmd)); 1418 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHOPEN; 1419 cmd.chm_openid = ch->ch_id; 1420 cmd.chm_chanid = ch->ch_id; 1421 cmd.chm_gpadl = ch->ch_ring_gpadl; 1422 cmd.chm_txbr_pgcnt = atop(ch->ch_wrd.rd_size); 1423 cmd.chm_vcpuid = ch->ch_vcpu; 1424 if (udata && udatalen > 0) 1425 memcpy(cmd.chm_udata, udata, udatalen); 1426 1427 memset(&rsp, 0, sizeof(rsp)); 1428 1429 ch->ch_handler = handler; 1430 ch->ch_ctx = arg; 1431 ch->ch_state = VMBUS_CHANSTATE_OPENED; 1432 1433 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), 1434 cold ? HCF_NOSLEEP : HCF_SLEEPOK); 1435 if (rv) { 1436 vmbus_channel_ring_destroy(ch); 1437 DPRINTF("%s: CHOPEN failed with %d\n", device_xname(sc->sc_dev), 1438 rv); 1439 ch->ch_handler = NULL; 1440 ch->ch_ctx = NULL; 1441 ch->ch_state = VMBUS_CHANSTATE_OFFERED; 1442 return rv; 1443 } 1444 return 0; 1445 } 1446 1447 static void 1448 vmbus_channel_detach(struct vmbus_channel *ch) 1449 { 1450 u_int refs; 1451 1452 KASSERTMSG(ch->ch_refs > 0, "channel%u: invalid refcnt %d", 1453 ch->ch_id, ch->ch_refs); 1454 1455 refs = atomic_dec_uint_nv(&ch->ch_refs); 1456 if (refs == 0) { 1457 /* Detach the target channel. */ 1458 vmbus_devq_enqueue(ch->ch_sc, VMBUS_DEV_TYPE_DETACH, ch); 1459 } 1460 } 1461 1462 static int 1463 vmbus_channel_close_internal(struct vmbus_channel *ch) 1464 { 1465 struct vmbus_softc *sc = ch->ch_sc; 1466 struct vmbus_chanmsg_chclose cmd; 1467 int rv; 1468 1469 memset(&cmd, 0, sizeof(cmd)); 1470 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHCLOSE; 1471 cmd.chm_chanid = ch->ch_id; 1472 1473 ch->ch_state = VMBUS_CHANSTATE_CLOSING; 1474 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0, 1475 HCF_NOREPLY | (cold ? HCF_NOSLEEP : HCF_SLEEPOK)); 1476 if (rv) { 1477 DPRINTF("%s: CHCLOSE failed with %d\n", 1478 device_xname(sc->sc_dev), rv); 1479 return rv; 1480 } 1481 ch->ch_state = VMBUS_CHANSTATE_CLOSED; 1482 vmbus_channel_ring_destroy(ch); 1483 return 0; 1484 } 1485 1486 int 1487 vmbus_channel_close_direct(struct vmbus_channel *ch) 1488 { 1489 int rv; 1490 1491 rv = vmbus_channel_close_internal(ch); 1492 if (!VMBUS_CHAN_ISPRIMARY(ch)) 1493 vmbus_channel_detach(ch); 1494 return rv; 1495 } 1496 1497 int 1498 vmbus_channel_close(struct vmbus_channel *ch) 1499 { 1500 struct vmbus_channel **subch; 1501 int i, cnt, rv; 1502 1503 if (!VMBUS_CHAN_ISPRIMARY(ch)) 1504 return 0; 1505 1506 cnt = ch->ch_subchannel_count; 1507 if (cnt > 0) { 1508 subch = vmbus_subchannel_get(ch, cnt); 1509 for (i = 0; i < ch->ch_subchannel_count; i++) { 1510 rv = vmbus_channel_close_internal(subch[i]); 1511 (void) rv; /* XXX */ 1512 vmbus_channel_detach(ch); 1513 } 1514 vmbus_subchannel_put(subch, cnt); 1515 } 1516 1517 return vmbus_channel_close_internal(ch); 1518 } 1519 1520 static inline void 1521 vmbus_channel_setevent(struct vmbus_softc *sc, struct vmbus_channel *ch) 1522 { 1523 struct vmbus_mon_trig *mtg; 1524 1525 /* Each uint32_t represents 32 channels */ 1526 set_bit(ch->ch_id, sc->sc_wevents); 1527 if (ch->ch_flags & CHF_MONITOR) { 1528 mtg = &sc->sc_monitor[1]->mnf_trigs[ch->ch_mgroup]; 1529 set_bit(ch->ch_mindex, &mtg->mt_pending); 1530 } else 1531 vmbus_intr_signal(sc, hyperv_dma_get_paddr(&ch->ch_monprm_dma)); 1532 } 1533 1534 static void 1535 vmbus_channel_intr(void *arg) 1536 { 1537 struct vmbus_channel *ch = arg; 1538 1539 if (vmbus_channel_ready(ch)) 1540 ch->ch_handler(ch->ch_ctx); 1541 1542 if (vmbus_channel_unpause(ch) == 0) 1543 return; 1544 1545 vmbus_channel_pause(ch); 1546 vmbus_channel_schedule(ch); 1547 } 1548 1549 int 1550 vmbus_channel_setdeferred(struct vmbus_channel *ch, const char *name) 1551 { 1552 1553 ch->ch_taskq = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 1554 vmbus_channel_intr, ch); 1555 if (ch->ch_taskq == NULL) 1556 return -1; 1557 return 0; 1558 } 1559 1560 void 1561 vmbus_channel_schedule(struct vmbus_channel *ch) 1562 { 1563 1564 if (ch->ch_handler) { 1565 if (!cold && (ch->ch_flags & CHF_BATCHED)) { 1566 vmbus_channel_pause(ch); 1567 softint_schedule(ch->ch_taskq); 1568 } else 1569 ch->ch_handler(ch->ch_ctx); 1570 } 1571 } 1572 1573 static __inline void 1574 vmbus_ring_put(struct vmbus_ring_data *wrd, uint8_t *data, uint32_t datalen) 1575 { 1576 int left = MIN(datalen, wrd->rd_dsize - wrd->rd_prod); 1577 1578 memcpy(&wrd->rd_ring->br_data[wrd->rd_prod], data, left); 1579 memcpy(&wrd->rd_ring->br_data[0], data + left, datalen - left); 1580 wrd->rd_prod += datalen; 1581 if (wrd->rd_prod >= wrd->rd_dsize) 1582 wrd->rd_prod -= wrd->rd_dsize; 1583 } 1584 1585 static inline void 1586 vmbus_ring_get(struct vmbus_ring_data *rrd, uint8_t *data, uint32_t datalen, 1587 int peek) 1588 { 1589 int left = MIN(datalen, rrd->rd_dsize - rrd->rd_cons); 1590 1591 memcpy(data, &rrd->rd_ring->br_data[rrd->rd_cons], left); 1592 memcpy(data + left, &rrd->rd_ring->br_data[0], datalen - left); 1593 if (!peek) { 1594 rrd->rd_cons += datalen; 1595 if (rrd->rd_cons >= rrd->rd_dsize) 1596 rrd->rd_cons -= rrd->rd_dsize; 1597 } 1598 } 1599 1600 static __inline void 1601 vmbus_ring_avail(struct vmbus_ring_data *rd, uint32_t *towrite, 1602 uint32_t *toread) 1603 { 1604 uint32_t ridx = rd->rd_ring->br_rindex; 1605 uint32_t widx = rd->rd_ring->br_windex; 1606 uint32_t r, w; 1607 1608 if (widx >= ridx) 1609 w = rd->rd_dsize - (widx - ridx); 1610 else 1611 w = ridx - widx; 1612 r = rd->rd_dsize - w; 1613 if (towrite) 1614 *towrite = w; 1615 if (toread) 1616 *toread = r; 1617 } 1618 1619 static int 1620 vmbus_ring_write(struct vmbus_ring_data *wrd, struct iovec *iov, int iov_cnt, 1621 int *needsig) 1622 { 1623 uint64_t indices = 0; 1624 uint32_t avail, oprod, datalen = sizeof(indices); 1625 int i; 1626 1627 for (i = 0; i < iov_cnt; i++) 1628 datalen += iov[i].iov_len; 1629 1630 KASSERT(datalen <= wrd->rd_dsize); 1631 1632 vmbus_ring_avail(wrd, &avail, NULL); 1633 if (avail <= datalen) { 1634 DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen); 1635 return EAGAIN; 1636 } 1637 1638 oprod = wrd->rd_prod; 1639 1640 for (i = 0; i < iov_cnt; i++) 1641 vmbus_ring_put(wrd, iov[i].iov_base, iov[i].iov_len); 1642 1643 indices = (uint64_t)oprod << 32; 1644 vmbus_ring_put(wrd, (uint8_t *)&indices, sizeof(indices)); 1645 1646 membar_sync(); 1647 wrd->rd_ring->br_windex = wrd->rd_prod; 1648 membar_sync(); 1649 1650 /* Signal when the ring transitions from being empty to non-empty */ 1651 if (wrd->rd_ring->br_imask == 0 && 1652 wrd->rd_ring->br_rindex == oprod) 1653 *needsig = 1; 1654 else 1655 *needsig = 0; 1656 1657 return 0; 1658 } 1659 1660 int 1661 vmbus_channel_send(struct vmbus_channel *ch, void *data, uint32_t datalen, 1662 uint64_t rid, int type, uint32_t flags) 1663 { 1664 struct vmbus_softc *sc = ch->ch_sc; 1665 struct vmbus_chanpkt cp; 1666 struct iovec iov[3]; 1667 uint32_t pktlen, pktlen_aligned; 1668 uint64_t zeropad = 0; 1669 int rv, needsig = 0; 1670 1671 pktlen = sizeof(cp) + datalen; 1672 pktlen_aligned = roundup(pktlen, sizeof(uint64_t)); 1673 1674 cp.cp_hdr.cph_type = type; 1675 cp.cp_hdr.cph_flags = flags; 1676 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp)); 1677 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned); 1678 cp.cp_hdr.cph_tid = rid; 1679 1680 iov[0].iov_base = &cp; 1681 iov[0].iov_len = sizeof(cp); 1682 1683 iov[1].iov_base = data; 1684 iov[1].iov_len = datalen; 1685 1686 iov[2].iov_base = &zeropad; 1687 iov[2].iov_len = pktlen_aligned - pktlen; 1688 1689 mutex_enter(&ch->ch_wrd.rd_lock); 1690 rv = vmbus_ring_write(&ch->ch_wrd, iov, 3, &needsig); 1691 mutex_exit(&ch->ch_wrd.rd_lock); 1692 if (rv == 0 && needsig) 1693 vmbus_channel_setevent(sc, ch); 1694 1695 return rv; 1696 } 1697 1698 int 1699 vmbus_channel_send_sgl(struct vmbus_channel *ch, struct vmbus_gpa *sgl, 1700 uint32_t nsge, void *data, uint32_t datalen, uint64_t rid) 1701 { 1702 struct vmbus_softc *sc = ch->ch_sc; 1703 struct vmbus_chanpkt_sglist cp; 1704 struct iovec iov[4]; 1705 uint32_t buflen, pktlen, pktlen_aligned; 1706 uint64_t zeropad = 0; 1707 int rv, needsig = 0; 1708 1709 buflen = sizeof(struct vmbus_gpa) * nsge; 1710 pktlen = sizeof(cp) + datalen + buflen; 1711 pktlen_aligned = roundup(pktlen, sizeof(uint64_t)); 1712 1713 cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA; 1714 cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC; 1715 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen); 1716 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned); 1717 cp.cp_hdr.cph_tid = rid; 1718 cp.cp_gpa_cnt = nsge; 1719 cp.cp_rsvd = 0; 1720 1721 iov[0].iov_base = &cp; 1722 iov[0].iov_len = sizeof(cp); 1723 1724 iov[1].iov_base = sgl; 1725 iov[1].iov_len = buflen; 1726 1727 iov[2].iov_base = data; 1728 iov[2].iov_len = datalen; 1729 1730 iov[3].iov_base = &zeropad; 1731 iov[3].iov_len = pktlen_aligned - pktlen; 1732 1733 mutex_enter(&ch->ch_wrd.rd_lock); 1734 rv = vmbus_ring_write(&ch->ch_wrd, iov, 4, &needsig); 1735 mutex_exit(&ch->ch_wrd.rd_lock); 1736 if (rv == 0 && needsig) 1737 vmbus_channel_setevent(sc, ch); 1738 1739 return rv; 1740 } 1741 1742 int 1743 vmbus_channel_send_prpl(struct vmbus_channel *ch, struct vmbus_gpa_range *prpl, 1744 uint32_t nprp, void *data, uint32_t datalen, uint64_t rid) 1745 { 1746 struct vmbus_softc *sc = ch->ch_sc; 1747 struct vmbus_chanpkt_prplist cp; 1748 struct iovec iov[4]; 1749 uint32_t buflen, pktlen, pktlen_aligned; 1750 uint64_t zeropad = 0; 1751 int rv, needsig = 0; 1752 1753 buflen = sizeof(struct vmbus_gpa_range) * (nprp + 1); 1754 pktlen = sizeof(cp) + datalen + buflen; 1755 pktlen_aligned = roundup(pktlen, sizeof(uint64_t)); 1756 1757 cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA; 1758 cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC; 1759 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen); 1760 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned); 1761 cp.cp_hdr.cph_tid = rid; 1762 cp.cp_range_cnt = 1; 1763 cp.cp_rsvd = 0; 1764 1765 iov[0].iov_base = &cp; 1766 iov[0].iov_len = sizeof(cp); 1767 1768 iov[1].iov_base = prpl; 1769 iov[1].iov_len = buflen; 1770 1771 iov[2].iov_base = data; 1772 iov[2].iov_len = datalen; 1773 1774 iov[3].iov_base = &zeropad; 1775 iov[3].iov_len = pktlen_aligned - pktlen; 1776 1777 mutex_enter(&ch->ch_wrd.rd_lock); 1778 rv = vmbus_ring_write(&ch->ch_wrd, iov, 4, &needsig); 1779 mutex_exit(&ch->ch_wrd.rd_lock); 1780 if (rv == 0 && needsig) 1781 vmbus_channel_setevent(sc, ch); 1782 1783 return rv; 1784 } 1785 1786 static int 1787 vmbus_ring_peek(struct vmbus_ring_data *rrd, void *data, uint32_t datalen) 1788 { 1789 uint32_t avail; 1790 1791 KASSERT(datalen <= rrd->rd_dsize); 1792 1793 vmbus_ring_avail(rrd, NULL, &avail); 1794 if (avail < datalen) 1795 return EAGAIN; 1796 1797 vmbus_ring_get(rrd, (uint8_t *)data, datalen, 1); 1798 return 0; 1799 } 1800 1801 static int 1802 vmbus_ring_read(struct vmbus_ring_data *rrd, void *data, uint32_t datalen, 1803 uint32_t offset) 1804 { 1805 uint64_t indices; 1806 uint32_t avail; 1807 1808 KASSERT(datalen <= rrd->rd_dsize); 1809 1810 vmbus_ring_avail(rrd, NULL, &avail); 1811 if (avail < datalen) { 1812 DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen); 1813 return EAGAIN; 1814 } 1815 1816 if (offset) { 1817 rrd->rd_cons += offset; 1818 if (rrd->rd_cons >= rrd->rd_dsize) 1819 rrd->rd_cons -= rrd->rd_dsize; 1820 } 1821 1822 vmbus_ring_get(rrd, (uint8_t *)data, datalen, 0); 1823 vmbus_ring_get(rrd, (uint8_t *)&indices, sizeof(indices), 0); 1824 1825 membar_sync(); 1826 rrd->rd_ring->br_rindex = rrd->rd_cons; 1827 1828 return 0; 1829 } 1830 1831 int 1832 vmbus_channel_recv(struct vmbus_channel *ch, void *data, uint32_t datalen, 1833 uint32_t *rlen, uint64_t *rid, int raw) 1834 { 1835 struct vmbus_softc *sc = ch->ch_sc; 1836 struct vmbus_chanpkt_hdr cph; 1837 uint32_t offset, pktlen; 1838 int rv; 1839 1840 *rlen = 0; 1841 1842 mutex_enter(&ch->ch_rrd.rd_lock); 1843 1844 if ((rv = vmbus_ring_peek(&ch->ch_rrd, &cph, sizeof(cph))) != 0) { 1845 mutex_exit(&ch->ch_rrd.rd_lock); 1846 return rv; 1847 } 1848 1849 offset = raw ? 0 : VMBUS_CHANPKT_GETLEN(cph.cph_hlen); 1850 pktlen = VMBUS_CHANPKT_GETLEN(cph.cph_tlen) - offset; 1851 if (pktlen > datalen) { 1852 mutex_exit(&ch->ch_rrd.rd_lock); 1853 device_printf(sc->sc_dev, "%s: pktlen %u datalen %u\n", 1854 __func__, pktlen, datalen); 1855 return EINVAL; 1856 } 1857 1858 rv = vmbus_ring_read(&ch->ch_rrd, data, pktlen, offset); 1859 if (rv == 0) { 1860 *rlen = pktlen; 1861 *rid = cph.cph_tid; 1862 } 1863 1864 mutex_exit(&ch->ch_rrd.rd_lock); 1865 1866 return rv; 1867 } 1868 1869 static inline void 1870 vmbus_ring_mask(struct vmbus_ring_data *rd) 1871 { 1872 1873 membar_sync(); 1874 rd->rd_ring->br_imask = 1; 1875 membar_sync(); 1876 } 1877 1878 static inline void 1879 vmbus_ring_unmask(struct vmbus_ring_data *rd) 1880 { 1881 1882 membar_sync(); 1883 rd->rd_ring->br_imask = 0; 1884 membar_sync(); 1885 } 1886 1887 static void 1888 vmbus_channel_pause(struct vmbus_channel *ch) 1889 { 1890 1891 vmbus_ring_mask(&ch->ch_rrd); 1892 } 1893 1894 static uint32_t 1895 vmbus_channel_unpause(struct vmbus_channel *ch) 1896 { 1897 uint32_t avail; 1898 1899 vmbus_ring_unmask(&ch->ch_rrd); 1900 vmbus_ring_avail(&ch->ch_rrd, NULL, &avail); 1901 1902 return avail; 1903 } 1904 1905 static uint32_t 1906 vmbus_channel_ready(struct vmbus_channel *ch) 1907 { 1908 uint32_t avail; 1909 1910 vmbus_ring_avail(&ch->ch_rrd, NULL, &avail); 1911 1912 return avail; 1913 } 1914 1915 /* How many PFNs can be referenced by the header */ 1916 #define VMBUS_NPFNHDR ((VMBUS_MSG_DSIZE_MAX - \ 1917 sizeof(struct vmbus_chanmsg_gpadl_conn)) / sizeof(uint64_t)) 1918 1919 /* How many PFNs can be referenced by the body */ 1920 #define VMBUS_NPFNBODY ((VMBUS_MSG_DSIZE_MAX - \ 1921 sizeof(struct vmbus_chanmsg_gpadl_subconn)) / sizeof(uint64_t)) 1922 1923 int 1924 vmbus_handle_alloc(struct vmbus_channel *ch, const struct hyperv_dma *dma, 1925 uint32_t buflen, uint32_t *handle) 1926 { 1927 const int prflags = cold ? PR_NOWAIT : PR_WAITOK; 1928 const int kmemflags = cold ? KM_NOSLEEP : KM_SLEEP; 1929 const int msgflags = cold ? MSGF_NOSLEEP : 0; 1930 const int hcflags = cold ? HCF_NOSLEEP : HCF_SLEEPOK; 1931 struct vmbus_softc *sc = ch->ch_sc; 1932 struct vmbus_chanmsg_gpadl_conn *hdr; 1933 struct vmbus_chanmsg_gpadl_subconn *cmd; 1934 struct vmbus_chanmsg_gpadl_connresp rsp; 1935 struct vmbus_msg *msg; 1936 int i, j, last, left, rv; 1937 int bodylen = 0, ncmds = 0, pfn = 0; 1938 uint64_t *frames; 1939 paddr_t pa; 1940 uint8_t *body; 1941 /* Total number of pages to reference */ 1942 int total = atop(buflen); 1943 /* Number of pages that will fit the header */ 1944 int inhdr = MIN(total, VMBUS_NPFNHDR); 1945 1946 KASSERT((buflen & PAGE_MASK) == 0); 1947 KASSERT(buflen == (uint32_t)dma->map->dm_mapsize); 1948 1949 msg = pool_cache_get_paddr(sc->sc_msgpool, prflags, &pa); 1950 if (msg == NULL) 1951 return ENOMEM; 1952 1953 /* Prepare array of frame addresses */ 1954 frames = kmem_zalloc(total * sizeof(*frames), kmemflags); 1955 if (frames == NULL) { 1956 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 1957 return ENOMEM; 1958 } 1959 for (i = 0, j = 0; i < dma->map->dm_nsegs && j < total; i++) { 1960 bus_dma_segment_t *seg = &dma->map->dm_segs[i]; 1961 bus_addr_t addr = seg->ds_addr; 1962 1963 KASSERT((addr & PAGE_MASK) == 0); 1964 KASSERT((seg->ds_len & PAGE_MASK) == 0); 1965 1966 while (addr < seg->ds_addr + seg->ds_len && j < total) { 1967 frames[j++] = atop(addr); 1968 addr += PAGE_SIZE; 1969 } 1970 } 1971 1972 memset(msg, 0, sizeof(*msg)); 1973 msg->msg_req.hc_dsize = sizeof(struct vmbus_chanmsg_gpadl_conn) + 1974 inhdr * sizeof(uint64_t); 1975 hdr = (struct vmbus_chanmsg_gpadl_conn *)msg->msg_req.hc_data; 1976 msg->msg_rsp = &rsp; 1977 msg->msg_rsplen = sizeof(rsp); 1978 msg->msg_flags = msgflags; 1979 1980 left = total - inhdr; 1981 1982 /* Allocate additional gpadl_body structures if required */ 1983 if (left > 0) { 1984 ncmds = howmany(left, VMBUS_NPFNBODY); 1985 bodylen = ncmds * VMBUS_MSG_DSIZE_MAX; 1986 body = kmem_zalloc(bodylen, kmemflags); 1987 if (body == NULL) { 1988 kmem_free(frames, total * sizeof(*frames)); 1989 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 1990 return ENOMEM; 1991 } 1992 } 1993 1994 *handle = atomic_inc_32_nv(&sc->sc_handle); 1995 1996 hdr->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_CONN; 1997 hdr->chm_chanid = ch->ch_id; 1998 hdr->chm_gpadl = *handle; 1999 2000 /* Single range for a contiguous buffer */ 2001 hdr->chm_range_cnt = 1; 2002 hdr->chm_range_len = sizeof(struct vmbus_gpa_range) + total * 2003 sizeof(uint64_t); 2004 hdr->chm_range.gpa_ofs = 0; 2005 hdr->chm_range.gpa_len = buflen; 2006 2007 /* Fit as many pages as possible into the header */ 2008 for (i = 0; i < inhdr; i++) 2009 hdr->chm_range.gpa_page[i] = frames[pfn++]; 2010 2011 for (i = 0; i < ncmds; i++) { 2012 cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body + 2013 VMBUS_MSG_DSIZE_MAX * i); 2014 cmd->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_SUBCONN; 2015 cmd->chm_gpadl = *handle; 2016 last = MIN(left, VMBUS_NPFNBODY); 2017 for (j = 0; j < last; j++) 2018 cmd->chm_gpa_page[j] = frames[pfn++]; 2019 left -= last; 2020 } 2021 2022 rv = vmbus_start(sc, msg, pa); 2023 if (rv != 0) { 2024 DPRINTF("%s: GPADL_CONN failed\n", device_xname(sc->sc_dev)); 2025 goto out; 2026 } 2027 for (i = 0; i < ncmds; i++) { 2028 int cmdlen = sizeof(*cmd); 2029 cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body + 2030 VMBUS_MSG_DSIZE_MAX * i); 2031 /* Last element can be short */ 2032 if (i == ncmds - 1) 2033 cmdlen += last * sizeof(uint64_t); 2034 else 2035 cmdlen += VMBUS_NPFNBODY * sizeof(uint64_t); 2036 rv = vmbus_cmd(sc, cmd, cmdlen, NULL, 0, HCF_NOREPLY | hcflags); 2037 if (rv != 0) { 2038 DPRINTF("%s: GPADL_SUBCONN (iteration %d/%d) failed " 2039 "with %d\n", device_xname(sc->sc_dev), i, ncmds, 2040 rv); 2041 goto out; 2042 } 2043 } 2044 rv = vmbus_reply(sc, msg); 2045 if (rv != 0) { 2046 DPRINTF("%s: GPADL allocation failed with %d\n", 2047 device_xname(sc->sc_dev), rv); 2048 } 2049 2050 out: 2051 if (bodylen > 0) 2052 kmem_free(body, bodylen); 2053 kmem_free(frames, total * sizeof(*frames)); 2054 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 2055 if (rv) 2056 return rv; 2057 2058 KASSERT(*handle == rsp.chm_gpadl); 2059 2060 return 0; 2061 } 2062 2063 void 2064 vmbus_handle_free(struct vmbus_channel *ch, uint32_t handle) 2065 { 2066 struct vmbus_softc *sc = ch->ch_sc; 2067 struct vmbus_chanmsg_gpadl_disconn cmd; 2068 struct vmbus_chanmsg_gpadl_disconn rsp; 2069 int rv; 2070 2071 memset(&cmd, 0, sizeof(cmd)); 2072 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_DISCONN; 2073 cmd.chm_chanid = ch->ch_id; 2074 cmd.chm_gpadl = handle; 2075 2076 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), 2077 cold ? HCF_NOSLEEP : HCF_SLEEPOK); 2078 if (rv) { 2079 DPRINTF("%s: GPADL_DISCONN failed with %d\n", 2080 device_xname(sc->sc_dev), rv); 2081 } 2082 } 2083 2084 static void 2085 vmbus_chevq_enqueue(struct vmbus_softc *sc, int type, void *arg) 2086 { 2087 struct vmbus_chev *vce; 2088 2089 vce = kmem_intr_alloc(sizeof(*vce), KM_NOSLEEP); 2090 if (vce == NULL) { 2091 device_printf(sc->sc_dev, "failed to allocate chev\n"); 2092 return; 2093 } 2094 2095 vce->vce_type = type; 2096 vce->vce_arg = arg; 2097 2098 mutex_enter(&sc->sc_chevq_lock); 2099 SIMPLEQ_INSERT_TAIL(&sc->sc_chevq, vce, vce_entry); 2100 cv_broadcast(&sc->sc_chevq_cv); 2101 mutex_exit(&sc->sc_chevq_lock); 2102 } 2103 2104 static void 2105 vmbus_process_chevq(void *arg) 2106 { 2107 struct vmbus_softc *sc = arg; 2108 struct vmbus_chev *vce; 2109 struct vmbus_chanmsg_choffer *co; 2110 struct vmbus_chanmsg_chrescind *cr; 2111 2112 KASSERT(mutex_owned(&sc->sc_chevq_lock)); 2113 2114 while (!SIMPLEQ_EMPTY(&sc->sc_chevq)) { 2115 vce = SIMPLEQ_FIRST(&sc->sc_chevq); 2116 SIMPLEQ_REMOVE_HEAD(&sc->sc_chevq, vce_entry); 2117 mutex_exit(&sc->sc_chevq_lock); 2118 2119 switch (vce->vce_type) { 2120 case VMBUS_CHEV_TYPE_OFFER: 2121 co = vce->vce_arg; 2122 vmbus_process_offer(sc, co); 2123 kmem_free(co, sizeof(*co)); 2124 break; 2125 2126 case VMBUS_CHEV_TYPE_RESCIND: 2127 cr = vce->vce_arg; 2128 vmbus_process_rescind(sc, cr); 2129 kmem_free(cr, sizeof(*cr)); 2130 break; 2131 2132 default: 2133 DPRINTF("%s: unknown chevq type %d\n", 2134 device_xname(sc->sc_dev), vce->vce_type); 2135 break; 2136 } 2137 kmem_free(vce, sizeof(*vce)); 2138 2139 mutex_enter(&sc->sc_chevq_lock); 2140 } 2141 } 2142 2143 static void 2144 vmbus_chevq_thread(void *arg) 2145 { 2146 struct vmbus_softc *sc = arg; 2147 2148 mutex_enter(&sc->sc_chevq_lock); 2149 for (;;) { 2150 if (SIMPLEQ_EMPTY(&sc->sc_chevq)) { 2151 cv_wait(&sc->sc_chevq_cv, &sc->sc_chevq_lock); 2152 continue; 2153 } 2154 2155 vmbus_process_chevq(sc); 2156 } 2157 mutex_exit(&sc->sc_chevq_lock); 2158 2159 kthread_exit(0); 2160 } 2161 2162 static void 2163 vmbus_devq_enqueue(struct vmbus_softc *sc, int type, struct vmbus_channel *ch) 2164 { 2165 struct vmbus_dev *vd; 2166 2167 vd = kmem_zalloc(sizeof(*vd), KM_SLEEP); 2168 if (vd == NULL) { 2169 device_printf(sc->sc_dev, "failed to allocate devq\n"); 2170 return; 2171 } 2172 2173 vd->vd_type = type; 2174 vd->vd_chan = ch; 2175 2176 if (VMBUS_CHAN_ISPRIMARY(ch)) { 2177 mutex_enter(&sc->sc_devq_lock); 2178 SIMPLEQ_INSERT_TAIL(&sc->sc_devq, vd, vd_entry); 2179 cv_broadcast(&sc->sc_devq_cv); 2180 mutex_exit(&sc->sc_devq_lock); 2181 } else { 2182 mutex_enter(&sc->sc_subch_devq_lock); 2183 SIMPLEQ_INSERT_TAIL(&sc->sc_subch_devq, vd, vd_entry); 2184 cv_broadcast(&sc->sc_subch_devq_cv); 2185 mutex_exit(&sc->sc_subch_devq_lock); 2186 } 2187 } 2188 2189 static void 2190 vmbus_process_devq(void *arg) 2191 { 2192 struct vmbus_softc *sc = arg; 2193 struct vmbus_dev *vd; 2194 struct vmbus_channel *ch; 2195 struct vmbus_attach_args vaa; 2196 2197 KASSERT(mutex_owned(&sc->sc_devq_lock)); 2198 2199 while (!SIMPLEQ_EMPTY(&sc->sc_devq)) { 2200 vd = SIMPLEQ_FIRST(&sc->sc_devq); 2201 SIMPLEQ_REMOVE_HEAD(&sc->sc_devq, vd_entry); 2202 mutex_exit(&sc->sc_devq_lock); 2203 2204 switch (vd->vd_type) { 2205 case VMBUS_DEV_TYPE_ATTACH: 2206 ch = vd->vd_chan; 2207 vaa.aa_type = &ch->ch_type; 2208 vaa.aa_inst = &ch->ch_inst; 2209 vaa.aa_ident = ch->ch_ident; 2210 vaa.aa_chan = ch; 2211 vaa.aa_iot = sc->sc_iot; 2212 vaa.aa_memt = sc->sc_memt; 2213 ch->ch_dev = config_found(sc->sc_dev, 2214 &vaa, vmbus_attach_print, CFARGS_NONE); 2215 break; 2216 2217 case VMBUS_DEV_TYPE_DETACH: 2218 ch = vd->vd_chan; 2219 if (ch->ch_dev != NULL) { 2220 config_detach(ch->ch_dev, DETACH_FORCE); 2221 ch->ch_dev = NULL; 2222 } 2223 vmbus_channel_release(ch); 2224 vmbus_channel_free(ch); 2225 break; 2226 2227 default: 2228 DPRINTF("%s: unknown devq type %d\n", 2229 device_xname(sc->sc_dev), vd->vd_type); 2230 break; 2231 } 2232 kmem_free(vd, sizeof(*vd)); 2233 2234 mutex_enter(&sc->sc_devq_lock); 2235 } 2236 } 2237 2238 static void 2239 vmbus_devq_thread(void *arg) 2240 { 2241 struct vmbus_softc *sc = arg; 2242 2243 mutex_enter(&sc->sc_devq_lock); 2244 for (;;) { 2245 if (SIMPLEQ_EMPTY(&sc->sc_devq)) { 2246 cv_wait(&sc->sc_devq_cv, &sc->sc_devq_lock); 2247 continue; 2248 } 2249 2250 vmbus_process_devq(sc); 2251 } 2252 mutex_exit(&sc->sc_devq_lock); 2253 2254 kthread_exit(0); 2255 } 2256 2257 static void 2258 vmbus_subchannel_devq_thread(void *arg) 2259 { 2260 struct vmbus_softc *sc = arg; 2261 struct vmbus_dev *vd; 2262 struct vmbus_channel *ch, *prich; 2263 2264 mutex_enter(&sc->sc_subch_devq_lock); 2265 for (;;) { 2266 if (SIMPLEQ_EMPTY(&sc->sc_subch_devq)) { 2267 cv_wait(&sc->sc_subch_devq_cv, &sc->sc_subch_devq_lock); 2268 continue; 2269 } 2270 2271 while (!SIMPLEQ_EMPTY(&sc->sc_subch_devq)) { 2272 vd = SIMPLEQ_FIRST(&sc->sc_subch_devq); 2273 SIMPLEQ_REMOVE_HEAD(&sc->sc_subch_devq, vd_entry); 2274 mutex_exit(&sc->sc_subch_devq_lock); 2275 2276 switch (vd->vd_type) { 2277 case VMBUS_DEV_TYPE_ATTACH: 2278 /* Nothing to do */ 2279 break; 2280 2281 case VMBUS_DEV_TYPE_DETACH: 2282 ch = vd->vd_chan; 2283 2284 vmbus_channel_release(ch); 2285 2286 prich = ch->ch_primary_channel; 2287 mutex_enter(&prich->ch_subchannel_lock); 2288 TAILQ_REMOVE(&prich->ch_subchannels, ch, 2289 ch_subentry); 2290 prich->ch_subchannel_count--; 2291 mutex_exit(&prich->ch_subchannel_lock); 2292 wakeup(prich); 2293 2294 vmbus_channel_free(ch); 2295 break; 2296 2297 default: 2298 DPRINTF("%s: unknown devq type %d\n", 2299 device_xname(sc->sc_dev), vd->vd_type); 2300 break; 2301 } 2302 2303 kmem_free(vd, sizeof(*vd)); 2304 2305 mutex_enter(&sc->sc_subch_devq_lock); 2306 } 2307 } 2308 mutex_exit(&sc->sc_subch_devq_lock); 2309 2310 kthread_exit(0); 2311 } 2312 2313 2314 static int 2315 vmbus_attach_print(void *aux, const char *name) 2316 { 2317 struct vmbus_attach_args *aa = aux; 2318 2319 if (name) 2320 printf("\"%s\" at %s", aa->aa_ident, name); 2321 2322 return UNCONF; 2323 } 2324 2325 MODULE(MODULE_CLASS_DRIVER, vmbus, "hyperv"); 2326 2327 #ifdef _MODULE 2328 #include "ioconf.c" 2329 #endif 2330 2331 static int 2332 vmbus_modcmd(modcmd_t cmd, void *aux) 2333 { 2334 int rv = 0; 2335 2336 switch (cmd) { 2337 case MODULE_CMD_INIT: 2338 #ifdef _MODULE 2339 rv = config_init_component(cfdriver_ioconf_vmbus, 2340 cfattach_ioconf_vmbus, cfdata_ioconf_vmbus); 2341 #endif 2342 break; 2343 2344 case MODULE_CMD_FINI: 2345 #ifdef _MODULE 2346 rv = config_fini_component(cfdriver_ioconf_vmbus, 2347 cfattach_ioconf_vmbus, cfdata_ioconf_vmbus); 2348 #endif 2349 break; 2350 2351 default: 2352 rv = ENOTTY; 2353 break; 2354 } 2355 2356 return rv; 2357 } 2358