1 /* $NetBSD: vmbus.c,v 1.8 2019/12/10 12:20:20 nonaka Exp $ */ 2 /* $OpenBSD: hyperv.c,v 1.43 2017/06/27 13:56:15 mikeb Exp $ */ 3 4 /*- 5 * Copyright (c) 2009-2012 Microsoft Corp. 6 * Copyright (c) 2012 NetApp Inc. 7 * Copyright (c) 2012 Citrix Inc. 8 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com> 9 * All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice unmodified, this list of conditions, and the following 16 * disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * The OpenBSD port was done under funding by Esdenera Networks GmbH. 35 */ 36 37 #include <sys/cdefs.h> 38 __KERNEL_RCSID(0, "$NetBSD: vmbus.c,v 1.8 2019/12/10 12:20:20 nonaka Exp $"); 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/device.h> 43 #include <sys/atomic.h> 44 #include <sys/bitops.h> 45 #include <sys/bus.h> 46 #include <sys/cpu.h> 47 #include <sys/intr.h> 48 #include <sys/kmem.h> 49 #include <sys/kthread.h> 50 #include <sys/module.h> 51 #include <sys/mutex.h> 52 #include <sys/xcall.h> 53 54 #include <uvm/uvm_extern.h> 55 56 #include <dev/hyperv/vmbusvar.h> 57 58 #define VMBUS_GPADL_START 0xffff /* 0x10000 effectively */ 59 60 /* Command submission flags */ 61 #define HCF_SLEEPOK 0x0000 62 #define HCF_NOSLEEP 0x0002 /* M_NOWAIT */ 63 #define HCF_NOREPLY 0x0004 64 65 static void vmbus_attach_deferred(device_t); 66 static int vmbus_attach_print(void *, const char *); 67 static int vmbus_alloc_dma(struct vmbus_softc *); 68 static void vmbus_free_dma(struct vmbus_softc *); 69 static int vmbus_init_interrupts(struct vmbus_softc *); 70 static void vmbus_deinit_interrupts(struct vmbus_softc *); 71 static void vmbus_init_synic(void *, void *); 72 static void vmbus_deinit_synic(void *, void *); 73 74 static int vmbus_connect(struct vmbus_softc *); 75 static int vmbus_cmd(struct vmbus_softc *, void *, size_t, void *, size_t, 76 int); 77 static int vmbus_start(struct vmbus_softc *, struct vmbus_msg *, paddr_t); 78 static int vmbus_reply(struct vmbus_softc *, struct vmbus_msg *); 79 static void vmbus_wait(struct vmbus_softc *, 80 int (*done)(struct vmbus_softc *, struct vmbus_msg *), 81 struct vmbus_msg *, void *, const char *); 82 static uint16_t vmbus_intr_signal(struct vmbus_softc *, paddr_t); 83 static void vmbus_event_proc(void *, struct cpu_info *); 84 static void vmbus_event_proc_compat(void *, struct cpu_info *); 85 static void vmbus_message_proc(void *, struct cpu_info *); 86 static void vmbus_message_softintr(void *); 87 static void vmbus_channel_response(struct vmbus_softc *, 88 struct vmbus_chanmsg_hdr *); 89 static void vmbus_channel_offer(struct vmbus_softc *, 90 struct vmbus_chanmsg_hdr *); 91 static void vmbus_channel_rescind(struct vmbus_softc *, 92 struct vmbus_chanmsg_hdr *); 93 static void vmbus_channel_delivered(struct vmbus_softc *, 94 struct vmbus_chanmsg_hdr *); 95 static int vmbus_channel_scan(struct vmbus_softc *); 96 static void vmbus_channel_cpu_default(struct vmbus_channel *); 97 static void vmbus_process_offer(struct vmbus_softc *, 98 struct vmbus_chanmsg_choffer *); 99 static void vmbus_process_rescind(struct vmbus_softc *, 100 struct vmbus_chanmsg_chrescind *); 101 static struct vmbus_channel * 102 vmbus_channel_lookup(struct vmbus_softc *, uint32_t); 103 static int vmbus_channel_ring_create(struct vmbus_channel *, uint32_t); 104 static void vmbus_channel_ring_destroy(struct vmbus_channel *); 105 static void vmbus_channel_detach(struct vmbus_channel *); 106 static void vmbus_channel_pause(struct vmbus_channel *); 107 static uint32_t vmbus_channel_unpause(struct vmbus_channel *); 108 static uint32_t vmbus_channel_ready(struct vmbus_channel *); 109 static void vmbus_devq_enqueue(struct vmbus_softc *, int, 110 struct vmbus_channel *); 111 static void vmbus_process_devq(void *); 112 static void vmbus_devq_thread(void *); 113 114 static struct vmbus_softc *vmbus_sc; 115 116 static const struct { 117 int hmd_response; 118 int hmd_request; 119 void (*hmd_handler)(struct vmbus_softc *, 120 struct vmbus_chanmsg_hdr *); 121 } vmbus_msg_dispatch[] = { 122 { 0, 0, NULL }, 123 { VMBUS_CHANMSG_CHOFFER, 0, vmbus_channel_offer }, 124 { VMBUS_CHANMSG_CHRESCIND, 0, vmbus_channel_rescind }, 125 { VMBUS_CHANMSG_CHREQUEST, VMBUS_CHANMSG_CHOFFER, NULL }, 126 { VMBUS_CHANMSG_CHOFFER_DONE, 0, vmbus_channel_delivered }, 127 { VMBUS_CHANMSG_CHOPEN, 0, NULL }, 128 { VMBUS_CHANMSG_CHOPEN_RESP, VMBUS_CHANMSG_CHOPEN, 129 vmbus_channel_response }, 130 { VMBUS_CHANMSG_CHCLOSE, 0, NULL }, 131 { VMBUS_CHANMSG_GPADL_CONN, 0, NULL }, 132 { VMBUS_CHANMSG_GPADL_SUBCONN, 0, NULL }, 133 { VMBUS_CHANMSG_GPADL_CONNRESP, VMBUS_CHANMSG_GPADL_CONN, 134 vmbus_channel_response }, 135 { VMBUS_CHANMSG_GPADL_DISCONN, 0, NULL }, 136 { VMBUS_CHANMSG_GPADL_DISCONNRESP, VMBUS_CHANMSG_GPADL_DISCONN, 137 vmbus_channel_response }, 138 { VMBUS_CHANMSG_CHFREE, 0, NULL }, 139 { VMBUS_CHANMSG_CONNECT, 0, NULL }, 140 { VMBUS_CHANMSG_CONNECT_RESP, VMBUS_CHANMSG_CONNECT, 141 vmbus_channel_response }, 142 { VMBUS_CHANMSG_DISCONNECT, 0, NULL }, 143 }; 144 145 const struct hyperv_guid hyperv_guid_network = { 146 { 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, 147 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e } 148 }; 149 150 const struct hyperv_guid hyperv_guid_ide = { 151 { 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, 152 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 } 153 }; 154 155 const struct hyperv_guid hyperv_guid_scsi = { 156 { 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, 157 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f } 158 }; 159 160 const struct hyperv_guid hyperv_guid_shutdown = { 161 { 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, 162 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb } 163 }; 164 165 const struct hyperv_guid hyperv_guid_timesync = { 166 { 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, 167 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf } 168 }; 169 170 const struct hyperv_guid hyperv_guid_heartbeat = { 171 { 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, 172 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d } 173 }; 174 175 const struct hyperv_guid hyperv_guid_kvp = { 176 { 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, 177 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6 } 178 }; 179 180 const struct hyperv_guid hyperv_guid_vss = { 181 { 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42, 182 0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 } 183 }; 184 185 const struct hyperv_guid hyperv_guid_dynmem = { 186 { 0xdc, 0x74, 0x50, 0x52, 0x85, 0x89, 0xe2, 0x46, 187 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 } 188 }; 189 190 const struct hyperv_guid hyperv_guid_mouse = { 191 { 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, 192 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a } 193 }; 194 195 const struct hyperv_guid hyperv_guid_kbd = { 196 { 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48, 197 0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 } 198 }; 199 200 const struct hyperv_guid hyperv_guid_video = { 201 { 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, 202 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 } 203 }; 204 205 const struct hyperv_guid hyperv_guid_fc = { 206 { 0x4a, 0xcc, 0x9b, 0x2f, 0x69, 0x00, 0xf3, 0x4a, 207 0xb7, 0x6b, 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda } 208 }; 209 210 const struct hyperv_guid hyperv_guid_fcopy = { 211 { 0xe3, 0x4b, 0xd1, 0x34, 0xe4, 0xde, 0xc8, 0x41, 212 0x9a, 0xe7, 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92 } 213 }; 214 215 const struct hyperv_guid hyperv_guid_pcie = { 216 { 0x1d, 0xf6, 0xc4, 0x44, 0x44, 0x44, 0x00, 0x44, 217 0x9d, 0x52, 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f } 218 }; 219 220 const struct hyperv_guid hyperv_guid_netdir = { 221 { 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, 222 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 } 223 }; 224 225 const struct hyperv_guid hyperv_guid_rdesktop = { 226 { 0xf4, 0xac, 0x6a, 0x27, 0x15, 0xac, 0x6c, 0x42, 227 0x98, 0xdd, 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe } 228 }; 229 230 /* Automatic Virtual Machine Activation (AVMA) Services */ 231 const struct hyperv_guid hyperv_guid_avma1 = { 232 { 0x55, 0xb2, 0x87, 0x44, 0x8c, 0xb8, 0x3f, 0x40, 233 0xbb, 0x51, 0xd1, 0xf6, 0x9c, 0xf1, 0x7f, 0x87 } 234 }; 235 236 const struct hyperv_guid hyperv_guid_avma2 = { 237 { 0xf4, 0xba, 0x75, 0x33, 0x15, 0x9e, 0x30, 0x4b, 238 0xb7, 0x65, 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b } 239 }; 240 241 const struct hyperv_guid hyperv_guid_avma3 = { 242 { 0xa0, 0x1f, 0x22, 0x99, 0xad, 0x24, 0xe2, 0x11, 243 0xbe, 0x98, 0x00, 0x1a, 0xa0, 0x1b, 0xbf, 0x6e } 244 }; 245 246 const struct hyperv_guid hyperv_guid_avma4 = { 247 { 0x16, 0x57, 0xe6, 0xf8, 0xb3, 0x3c, 0x06, 0x4a, 248 0x9a, 0x60, 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5 } 249 }; 250 251 int 252 vmbus_match(device_t parent, cfdata_t cf, void *aux) 253 { 254 255 if (cf->cf_unit != 0 || 256 !hyperv_hypercall_enabled() || 257 !hyperv_synic_supported()) 258 return 0; 259 260 return 1; 261 } 262 263 int 264 vmbus_attach(struct vmbus_softc *sc) 265 { 266 267 aprint_naive("\n"); 268 aprint_normal(": Hyper-V VMBus\n"); 269 270 vmbus_sc = sc; 271 272 sc->sc_msgpool = pool_cache_init(sizeof(struct vmbus_msg), 8, 0, 0, 273 "hvmsg", NULL, IPL_NET, NULL, NULL, NULL); 274 hyperv_set_message_proc(vmbus_message_proc, sc); 275 276 if (vmbus_alloc_dma(sc)) 277 goto cleanup; 278 279 if (vmbus_init_interrupts(sc)) 280 goto cleanup; 281 282 if (vmbus_connect(sc)) 283 goto cleanup; 284 285 aprint_normal_dev(sc->sc_dev, "protocol %d.%d\n", 286 VMBUS_VERSION_MAJOR(sc->sc_proto), 287 VMBUS_VERSION_MINOR(sc->sc_proto)); 288 289 if (sc->sc_proto == VMBUS_VERSION_WS2008 || 290 sc->sc_proto == VMBUS_VERSION_WIN7) { 291 hyperv_set_event_proc(vmbus_event_proc_compat, sc); 292 sc->sc_channel_max = VMBUS_CHAN_MAX_COMPAT; 293 } else { 294 hyperv_set_event_proc(vmbus_event_proc, sc); 295 sc->sc_channel_max = VMBUS_CHAN_MAX; 296 } 297 298 if (vmbus_channel_scan(sc)) 299 goto cleanup; 300 301 config_interrupts(sc->sc_dev, vmbus_attach_deferred); 302 303 return 0; 304 305 cleanup: 306 vmbus_deinit_interrupts(sc); 307 vmbus_free_dma(sc); 308 return -1; 309 } 310 311 static void 312 vmbus_attach_deferred(device_t self) 313 { 314 struct vmbus_softc *sc = device_private(self); 315 316 xc_wait(xc_broadcast(0, vmbus_init_synic, sc, NULL)); 317 } 318 319 int 320 vmbus_detach(struct vmbus_softc *sc, int flags) 321 { 322 323 vmbus_deinit_interrupts(sc); 324 vmbus_free_dma(sc); 325 326 return 0; 327 } 328 329 static int 330 vmbus_alloc_dma(struct vmbus_softc *sc) 331 { 332 CPU_INFO_ITERATOR cii; 333 struct cpu_info *ci; 334 struct vmbus_percpu_data *pd; 335 int i; 336 337 /* 338 * Per-CPU messages and event flags. 339 */ 340 for (CPU_INFO_FOREACH(cii, ci)) { 341 pd = &sc->sc_percpu[cpu_index(ci)]; 342 343 pd->simp = hyperv_dma_alloc(sc->sc_dmat, &pd->simp_dma, 344 PAGE_SIZE, PAGE_SIZE, 0, 1, HYPERV_DMA_SLEEPOK); 345 if (pd->simp == NULL) 346 return ENOMEM; 347 348 pd->siep = hyperv_dma_alloc(sc->sc_dmat, &pd->siep_dma, 349 PAGE_SIZE, PAGE_SIZE, 0, 1, HYPERV_DMA_SLEEPOK); 350 if (pd->siep == NULL) 351 return ENOMEM; 352 } 353 354 sc->sc_events = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_events_dma, 355 PAGE_SIZE, PAGE_SIZE, 0, 1, HYPERV_DMA_SLEEPOK); 356 if (sc->sc_events == NULL) 357 return ENOMEM; 358 sc->sc_wevents = (u_long *)sc->sc_events; 359 sc->sc_revents = (u_long *)((uint8_t *)sc->sc_events + (PAGE_SIZE / 2)); 360 361 for (i = 0; i < __arraycount(sc->sc_monitor); i++) { 362 sc->sc_monitor[i] = hyperv_dma_alloc(sc->sc_dmat, 363 &sc->sc_monitor_dma[i], PAGE_SIZE, PAGE_SIZE, 0, 1, 364 HYPERV_DMA_SLEEPOK); 365 if (sc->sc_monitor[i] == NULL) 366 return ENOMEM; 367 } 368 369 return 0; 370 } 371 372 static void 373 vmbus_free_dma(struct vmbus_softc *sc) 374 { 375 CPU_INFO_ITERATOR cii; 376 struct cpu_info *ci; 377 int i; 378 379 if (sc->sc_events != NULL) { 380 sc->sc_events = sc->sc_wevents = sc->sc_revents = NULL; 381 hyperv_dma_free(sc->sc_dmat, &sc->sc_events_dma); 382 } 383 384 for (i = 0; i < __arraycount(sc->sc_monitor); i++) { 385 sc->sc_monitor[i] = NULL; 386 hyperv_dma_free(sc->sc_dmat, &sc->sc_monitor_dma[i]); 387 } 388 389 for (CPU_INFO_FOREACH(cii, ci)) { 390 struct vmbus_percpu_data *pd = &sc->sc_percpu[cpu_index(ci)]; 391 392 if (pd->simp != NULL) { 393 pd->simp = NULL; 394 hyperv_dma_free(sc->sc_dmat, &pd->simp_dma); 395 } 396 if (pd->siep != NULL) { 397 pd->siep = NULL; 398 hyperv_dma_free(sc->sc_dmat, &pd->siep_dma); 399 } 400 } 401 } 402 403 static int 404 vmbus_init_interrupts(struct vmbus_softc *sc) 405 { 406 407 TAILQ_INIT(&sc->sc_reqs); 408 mutex_init(&sc->sc_req_lock, MUTEX_DEFAULT, IPL_NET); 409 410 TAILQ_INIT(&sc->sc_rsps); 411 mutex_init(&sc->sc_rsp_lock, MUTEX_DEFAULT, IPL_NET); 412 413 sc->sc_proto = VMBUS_VERSION_WS2008; 414 415 /* XXX event_tq */ 416 417 sc->sc_msg_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 418 vmbus_message_softintr, sc); 419 if (sc->sc_msg_sih == NULL) 420 return -1; 421 422 vmbus_init_interrupts_md(sc); 423 424 kcpuset_create(&sc->sc_intr_cpuset, true); 425 if (cold) { 426 /* Initialize other CPUs later. */ 427 vmbus_init_synic(sc, NULL); 428 } else 429 xc_wait(xc_broadcast(0, vmbus_init_synic, sc, NULL)); 430 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_SYNIC); 431 432 return 0; 433 } 434 435 static void 436 vmbus_deinit_interrupts(struct vmbus_softc *sc) 437 { 438 439 if (ISSET(sc->sc_flags, VMBUS_SCFLAG_SYNIC)) { 440 if (cold) 441 vmbus_deinit_synic(sc, NULL); 442 else 443 xc_wait(xc_broadcast(0, vmbus_deinit_synic, sc, NULL)); 444 atomic_and_32(&sc->sc_flags, (uint32_t)~VMBUS_SCFLAG_SYNIC); 445 } 446 447 /* XXX event_tq */ 448 449 if (sc->sc_msg_sih != NULL) { 450 softint_disestablish(sc->sc_msg_sih); 451 sc->sc_msg_sih = NULL; 452 } 453 454 vmbus_deinit_interrupts_md(sc); 455 } 456 457 static void 458 vmbus_init_synic(void *arg1, void *arg2) 459 { 460 struct vmbus_softc *sc = arg1; 461 cpuid_t cpu; 462 int s; 463 464 s = splhigh(); 465 466 cpu = cpu_index(curcpu()); 467 if (!kcpuset_isset(sc->sc_intr_cpuset, cpu)) { 468 kcpuset_atomic_set(sc->sc_intr_cpuset, cpu); 469 vmbus_init_synic_md(sc, cpu); 470 } 471 472 splx(s); 473 } 474 475 static void 476 vmbus_deinit_synic(void *arg1, void *arg2) 477 { 478 struct vmbus_softc *sc = arg1; 479 cpuid_t cpu; 480 int s; 481 482 s = splhigh(); 483 484 cpu = cpu_index(curcpu()); 485 if (kcpuset_isset(sc->sc_intr_cpuset, cpu)) { 486 vmbus_deinit_synic_md(sc, cpu); 487 kcpuset_atomic_clear(sc->sc_intr_cpuset, cpu); 488 } 489 490 splx(s); 491 } 492 493 static int 494 vmbus_connect(struct vmbus_softc *sc) 495 { 496 static const uint32_t versions[] = { 497 VMBUS_VERSION_WIN8_1, 498 VMBUS_VERSION_WIN8, 499 VMBUS_VERSION_WIN7, 500 VMBUS_VERSION_WS2008 501 }; 502 struct vmbus_chanmsg_connect cmd; 503 struct vmbus_chanmsg_connect_resp rsp; 504 int i, rv; 505 506 memset(&cmd, 0, sizeof(cmd)); 507 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CONNECT; 508 cmd.chm_evtflags = hyperv_dma_get_paddr(&sc->sc_events_dma); 509 cmd.chm_mnf1 = hyperv_dma_get_paddr(&sc->sc_monitor_dma[0]); 510 cmd.chm_mnf2 = hyperv_dma_get_paddr(&sc->sc_monitor_dma[1]); 511 512 memset(&rsp, 0, sizeof(rsp)); 513 514 for (i = 0; i < __arraycount(versions); i++) { 515 cmd.chm_ver = versions[i]; 516 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), 517 cold ? HCF_NOSLEEP : HCF_SLEEPOK); 518 if (rv) { 519 DPRINTF("%s: CONNECT failed\n", 520 device_xname(sc->sc_dev)); 521 return rv; 522 } 523 if (rsp.chm_done) { 524 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_CONNECTED); 525 sc->sc_proto = versions[i]; 526 sc->sc_handle = VMBUS_GPADL_START; 527 break; 528 } 529 } 530 if (i == __arraycount(versions)) { 531 device_printf(sc->sc_dev, 532 "failed to negotiate protocol version\n"); 533 return ENXIO; 534 } 535 536 return 0; 537 } 538 539 static int 540 vmbus_cmd(struct vmbus_softc *sc, void *cmd, size_t cmdlen, void *rsp, 541 size_t rsplen, int flags) 542 { 543 const int prflags = cold ? PR_NOWAIT : PR_WAITOK; 544 struct vmbus_msg *msg; 545 paddr_t pa; 546 int rv; 547 548 if (cmdlen > VMBUS_MSG_DSIZE_MAX) { 549 device_printf(sc->sc_dev, "payload too large (%zu)\n", 550 cmdlen); 551 return EMSGSIZE; 552 } 553 554 msg = pool_cache_get_paddr(sc->sc_msgpool, prflags, &pa); 555 if (msg == NULL) { 556 device_printf(sc->sc_dev, "couldn't get msgpool\n"); 557 return ENOMEM; 558 } 559 memset(msg, 0, sizeof(*msg)); 560 msg->msg_req.hc_dsize = cmdlen; 561 memcpy(msg->msg_req.hc_data, cmd, cmdlen); 562 563 if (!(flags & HCF_NOREPLY)) { 564 msg->msg_rsp = rsp; 565 msg->msg_rsplen = rsplen; 566 } else 567 msg->msg_flags |= MSGF_NOQUEUE; 568 569 if (flags & HCF_NOSLEEP) 570 msg->msg_flags |= MSGF_NOSLEEP; 571 572 rv = vmbus_start(sc, msg, pa); 573 if (rv == 0) 574 rv = vmbus_reply(sc, msg); 575 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 576 return rv; 577 } 578 579 static int 580 vmbus_start(struct vmbus_softc *sc, struct vmbus_msg *msg, paddr_t msg_pa) 581 { 582 static const int delays[] = { 583 100, 100, 100, 500, 500, 5000, 5000, 5000 584 }; 585 const char *wchan = "hvstart"; 586 uint16_t status; 587 int i, s; 588 589 msg->msg_req.hc_connid = VMBUS_CONNID_MESSAGE; 590 msg->msg_req.hc_msgtype = 1; 591 592 if (!(msg->msg_flags & MSGF_NOQUEUE)) { 593 mutex_enter(&sc->sc_req_lock); 594 TAILQ_INSERT_TAIL(&sc->sc_reqs, msg, msg_entry); 595 mutex_exit(&sc->sc_req_lock); 596 } 597 598 for (i = 0; i < __arraycount(delays); i++) { 599 status = hyperv_hypercall_post_message( 600 msg_pa + offsetof(struct vmbus_msg, msg_req)); 601 if (status == HYPERCALL_STATUS_SUCCESS) 602 break; 603 604 if (msg->msg_flags & MSGF_NOSLEEP) { 605 delay(delays[i]); 606 s = splnet(); 607 hyperv_intr(); 608 splx(s); 609 } else 610 tsleep(wchan, PRIBIO, wchan, mstohz(delays[i])); 611 } 612 if (status != HYPERCALL_STATUS_SUCCESS) { 613 device_printf(sc->sc_dev, 614 "posting vmbus message failed with %d\n", status); 615 if (!(msg->msg_flags & MSGF_NOQUEUE)) { 616 mutex_enter(&sc->sc_req_lock); 617 TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry); 618 mutex_exit(&sc->sc_req_lock); 619 } 620 return EIO; 621 } 622 623 return 0; 624 } 625 626 static int 627 vmbus_reply_done(struct vmbus_softc *sc, struct vmbus_msg *msg) 628 { 629 struct vmbus_msg *m; 630 631 mutex_enter(&sc->sc_rsp_lock); 632 TAILQ_FOREACH(m, &sc->sc_rsps, msg_entry) { 633 if (m == msg) { 634 mutex_exit(&sc->sc_rsp_lock); 635 return 1; 636 } 637 } 638 mutex_exit(&sc->sc_rsp_lock); 639 return 0; 640 } 641 642 static int 643 vmbus_reply(struct vmbus_softc *sc, struct vmbus_msg *msg) 644 { 645 646 if (msg->msg_flags & MSGF_NOQUEUE) 647 return 0; 648 649 vmbus_wait(sc, vmbus_reply_done, msg, msg, "hvreply"); 650 651 mutex_enter(&sc->sc_rsp_lock); 652 TAILQ_REMOVE(&sc->sc_rsps, msg, msg_entry); 653 mutex_exit(&sc->sc_rsp_lock); 654 655 return 0; 656 } 657 658 static void 659 vmbus_wait(struct vmbus_softc *sc, 660 int (*cond)(struct vmbus_softc *, struct vmbus_msg *), 661 struct vmbus_msg *msg, void *wchan, const char *wmsg) 662 { 663 int s; 664 665 while (!cond(sc, msg)) { 666 if (msg->msg_flags & MSGF_NOSLEEP) { 667 delay(1000); 668 s = splnet(); 669 hyperv_intr(); 670 splx(s); 671 } else 672 tsleep(wchan, PRIBIO, wmsg ? wmsg : "hvwait", 673 mstohz(1)); 674 } 675 } 676 677 static uint16_t 678 vmbus_intr_signal(struct vmbus_softc *sc, paddr_t con_pa) 679 { 680 uint64_t status; 681 682 status = hyperv_hypercall_signal_event(con_pa); 683 return (uint16_t)status; 684 } 685 686 #if LONG_BIT == 64 687 #define ffsl(v) ffs64(v) 688 #elif LONG_BIT == 32 689 #define ffsl(v) ffs32(v) 690 #else 691 #error unsupport LONG_BIT 692 #endif /* LONG_BIT */ 693 694 static void 695 vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *revents, 696 int maxrow) 697 { 698 struct vmbus_channel *ch; 699 u_long pending; 700 uint32_t chanid, chanid_base; 701 int row, chanid_ofs; 702 703 for (row = 0; row < maxrow; row++) { 704 if (revents[row] == 0) 705 continue; 706 707 pending = atomic_swap_ulong(&revents[row], 0); 708 chanid_base = row * LONG_BIT; 709 710 while ((chanid_ofs = ffsl(pending)) != 0) { 711 chanid_ofs--; /* NOTE: ffs is 1-based */ 712 pending &= ~(1UL << chanid_ofs); 713 714 chanid = chanid_base + chanid_ofs; 715 /* vmbus channel protocol message */ 716 if (chanid == 0) 717 continue; 718 719 ch = vmbus_channel_lookup(sc, chanid); 720 if (ch == NULL) { 721 device_printf(sc->sc_dev, 722 "unhandled event on %d\n", chanid); 723 continue; 724 } 725 if (ch->ch_state != VMBUS_CHANSTATE_OPENED) { 726 device_printf(sc->sc_dev, 727 "channel %d is not active\n", chanid); 728 continue; 729 } 730 ch->ch_evcnt.ev_count++; 731 vmbus_channel_schedule(ch); 732 } 733 } 734 } 735 736 static void 737 vmbus_event_proc(void *arg, struct cpu_info *ci) 738 { 739 struct vmbus_softc *sc = arg; 740 struct vmbus_evtflags *evt; 741 742 /* 743 * On Host with Win8 or above, the event page can be 744 * checked directly to get the id of the channel 745 * that has the pending interrupt. 746 */ 747 evt = (struct vmbus_evtflags *)sc->sc_percpu[cpu_index(ci)].siep + 748 VMBUS_SINT_MESSAGE; 749 750 vmbus_event_flags_proc(sc, evt->evt_flags, 751 __arraycount(evt->evt_flags)); 752 } 753 754 static void 755 vmbus_event_proc_compat(void *arg, struct cpu_info *ci) 756 { 757 struct vmbus_softc *sc = arg; 758 struct vmbus_evtflags *evt; 759 760 evt = (struct vmbus_evtflags *)sc->sc_percpu[cpu_index(ci)].siep + 761 VMBUS_SINT_MESSAGE; 762 763 if (test_bit(0, &evt->evt_flags[0])) { 764 clear_bit(0, &evt->evt_flags[0]); 765 /* 766 * receive size is 1/2 page and divide that by 4 bytes 767 */ 768 vmbus_event_flags_proc(sc, sc->sc_revents, 769 VMBUS_CHAN_MAX_COMPAT / VMBUS_EVTFLAG_LEN); 770 } 771 } 772 773 static void 774 vmbus_message_proc(void *arg, struct cpu_info *ci) 775 { 776 struct vmbus_softc *sc = arg; 777 struct vmbus_message *msg; 778 779 msg = (struct vmbus_message *)sc->sc_percpu[cpu_index(ci)].simp + 780 VMBUS_SINT_MESSAGE; 781 if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) { 782 if (__predict_true(!cold)) 783 softint_schedule_cpu(sc->sc_msg_sih, ci); 784 else 785 vmbus_message_softintr(sc); 786 } 787 } 788 789 static void 790 vmbus_message_softintr(void *arg) 791 { 792 struct vmbus_softc *sc = arg; 793 struct vmbus_message *msg; 794 struct vmbus_chanmsg_hdr *hdr; 795 uint32_t type; 796 cpuid_t cpu; 797 798 cpu = cpu_index(curcpu()); 799 800 for (;;) { 801 msg = (struct vmbus_message *)sc->sc_percpu[cpu].simp + 802 VMBUS_SINT_MESSAGE; 803 if (msg->msg_type == HYPERV_MSGTYPE_NONE) 804 break; 805 806 hdr = (struct vmbus_chanmsg_hdr *)msg->msg_data; 807 type = hdr->chm_type; 808 if (type >= VMBUS_CHANMSG_COUNT) { 809 device_printf(sc->sc_dev, 810 "unhandled message type %u flags %#x\n", type, 811 msg->msg_flags); 812 } else { 813 if (vmbus_msg_dispatch[type].hmd_handler) { 814 vmbus_msg_dispatch[type].hmd_handler(sc, hdr); 815 } else { 816 device_printf(sc->sc_dev, 817 "unhandled message type %u\n", type); 818 } 819 } 820 821 msg->msg_type = HYPERV_MSGTYPE_NONE; 822 membar_sync(); 823 if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) 824 hyperv_send_eom(); 825 } 826 } 827 828 static void 829 vmbus_channel_response(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *rsphdr) 830 { 831 struct vmbus_msg *msg; 832 struct vmbus_chanmsg_hdr *reqhdr; 833 int req; 834 835 req = vmbus_msg_dispatch[rsphdr->chm_type].hmd_request; 836 mutex_enter(&sc->sc_req_lock); 837 TAILQ_FOREACH(msg, &sc->sc_reqs, msg_entry) { 838 reqhdr = (struct vmbus_chanmsg_hdr *)&msg->msg_req.hc_data; 839 if (reqhdr->chm_type == req) { 840 TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry); 841 break; 842 } 843 } 844 mutex_exit(&sc->sc_req_lock); 845 if (msg != NULL) { 846 memcpy(msg->msg_rsp, rsphdr, msg->msg_rsplen); 847 mutex_enter(&sc->sc_rsp_lock); 848 TAILQ_INSERT_TAIL(&sc->sc_rsps, msg, msg_entry); 849 mutex_exit(&sc->sc_rsp_lock); 850 wakeup(msg); 851 } 852 } 853 854 static void 855 vmbus_channel_offer(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr) 856 { 857 858 vmbus_process_offer(sc, (struct vmbus_chanmsg_choffer *)hdr); 859 } 860 861 static void 862 vmbus_channel_rescind(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr) 863 { 864 865 vmbus_process_rescind(sc, (struct vmbus_chanmsg_chrescind *)hdr); 866 } 867 868 static void 869 vmbus_channel_delivered(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr) 870 { 871 872 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_OFFERS_DELIVERED); 873 wakeup(&sc->sc_devq); 874 } 875 876 static void 877 hyperv_guid_sprint(struct hyperv_guid *guid, char *str, size_t size) 878 { 879 static const struct { 880 const struct hyperv_guid *guid; 881 const char *ident; 882 } map[] = { 883 { &hyperv_guid_network, "network" }, 884 { &hyperv_guid_ide, "ide" }, 885 { &hyperv_guid_scsi, "scsi" }, 886 { &hyperv_guid_shutdown, "shutdown" }, 887 { &hyperv_guid_timesync, "timesync" }, 888 { &hyperv_guid_heartbeat, "heartbeat" }, 889 { &hyperv_guid_kvp, "kvp" }, 890 { &hyperv_guid_vss, "vss" }, 891 { &hyperv_guid_dynmem, "dynamic-memory" }, 892 { &hyperv_guid_mouse, "mouse" }, 893 { &hyperv_guid_kbd, "keyboard" }, 894 { &hyperv_guid_video, "video" }, 895 { &hyperv_guid_fc, "fiber-channel" }, 896 { &hyperv_guid_fcopy, "file-copy" }, 897 { &hyperv_guid_pcie, "pcie-passthrough" }, 898 { &hyperv_guid_netdir, "network-direct" }, 899 { &hyperv_guid_rdesktop, "remote-desktop" }, 900 { &hyperv_guid_avma1, "avma-1" }, 901 { &hyperv_guid_avma2, "avma-2" }, 902 { &hyperv_guid_avma3, "avma-3" }, 903 { &hyperv_guid_avma4, "avma-4" }, 904 }; 905 int i; 906 907 for (i = 0; i < __arraycount(map); i++) { 908 if (memcmp(guid, map[i].guid, sizeof(*guid)) == 0) { 909 strlcpy(str, map[i].ident, size); 910 return; 911 } 912 } 913 hyperv_guid2str(guid, str, size); 914 } 915 916 static int 917 vmbus_channel_scan_done(struct vmbus_softc *sc, struct vmbus_msg *msg __unused) 918 { 919 920 return ISSET(sc->sc_flags, VMBUS_SCFLAG_OFFERS_DELIVERED); 921 } 922 923 static int 924 vmbus_channel_scan(struct vmbus_softc *sc) 925 { 926 struct vmbus_chanmsg_hdr hdr; 927 struct vmbus_chanmsg_choffer rsp; 928 929 TAILQ_INIT(&sc->sc_channels); 930 mutex_init(&sc->sc_channel_lock, MUTEX_DEFAULT, IPL_NET); 931 932 SIMPLEQ_INIT(&sc->sc_devq); 933 mutex_init(&sc->sc_devq_lock, MUTEX_DEFAULT, IPL_NET); 934 cv_init(&sc->sc_devq_cv, "hvdevqcv"); 935 936 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, 937 vmbus_devq_thread, sc, NULL, "hvoffer") != 0) { 938 DPRINTF("%s: failed to create offer thread\n", 939 device_xname(sc->sc_dev)); 940 return -1; 941 } 942 943 memset(&hdr, 0, sizeof(hdr)); 944 hdr.chm_type = VMBUS_CHANMSG_CHREQUEST; 945 946 if (vmbus_cmd(sc, &hdr, sizeof(hdr), &rsp, sizeof(rsp), 947 HCF_NOREPLY | (cold ? HCF_NOSLEEP : HCF_SLEEPOK))) { 948 DPRINTF("%s: CHREQUEST failed\n", device_xname(sc->sc_dev)); 949 return -1; 950 } 951 952 vmbus_wait(sc, vmbus_channel_scan_done, (struct vmbus_msg *)&hdr, 953 &sc->sc_devq, "hvscan"); 954 955 mutex_enter(&sc->sc_devq_lock); 956 vmbus_process_devq(sc); 957 mutex_exit(&sc->sc_devq_lock); 958 959 return 0; 960 } 961 962 static struct vmbus_channel * 963 vmbus_channel_alloc(struct vmbus_softc *sc) 964 { 965 struct vmbus_channel *ch; 966 967 ch = kmem_intr_zalloc(sizeof(*ch), KM_NOSLEEP); 968 969 ch->ch_monprm = hyperv_dma_alloc(sc->sc_dmat, &ch->ch_monprm_dma, 970 sizeof(*ch->ch_monprm), 8, 0, 1, HYPERV_DMA_NOSLEEP); 971 if (ch->ch_monprm == NULL) { 972 device_printf(sc->sc_dev, "monprm alloc failed\n"); 973 kmem_free(ch, sizeof(*ch)); 974 return NULL; 975 } 976 977 ch->ch_refs = 1; 978 ch->ch_sc = sc; 979 mutex_init(&ch->ch_subchannel_lock, MUTEX_DEFAULT, IPL_NET); 980 TAILQ_INIT(&ch->ch_subchannels); 981 982 ch->ch_state = VMBUS_CHANSTATE_CLOSED; 983 984 return ch; 985 } 986 987 static void 988 vmbus_channel_free(struct vmbus_channel *ch) 989 { 990 struct vmbus_softc *sc = ch->ch_sc; 991 992 KASSERTMSG(TAILQ_EMPTY(&ch->ch_subchannels) && 993 ch->ch_subchannel_count == 0, "still owns sub-channels"); 994 KASSERTMSG(ch->ch_state == 0 || ch->ch_state == VMBUS_CHANSTATE_CLOSED, 995 "free busy channel"); 996 KASSERTMSG(ch->ch_refs == 0, "channel %u: invalid refcnt %d", 997 ch->ch_id, ch->ch_refs); 998 999 hyperv_dma_free(sc->sc_dmat, &ch->ch_monprm_dma); 1000 mutex_destroy(&ch->ch_subchannel_lock); 1001 /* XXX ch_evcnt */ 1002 if (ch->ch_taskq != NULL) 1003 softint_disestablish(ch->ch_taskq); 1004 kmem_free(ch, sizeof(*ch)); 1005 } 1006 1007 static int 1008 vmbus_channel_add(struct vmbus_channel *nch) 1009 { 1010 struct vmbus_softc *sc = nch->ch_sc; 1011 struct vmbus_channel *ch; 1012 u_int refs __diagused; 1013 1014 if (nch->ch_id == 0) { 1015 device_printf(sc->sc_dev, "got channel 0 offer, discard\n"); 1016 return EINVAL; 1017 } else if (nch->ch_id >= sc->sc_channel_max) { 1018 device_printf(sc->sc_dev, "invalid channel %u offer\n", 1019 nch->ch_id); 1020 return EINVAL; 1021 } 1022 1023 mutex_enter(&sc->sc_channel_lock); 1024 TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) { 1025 if (!memcmp(&ch->ch_type, &nch->ch_type, sizeof(ch->ch_type)) && 1026 !memcmp(&ch->ch_inst, &nch->ch_inst, sizeof(ch->ch_inst))) 1027 break; 1028 } 1029 if (VMBUS_CHAN_ISPRIMARY(nch)) { 1030 if (ch == NULL) { 1031 TAILQ_INSERT_TAIL(&sc->sc_channels, nch, ch_entry); 1032 mutex_exit(&sc->sc_channel_lock); 1033 goto done; 1034 } else { 1035 mutex_exit(&sc->sc_channel_lock); 1036 device_printf(sc->sc_dev, 1037 "duplicated primary channel%u\n", nch->ch_id); 1038 return EINVAL; 1039 } 1040 } else { 1041 if (ch == NULL) { 1042 mutex_exit(&sc->sc_channel_lock); 1043 device_printf(sc->sc_dev, "no primary channel%u\n", 1044 nch->ch_id); 1045 return EINVAL; 1046 } 1047 } 1048 mutex_exit(&sc->sc_channel_lock); 1049 1050 KASSERT(!VMBUS_CHAN_ISPRIMARY(nch)); 1051 KASSERT(ch != NULL); 1052 1053 refs = atomic_inc_uint_nv(&nch->ch_refs); 1054 KASSERT(refs == 2); 1055 1056 nch->ch_primary_channel = ch; 1057 nch->ch_dev = ch->ch_dev; 1058 1059 mutex_enter(&ch->ch_subchannel_lock); 1060 TAILQ_INSERT_TAIL(&ch->ch_subchannels, nch, ch_subentry); 1061 ch->ch_subchannel_count++; 1062 mutex_exit(&ch->ch_subchannel_lock); 1063 wakeup(ch); 1064 1065 done: 1066 vmbus_channel_cpu_default(nch); 1067 1068 return 0; 1069 } 1070 1071 void 1072 vmbus_channel_cpu_set(struct vmbus_channel *ch, int cpu) 1073 { 1074 struct vmbus_softc *sc = ch->ch_sc; 1075 1076 KASSERTMSG(cpu >= 0 && cpu < ncpu, "invalid cpu %d", cpu); 1077 1078 if (sc->sc_proto == VMBUS_VERSION_WS2008 || 1079 sc->sc_proto == VMBUS_VERSION_WIN7) { 1080 /* Only cpu0 is supported */ 1081 cpu = 0; 1082 } 1083 1084 ch->ch_cpuid = cpu; 1085 ch->ch_vcpu = hyperv_get_vcpuid(cpu); 1086 } 1087 1088 void 1089 vmbus_channel_cpu_rr(struct vmbus_channel *ch) 1090 { 1091 static uint32_t vmbus_channel_nextcpu; 1092 int cpu; 1093 1094 cpu = atomic_inc_32_nv(&vmbus_channel_nextcpu) % ncpu; 1095 vmbus_channel_cpu_set(ch, cpu); 1096 } 1097 1098 static void 1099 vmbus_channel_cpu_default(struct vmbus_channel *ch) 1100 { 1101 1102 /* 1103 * By default, pin the channel to cpu0. Devices having 1104 * special channel-cpu mapping requirement should call 1105 * vmbus_channel_cpu_{set,rr}(). 1106 */ 1107 vmbus_channel_cpu_set(ch, 0); 1108 } 1109 1110 bool 1111 vmbus_channel_is_revoked(struct vmbus_channel *ch) 1112 { 1113 1114 return (ch->ch_flags & CHF_REVOKED) ? true : false; 1115 } 1116 1117 1118 static void 1119 vmbus_process_offer(struct vmbus_softc *sc, struct vmbus_chanmsg_choffer *co) 1120 { 1121 struct vmbus_channel *ch; 1122 1123 ch = vmbus_channel_alloc(sc); 1124 if (ch == NULL) { 1125 device_printf(sc->sc_dev, "allocate channel %u failed\n", 1126 co->chm_chanid); 1127 return; 1128 } 1129 1130 /* 1131 * By default we setup state to enable batched reading. 1132 * A specific service can choose to disable this prior 1133 * to opening the channel. 1134 */ 1135 ch->ch_flags |= CHF_BATCHED; 1136 1137 hyperv_guid_sprint(&co->chm_chtype, ch->ch_ident, 1138 sizeof(ch->ch_ident)); 1139 1140 ch->ch_monprm->mp_connid = VMBUS_CONNID_EVENT; 1141 if (sc->sc_proto > VMBUS_VERSION_WS2008) 1142 ch->ch_monprm->mp_connid = co->chm_connid; 1143 1144 if (co->chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) { 1145 ch->ch_mgroup = co->chm_montrig / VMBUS_MONTRIG_LEN; 1146 ch->ch_mindex = co->chm_montrig % VMBUS_MONTRIG_LEN; 1147 ch->ch_flags |= CHF_MONITOR; 1148 } 1149 1150 ch->ch_id = co->chm_chanid; 1151 ch->ch_subidx = co->chm_subidx; 1152 1153 memcpy(&ch->ch_type, &co->chm_chtype, sizeof(ch->ch_type)); 1154 memcpy(&ch->ch_inst, &co->chm_chinst, sizeof(ch->ch_inst)); 1155 1156 if (vmbus_channel_add(ch) != 0) { 1157 atomic_dec_uint(&ch->ch_refs); 1158 vmbus_channel_free(ch); 1159 return; 1160 } 1161 1162 ch->ch_state = VMBUS_CHANSTATE_OFFERED; 1163 1164 vmbus_devq_enqueue(sc, VMBUS_DEV_TYPE_ATTACH, ch); 1165 1166 #ifdef HYPERV_DEBUG 1167 printf("%s: channel %u: \"%s\"", device_xname(sc->sc_dev), ch->ch_id, 1168 ch->ch_ident); 1169 if (ch->ch_flags & CHF_MONITOR) 1170 printf(", monitor %u\n", co->chm_montrig); 1171 else 1172 printf("\n"); 1173 #endif 1174 } 1175 1176 static void 1177 vmbus_process_rescind(struct vmbus_softc *sc, 1178 struct vmbus_chanmsg_chrescind *cr) 1179 { 1180 struct vmbus_channel *ch; 1181 1182 if (cr->chm_chanid > VMBUS_CHAN_MAX) { 1183 device_printf(sc->sc_dev, "invalid revoked channel%u\n", 1184 cr->chm_chanid); 1185 return; 1186 } 1187 1188 mutex_enter(&sc->sc_channel_lock); 1189 ch = vmbus_channel_lookup(sc, cr->chm_chanid); 1190 if (ch == NULL) { 1191 mutex_exit(&sc->sc_channel_lock); 1192 device_printf(sc->sc_dev, "channel%u is not offered\n", 1193 cr->chm_chanid); 1194 return; 1195 } 1196 TAILQ_REMOVE(&sc->sc_channels, ch, ch_entry); 1197 mutex_exit(&sc->sc_channel_lock); 1198 1199 KASSERTMSG(!(ch->ch_flags & CHF_REVOKED), 1200 "channel%u has already been revoked", ch->ch_id); 1201 atomic_or_uint(&ch->ch_flags, CHF_REVOKED); 1202 1203 vmbus_channel_detach(ch); 1204 } 1205 1206 static int 1207 vmbus_channel_release(struct vmbus_channel *ch) 1208 { 1209 struct vmbus_softc *sc = ch->ch_sc; 1210 struct vmbus_chanmsg_chfree cmd; 1211 int rv; 1212 1213 memset(&cmd, 0, sizeof(cmd)); 1214 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHFREE; 1215 cmd.chm_chanid = ch->ch_id; 1216 1217 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0, 1218 HCF_NOREPLY | (cold ? HCF_NOSLEEP : HCF_SLEEPOK)); 1219 if (rv) { 1220 DPRINTF("%s: CHFREE failed with %d\n", device_xname(sc->sc_dev), 1221 rv); 1222 } 1223 return rv; 1224 } 1225 1226 struct vmbus_channel ** 1227 vmbus_subchannel_get(struct vmbus_channel *prich, int cnt) 1228 { 1229 struct vmbus_channel **ret, *ch; 1230 int i; 1231 1232 KASSERT(cnt > 0); 1233 1234 ret = kmem_alloc(sizeof(struct vmbus_channel *) * cnt, 1235 cold ? KM_NOSLEEP : KM_SLEEP); 1236 1237 mutex_enter(&prich->ch_subchannel_lock); 1238 1239 while (prich->ch_subchannel_count < cnt) 1240 /* XXX use condvar(9) instead of mtsleep */ 1241 mtsleep(prich, PRIBIO, "hvvmsubch", 0, 1242 &prich->ch_subchannel_lock); 1243 1244 i = 0; 1245 TAILQ_FOREACH(ch, &prich->ch_subchannels, ch_subentry) { 1246 ret[i] = ch; /* XXX inc refs */ 1247 1248 if (++i == cnt) 1249 break; 1250 } 1251 1252 mutex_exit(&prich->ch_subchannel_lock); 1253 1254 return ret; 1255 } 1256 1257 void 1258 vmbus_subchannel_put(struct vmbus_channel **subch, int cnt) 1259 { 1260 1261 kmem_free(subch, sizeof(struct vmbus_channel *) * cnt); 1262 } 1263 1264 static struct vmbus_channel * 1265 vmbus_channel_lookup(struct vmbus_softc *sc, uint32_t relid) 1266 { 1267 struct vmbus_channel *ch; 1268 1269 TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) { 1270 if (ch->ch_id == relid) 1271 return ch; 1272 } 1273 return NULL; 1274 } 1275 1276 static int 1277 vmbus_channel_ring_create(struct vmbus_channel *ch, uint32_t buflen) 1278 { 1279 struct vmbus_softc *sc = ch->ch_sc; 1280 1281 buflen = roundup(buflen, PAGE_SIZE) + sizeof(struct vmbus_bufring); 1282 ch->ch_ring_size = 2 * buflen; 1283 /* page aligned memory */ 1284 ch->ch_ring = hyperv_dma_alloc(sc->sc_dmat, &ch->ch_ring_dma, 1285 ch->ch_ring_size, PAGE_SIZE, 0, 1, HYPERV_DMA_SLEEPOK); 1286 if (ch->ch_ring == NULL) { 1287 device_printf(sc->sc_dev, 1288 "failed to allocate channel ring\n"); 1289 return ENOMEM; 1290 } 1291 1292 memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd)); 1293 ch->ch_wrd.rd_ring = (struct vmbus_bufring *)ch->ch_ring; 1294 ch->ch_wrd.rd_size = buflen; 1295 ch->ch_wrd.rd_dsize = buflen - sizeof(struct vmbus_bufring); 1296 mutex_init(&ch->ch_wrd.rd_lock, MUTEX_DEFAULT, IPL_NET); 1297 1298 memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd)); 1299 ch->ch_rrd.rd_ring = (struct vmbus_bufring *)((uint8_t *)ch->ch_ring + 1300 buflen); 1301 ch->ch_rrd.rd_size = buflen; 1302 ch->ch_rrd.rd_dsize = buflen - sizeof(struct vmbus_bufring); 1303 mutex_init(&ch->ch_rrd.rd_lock, MUTEX_DEFAULT, IPL_NET); 1304 1305 if (vmbus_handle_alloc(ch, &ch->ch_ring_dma, ch->ch_ring_size, 1306 &ch->ch_ring_gpadl)) { 1307 device_printf(sc->sc_dev, 1308 "failed to obtain a PA handle for the ring\n"); 1309 vmbus_channel_ring_destroy(ch); 1310 return ENOMEM; 1311 } 1312 1313 return 0; 1314 } 1315 1316 static void 1317 vmbus_channel_ring_destroy(struct vmbus_channel *ch) 1318 { 1319 struct vmbus_softc *sc = ch->ch_sc; 1320 1321 hyperv_dma_free(sc->sc_dmat, &ch->ch_ring_dma); 1322 ch->ch_ring = NULL; 1323 vmbus_handle_free(ch, ch->ch_ring_gpadl); 1324 1325 mutex_destroy(&ch->ch_wrd.rd_lock); 1326 memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd)); 1327 mutex_destroy(&ch->ch_rrd.rd_lock); 1328 memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd)); 1329 } 1330 1331 int 1332 vmbus_channel_open(struct vmbus_channel *ch, size_t buflen, void *udata, 1333 size_t udatalen, void (*handler)(void *), void *arg) 1334 { 1335 struct vmbus_softc *sc = ch->ch_sc; 1336 struct vmbus_chanmsg_chopen cmd; 1337 struct vmbus_chanmsg_chopen_resp rsp; 1338 int rv = EINVAL; 1339 1340 if (ch->ch_ring == NULL && 1341 (rv = vmbus_channel_ring_create(ch, buflen))) { 1342 DPRINTF("%s: failed to create channel ring\n", 1343 device_xname(sc->sc_dev)); 1344 return rv; 1345 } 1346 1347 memset(&cmd, 0, sizeof(cmd)); 1348 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHOPEN; 1349 cmd.chm_openid = ch->ch_id; 1350 cmd.chm_chanid = ch->ch_id; 1351 cmd.chm_gpadl = ch->ch_ring_gpadl; 1352 cmd.chm_txbr_pgcnt = atop(ch->ch_wrd.rd_size); 1353 cmd.chm_vcpuid = ch->ch_vcpu; 1354 if (udata && udatalen > 0) 1355 memcpy(cmd.chm_udata, udata, udatalen); 1356 1357 memset(&rsp, 0, sizeof(rsp)); 1358 1359 ch->ch_handler = handler; 1360 ch->ch_ctx = arg; 1361 ch->ch_state = VMBUS_CHANSTATE_OPENED; 1362 1363 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), 1364 cold ? HCF_NOSLEEP : HCF_SLEEPOK); 1365 if (rv) { 1366 vmbus_channel_ring_destroy(ch); 1367 DPRINTF("%s: CHOPEN failed with %d\n", device_xname(sc->sc_dev), 1368 rv); 1369 ch->ch_handler = NULL; 1370 ch->ch_ctx = NULL; 1371 ch->ch_state = VMBUS_CHANSTATE_OFFERED; 1372 return rv; 1373 } 1374 return 0; 1375 } 1376 1377 static void 1378 vmbus_channel_detach(struct vmbus_channel *ch) 1379 { 1380 u_int refs; 1381 1382 KASSERTMSG(ch->ch_refs > 0, "channel%u: invalid refcnt %d", 1383 ch->ch_id, ch->ch_refs); 1384 1385 refs = atomic_dec_uint_nv(&ch->ch_refs); 1386 if (refs == 0) { 1387 /* Detach the target channel. */ 1388 vmbus_devq_enqueue(ch->ch_sc, VMBUS_DEV_TYPE_DETACH, ch); 1389 } 1390 } 1391 1392 static int 1393 vmbus_channel_close_internal(struct vmbus_channel *ch) 1394 { 1395 struct vmbus_softc *sc = ch->ch_sc; 1396 struct vmbus_chanmsg_chclose cmd; 1397 int rv; 1398 1399 memset(&cmd, 0, sizeof(cmd)); 1400 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHCLOSE; 1401 cmd.chm_chanid = ch->ch_id; 1402 1403 ch->ch_state = VMBUS_CHANSTATE_CLOSING; 1404 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0, 1405 HCF_NOREPLY | (cold ? HCF_NOSLEEP : HCF_SLEEPOK)); 1406 if (rv) { 1407 DPRINTF("%s: CHCLOSE failed with %d\n", 1408 device_xname(sc->sc_dev), rv); 1409 return rv; 1410 } 1411 ch->ch_state = VMBUS_CHANSTATE_CLOSED; 1412 vmbus_channel_ring_destroy(ch); 1413 return 0; 1414 } 1415 1416 int 1417 vmbus_channel_close_direct(struct vmbus_channel *ch) 1418 { 1419 int rv; 1420 1421 rv = vmbus_channel_close_internal(ch); 1422 if (!VMBUS_CHAN_ISPRIMARY(ch)) 1423 vmbus_channel_detach(ch); 1424 return rv; 1425 } 1426 1427 int 1428 vmbus_channel_close(struct vmbus_channel *ch) 1429 { 1430 struct vmbus_channel **subch; 1431 int i, cnt, rv; 1432 1433 if (!VMBUS_CHAN_ISPRIMARY(ch)) 1434 return 0; 1435 1436 cnt = ch->ch_subchannel_count; 1437 if (cnt > 0) { 1438 subch = vmbus_subchannel_get(ch, cnt); 1439 for (i = 0; i < ch->ch_subchannel_count; i++) { 1440 rv = vmbus_channel_close_internal(subch[i]); 1441 (void) rv; /* XXX */ 1442 vmbus_channel_detach(ch); 1443 } 1444 vmbus_subchannel_put(subch, cnt); 1445 } 1446 1447 return vmbus_channel_close_internal(ch); 1448 } 1449 1450 static inline void 1451 vmbus_channel_setevent(struct vmbus_softc *sc, struct vmbus_channel *ch) 1452 { 1453 struct vmbus_mon_trig *mtg; 1454 1455 /* Each uint32_t represents 32 channels */ 1456 set_bit(ch->ch_id, sc->sc_wevents); 1457 if (ch->ch_flags & CHF_MONITOR) { 1458 mtg = &sc->sc_monitor[1]->mnf_trigs[ch->ch_mgroup]; 1459 set_bit(ch->ch_mindex, &mtg->mt_pending); 1460 } else 1461 vmbus_intr_signal(sc, hyperv_dma_get_paddr(&ch->ch_monprm_dma)); 1462 } 1463 1464 static void 1465 vmbus_channel_intr(void *arg) 1466 { 1467 struct vmbus_channel *ch = arg; 1468 1469 if (vmbus_channel_ready(ch)) 1470 ch->ch_handler(ch->ch_ctx); 1471 1472 if (vmbus_channel_unpause(ch) == 0) 1473 return; 1474 1475 vmbus_channel_pause(ch); 1476 vmbus_channel_schedule(ch); 1477 } 1478 1479 int 1480 vmbus_channel_setdeferred(struct vmbus_channel *ch, const char *name) 1481 { 1482 1483 ch->ch_taskq = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 1484 vmbus_channel_intr, ch); 1485 if (ch->ch_taskq == NULL) 1486 return -1; 1487 return 0; 1488 } 1489 1490 void 1491 vmbus_channel_schedule(struct vmbus_channel *ch) 1492 { 1493 1494 if (ch->ch_handler) { 1495 if (!cold && (ch->ch_flags & CHF_BATCHED)) { 1496 vmbus_channel_pause(ch); 1497 softint_schedule(ch->ch_taskq); 1498 } else 1499 ch->ch_handler(ch->ch_ctx); 1500 } 1501 } 1502 1503 static __inline void 1504 vmbus_ring_put(struct vmbus_ring_data *wrd, uint8_t *data, uint32_t datalen) 1505 { 1506 int left = MIN(datalen, wrd->rd_dsize - wrd->rd_prod); 1507 1508 memcpy(&wrd->rd_ring->br_data[wrd->rd_prod], data, left); 1509 memcpy(&wrd->rd_ring->br_data[0], data + left, datalen - left); 1510 wrd->rd_prod += datalen; 1511 if (wrd->rd_prod >= wrd->rd_dsize) 1512 wrd->rd_prod -= wrd->rd_dsize; 1513 } 1514 1515 static inline void 1516 vmbus_ring_get(struct vmbus_ring_data *rrd, uint8_t *data, uint32_t datalen, 1517 int peek) 1518 { 1519 int left = MIN(datalen, rrd->rd_dsize - rrd->rd_cons); 1520 1521 memcpy(data, &rrd->rd_ring->br_data[rrd->rd_cons], left); 1522 memcpy(data + left, &rrd->rd_ring->br_data[0], datalen - left); 1523 if (!peek) { 1524 rrd->rd_cons += datalen; 1525 if (rrd->rd_cons >= rrd->rd_dsize) 1526 rrd->rd_cons -= rrd->rd_dsize; 1527 } 1528 } 1529 1530 static __inline void 1531 vmbus_ring_avail(struct vmbus_ring_data *rd, uint32_t *towrite, 1532 uint32_t *toread) 1533 { 1534 uint32_t ridx = rd->rd_ring->br_rindex; 1535 uint32_t widx = rd->rd_ring->br_windex; 1536 uint32_t r, w; 1537 1538 if (widx >= ridx) 1539 w = rd->rd_dsize - (widx - ridx); 1540 else 1541 w = ridx - widx; 1542 r = rd->rd_dsize - w; 1543 if (towrite) 1544 *towrite = w; 1545 if (toread) 1546 *toread = r; 1547 } 1548 1549 static int 1550 vmbus_ring_write(struct vmbus_ring_data *wrd, struct iovec *iov, int iov_cnt, 1551 int *needsig) 1552 { 1553 uint64_t indices = 0; 1554 uint32_t avail, oprod, datalen = sizeof(indices); 1555 int i; 1556 1557 for (i = 0; i < iov_cnt; i++) 1558 datalen += iov[i].iov_len; 1559 1560 KASSERT(datalen <= wrd->rd_dsize); 1561 1562 vmbus_ring_avail(wrd, &avail, NULL); 1563 if (avail <= datalen) { 1564 DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen); 1565 return EAGAIN; 1566 } 1567 1568 oprod = wrd->rd_prod; 1569 1570 for (i = 0; i < iov_cnt; i++) 1571 vmbus_ring_put(wrd, iov[i].iov_base, iov[i].iov_len); 1572 1573 indices = (uint64_t)oprod << 32; 1574 vmbus_ring_put(wrd, (uint8_t *)&indices, sizeof(indices)); 1575 1576 membar_sync(); 1577 wrd->rd_ring->br_windex = wrd->rd_prod; 1578 membar_sync(); 1579 1580 /* Signal when the ring transitions from being empty to non-empty */ 1581 if (wrd->rd_ring->br_imask == 0 && 1582 wrd->rd_ring->br_rindex == oprod) 1583 *needsig = 1; 1584 else 1585 *needsig = 0; 1586 1587 return 0; 1588 } 1589 1590 int 1591 vmbus_channel_send(struct vmbus_channel *ch, void *data, uint32_t datalen, 1592 uint64_t rid, int type, uint32_t flags) 1593 { 1594 struct vmbus_softc *sc = ch->ch_sc; 1595 struct vmbus_chanpkt cp; 1596 struct iovec iov[3]; 1597 uint32_t pktlen, pktlen_aligned; 1598 uint64_t zeropad = 0; 1599 int rv, needsig = 0; 1600 1601 pktlen = sizeof(cp) + datalen; 1602 pktlen_aligned = roundup(pktlen, sizeof(uint64_t)); 1603 1604 cp.cp_hdr.cph_type = type; 1605 cp.cp_hdr.cph_flags = flags; 1606 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp)); 1607 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned); 1608 cp.cp_hdr.cph_tid = rid; 1609 1610 iov[0].iov_base = &cp; 1611 iov[0].iov_len = sizeof(cp); 1612 1613 iov[1].iov_base = data; 1614 iov[1].iov_len = datalen; 1615 1616 iov[2].iov_base = &zeropad; 1617 iov[2].iov_len = pktlen_aligned - pktlen; 1618 1619 mutex_enter(&ch->ch_wrd.rd_lock); 1620 rv = vmbus_ring_write(&ch->ch_wrd, iov, 3, &needsig); 1621 mutex_exit(&ch->ch_wrd.rd_lock); 1622 if (rv == 0 && needsig) 1623 vmbus_channel_setevent(sc, ch); 1624 1625 return rv; 1626 } 1627 1628 int 1629 vmbus_channel_send_sgl(struct vmbus_channel *ch, struct vmbus_gpa *sgl, 1630 uint32_t nsge, void *data, uint32_t datalen, uint64_t rid) 1631 { 1632 struct vmbus_softc *sc = ch->ch_sc; 1633 struct vmbus_chanpkt_sglist cp; 1634 struct iovec iov[4]; 1635 uint32_t buflen, pktlen, pktlen_aligned; 1636 uint64_t zeropad = 0; 1637 int rv, needsig = 0; 1638 1639 buflen = sizeof(struct vmbus_gpa) * nsge; 1640 pktlen = sizeof(cp) + datalen + buflen; 1641 pktlen_aligned = roundup(pktlen, sizeof(uint64_t)); 1642 1643 cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA; 1644 cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC; 1645 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen); 1646 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned); 1647 cp.cp_hdr.cph_tid = rid; 1648 cp.cp_gpa_cnt = nsge; 1649 cp.cp_rsvd = 0; 1650 1651 iov[0].iov_base = &cp; 1652 iov[0].iov_len = sizeof(cp); 1653 1654 iov[1].iov_base = sgl; 1655 iov[1].iov_len = buflen; 1656 1657 iov[2].iov_base = data; 1658 iov[2].iov_len = datalen; 1659 1660 iov[3].iov_base = &zeropad; 1661 iov[3].iov_len = pktlen_aligned - pktlen; 1662 1663 mutex_enter(&ch->ch_wrd.rd_lock); 1664 rv = vmbus_ring_write(&ch->ch_wrd, iov, 4, &needsig); 1665 mutex_exit(&ch->ch_wrd.rd_lock); 1666 if (rv == 0 && needsig) 1667 vmbus_channel_setevent(sc, ch); 1668 1669 return rv; 1670 } 1671 1672 int 1673 vmbus_channel_send_prpl(struct vmbus_channel *ch, struct vmbus_gpa_range *prpl, 1674 uint32_t nprp, void *data, uint32_t datalen, uint64_t rid) 1675 { 1676 struct vmbus_softc *sc = ch->ch_sc; 1677 struct vmbus_chanpkt_prplist cp; 1678 struct iovec iov[4]; 1679 uint32_t buflen, pktlen, pktlen_aligned; 1680 uint64_t zeropad = 0; 1681 int rv, needsig = 0; 1682 1683 buflen = sizeof(struct vmbus_gpa_range) * (nprp + 1); 1684 pktlen = sizeof(cp) + datalen + buflen; 1685 pktlen_aligned = roundup(pktlen, sizeof(uint64_t)); 1686 1687 cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA; 1688 cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC; 1689 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen); 1690 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned); 1691 cp.cp_hdr.cph_tid = rid; 1692 cp.cp_range_cnt = 1; 1693 cp.cp_rsvd = 0; 1694 1695 iov[0].iov_base = &cp; 1696 iov[0].iov_len = sizeof(cp); 1697 1698 iov[1].iov_base = prpl; 1699 iov[1].iov_len = buflen; 1700 1701 iov[2].iov_base = data; 1702 iov[2].iov_len = datalen; 1703 1704 iov[3].iov_base = &zeropad; 1705 iov[3].iov_len = pktlen_aligned - pktlen; 1706 1707 mutex_enter(&ch->ch_wrd.rd_lock); 1708 rv = vmbus_ring_write(&ch->ch_wrd, iov, 4, &needsig); 1709 mutex_exit(&ch->ch_wrd.rd_lock); 1710 if (rv == 0 && needsig) 1711 vmbus_channel_setevent(sc, ch); 1712 1713 return rv; 1714 } 1715 1716 static int 1717 vmbus_ring_peek(struct vmbus_ring_data *rrd, void *data, uint32_t datalen) 1718 { 1719 uint32_t avail; 1720 1721 KASSERT(datalen <= rrd->rd_dsize); 1722 1723 vmbus_ring_avail(rrd, NULL, &avail); 1724 if (avail < datalen) 1725 return EAGAIN; 1726 1727 vmbus_ring_get(rrd, (uint8_t *)data, datalen, 1); 1728 return 0; 1729 } 1730 1731 static int 1732 vmbus_ring_read(struct vmbus_ring_data *rrd, void *data, uint32_t datalen, 1733 uint32_t offset) 1734 { 1735 uint64_t indices; 1736 uint32_t avail; 1737 1738 KASSERT(datalen <= rrd->rd_dsize); 1739 1740 vmbus_ring_avail(rrd, NULL, &avail); 1741 if (avail < datalen) { 1742 DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen); 1743 return EAGAIN; 1744 } 1745 1746 if (offset) { 1747 rrd->rd_cons += offset; 1748 if (rrd->rd_cons >= rrd->rd_dsize) 1749 rrd->rd_cons -= rrd->rd_dsize; 1750 } 1751 1752 vmbus_ring_get(rrd, (uint8_t *)data, datalen, 0); 1753 vmbus_ring_get(rrd, (uint8_t *)&indices, sizeof(indices), 0); 1754 1755 membar_sync(); 1756 rrd->rd_ring->br_rindex = rrd->rd_cons; 1757 1758 return 0; 1759 } 1760 1761 int 1762 vmbus_channel_recv(struct vmbus_channel *ch, void *data, uint32_t datalen, 1763 uint32_t *rlen, uint64_t *rid, int raw) 1764 { 1765 struct vmbus_softc *sc = ch->ch_sc; 1766 struct vmbus_chanpkt_hdr cph; 1767 uint32_t offset, pktlen; 1768 int rv; 1769 1770 *rlen = 0; 1771 1772 mutex_enter(&ch->ch_rrd.rd_lock); 1773 1774 if ((rv = vmbus_ring_peek(&ch->ch_rrd, &cph, sizeof(cph))) != 0) { 1775 mutex_exit(&ch->ch_rrd.rd_lock); 1776 return rv; 1777 } 1778 1779 offset = raw ? 0 : VMBUS_CHANPKT_GETLEN(cph.cph_hlen); 1780 pktlen = VMBUS_CHANPKT_GETLEN(cph.cph_tlen) - offset; 1781 if (pktlen > datalen) { 1782 mutex_exit(&ch->ch_rrd.rd_lock); 1783 device_printf(sc->sc_dev, "%s: pktlen %u datalen %u\n", 1784 __func__, pktlen, datalen); 1785 return EINVAL; 1786 } 1787 1788 rv = vmbus_ring_read(&ch->ch_rrd, data, pktlen, offset); 1789 if (rv == 0) { 1790 *rlen = pktlen; 1791 *rid = cph.cph_tid; 1792 } 1793 1794 mutex_exit(&ch->ch_rrd.rd_lock); 1795 1796 return rv; 1797 } 1798 1799 static inline void 1800 vmbus_ring_mask(struct vmbus_ring_data *rd) 1801 { 1802 1803 membar_sync(); 1804 rd->rd_ring->br_imask = 1; 1805 membar_sync(); 1806 } 1807 1808 static inline void 1809 vmbus_ring_unmask(struct vmbus_ring_data *rd) 1810 { 1811 1812 membar_sync(); 1813 rd->rd_ring->br_imask = 0; 1814 membar_sync(); 1815 } 1816 1817 static void 1818 vmbus_channel_pause(struct vmbus_channel *ch) 1819 { 1820 1821 vmbus_ring_mask(&ch->ch_rrd); 1822 } 1823 1824 static uint32_t 1825 vmbus_channel_unpause(struct vmbus_channel *ch) 1826 { 1827 uint32_t avail; 1828 1829 vmbus_ring_unmask(&ch->ch_rrd); 1830 vmbus_ring_avail(&ch->ch_rrd, NULL, &avail); 1831 1832 return avail; 1833 } 1834 1835 static uint32_t 1836 vmbus_channel_ready(struct vmbus_channel *ch) 1837 { 1838 uint32_t avail; 1839 1840 vmbus_ring_avail(&ch->ch_rrd, NULL, &avail); 1841 1842 return avail; 1843 } 1844 1845 /* How many PFNs can be referenced by the header */ 1846 #define VMBUS_NPFNHDR ((VMBUS_MSG_DSIZE_MAX - \ 1847 sizeof(struct vmbus_chanmsg_gpadl_conn)) / sizeof(uint64_t)) 1848 1849 /* How many PFNs can be referenced by the body */ 1850 #define VMBUS_NPFNBODY ((VMBUS_MSG_DSIZE_MAX - \ 1851 sizeof(struct vmbus_chanmsg_gpadl_subconn)) / sizeof(uint64_t)) 1852 1853 int 1854 vmbus_handle_alloc(struct vmbus_channel *ch, const struct hyperv_dma *dma, 1855 uint32_t buflen, uint32_t *handle) 1856 { 1857 const int prflags = cold ? PR_NOWAIT : PR_WAITOK; 1858 const int kmemflags = cold ? KM_NOSLEEP : KM_SLEEP; 1859 const int msgflags = cold ? MSGF_NOSLEEP : 0; 1860 const int hcflags = cold ? HCF_NOSLEEP : HCF_SLEEPOK; 1861 struct vmbus_softc *sc = ch->ch_sc; 1862 struct vmbus_chanmsg_gpadl_conn *hdr; 1863 struct vmbus_chanmsg_gpadl_subconn *cmd; 1864 struct vmbus_chanmsg_gpadl_connresp rsp; 1865 struct vmbus_msg *msg; 1866 int i, j, last, left, rv; 1867 int bodylen = 0, ncmds = 0, pfn = 0; 1868 uint64_t *frames; 1869 paddr_t pa; 1870 uint8_t *body; 1871 /* Total number of pages to reference */ 1872 int total = atop(buflen); 1873 /* Number of pages that will fit the header */ 1874 int inhdr = MIN(total, VMBUS_NPFNHDR); 1875 1876 KASSERT((buflen & PAGE_MASK) == 0); 1877 KASSERT(buflen == (uint32_t)dma->map->dm_mapsize); 1878 1879 msg = pool_cache_get_paddr(sc->sc_msgpool, prflags, &pa); 1880 if (msg == NULL) 1881 return ENOMEM; 1882 1883 /* Prepare array of frame addresses */ 1884 frames = kmem_zalloc(total * sizeof(*frames), kmemflags); 1885 if (frames == NULL) { 1886 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 1887 return ENOMEM; 1888 } 1889 for (i = 0, j = 0; i < dma->map->dm_nsegs && j < total; i++) { 1890 bus_dma_segment_t *seg = &dma->map->dm_segs[i]; 1891 bus_addr_t addr = seg->ds_addr; 1892 1893 KASSERT((addr & PAGE_MASK) == 0); 1894 KASSERT((seg->ds_len & PAGE_MASK) == 0); 1895 1896 while (addr < seg->ds_addr + seg->ds_len && j < total) { 1897 frames[j++] = atop(addr); 1898 addr += PAGE_SIZE; 1899 } 1900 } 1901 1902 memset(msg, 0, sizeof(*msg)); 1903 msg->msg_req.hc_dsize = sizeof(struct vmbus_chanmsg_gpadl_conn) + 1904 inhdr * sizeof(uint64_t); 1905 hdr = (struct vmbus_chanmsg_gpadl_conn *)msg->msg_req.hc_data; 1906 msg->msg_rsp = &rsp; 1907 msg->msg_rsplen = sizeof(rsp); 1908 msg->msg_flags = msgflags; 1909 1910 left = total - inhdr; 1911 1912 /* Allocate additional gpadl_body structures if required */ 1913 if (left > 0) { 1914 ncmds = MAX(1, left / VMBUS_NPFNBODY + left % VMBUS_NPFNBODY); 1915 bodylen = ncmds * VMBUS_MSG_DSIZE_MAX; 1916 body = kmem_zalloc(bodylen, kmemflags); 1917 if (body == NULL) { 1918 kmem_free(frames, total * sizeof(*frames)); 1919 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 1920 return ENOMEM; 1921 } 1922 } 1923 1924 *handle = atomic_inc_32_nv(&sc->sc_handle); 1925 1926 hdr->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_CONN; 1927 hdr->chm_chanid = ch->ch_id; 1928 hdr->chm_gpadl = *handle; 1929 1930 /* Single range for a contiguous buffer */ 1931 hdr->chm_range_cnt = 1; 1932 hdr->chm_range_len = sizeof(struct vmbus_gpa_range) + total * 1933 sizeof(uint64_t); 1934 hdr->chm_range.gpa_ofs = 0; 1935 hdr->chm_range.gpa_len = buflen; 1936 1937 /* Fit as many pages as possible into the header */ 1938 for (i = 0; i < inhdr; i++) 1939 hdr->chm_range.gpa_page[i] = frames[pfn++]; 1940 1941 for (i = 0; i < ncmds; i++) { 1942 cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body + 1943 VMBUS_MSG_DSIZE_MAX * i); 1944 cmd->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_SUBCONN; 1945 cmd->chm_gpadl = *handle; 1946 last = MIN(left, VMBUS_NPFNBODY); 1947 for (j = 0; j < last; j++) 1948 cmd->chm_gpa_page[j] = frames[pfn++]; 1949 left -= last; 1950 } 1951 1952 rv = vmbus_start(sc, msg, pa); 1953 if (rv != 0) { 1954 DPRINTF("%s: GPADL_CONN failed\n", device_xname(sc->sc_dev)); 1955 goto out; 1956 } 1957 for (i = 0; i < ncmds; i++) { 1958 int cmdlen = sizeof(*cmd); 1959 cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body + 1960 VMBUS_MSG_DSIZE_MAX * i); 1961 /* Last element can be short */ 1962 if (i == ncmds - 1) 1963 cmdlen += last * sizeof(uint64_t); 1964 else 1965 cmdlen += VMBUS_NPFNBODY * sizeof(uint64_t); 1966 rv = vmbus_cmd(sc, cmd, cmdlen, NULL, 0, HCF_NOREPLY | hcflags); 1967 if (rv != 0) { 1968 DPRINTF("%s: GPADL_SUBCONN (iteration %d/%d) failed " 1969 "with %d\n", device_xname(sc->sc_dev), i, ncmds, 1970 rv); 1971 goto out; 1972 } 1973 } 1974 rv = vmbus_reply(sc, msg); 1975 if (rv != 0) { 1976 DPRINTF("%s: GPADL allocation failed with %d\n", 1977 device_xname(sc->sc_dev), rv); 1978 } 1979 1980 out: 1981 if (bodylen > 0) 1982 kmem_free(body, bodylen); 1983 kmem_free(frames, total * sizeof(*frames)); 1984 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 1985 if (rv) 1986 return rv; 1987 1988 KASSERT(*handle == rsp.chm_gpadl); 1989 1990 return 0; 1991 } 1992 1993 void 1994 vmbus_handle_free(struct vmbus_channel *ch, uint32_t handle) 1995 { 1996 struct vmbus_softc *sc = ch->ch_sc; 1997 struct vmbus_chanmsg_gpadl_disconn cmd; 1998 struct vmbus_chanmsg_gpadl_disconn rsp; 1999 int rv; 2000 2001 memset(&cmd, 0, sizeof(cmd)); 2002 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_DISCONN; 2003 cmd.chm_chanid = ch->ch_id; 2004 cmd.chm_gpadl = handle; 2005 2006 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), 2007 cold ? HCF_NOSLEEP : HCF_SLEEPOK); 2008 if (rv) { 2009 DPRINTF("%s: GPADL_DISCONN failed with %d\n", 2010 device_xname(sc->sc_dev), rv); 2011 } 2012 } 2013 2014 static void 2015 vmbus_devq_enqueue(struct vmbus_softc *sc, int type, struct vmbus_channel *ch) 2016 { 2017 struct vmbus_dev *vd; 2018 2019 vd = kmem_intr_zalloc(sizeof(*vd), KM_NOSLEEP); 2020 if (vd == NULL) { 2021 device_printf(sc->sc_dev, "failed to allocate devq\n"); 2022 return; 2023 } 2024 2025 vd->vd_type = type; 2026 vd->vd_chan = ch; 2027 2028 mutex_enter(&sc->sc_devq_lock); 2029 SIMPLEQ_INSERT_TAIL(&sc->sc_devq, vd, vd_entry); 2030 cv_broadcast(&sc->sc_devq_cv); 2031 mutex_exit(&sc->sc_devq_lock); 2032 } 2033 2034 static void 2035 vmbus_process_devq(void *arg) 2036 { 2037 struct vmbus_softc *sc = arg; 2038 struct vmbus_dev *vd; 2039 struct vmbus_channel *ch, *prich; 2040 2041 KASSERT(mutex_owned(&sc->sc_devq_lock)); 2042 2043 while (!SIMPLEQ_EMPTY(&sc->sc_devq)) { 2044 vd = SIMPLEQ_FIRST(&sc->sc_devq); 2045 SIMPLEQ_REMOVE_HEAD(&sc->sc_devq, vd_entry); 2046 mutex_exit(&sc->sc_devq_lock); 2047 2048 switch (vd->vd_type) { 2049 case VMBUS_DEV_TYPE_ATTACH: 2050 ch = vd->vd_chan; 2051 if (VMBUS_CHAN_ISPRIMARY(ch)) { 2052 struct vmbus_attach_args vaa; 2053 2054 vaa.aa_type = &ch->ch_type; 2055 vaa.aa_inst = &ch->ch_inst; 2056 vaa.aa_ident = ch->ch_ident; 2057 vaa.aa_chan = ch; 2058 vaa.aa_iot = sc->sc_iot; 2059 vaa.aa_memt = sc->sc_memt; 2060 ch->ch_dev = config_found_ia(sc->sc_dev, 2061 "hypervvmbus", &vaa, vmbus_attach_print); 2062 } 2063 break; 2064 2065 case VMBUS_DEV_TYPE_DETACH: 2066 ch = vd->vd_chan; 2067 if (VMBUS_CHAN_ISPRIMARY(ch)) { 2068 if (ch->ch_dev != NULL) { 2069 config_detach(ch->ch_dev, DETACH_FORCE); 2070 ch->ch_dev = NULL; 2071 } 2072 vmbus_channel_release(ch); 2073 vmbus_channel_free(ch); 2074 break; 2075 } 2076 2077 vmbus_channel_release(ch); 2078 2079 prich = ch->ch_primary_channel; 2080 mutex_enter(&prich->ch_subchannel_lock); 2081 TAILQ_REMOVE(&prich->ch_subchannels, ch, ch_subentry); 2082 prich->ch_subchannel_count--; 2083 mutex_exit(&prich->ch_subchannel_lock); 2084 wakeup(prich); 2085 2086 vmbus_channel_free(ch); 2087 break; 2088 2089 default: 2090 DPRINTF("%s: unknown offer type %d\n", 2091 device_xname(sc->sc_dev), vd->vd_type); 2092 break; 2093 } 2094 kmem_free(vd, sizeof(*vd)); 2095 2096 mutex_enter(&sc->sc_devq_lock); 2097 } 2098 } 2099 2100 static void 2101 vmbus_devq_thread(void *arg) 2102 { 2103 struct vmbus_softc *sc = arg; 2104 2105 mutex_enter(&sc->sc_devq_lock); 2106 for (;;) { 2107 if (SIMPLEQ_EMPTY(&sc->sc_devq)) { 2108 cv_wait(&sc->sc_devq_cv, &sc->sc_devq_lock); 2109 continue; 2110 } 2111 2112 vmbus_process_devq(sc); 2113 } 2114 mutex_exit(&sc->sc_devq_lock); 2115 2116 kthread_exit(0); 2117 } 2118 2119 static int 2120 vmbus_attach_print(void *aux, const char *name) 2121 { 2122 struct vmbus_attach_args *aa = aux; 2123 2124 if (name) 2125 printf("\"%s\" at %s", aa->aa_ident, name); 2126 2127 return UNCONF; 2128 } 2129 2130 MODULE(MODULE_CLASS_DRIVER, vmbus, "hyperv"); 2131 2132 #ifdef _MODULE 2133 #include "ioconf.c" 2134 #endif 2135 2136 static int 2137 vmbus_modcmd(modcmd_t cmd, void *aux) 2138 { 2139 int rv = 0; 2140 2141 switch (cmd) { 2142 case MODULE_CMD_INIT: 2143 #ifdef _MODULE 2144 rv = config_init_component(cfdriver_ioconf_vmbus, 2145 cfattach_ioconf_vmbus, cfdata_ioconf_vmbus); 2146 #endif 2147 break; 2148 2149 case MODULE_CMD_FINI: 2150 #ifdef _MODULE 2151 rv = config_fini_component(cfdriver_ioconf_vmbus, 2152 cfattach_ioconf_vmbus, cfdata_ioconf_vmbus); 2153 #endif 2154 break; 2155 2156 default: 2157 rv = ENOTTY; 2158 break; 2159 } 2160 2161 return rv; 2162 } 2163