/*	$NetBSD: vmbus.c,v 1.2 2019/02/15 16:37:54 hannken Exp $	*/
/*	$OpenBSD: hyperv.c,v 1.43 2017/06/27 13:56:15 mikeb Exp $	*/

/*-
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmbus.c,v 1.2 2019/02/15 16:37:54 hannken Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/atomic.h>
#include <sys/bitops.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>

#include <dev/hyperv/vmbusvar.h>

#define VMBUS_GPADL_START	0xffff	/* 0x10000 effectively */

/* Command submission flags */
#define HCF_SLEEPOK	0x0000
#define HCF_NOSLEEP	0x0002	/* M_NOWAIT */
#define HCF_NOREPLY	0x0004

static void	vmbus_attach_deferred(device_t);
static int	vmbus_alloc_dma(struct vmbus_softc *);
static void	vmbus_free_dma(struct vmbus_softc *);
static int	vmbus_init_interrupts(struct vmbus_softc *);
static void	vmbus_deinit_interrupts(struct vmbus_softc *);
static void	vmbus_init_synic(void *, void *);
static void	vmbus_deinit_synic(void *, void *);

static int	vmbus_connect(struct vmbus_softc *);
static int	vmbus_cmd(struct vmbus_softc *, void *, size_t, void *, size_t,
		    int);
static int	vmbus_start(struct vmbus_softc *, struct vmbus_msg *, paddr_t);
static int	vmbus_reply(struct vmbus_softc *, struct vmbus_msg *);
static void	vmbus_wait(struct vmbus_softc *,
		    int (*done)(struct vmbus_softc *, struct vmbus_msg *),
		    struct vmbus_msg *, void *, const char *);
static uint16_t	vmbus_intr_signal(struct vmbus_softc *, paddr_t);
static void	vmbus_event_proc(void *, struct cpu_info *);
static void	vmbus_event_proc_compat(void *, struct cpu_info *);
static void	vmbus_message_proc(void *, struct cpu_info *);
static void	vmbus_message_softintr(void *);
static void	vmbus_channel_response(struct vmbus_softc *,
		    struct vmbus_chanmsg_hdr *);
static void	vmbus_channel_offer(struct vmbus_softc *,
		    struct vmbus_chanmsg_hdr *);
static void	vmbus_channel_rescind(struct vmbus_softc *,
		    struct vmbus_chanmsg_hdr *);
static void	vmbus_channel_delivered(struct vmbus_softc *,
		    struct vmbus_chanmsg_hdr *);
static int	vmbus_channel_scan(struct vmbus_softc *);
static void	vmbus_channel_cpu_default(struct vmbus_channel *);
static void	vmbus_process_offer(struct vmbus_softc *, struct vmbus_offer *);
static struct vmbus_channel *
		vmbus_channel_lookup(struct vmbus_softc *, uint32_t);
static int	vmbus_channel_ring_create(struct vmbus_channel *, uint32_t);
static void	vmbus_channel_ring_destroy(struct vmbus_channel *);
static void	vmbus_channel_pause(struct vmbus_channel *);
static uint32_t	vmbus_channel_unpause(struct vmbus_channel *);
static uint32_t	vmbus_channel_ready(struct vmbus_channel *);
static int	vmbus_attach_icdevs(struct vmbus_softc *);
static int	vmbus_attach_devices(struct vmbus_softc *);
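/*
 * Channel message dispatch table.  hmd_handler is run from the message
 * soft interrupt for each host-to-guest message type; hmd_request names
 * the request type that a given response completes, which is how
 * vmbus_channel_response() matches a reply against the pending request
 * queue.
 */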
static struct vmbus_softc *vmbus_sc;

static const struct {
	int	hmd_response;
	int	hmd_request;
	void	(*hmd_handler)(struct vmbus_softc *,
		    struct vmbus_chanmsg_hdr *);
} vmbus_msg_dispatch[] = {
	{ 0,					0, NULL },
	{ VMBUS_CHANMSG_CHOFFER,		0, vmbus_channel_offer },
	{ VMBUS_CHANMSG_CHRESCIND,		0, vmbus_channel_rescind },
	{ VMBUS_CHANMSG_CHREQUEST,		VMBUS_CHANMSG_CHOFFER, NULL },
	{ VMBUS_CHANMSG_CHOFFER_DONE,		0, vmbus_channel_delivered },
	{ VMBUS_CHANMSG_CHOPEN,			0, NULL },
	{ VMBUS_CHANMSG_CHOPEN_RESP,		VMBUS_CHANMSG_CHOPEN,
	    vmbus_channel_response },
	{ VMBUS_CHANMSG_CHCLOSE,		0, NULL },
	{ VMBUS_CHANMSG_GPADL_CONN,		0, NULL },
	{ VMBUS_CHANMSG_GPADL_SUBCONN,		0, NULL },
	{ VMBUS_CHANMSG_GPADL_CONNRESP,		VMBUS_CHANMSG_GPADL_CONN,
	    vmbus_channel_response },
	{ VMBUS_CHANMSG_GPADL_DISCONN,		0, NULL },
	{ VMBUS_CHANMSG_GPADL_DISCONNRESP,	VMBUS_CHANMSG_GPADL_DISCONN,
	    vmbus_channel_response },
	{ VMBUS_CHANMSG_CHFREE,			0, NULL },
	{ VMBUS_CHANMSG_CONNECT,		0, NULL },
	{ VMBUS_CHANMSG_CONNECT_RESP,		VMBUS_CHANMSG_CONNECT,
	    vmbus_channel_response },
	{ VMBUS_CHANMSG_DISCONNECT,		0, NULL },
};
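/*
 * Well-known channel type GUIDs, one per paravirtual device class.
 * They are stored as the 16 raw GUID bytes as they appear on the wire,
 * so that they can be memcmp()ed directly against the type field of an
 * incoming channel offer.
 */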
const struct hyperv_guid hyperv_guid_network = {
	{ 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46,
	  0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e }
};

const struct hyperv_guid hyperv_guid_ide = {
	{ 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
	  0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 }
};

const struct hyperv_guid hyperv_guid_scsi = {
	{ 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
	  0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f }
};

const struct hyperv_guid hyperv_guid_shutdown = {
	{ 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49,
	  0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb }
};

const struct hyperv_guid hyperv_guid_timesync = {
	{ 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
	  0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf }
};

const struct hyperv_guid hyperv_guid_heartbeat = {
	{ 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
	  0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d }
};

const struct hyperv_guid hyperv_guid_kvp = {
	{ 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
	  0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6 }
};

const struct hyperv_guid hyperv_guid_vss = {
	{ 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42,
	  0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 }
};

const struct hyperv_guid hyperv_guid_dynmem = {
	{ 0xdc, 0x74, 0x50, 0x52, 0x85, 0x89, 0xe2, 0x46,
	  0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 }
};

const struct hyperv_guid hyperv_guid_mouse = {
	{ 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c,
	  0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a }
};

const struct hyperv_guid hyperv_guid_kbd = {
	{ 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48,
	  0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 }
};

const struct hyperv_guid hyperv_guid_video = {
	{ 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a,
	  0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 }
};

const struct hyperv_guid hyperv_guid_fc = {
	{ 0x4a, 0xcc, 0x9b, 0x2f, 0x69, 0x00, 0xf3, 0x4a,
	  0xb7, 0x6b, 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda }
};

const struct hyperv_guid hyperv_guid_fcopy = {
	{ 0xe3, 0x4b, 0xd1, 0x34, 0xe4, 0xde, 0xc8, 0x41,
	  0x9a, 0xe7, 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92 }
};

const struct hyperv_guid hyperv_guid_pcie = {
	{ 0x1d, 0xf6, 0xc4, 0x44, 0x44, 0x44, 0x00, 0x44,
	  0x9d, 0x52, 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f }
};

const struct hyperv_guid hyperv_guid_netdir = {
	{ 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b,
	  0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 }
};

const struct hyperv_guid hyperv_guid_rdesktop = {
	{ 0xf4, 0xac, 0x6a, 0x27, 0x15, 0xac, 0x6c, 0x42,
	  0x98, 0xdd, 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe }
};

/* Automatic Virtual Machine Activation (AVMA) Services */
const struct hyperv_guid hyperv_guid_avma1 = {
	{ 0x55, 0xb2, 0x87, 0x44, 0x8c, 0xb8, 0x3f, 0x40,
	  0xbb, 0x51, 0xd1, 0xf6, 0x9c, 0xf1, 0x7f, 0x87 }
};

const struct hyperv_guid hyperv_guid_avma2 = {
	{ 0xf4, 0xba, 0x75, 0x33, 0x15, 0x9e, 0x30, 0x4b,
	  0xb7, 0x65, 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b }
};

const struct hyperv_guid hyperv_guid_avma3 = {
	{ 0xa0, 0x1f, 0x22, 0x99, 0xad, 0x24, 0xe2, 0x11,
	  0xbe, 0x98, 0x00, 0x1a, 0xa0, 0x1b, 0xbf, 0x6e }
};

const struct hyperv_guid hyperv_guid_avma4 = {
	{ 0x16, 0x57, 0xe6, 0xf8, 0xb3, 0x3c, 0x06, 0x4a,
	  0x9a, 0x60, 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5 }
};

int
vmbus_match(device_t parent, cfdata_t cf, void *aux)
{

	if (cf->cf_unit != 0 ||
	    !hyperv_hypercall_enabled() ||
	    !hyperv_synic_supported())
		return 0;

	return 1;
}

int
vmbus_attach(struct vmbus_softc *sc)
{

	aprint_naive("\n");
	aprint_normal(": Hyper-V VMBus\n");

	vmbus_sc = sc;

	sc->sc_msgpool = pool_cache_init(sizeof(struct vmbus_msg), 8, 0, 0,
	    "hvmsg", NULL, IPL_NET, NULL, NULL, NULL);
	hyperv_set_message_proc(vmbus_message_proc, sc);

	if (vmbus_alloc_dma(sc))
		goto cleanup;

	if (vmbus_init_interrupts(sc))
		goto cleanup;

	if (vmbus_connect(sc))
		goto cleanup;

	aprint_normal_dev(sc->sc_dev, "protocol %d.%d\n",
	    VMBUS_VERSION_MAJOR(sc->sc_proto),
	    VMBUS_VERSION_MINOR(sc->sc_proto));

	if (sc->sc_proto == VMBUS_VERSION_WS2008 ||
	    sc->sc_proto == VMBUS_VERSION_WIN7) {
		hyperv_set_event_proc(vmbus_event_proc_compat, sc);
		sc->sc_channel_max = VMBUS_CHAN_MAX_COMPAT;
	} else {
		hyperv_set_event_proc(vmbus_event_proc, sc);
		sc->sc_channel_max = VMBUS_CHAN_MAX;
	}

	if (vmbus_channel_scan(sc))
		goto cleanup;

	/* Attach heartbeat, KVP and other "internal" services */
	vmbus_attach_icdevs(sc);

	/* Attach devices with external drivers */
	vmbus_attach_devices(sc);

	config_interrupts(sc->sc_dev, vmbus_attach_deferred);

	return 0;

cleanup:
	vmbus_deinit_interrupts(sc);
	vmbus_free_dma(sc);
	return -1;
}
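/*
 * Scheduled from vmbus_attach() via config_interrupts(): once interrupt
 * delivery is possible, finish SynIC setup on the CPUs that could not be
 * cross-called while the system was still cold.
 */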
static void
vmbus_attach_deferred(device_t self)
{
	struct vmbus_softc *sc = device_private(self);

	xc_wait(xc_broadcast(0, vmbus_init_synic, sc, NULL));
}

int
vmbus_detach(struct vmbus_softc *sc, int flags)
{

	vmbus_deinit_interrupts(sc);
	vmbus_free_dma(sc);

	return 0;
}

static int
vmbus_alloc_dma(struct vmbus_softc *sc)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct vmbus_percpu_data *pd;
	int i;

	/*
	 * Per-CPU messages and event flags.
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		pd = &sc->sc_percpu[cpu_index(ci)];

		pd->simp = hyperv_dma_alloc(sc->sc_dmat, &pd->simp_dma,
		    PAGE_SIZE, PAGE_SIZE, 0, 1);
		if (pd->simp == NULL)
			return ENOMEM;

		pd->siep = hyperv_dma_alloc(sc->sc_dmat, &pd->siep_dma,
		    PAGE_SIZE, PAGE_SIZE, 0, 1);
		if (pd->siep == NULL)
			return ENOMEM;
	}

	sc->sc_events = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_events_dma,
	    PAGE_SIZE, PAGE_SIZE, 0, 1);
	if (sc->sc_events == NULL)
		return ENOMEM;
	sc->sc_wevents = (u_long *)sc->sc_events;
	sc->sc_revents = (u_long *)((uint8_t *)sc->sc_events + (PAGE_SIZE / 2));

	for (i = 0; i < __arraycount(sc->sc_monitor); i++) {
		sc->sc_monitor[i] = hyperv_dma_alloc(sc->sc_dmat,
		    &sc->sc_monitor_dma[i], PAGE_SIZE, PAGE_SIZE, 0, 1);
		if (sc->sc_monitor[i] == NULL)
			return ENOMEM;
	}

	return 0;
}

static void
vmbus_free_dma(struct vmbus_softc *sc)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int i;

	if (sc->sc_events != NULL) {
		sc->sc_events = sc->sc_wevents = sc->sc_revents = NULL;
		hyperv_dma_free(sc->sc_dmat, &sc->sc_events_dma);
	}

	for (i = 0; i < __arraycount(sc->sc_monitor); i++) {
		sc->sc_monitor[i] = NULL;
		hyperv_dma_free(sc->sc_dmat, &sc->sc_monitor_dma[i]);
	}

	for (CPU_INFO_FOREACH(cii, ci)) {
		struct vmbus_percpu_data *pd = &sc->sc_percpu[cpu_index(ci)];

		if (pd->simp != NULL) {
			pd->simp = NULL;
			hyperv_dma_free(sc->sc_dmat, &pd->simp_dma);
		}
		if (pd->siep != NULL) {
			pd->siep = NULL;
			hyperv_dma_free(sc->sc_dmat, &pd->siep_dma);
		}
	}
}

static int
vmbus_init_interrupts(struct vmbus_softc *sc)
{

	TAILQ_INIT(&sc->sc_reqs);
	mutex_init(&sc->sc_req_lock, MUTEX_DEFAULT, IPL_NET);

	TAILQ_INIT(&sc->sc_rsps);
	mutex_init(&sc->sc_rsp_lock, MUTEX_DEFAULT, IPL_NET);

	sc->sc_proto = VMBUS_VERSION_WS2008;

	/* XXX event_tq */

	sc->sc_msg_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    vmbus_message_softintr, sc);
	if (sc->sc_msg_sih == NULL)
		return -1;

	vmbus_init_interrupts_md(sc);

	kcpuset_create(&sc->sc_intr_cpuset, true);
	if (cold) {
		/* Initialize other CPUs later. */
		vmbus_init_synic(sc, NULL);
	} else
		xc_wait(xc_broadcast(0, vmbus_init_synic, sc, NULL));
	atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_SYNIC);

	return 0;
}
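/*
 * SynIC (synthetic interrupt controller) setup and teardown must run on
 * the CPU being configured, so they are invoked directly while cold and
 * through xcall(9) broadcasts afterwards; sc_intr_cpuset tracks which
 * CPUs are currently initialized.
 */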
static void
vmbus_deinit_interrupts(struct vmbus_softc *sc)
{

	if (ISSET(sc->sc_flags, VMBUS_SCFLAG_SYNIC)) {
		if (cold)
			vmbus_deinit_synic(sc, NULL);
		else
			xc_wait(xc_broadcast(0, vmbus_deinit_synic, sc, NULL));
		atomic_and_32(&sc->sc_flags, (uint32_t)~VMBUS_SCFLAG_SYNIC);
	}

	/* XXX event_tq */

	if (sc->sc_msg_sih != NULL) {
		softint_disestablish(sc->sc_msg_sih);
		sc->sc_msg_sih = NULL;
	}

	vmbus_deinit_interrupts_md(sc);
}

static void
vmbus_init_synic(void *arg1, void *arg2)
{
	struct vmbus_softc *sc = arg1;
	cpuid_t cpu;
	int s;

	s = splhigh();

	cpu = cpu_index(curcpu());
	if (!kcpuset_isset(sc->sc_intr_cpuset, cpu)) {
		kcpuset_atomic_set(sc->sc_intr_cpuset, cpu);
		vmbus_init_synic_md(sc, cpu);
	}

	splx(s);
}

static void
vmbus_deinit_synic(void *arg1, void *arg2)
{
	struct vmbus_softc *sc = arg1;
	cpuid_t cpu;
	int s;

	s = splhigh();

	cpu = cpu_index(curcpu());
	if (kcpuset_isset(sc->sc_intr_cpuset, cpu)) {
		vmbus_deinit_synic_md(sc, cpu);
		kcpuset_atomic_clear(sc->sc_intr_cpuset, cpu);
	}

	splx(s);
}

static int
vmbus_connect(struct vmbus_softc *sc)
{
	static const uint32_t versions[] = {
		VMBUS_VERSION_WIN8_1,
		VMBUS_VERSION_WIN8,
		VMBUS_VERSION_WIN7,
		VMBUS_VERSION_WS2008
	};
	struct vmbus_chanmsg_connect cmd;
	struct vmbus_chanmsg_connect_resp rsp;
	int i, rv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CONNECT;
	cmd.chm_evtflags = hyperv_dma_get_paddr(&sc->sc_events_dma);
	cmd.chm_mnf1 = hyperv_dma_get_paddr(&sc->sc_monitor_dma[0]);
	cmd.chm_mnf2 = hyperv_dma_get_paddr(&sc->sc_monitor_dma[1]);

	memset(&rsp, 0, sizeof(rsp));

	for (i = 0; i < __arraycount(versions); i++) {
		cmd.chm_ver = versions[i];
		rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp),
		    cold ? HCF_NOSLEEP : HCF_SLEEPOK);
		if (rv) {
			DPRINTF("%s: CONNECT failed\n",
			    device_xname(sc->sc_dev));
			return rv;
		}
		if (rsp.chm_done) {
			atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_CONNECTED);
			sc->sc_proto = versions[i];
			sc->sc_handle = VMBUS_GPADL_START;
			break;
		}
	}
	if (i == __arraycount(versions)) {
		device_printf(sc->sc_dev,
		    "failed to negotiate protocol version\n");
		return ENXIO;
	}

	return 0;
}
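/*
 * Post a channel message to the host and, unless HCF_NOREPLY is given,
 * block until the matching response has been copied into "rsp".  With
 * HCF_NOSLEEP the whole exchange busy-waits so that it can be used while
 * cold.  A typical fire-and-forget call looks like (sketch, mirroring
 * vmbus_channel_release() below):
 *
 *	struct vmbus_chanmsg_chfree cmd = { .chm_hdr.chm_type =
 *	    VMBUS_CHANMSG_CHFREE, .chm_chanid = ch->ch_id };
 *	vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0, HCF_NOREPLY);
 */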
static int
vmbus_cmd(struct vmbus_softc *sc, void *cmd, size_t cmdlen, void *rsp,
    size_t rsplen, int flags)
{
	const int prflags = cold ? PR_NOWAIT : PR_WAITOK;
	struct vmbus_msg *msg;
	paddr_t pa;
	int rv;

	if (cmdlen > VMBUS_MSG_DSIZE_MAX) {
		device_printf(sc->sc_dev, "payload too large (%zu)\n",
		    cmdlen);
		return EMSGSIZE;
	}

	msg = pool_cache_get_paddr(sc->sc_msgpool, prflags, &pa);
	if (msg == NULL) {
		device_printf(sc->sc_dev, "couldn't get msgpool\n");
		return ENOMEM;
	}
	memset(msg, 0, sizeof(*msg));
	msg->msg_req.hc_dsize = cmdlen;
	memcpy(msg->msg_req.hc_data, cmd, cmdlen);

	if (!(flags & HCF_NOREPLY)) {
		msg->msg_rsp = rsp;
		msg->msg_rsplen = rsplen;
	} else
		msg->msg_flags |= MSGF_NOQUEUE;

	if (flags & HCF_NOSLEEP)
		msg->msg_flags |= MSGF_NOSLEEP;

	rv = vmbus_start(sc, msg, pa);
	if (rv == 0)
		rv = vmbus_reply(sc, msg);
	pool_cache_put_paddr(sc->sc_msgpool, msg, pa);
	return rv;
}

static int
vmbus_start(struct vmbus_softc *sc, struct vmbus_msg *msg, paddr_t msg_pa)
{
	static const int delays[] = {
		100, 100, 100, 500, 500, 5000, 5000, 5000
	};
	const char *wchan = "hvstart";
	uint16_t status;
	int i, s;

	msg->msg_req.hc_connid = VMBUS_CONNID_MESSAGE;
	msg->msg_req.hc_msgtype = 1;

	if (!(msg->msg_flags & MSGF_NOQUEUE)) {
		mutex_enter(&sc->sc_req_lock);
		TAILQ_INSERT_TAIL(&sc->sc_reqs, msg, msg_entry);
		mutex_exit(&sc->sc_req_lock);
	}

	for (i = 0; i < __arraycount(delays); i++) {
		status = hyperv_hypercall_post_message(
		    msg_pa + offsetof(struct vmbus_msg, msg_req));
		if (status == HYPERCALL_STATUS_SUCCESS)
			break;

		if (msg->msg_flags & MSGF_NOSLEEP) {
			delay(delays[i]);
			s = splnet();
			hyperv_intr();
			splx(s);
		} else
			tsleep(wchan, PRIBIO, wchan, 1);
	}
	if (status != HYPERCALL_STATUS_SUCCESS) {
		device_printf(sc->sc_dev,
		    "posting vmbus message failed with %d\n", status);
		if (!(msg->msg_flags & MSGF_NOQUEUE)) {
			mutex_enter(&sc->sc_req_lock);
			TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry);
			mutex_exit(&sc->sc_req_lock);
		}
		return EIO;
	}

	return 0;
}

static int
vmbus_reply_done(struct vmbus_softc *sc, struct vmbus_msg *msg)
{
	struct vmbus_msg *m;

	mutex_enter(&sc->sc_rsp_lock);
	TAILQ_FOREACH(m, &sc->sc_rsps, msg_entry) {
		if (m == msg) {
			mutex_exit(&sc->sc_rsp_lock);
			return 1;
		}
	}
	mutex_exit(&sc->sc_rsp_lock);
	return 0;
}

static int
vmbus_reply(struct vmbus_softc *sc, struct vmbus_msg *msg)
{

	if (msg->msg_flags & MSGF_NOQUEUE)
		return 0;

	vmbus_wait(sc, vmbus_reply_done, msg, msg, "hvreply");

	mutex_enter(&sc->sc_rsp_lock);
	TAILQ_REMOVE(&sc->sc_rsps, msg, msg_entry);
	mutex_exit(&sc->sc_rsp_lock);

	return 0;
}
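/*
 * Wait until "cond" reports completion.  When sleeping is not allowed,
 * spin with a 1ms delay and run the Hyper-V interrupt handler by hand,
 * since the message that satisfies the condition must still be received
 * and processed.
 */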
static void
vmbus_wait(struct vmbus_softc *sc,
    int (*cond)(struct vmbus_softc *, struct vmbus_msg *),
    struct vmbus_msg *msg, void *wchan, const char *wmsg)
{
	int s;

	while (!cond(sc, msg)) {
		if (msg->msg_flags & MSGF_NOSLEEP) {
			delay(1000);
			s = splnet();
			hyperv_intr();
			splx(s);
		} else
			tsleep(wchan, PRIBIO, wmsg ? wmsg : "hvwait", 1);
	}
}

static uint16_t
vmbus_intr_signal(struct vmbus_softc *sc, paddr_t con_pa)
{
	uint64_t status;

	status = hyperv_hypercall_signal_event(con_pa);
	return (uint16_t)status;
}

#if LONG_BIT == 64
#define ffsl(v)	ffs64(v)
#elif LONG_BIT == 32
#define ffsl(v)	ffs32(v)
#else
#error unsupported LONG_BIT
#endif	/* LONG_BIT */

static void
vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *revents,
    int maxrow)
{
	struct vmbus_channel *ch;
	u_long pending;
	uint32_t chanid, chanid_base;
	int row, chanid_ofs;

	for (row = 0; row < maxrow; row++) {
		if (revents[row] == 0)
			continue;

		pending = atomic_swap_ulong(&revents[row], 0);
		chanid_base = row * LONG_BIT;

		while ((chanid_ofs = ffsl(pending)) != 0) {
			chanid_ofs--;	/* NOTE: ffs is 1-based */
			pending &= ~(1UL << chanid_ofs);

			chanid = chanid_base + chanid_ofs;
			/* vmbus channel protocol message */
			if (chanid == 0)
				continue;

			ch = vmbus_channel_lookup(sc, chanid);
			if (ch == NULL) {
				device_printf(sc->sc_dev,
				    "unhandled event on %d\n", chanid);
				continue;
			}
			if (ch->ch_state != VMBUS_CHANSTATE_OPENED) {
				device_printf(sc->sc_dev,
				    "channel %d is not active\n", chanid);
				continue;
			}
			ch->ch_evcnt.ev_count++;
			vmbus_channel_schedule(ch);
		}
	}
}

static void
vmbus_event_proc(void *arg, struct cpu_info *ci)
{
	struct vmbus_softc *sc = arg;
	struct vmbus_evtflags *evt;

	/*
	 * On hosts with Win8 or above, the event page can be
	 * checked directly to get the id of the channel
	 * that has the pending interrupt.
	 */
	evt = (struct vmbus_evtflags *)sc->sc_percpu[cpu_index(ci)].siep +
	    VMBUS_SINT_MESSAGE;

	vmbus_event_flags_proc(sc, evt->evt_flags,
	    __arraycount(evt->evt_flags));
}

static void
vmbus_event_proc_compat(void *arg, struct cpu_info *ci)
{
	struct vmbus_softc *sc = arg;
	struct vmbus_evtflags *evt;

	evt = (struct vmbus_evtflags *)sc->sc_percpu[cpu_index(ci)].siep +
	    VMBUS_SINT_MESSAGE;

	if (test_bit(0, &evt->evt_flags[0])) {
		clear_bit(0, &evt->evt_flags[0]);
		/*
		 * receive size is 1/2 page and divide that by 4 bytes
		 */
		vmbus_event_flags_proc(sc, sc->sc_revents,
		    VMBUS_CHAN_MAX_COMPAT / VMBUS_EVTFLAG_LEN);
	}
}

static void
vmbus_message_proc(void *arg, struct cpu_info *ci)
{
	struct vmbus_softc *sc = arg;
	struct vmbus_message *msg;

	msg = (struct vmbus_message *)sc->sc_percpu[cpu_index(ci)].simp +
	    VMBUS_SINT_MESSAGE;
	if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) {
		if (__predict_true(!cold))
			softint_schedule_cpu(sc->sc_msg_sih, ci);
		else
			vmbus_message_softintr(sc);
	}
}
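/*
 * Soft interrupt: drain this CPU's shared message page.  Each message is
 * dispatched through vmbus_msg_dispatch[] and then acknowledged by
 * clearing msg_type; if the host indicates that more messages are
 * pending, hyperv_send_eom() asks it to re-deliver.
 */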
static void
vmbus_message_softintr(void *arg)
{
	struct vmbus_softc *sc = arg;
	struct vmbus_message *msg;
	struct vmbus_chanmsg_hdr *hdr;
	uint32_t type;
	cpuid_t cpu;

	cpu = cpu_index(curcpu());

	for (;;) {
		msg = (struct vmbus_message *)sc->sc_percpu[cpu].simp +
		    VMBUS_SINT_MESSAGE;
		if (msg->msg_type == HYPERV_MSGTYPE_NONE)
			break;

		hdr = (struct vmbus_chanmsg_hdr *)msg->msg_data;
		type = hdr->chm_type;
		if (type >= VMBUS_CHANMSG_COUNT) {
			device_printf(sc->sc_dev,
			    "unhandled message type %u flags %#x\n", type,
			    msg->msg_flags);
		} else {
			if (vmbus_msg_dispatch[type].hmd_handler) {
				vmbus_msg_dispatch[type].hmd_handler(sc, hdr);
			} else {
				device_printf(sc->sc_dev,
				    "unhandled message type %u\n", type);
			}
		}

		msg->msg_type = HYPERV_MSGTYPE_NONE;
		membar_sync();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING)
			hyperv_send_eom();
	}
}

static void
vmbus_channel_response(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *rsphdr)
{
	struct vmbus_msg *msg;
	struct vmbus_chanmsg_hdr *reqhdr;
	int req;

	req = vmbus_msg_dispatch[rsphdr->chm_type].hmd_request;
	mutex_enter(&sc->sc_req_lock);
	TAILQ_FOREACH(msg, &sc->sc_reqs, msg_entry) {
		reqhdr = (struct vmbus_chanmsg_hdr *)&msg->msg_req.hc_data;
		if (reqhdr->chm_type == req) {
			TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry);
			break;
		}
	}
	mutex_exit(&sc->sc_req_lock);
	if (msg != NULL) {
		memcpy(msg->msg_rsp, rsphdr, msg->msg_rsplen);
		mutex_enter(&sc->sc_rsp_lock);
		TAILQ_INSERT_TAIL(&sc->sc_rsps, msg, msg_entry);
		mutex_exit(&sc->sc_rsp_lock);
		wakeup(msg);
	}
}

static void
vmbus_channel_offer(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr)
{
	struct vmbus_offer *co;

	co = kmem_intr_zalloc(sizeof(*co), KM_NOSLEEP);
	if (co == NULL) {
		device_printf(sc->sc_dev, "couldn't allocate offer\n");
		return;
	}

	memcpy(&co->co_chan, hdr, sizeof(co->co_chan));

	mutex_enter(&sc->sc_offer_lock);
	SIMPLEQ_INSERT_TAIL(&sc->sc_offers, co, co_entry);
	mutex_exit(&sc->sc_offer_lock);
}

static void
vmbus_channel_rescind(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr)
{
	const struct vmbus_chanmsg_chrescind *cmd;

	cmd = (const struct vmbus_chanmsg_chrescind *)hdr;
	device_printf(sc->sc_dev, "revoking channel %u\n", cmd->chm_chanid);
}

static void
vmbus_channel_delivered(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr)
{

	atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_OFFERS_DELIVERED);
	wakeup(&sc->sc_offers);
}

static void
hyperv_guid_sprint(struct hyperv_guid *guid, char *str, size_t size)
{
	static const struct {
		const struct hyperv_guid *guid;
		const char		 *ident;
	} map[] = {
		{ &hyperv_guid_network,		"network" },
		{ &hyperv_guid_ide,		"ide" },
		{ &hyperv_guid_scsi,		"scsi" },
		{ &hyperv_guid_shutdown,	"shutdown" },
		{ &hyperv_guid_timesync,	"timesync" },
		{ &hyperv_guid_heartbeat,	"heartbeat" },
		{ &hyperv_guid_kvp,		"kvp" },
		{ &hyperv_guid_vss,		"vss" },
		{ &hyperv_guid_dynmem,		"dynamic-memory" },
		{ &hyperv_guid_mouse,		"mouse" },
		{ &hyperv_guid_kbd,		"keyboard" },
		{ &hyperv_guid_video,		"video" },
		{ &hyperv_guid_fc,		"fiber-channel" },
		{ &hyperv_guid_fcopy,		"file-copy" },
		{ &hyperv_guid_pcie,		"pcie-passthrough" },
		{ &hyperv_guid_netdir,		"network-direct" },
		{ &hyperv_guid_rdesktop,	"remote-desktop" },
		{ &hyperv_guid_avma1,		"avma-1" },
		{ &hyperv_guid_avma2,		"avma-2" },
		{ &hyperv_guid_avma3,		"avma-3" },
		{ &hyperv_guid_avma4,		"avma-4" },
	};
	int i;

	for (i = 0; i < __arraycount(map); i++) {
		if (memcmp(guid, map[i].guid, sizeof(*guid)) == 0) {
			strlcpy(str, map[i].ident, size);
			return;
		}
	}
	hyperv_guid2str(guid, str, size);
}

static int
vmbus_channel_scan_done(struct vmbus_softc *sc, struct vmbus_msg *msg __unused)
{

	return ISSET(sc->sc_flags, VMBUS_SCFLAG_OFFERS_DELIVERED);
}
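/*
 * Channel enumeration: send CHREQUEST, collect the CHOFFER messages the
 * host responds with until CHOFFER_DONE is seen, and then convert every
 * queued offer into a channel structure.
 */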
static int
vmbus_channel_scan(struct vmbus_softc *sc)
{
	struct vmbus_chanmsg_hdr hdr;
	struct vmbus_chanmsg_choffer rsp;
	struct vmbus_offer *co;

	SIMPLEQ_INIT(&sc->sc_offers);
	mutex_init(&sc->sc_offer_lock, MUTEX_DEFAULT, IPL_NET);

	memset(&hdr, 0, sizeof(hdr));
	hdr.chm_type = VMBUS_CHANMSG_CHREQUEST;

	if (vmbus_cmd(sc, &hdr, sizeof(hdr), &rsp, sizeof(rsp),
	    HCF_NOREPLY | (cold ? HCF_NOSLEEP : HCF_SLEEPOK))) {
		DPRINTF("%s: CHREQUEST failed\n", device_xname(sc->sc_dev));
		return -1;
	}

	vmbus_wait(sc, vmbus_channel_scan_done, (struct vmbus_msg *)&hdr,
	    &sc->sc_offers, "hvscan");

	TAILQ_INIT(&sc->sc_channels);
	mutex_init(&sc->sc_channel_lock, MUTEX_DEFAULT, IPL_NET);

	mutex_enter(&sc->sc_offer_lock);
	while (!SIMPLEQ_EMPTY(&sc->sc_offers)) {
		co = SIMPLEQ_FIRST(&sc->sc_offers);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_offers, co_entry);
		mutex_exit(&sc->sc_offer_lock);

		vmbus_process_offer(sc, co);
		kmem_free(co, sizeof(*co));

		mutex_enter(&sc->sc_offer_lock);
	}
	mutex_exit(&sc->sc_offer_lock);

	return 0;
}

static struct vmbus_channel *
vmbus_channel_alloc(struct vmbus_softc *sc)
{
	struct vmbus_channel *ch;

	ch = kmem_zalloc(sizeof(*ch), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ch == NULL)
		return NULL;

	ch->ch_monprm = hyperv_dma_alloc(sc->sc_dmat, &ch->ch_monprm_dma,
	    sizeof(*ch->ch_monprm), 8, 0, 1);
	if (ch->ch_monprm == NULL) {
		device_printf(sc->sc_dev, "monprm alloc failed\n");
		kmem_free(ch, sizeof(*ch));
		return NULL;
	}
	memset(ch->ch_monprm, 0, sizeof(*ch->ch_monprm));

	ch->ch_refs = 1;
	ch->ch_sc = sc;
	mutex_init(&ch->ch_subchannel_lock, MUTEX_DEFAULT, IPL_NET);
	TAILQ_INIT(&ch->ch_subchannels);

	ch->ch_state = VMBUS_CHANSTATE_CLOSED;

	return ch;
}

static void
vmbus_channel_free(struct vmbus_channel *ch)
{
	struct vmbus_softc *sc = ch->ch_sc;

	KASSERTMSG(TAILQ_EMPTY(&ch->ch_subchannels) &&
	    ch->ch_subchannel_count == 0, "still owns sub-channels");
	KASSERTMSG(ch->ch_state == 0 || ch->ch_state == VMBUS_CHANSTATE_CLOSED,
	    "free busy channel");
	KASSERTMSG(ch->ch_refs == 0, "channel %u: invalid refcnt %d",
	    ch->ch_id, ch->ch_refs);

	hyperv_dma_free(sc->sc_dmat, &ch->ch_monprm_dma);
	mutex_destroy(&ch->ch_subchannel_lock);
	/* XXX ch_evcnt */
	softint_disestablish(ch->ch_taskq);
	kmem_free(ch, sizeof(*ch));
}

static int
vmbus_channel_add(struct vmbus_channel *nch)
{
	struct vmbus_softc *sc = nch->ch_sc;
	struct vmbus_channel *ch;
	u_int refs __diagused;

	if (nch->ch_id == 0) {
		device_printf(sc->sc_dev, "got channel 0 offer, discard\n");
		return EINVAL;
	} else if (nch->ch_id >= sc->sc_channel_max) {
		device_printf(sc->sc_dev, "invalid channel %u offer\n",
		    nch->ch_id);
		return EINVAL;
	}

	mutex_enter(&sc->sc_channel_lock);
	TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
		if (!memcmp(&ch->ch_type, &nch->ch_type, sizeof(ch->ch_type)) &&
		    !memcmp(&ch->ch_inst, &nch->ch_inst, sizeof(ch->ch_inst)))
			break;
	}
	if (VMBUS_CHAN_ISPRIMARY(nch)) {
		if (ch == NULL) {
			TAILQ_INSERT_TAIL(&sc->sc_channels, nch, ch_entry);
			mutex_exit(&sc->sc_channel_lock);
			goto done;
		} else {
			mutex_exit(&sc->sc_channel_lock);
			device_printf(sc->sc_dev,
			    "duplicate primary channel %u\n", nch->ch_id);
			return EINVAL;
		}
	} else {
		if (ch == NULL) {
			mutex_exit(&sc->sc_channel_lock);
			device_printf(sc->sc_dev, "no primary channel %u\n",
			    nch->ch_id);
			return EINVAL;
		}
	}
	mutex_exit(&sc->sc_channel_lock);

	KASSERT(!VMBUS_CHAN_ISPRIMARY(nch));
	KASSERT(ch != NULL);

	refs = atomic_add_int_nv(&nch->ch_refs, 1);
	KASSERT(refs == 1);

	nch->ch_primary_channel = ch;
	nch->ch_dev = ch->ch_dev;

	mutex_enter(&ch->ch_subchannel_lock);
	TAILQ_INSERT_TAIL(&ch->ch_subchannels, nch, ch_subentry);
	ch->ch_subchannel_count++;
	mutex_exit(&ch->ch_subchannel_lock);
	wakeup(ch);

done:
	vmbus_channel_cpu_default(nch);

	return 0;
}
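/*
 * Channel-to-CPU binding.  Pre-Windows 8 hosts deliver all channel
 * interrupts to cpu0; on newer hosts a channel can target any virtual
 * CPU, selected either explicitly or round-robin.
 */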
void
vmbus_channel_cpu_set(struct vmbus_channel *ch, int cpu)
{
	struct vmbus_softc *sc = ch->ch_sc;

	KASSERTMSG(cpu >= 0 && cpu < ncpu, "invalid cpu %d", cpu);

	if (sc->sc_proto == VMBUS_VERSION_WS2008 ||
	    sc->sc_proto == VMBUS_VERSION_WIN7) {
		/* Only cpu0 is supported */
		cpu = 0;
	}

	ch->ch_cpuid = cpu;
	ch->ch_vcpu = sc->sc_percpu[cpu].vcpuid;
}

void
vmbus_channel_cpu_rr(struct vmbus_channel *ch)
{
	static uint32_t vmbus_channel_nextcpu;
	int cpu;

	cpu = atomic_add_32_nv(&vmbus_channel_nextcpu, 1) % ncpu;
	vmbus_channel_cpu_set(ch, cpu);
}

static void
vmbus_channel_cpu_default(struct vmbus_channel *ch)
{

	/*
	 * By default, pin the channel to cpu0.  Devices having
	 * special channel-cpu mapping requirements should call
	 * vmbus_channel_cpu_{set,rr}().
	 */
	vmbus_channel_cpu_set(ch, 0);
}
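/*
 * Convert a queued offer into a usable channel: resolve the type GUID to
 * a printable identity, record the monitor parameters needed for host
 * notification and register the channel (or sub-channel) with the bus.
 */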
static void
vmbus_process_offer(struct vmbus_softc *sc, struct vmbus_offer *co)
{
	struct vmbus_channel *ch;

	ch = vmbus_channel_alloc(sc);
	if (ch == NULL) {
		device_printf(sc->sc_dev, "allocate channel %u failed\n",
		    co->co_chan.chm_chanid);
		return;
	}

	/*
	 * By default we setup state to enable batched reading.
	 * A specific service can choose to disable this prior
	 * to opening the channel.
	 */
	ch->ch_flags |= CHF_BATCHED;

	hyperv_guid_sprint(&co->co_chan.chm_chtype, ch->ch_ident,
	    sizeof(ch->ch_ident));

	ch->ch_monprm->mp_connid = VMBUS_CONNID_EVENT;
	if (sc->sc_proto > VMBUS_VERSION_WS2008)
		ch->ch_monprm->mp_connid = co->co_chan.chm_connid;

	if (co->co_chan.chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) {
		ch->ch_mgroup = co->co_chan.chm_montrig / VMBUS_MONTRIG_LEN;
		ch->ch_mindex = co->co_chan.chm_montrig % VMBUS_MONTRIG_LEN;
		ch->ch_flags |= CHF_MONITOR;
	}

	ch->ch_id = co->co_chan.chm_chanid;
	ch->ch_subidx = co->co_chan.chm_subidx;

	memcpy(&ch->ch_type, &co->co_chan.chm_chtype, sizeof(ch->ch_type));
	memcpy(&ch->ch_inst, &co->co_chan.chm_chinst, sizeof(ch->ch_inst));

	if (VMBUS_CHAN_ISPRIMARY(ch)) {
		/* set primary channel mgmt wq */
	} else {
		/* set sub channel mgmt wq */
	}

	if (vmbus_channel_add(ch) != 0) {
		vmbus_channel_free(ch);
		return;
	}

	ch->ch_state = VMBUS_CHANSTATE_OFFERED;

#ifdef HYPERV_DEBUG
	printf("%s: channel %u: \"%s\"", device_xname(sc->sc_dev), ch->ch_id,
	    ch->ch_ident);
	if (ch->ch_flags & CHF_MONITOR)
		printf(", monitor %u\n", co->co_chan.chm_montrig);
	else
		printf("\n");
#endif
}

static int
vmbus_channel_release(struct vmbus_channel *ch)
{
	struct vmbus_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_chfree cmd;
	int rv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHFREE;
	cmd.chm_chanid = ch->ch_id;

	rv = vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0,
	    HCF_NOREPLY | (cold ? HCF_NOSLEEP : HCF_SLEEPOK));
	if (rv) {
		DPRINTF("%s: CHFREE failed with %d\n", device_xname(sc->sc_dev),
		    rv);
	}
	return rv;
}
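/*
 * Return an array of "cnt" sub-channels of the given primary channel,
 * sleeping until the host has offered at least that many.
 */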
struct vmbus_channel **
vmbus_subchannel_get(struct vmbus_channel *prich, int cnt)
{
	struct vmbus_channel **ret, *ch;
	int i;

	KASSERT(cnt > 0);

	ret = kmem_alloc(sizeof(struct vmbus_channel *) * cnt,
	    cold ? KM_NOSLEEP : KM_SLEEP);

	mutex_enter(&prich->ch_subchannel_lock);

	while (prich->ch_subchannel_count < cnt)
		/* XXX use condvar(9) instead of mtsleep */
		mtsleep(prich, PRIBIO, "hvvmsubch", 0,
		    &prich->ch_subchannel_lock);

	i = 0;
	TAILQ_FOREACH(ch, &prich->ch_subchannels, ch_subentry) {
		ret[i] = ch;	/* XXX inc refs */

		if (++i == cnt)
			break;
	}

	mutex_exit(&prich->ch_subchannel_lock);

	return ret;
}

void
vmbus_subchannel_put(struct vmbus_channel **subch, int cnt)
{

	kmem_free(subch, sizeof(struct vmbus_channel *) * cnt);
}

static struct vmbus_channel *
vmbus_channel_lookup(struct vmbus_softc *sc, uint32_t relid)
{
	struct vmbus_channel *ch;

	TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
		if (ch->ch_id == relid)
			return ch;
	}
	return NULL;
}

static int
vmbus_channel_ring_create(struct vmbus_channel *ch, uint32_t buflen)
{
	struct vmbus_softc *sc = ch->ch_sc;

	buflen = roundup(buflen, PAGE_SIZE) + sizeof(struct vmbus_bufring);
	ch->ch_ring_size = 2 * buflen;
	/* page aligned memory */
	ch->ch_ring = hyperv_dma_alloc(sc->sc_dmat, &ch->ch_ring_dma,
	    ch->ch_ring_size, PAGE_SIZE, 0, 1);
	if (ch->ch_ring == NULL) {
		device_printf(sc->sc_dev,
		    "failed to allocate channel ring\n");
		return ENOMEM;
	}

	memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd));
	ch->ch_wrd.rd_ring = (struct vmbus_bufring *)ch->ch_ring;
	ch->ch_wrd.rd_size = buflen;
	ch->ch_wrd.rd_dsize = buflen - sizeof(struct vmbus_bufring);
	mutex_init(&ch->ch_wrd.rd_lock, MUTEX_DEFAULT, IPL_NET);

	memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd));
	ch->ch_rrd.rd_ring = (struct vmbus_bufring *)((uint8_t *)ch->ch_ring +
	    buflen);
	ch->ch_rrd.rd_size = buflen;
	ch->ch_rrd.rd_dsize = buflen - sizeof(struct vmbus_bufring);
	mutex_init(&ch->ch_rrd.rd_lock, MUTEX_DEFAULT, IPL_NET);

	if (vmbus_handle_alloc(ch, &ch->ch_ring_dma, ch->ch_ring_size,
	    &ch->ch_ring_gpadl)) {
		device_printf(sc->sc_dev,
		    "failed to obtain a PA handle for the ring\n");
		vmbus_channel_ring_destroy(ch);
		return ENOMEM;
	}

	return 0;
}

static void
vmbus_channel_ring_destroy(struct vmbus_channel *ch)
{
	struct vmbus_softc *sc = ch->ch_sc;

	hyperv_dma_free(sc->sc_dmat, &ch->ch_ring_dma);
	ch->ch_ring = NULL;
	vmbus_handle_free(ch, ch->ch_ring_gpadl);

	mutex_destroy(&ch->ch_wrd.rd_lock);
	memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd));
	mutex_destroy(&ch->ch_rrd.rd_lock);
	memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd));
}
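/*
 * The channel ring buffer is one contiguous allocation: the transmit
 * ring (with its vmbus_bufring header) occupies the first half and the
 * receive ring the second half.  The whole area is exported to the host
 * through a GPADL handle before the channel is opened.
 */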
int
vmbus_channel_open(struct vmbus_channel *ch, size_t buflen, void *udata,
    size_t udatalen, void (*handler)(void *), void *arg)
{
	struct vmbus_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_chopen cmd;
	struct vmbus_chanmsg_chopen_resp rsp;
	int rv = EINVAL;

	if (ch->ch_ring == NULL &&
	    (rv = vmbus_channel_ring_create(ch, buflen))) {
		DPRINTF("%s: failed to create channel ring\n",
		    device_xname(sc->sc_dev));
		return rv;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHOPEN;
	cmd.chm_openid = ch->ch_id;
	cmd.chm_chanid = ch->ch_id;
	cmd.chm_gpadl = ch->ch_ring_gpadl;
	cmd.chm_txbr_pgcnt = atop(ch->ch_wrd.rd_size);
	cmd.chm_vcpuid = ch->ch_vcpu;
	if (udata && udatalen > 0)
		memcpy(cmd.chm_udata, udata, udatalen);

	memset(&rsp, 0, sizeof(rsp));

	ch->ch_handler = handler;
	ch->ch_ctx = arg;
	ch->ch_state = VMBUS_CHANSTATE_OPENED;

	rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp),
	    cold ? HCF_NOSLEEP : HCF_SLEEPOK);
	if (rv) {
		vmbus_channel_ring_destroy(ch);
		DPRINTF("%s: CHOPEN failed with %d\n", device_xname(sc->sc_dev),
		    rv);
		ch->ch_handler = NULL;
		ch->ch_ctx = NULL;
		ch->ch_state = VMBUS_CHANSTATE_OFFERED;
		return rv;
	}
	return 0;
}

static void
vmbus_channel_detach(struct vmbus_channel *ch)
{
	u_int refs;

	refs = atomic_add_int_nv(&ch->ch_refs, -1);
	if (refs == 1) {
		/* XXX on workqueue? */
		if (VMBUS_CHAN_ISPRIMARY(ch)) {
			vmbus_channel_release(ch);
			vmbus_channel_free(ch);
		} else {
			struct vmbus_channel *prich = ch->ch_primary_channel;

			vmbus_channel_release(ch);

			mutex_enter(&prich->ch_subchannel_lock);
			TAILQ_REMOVE(&prich->ch_subchannels, ch, ch_subentry);
			prich->ch_subchannel_count--;
			mutex_exit(&prich->ch_subchannel_lock);
			wakeup(prich);

			vmbus_channel_free(ch);
		}
	}
}

static int
vmbus_channel_close_internal(struct vmbus_channel *ch)
{
	struct vmbus_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_chclose cmd;
	int rv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHCLOSE;
	cmd.chm_chanid = ch->ch_id;

	ch->ch_state = VMBUS_CHANSTATE_CLOSING;
	rv = vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0,
	    HCF_NOREPLY | (cold ? HCF_NOSLEEP : HCF_SLEEPOK));
	if (rv) {
		DPRINTF("%s: CHCLOSE failed with %d\n",
		    device_xname(sc->sc_dev), rv);
		return rv;
	}
	ch->ch_state = VMBUS_CHANSTATE_CLOSED;
	vmbus_channel_ring_destroy(ch);
	return 0;
}

int
vmbus_channel_close_direct(struct vmbus_channel *ch)
{
	int rv;

	rv = vmbus_channel_close_internal(ch);
	if (!VMBUS_CHAN_ISPRIMARY(ch))
		vmbus_channel_detach(ch);
	return rv;
}

int
vmbus_channel_close(struct vmbus_channel *ch)
{
	struct vmbus_channel **subch;
	int i, cnt, rv;

	if (!VMBUS_CHAN_ISPRIMARY(ch))
		return 0;

	cnt = ch->ch_subchannel_count;
	if (cnt > 0) {
		subch = vmbus_subchannel_get(ch, cnt);
		for (i = 0; i < cnt; i++) {
			rv = vmbus_channel_close_internal(subch[i]);
			(void)rv;	/* XXX */
			vmbus_channel_detach(subch[i]);
		}
		vmbus_subchannel_put(subch, cnt);
	}

	return vmbus_channel_close_internal(ch);
}

static inline void
vmbus_channel_setevent(struct vmbus_softc *sc, struct vmbus_channel *ch)
{
	struct vmbus_mon_trig *mtg;

	/* Each uint32_t represents 32 channels */
	set_bit(ch->ch_id, sc->sc_wevents);
	if (ch->ch_flags & CHF_MONITOR) {
		mtg = &sc->sc_monitor[1]->mnf_trigs[ch->ch_mgroup];
		set_bit(ch->ch_mindex, &mtg->mt_pending);
	} else
		vmbus_intr_signal(sc, hyperv_dma_get_paddr(&ch->ch_monprm_dma));
}

static void
vmbus_channel_intr(void *arg)
{
	struct vmbus_channel *ch = arg;

	if (vmbus_channel_ready(ch))
		ch->ch_handler(ch->ch_ctx);

	if (vmbus_channel_unpause(ch) == 0)
		return;

	vmbus_channel_pause(ch);
	vmbus_channel_schedule(ch);
}
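/*
 * Event delivery for batched channels: the receive interrupt is masked
 * and the handler runs from a soft interrupt; vmbus_channel_intr() keeps
 * re-scheduling itself for as long as unmasking the ring shows more data
 * to read.
 */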
int
vmbus_channel_setdeferred(struct vmbus_channel *ch, const char *name)
{

	ch->ch_taskq = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    vmbus_channel_intr, ch);
	if (ch->ch_taskq == NULL)
		return -1;
	return 0;
}

void
vmbus_channel_schedule(struct vmbus_channel *ch)
{

	if (ch->ch_handler) {
		if (!cold && (ch->ch_flags & CHF_BATCHED)) {
			vmbus_channel_pause(ch);
			softint_schedule(ch->ch_taskq);
		} else
			ch->ch_handler(ch->ch_ctx);
	}
}

static __inline void
vmbus_ring_put(struct vmbus_ring_data *wrd, uint8_t *data, uint32_t datalen)
{
	int left = MIN(datalen, wrd->rd_dsize - wrd->rd_prod);

	memcpy(&wrd->rd_ring->br_data[wrd->rd_prod], data, left);
	memcpy(&wrd->rd_ring->br_data[0], data + left, datalen - left);
	wrd->rd_prod += datalen;
	if (wrd->rd_prod >= wrd->rd_dsize)
		wrd->rd_prod -= wrd->rd_dsize;
}

static inline void
vmbus_ring_get(struct vmbus_ring_data *rrd, uint8_t *data, uint32_t datalen,
    int peek)
{
	int left = MIN(datalen, rrd->rd_dsize - rrd->rd_cons);

	memcpy(data, &rrd->rd_ring->br_data[rrd->rd_cons], left);
	memcpy(data + left, &rrd->rd_ring->br_data[0], datalen - left);
	if (!peek) {
		rrd->rd_cons += datalen;
		if (rrd->rd_cons >= rrd->rd_dsize)
			rrd->rd_cons -= rrd->rd_dsize;
	}
}

static __inline void
vmbus_ring_avail(struct vmbus_ring_data *rd, uint32_t *towrite,
    uint32_t *toread)
{
	uint32_t ridx = rd->rd_ring->br_rindex;
	uint32_t widx = rd->rd_ring->br_windex;
	uint32_t r, w;

	if (widx >= ridx)
		w = rd->rd_dsize - (widx - ridx);
	else
		w = ridx - widx;
	r = rd->rd_dsize - w;
	if (towrite)
		*towrite = w;
	if (toread)
		*toread = r;
}

static int
vmbus_ring_write(struct vmbus_ring_data *wrd, struct iovec *iov, int iov_cnt,
    int *needsig)
{
	uint64_t indices = 0;
	uint32_t avail, oprod, datalen = sizeof(indices);
	int i;

	for (i = 0; i < iov_cnt; i++)
		datalen += iov[i].iov_len;

	KASSERT(datalen <= wrd->rd_dsize);

	vmbus_ring_avail(wrd, &avail, NULL);
	if (avail <= datalen) {
		DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen);
		return EAGAIN;
	}

	oprod = wrd->rd_prod;

	for (i = 0; i < iov_cnt; i++)
		vmbus_ring_put(wrd, iov[i].iov_base, iov[i].iov_len);

	indices = (uint64_t)oprod << 32;
	vmbus_ring_put(wrd, (uint8_t *)&indices, sizeof(indices));

	membar_sync();
	wrd->rd_ring->br_windex = wrd->rd_prod;
	membar_sync();

	/* Signal when the ring transitions from being empty to non-empty */
	if (wrd->rd_ring->br_imask == 0 &&
	    wrd->rd_ring->br_rindex == oprod)
		*needsig = 1;
	else
		*needsig = 0;

	return 0;
}
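/*
 * Transmit framing: each packet is a channel packet header, an optional
 * GPA or range list, the payload, zero padding up to an 8-byte boundary
 * and a trailing 64-bit word carrying the producer index at the time of
 * the write.
 */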
int
vmbus_channel_send(struct vmbus_channel *ch, void *data, uint32_t datalen,
    uint64_t rid, int type, uint32_t flags)
{
	struct vmbus_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt cp;
	struct iovec iov[3];
	uint32_t pktlen, pktlen_aligned;
	uint64_t zeropad = 0;
	int rv, needsig = 0;

	pktlen = sizeof(cp) + datalen;
	pktlen_aligned = roundup(pktlen, sizeof(uint64_t));

	cp.cp_hdr.cph_type = type;
	cp.cp_hdr.cph_flags = flags;
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp));
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
	cp.cp_hdr.cph_tid = rid;

	iov[0].iov_base = &cp;
	iov[0].iov_len = sizeof(cp);

	iov[1].iov_base = data;
	iov[1].iov_len = datalen;

	iov[2].iov_base = &zeropad;
	iov[2].iov_len = pktlen_aligned - pktlen;

	mutex_enter(&ch->ch_wrd.rd_lock);
	rv = vmbus_ring_write(&ch->ch_wrd, iov, 3, &needsig);
	mutex_exit(&ch->ch_wrd.rd_lock);
	if (rv == 0 && needsig)
		vmbus_channel_setevent(sc, ch);

	return rv;
}

int
vmbus_channel_send_sgl(struct vmbus_channel *ch, struct vmbus_gpa *sgl,
    uint32_t nsge, void *data, uint32_t datalen, uint64_t rid)
{
	struct vmbus_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt_sglist cp;
	struct iovec iov[4];
	uint32_t buflen, pktlen, pktlen_aligned;
	uint64_t zeropad = 0;
	int rv, needsig = 0;

	buflen = sizeof(struct vmbus_gpa) * nsge;
	pktlen = sizeof(cp) + datalen + buflen;
	pktlen_aligned = roundup(pktlen, sizeof(uint64_t));

	cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
	cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen);
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
	cp.cp_hdr.cph_tid = rid;
	cp.cp_gpa_cnt = nsge;
	cp.cp_rsvd = 0;

	iov[0].iov_base = &cp;
	iov[0].iov_len = sizeof(cp);

	iov[1].iov_base = sgl;
	iov[1].iov_len = buflen;

	iov[2].iov_base = data;
	iov[2].iov_len = datalen;

	iov[3].iov_base = &zeropad;
	iov[3].iov_len = pktlen_aligned - pktlen;

	mutex_enter(&ch->ch_wrd.rd_lock);
	rv = vmbus_ring_write(&ch->ch_wrd, iov, 4, &needsig);
	mutex_exit(&ch->ch_wrd.rd_lock);
	if (rv == 0 && needsig)
		vmbus_channel_setevent(sc, ch);

	return rv;
}

int
vmbus_channel_send_prpl(struct vmbus_channel *ch, struct vmbus_gpa_range *prpl,
    uint32_t nprp, void *data, uint32_t datalen, uint64_t rid)
{
	struct vmbus_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt_prplist cp;
	struct iovec iov[4];
	uint32_t buflen, pktlen, pktlen_aligned;
	uint64_t zeropad = 0;
	int rv, needsig = 0;

	buflen = sizeof(struct vmbus_gpa_range) * (nprp + 1);
	pktlen = sizeof(cp) + datalen + buflen;
	pktlen_aligned = roundup(pktlen, sizeof(uint64_t));

	cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
	cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen);
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
	cp.cp_hdr.cph_tid = rid;
	cp.cp_range_cnt = 1;
	cp.cp_rsvd = 0;

	iov[0].iov_base = &cp;
	iov[0].iov_len = sizeof(cp);

	iov[1].iov_base = prpl;
	iov[1].iov_len = buflen;

	iov[2].iov_base = data;
	iov[2].iov_len = datalen;

	iov[3].iov_base = &zeropad;
	iov[3].iov_len = pktlen_aligned - pktlen;

	mutex_enter(&ch->ch_wrd.rd_lock);
	rv = vmbus_ring_write(&ch->ch_wrd, iov, 4, &needsig);
	mutex_exit(&ch->ch_wrd.rd_lock);
	if (rv == 0 && needsig)
		vmbus_channel_setevent(sc, ch);

	return rv;
}
static int
vmbus_ring_peek(struct vmbus_ring_data *rrd, void *data, uint32_t datalen)
{
	uint32_t avail;

	KASSERT(datalen <= rrd->rd_dsize);

	vmbus_ring_avail(rrd, NULL, &avail);
	if (avail < datalen)
		return EAGAIN;

	vmbus_ring_get(rrd, (uint8_t *)data, datalen, 1);
	return 0;
}

static int
vmbus_ring_read(struct vmbus_ring_data *rrd, void *data, uint32_t datalen,
    uint32_t offset)
{
	uint64_t indices;
	uint32_t avail;

	KASSERT(datalen <= rrd->rd_dsize);

	vmbus_ring_avail(rrd, NULL, &avail);
	if (avail < datalen) {
		DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen);
		return EAGAIN;
	}

	if (offset) {
		rrd->rd_cons += offset;
		if (rrd->rd_cons >= rrd->rd_dsize)
			rrd->rd_cons -= rrd->rd_dsize;
	}

	vmbus_ring_get(rrd, (uint8_t *)data, datalen, 0);
	vmbus_ring_get(rrd, (uint8_t *)&indices, sizeof(indices), 0);

	membar_sync();
	rrd->rd_ring->br_rindex = rrd->rd_cons;

	return 0;
}

int
vmbus_channel_recv(struct vmbus_channel *ch, void *data, uint32_t datalen,
    uint32_t *rlen, uint64_t *rid, int raw)
{
	struct vmbus_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt_hdr cph;
	uint32_t offset, pktlen;
	int rv;

	*rlen = 0;

	mutex_enter(&ch->ch_rrd.rd_lock);

	if ((rv = vmbus_ring_peek(&ch->ch_rrd, &cph, sizeof(cph))) != 0) {
		mutex_exit(&ch->ch_rrd.rd_lock);
		return rv;
	}

	offset = raw ? 0 : VMBUS_CHANPKT_GETLEN(cph.cph_hlen);
	pktlen = VMBUS_CHANPKT_GETLEN(cph.cph_tlen) - offset;
	if (pktlen > datalen) {
		mutex_exit(&ch->ch_rrd.rd_lock);
		device_printf(sc->sc_dev, "%s: pktlen %u datalen %u\n",
		    __func__, pktlen, datalen);
		return EINVAL;
	}

	rv = vmbus_ring_read(&ch->ch_rrd, data, pktlen, offset);
	if (rv == 0) {
		*rlen = pktlen;
		*rid = cph.cph_tid;
	}

	mutex_exit(&ch->ch_rrd.rd_lock);

	return rv;
}

static inline void
vmbus_ring_mask(struct vmbus_ring_data *rd)
{

	membar_sync();
	rd->rd_ring->br_imask = 1;
	membar_sync();
}

static inline void
vmbus_ring_unmask(struct vmbus_ring_data *rd)
{

	membar_sync();
	rd->rd_ring->br_imask = 0;
	membar_sync();
}

static void
vmbus_channel_pause(struct vmbus_channel *ch)
{

	vmbus_ring_mask(&ch->ch_rrd);
}

static uint32_t
vmbus_channel_unpause(struct vmbus_channel *ch)
{
	uint32_t avail;

	vmbus_ring_unmask(&ch->ch_rrd);
	vmbus_ring_avail(&ch->ch_rrd, NULL, &avail);

	return avail;
}

static uint32_t
vmbus_channel_ready(struct vmbus_channel *ch)
{
	uint32_t avail;

	vmbus_ring_avail(&ch->ch_rrd, NULL, &avail);

	return avail;
}

/* How many PFNs can be referenced by the header */
#define VMBUS_NPFNHDR	((VMBUS_MSG_DSIZE_MAX - \
	sizeof(struct vmbus_chanmsg_gpadl_conn)) / sizeof(uint64_t))

/* How many PFNs can be referenced by the body */
#define VMBUS_NPFNBODY	((VMBUS_MSG_DSIZE_MAX - \
	sizeof(struct vmbus_chanmsg_gpadl_subconn)) / sizeof(uint64_t))
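/*
 * Establish a GPADL (guest physical address descriptor list) covering
 * "buflen" bytes of DMA memory.  As many page frame numbers as possible
 * are packed into the GPADL_CONN header message; the rest are split
 * across GPADL_SUBCONN messages, and the host's GPADL_CONNRESP carries
 * the confirmation for the new handle.
 */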
int
vmbus_handle_alloc(struct vmbus_channel *ch, const struct hyperv_dma *dma,
    uint32_t buflen, uint32_t *handle)
{
	const int prflags = cold ? PR_NOWAIT : PR_WAITOK;
	const int kmemflags = cold ? KM_NOSLEEP : KM_SLEEP;
	const int msgflags = cold ? MSGF_NOSLEEP : 0;
	const int hcflags = cold ? HCF_NOSLEEP : HCF_SLEEPOK;
	struct vmbus_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_gpadl_conn *hdr;
	struct vmbus_chanmsg_gpadl_subconn *cmd;
	struct vmbus_chanmsg_gpadl_connresp rsp;
	struct vmbus_msg *msg;
	int i, j, last, left, rv;
	int bodylen = 0, ncmds = 0, pfn = 0;
	uint64_t *frames;
	paddr_t pa;
	uint8_t *body;
	/* Total number of pages to reference */
	int total = atop(buflen);
	/* Number of pages that will fit the header */
	int inhdr = MIN(total, VMBUS_NPFNHDR);

	KASSERT((buflen & PAGE_MASK) == 0);
	KASSERT(buflen == (uint32_t)dma->map->dm_mapsize);

	msg = pool_cache_get_paddr(sc->sc_msgpool, prflags, &pa);
	if (msg == NULL)
		return ENOMEM;

	/* Prepare array of frame addresses */
	frames = kmem_zalloc(total * sizeof(*frames), kmemflags);
	if (frames == NULL) {
		pool_cache_put_paddr(sc->sc_msgpool, msg, pa);
		return ENOMEM;
	}
	for (i = 0, j = 0; i < dma->map->dm_nsegs && j < total; i++) {
		bus_dma_segment_t *seg = &dma->map->dm_segs[i];
		bus_addr_t addr = seg->ds_addr;

		KASSERT((addr & PAGE_MASK) == 0);
		KASSERT((seg->ds_len & PAGE_MASK) == 0);

		while (addr < seg->ds_addr + seg->ds_len && j < total) {
			frames[j++] = atop(addr);
			addr += PAGE_SIZE;
		}
	}

	memset(msg, 0, sizeof(*msg));
	msg->msg_req.hc_dsize = sizeof(struct vmbus_chanmsg_gpadl_conn) +
	    inhdr * sizeof(uint64_t);
	hdr = (struct vmbus_chanmsg_gpadl_conn *)msg->msg_req.hc_data;
	msg->msg_rsp = &rsp;
	msg->msg_rsplen = sizeof(rsp);
	msg->msg_flags = msgflags;

	left = total - inhdr;

	/* Allocate additional gpadl_body structures if required */
	if (left > 0) {
		ncmds = howmany(left, VMBUS_NPFNBODY);
		bodylen = ncmds * VMBUS_MSG_DSIZE_MAX;
		body = kmem_zalloc(bodylen, kmemflags);
		if (body == NULL) {
			kmem_free(frames, total * sizeof(*frames));
			pool_cache_put_paddr(sc->sc_msgpool, msg, pa);
			return ENOMEM;
		}
	}

	*handle = atomic_add_int_nv(&sc->sc_handle, 1);

	hdr->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_CONN;
	hdr->chm_chanid = ch->ch_id;
	hdr->chm_gpadl = *handle;

	/* Single range for a contiguous buffer */
	hdr->chm_range_cnt = 1;
	hdr->chm_range_len = sizeof(struct vmbus_gpa_range) + total *
	    sizeof(uint64_t);
	hdr->chm_range.gpa_ofs = 0;
	hdr->chm_range.gpa_len = buflen;

	/* Fit as many pages as possible into the header */
	for (i = 0; i < inhdr; i++)
		hdr->chm_range.gpa_page[i] = frames[pfn++];

	for (i = 0; i < ncmds; i++) {
		cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body +
		    VMBUS_MSG_DSIZE_MAX * i);
		cmd->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_SUBCONN;
		cmd->chm_gpadl = *handle;
		last = MIN(left, VMBUS_NPFNBODY);
		for (j = 0; j < last; j++)
			cmd->chm_gpa_page[j] = frames[pfn++];
		left -= last;
	}

	rv = vmbus_start(sc, msg, pa);
	if (rv != 0) {
		DPRINTF("%s: GPADL_CONN failed\n", device_xname(sc->sc_dev));
		goto out;
	}
	for (i = 0; i < ncmds; i++) {
		int cmdlen = sizeof(*cmd);
		cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body +
		    VMBUS_MSG_DSIZE_MAX * i);
		/* Last element can be short */
		if (i == ncmds - 1)
			cmdlen += last * sizeof(uint64_t);
		else
			cmdlen += VMBUS_NPFNBODY * sizeof(uint64_t);
		rv = vmbus_cmd(sc, cmd, cmdlen, NULL, 0,
		    HCF_NOREPLY | hcflags);
		if (rv != 0) {
			DPRINTF("%s: GPADL_SUBCONN (iteration %d/%d) failed "
			    "with %d\n", device_xname(sc->sc_dev), i, ncmds,
			    rv);
			goto out;
		}
	}
	rv = vmbus_reply(sc, msg);
	if (rv != 0) {
		DPRINTF("%s: GPADL allocation failed with %d\n",
		    device_xname(sc->sc_dev), rv);
	}

out:
	if (bodylen > 0)
		kmem_free(body, bodylen);
	kmem_free(frames, total * sizeof(*frames));
	pool_cache_put_paddr(sc->sc_msgpool, msg, pa);
	if (rv)
		return rv;

	KASSERT(*handle == rsp.chm_gpadl);

	return 0;
}
void
vmbus_handle_free(struct vmbus_channel *ch, uint32_t handle)
{
	struct vmbus_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_gpadl_disconn cmd;
	struct vmbus_chanmsg_gpadl_disconn rsp;
	int rv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_DISCONN;
	cmd.chm_chanid = ch->ch_id;
	cmd.chm_gpadl = handle;

	rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp),
	    cold ? HCF_NOSLEEP : HCF_SLEEPOK);
	if (rv) {
		DPRINTF("%s: GPADL_DISCONN failed with %d\n",
		    device_xname(sc->sc_dev), rv);
	}
}

static int
vmbus_attach_print(void *aux, const char *name)
{
	struct vmbus_attach_args *aa = aux;

	if (name)
		printf("\"%s\" at %s", aa->aa_ident, name);

	return UNCONF;
}

static int
vmbus_attach_icdevs(struct vmbus_softc *sc)
{
	struct vmbus_dev *dv;
	struct vmbus_channel *ch;

	SLIST_INIT(&sc->sc_icdevs);
	mutex_init(&sc->sc_icdev_lock, MUTEX_DEFAULT, IPL_NET);

	TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
		if (ch->ch_state != VMBUS_CHANSTATE_OFFERED)
			continue;
		if (ch->ch_flags & CHF_MONITOR)
			continue;

		dv = kmem_zalloc(sizeof(*dv), cold ? KM_NOSLEEP : KM_SLEEP);
		if (dv == NULL) {
			device_printf(sc->sc_dev,
			    "failed to allocate ic device object\n");
			return ENOMEM;
		}
		dv->dv_aa.aa_type = &ch->ch_type;
		dv->dv_aa.aa_inst = &ch->ch_inst;
		dv->dv_aa.aa_ident = ch->ch_ident;
		dv->dv_aa.aa_chan = ch;
		mutex_enter(&sc->sc_icdev_lock);
		SLIST_INSERT_HEAD(&sc->sc_icdevs, dv, dv_entry);
		mutex_exit(&sc->sc_icdev_lock);
		ch->ch_dev = config_found_ia(sc->sc_dev, "hypervvmbus",
		    &dv->dv_aa, vmbus_attach_print);
	}
	return 0;
}
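/*
 * Like vmbus_attach_icdevs(), but for channels that carry the monitor
 * flag: the network, storage and other paravirtual devices claimed by
 * external drivers.
 */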
static int
vmbus_attach_devices(struct vmbus_softc *sc)
{
	struct vmbus_dev *dv;
	struct vmbus_channel *ch;

	SLIST_INIT(&sc->sc_devs);
	mutex_init(&sc->sc_dev_lock, MUTEX_DEFAULT, IPL_NET);

	TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
		if (ch->ch_state != VMBUS_CHANSTATE_OFFERED)
			continue;
		if (!(ch->ch_flags & CHF_MONITOR))
			continue;

		dv = kmem_zalloc(sizeof(*dv), cold ? KM_NOSLEEP : KM_SLEEP);
		if (dv == NULL) {
			device_printf(sc->sc_dev,
			    "failed to allocate device object\n");
			return ENOMEM;
		}
		dv->dv_aa.aa_type = &ch->ch_type;
		dv->dv_aa.aa_inst = &ch->ch_inst;
		dv->dv_aa.aa_ident = ch->ch_ident;
		dv->dv_aa.aa_chan = ch;
		mutex_enter(&sc->sc_dev_lock);
		SLIST_INSERT_HEAD(&sc->sc_devs, dv, dv_entry);
		mutex_exit(&sc->sc_dev_lock);
		ch->ch_dev = config_found_ia(sc->sc_dev, "hypervvmbus",
		    &dv->dv_aa, vmbus_attach_print);
	}
	return 0;
}

MODULE(MODULE_CLASS_DRIVER, vmbus, "hyperv");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
vmbus_modcmd(modcmd_t cmd, void *aux)
{
	int rv = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		rv = config_init_component(cfdriver_ioconf_vmbus,
		    cfattach_ioconf_vmbus, cfdata_ioconf_vmbus);
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
		rv = config_fini_component(cfdriver_ioconf_vmbus,
		    cfattach_ioconf_vmbus, cfdata_ioconf_vmbus);
#endif
		break;

	default:
		rv = ENOTTY;
		break;
	}

	return rv;
}