1 /* $NetBSD: vmbus.c,v 1.4 2019/07/09 10:07:11 nakayama Exp $ */ 2 /* $OpenBSD: hyperv.c,v 1.43 2017/06/27 13:56:15 mikeb Exp $ */ 3 4 /*- 5 * Copyright (c) 2009-2012 Microsoft Corp. 6 * Copyright (c) 2012 NetApp Inc. 7 * Copyright (c) 2012 Citrix Inc. 8 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com> 9 * All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice unmodified, this list of conditions, and the following 16 * disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * The OpenBSD port was done under funding by Esdenera Networks GmbH. 35 */ 36 37 #include <sys/cdefs.h> 38 __KERNEL_RCSID(0, "$NetBSD: vmbus.c,v 1.4 2019/07/09 10:07:11 nakayama Exp $"); 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/device.h> 43 #include <sys/atomic.h> 44 #include <sys/bitops.h> 45 #include <sys/bus.h> 46 #include <sys/cpu.h> 47 #include <sys/intr.h> 48 #include <sys/kmem.h> 49 #include <sys/module.h> 50 #include <sys/mutex.h> 51 #include <sys/xcall.h> 52 53 #include <uvm/uvm_extern.h> 54 55 #include <dev/hyperv/vmbusvar.h> 56 57 #define VMBUS_GPADL_START 0xffff /* 0x10000 effectively */ 58 59 /* Command submission flags */ 60 #define HCF_SLEEPOK 0x0000 61 #define HCF_NOSLEEP 0x0002 /* M_NOWAIT */ 62 #define HCF_NOREPLY 0x0004 63 64 static void vmbus_attach_deferred(device_t); 65 static int vmbus_alloc_dma(struct vmbus_softc *); 66 static void vmbus_free_dma(struct vmbus_softc *); 67 static int vmbus_init_interrupts(struct vmbus_softc *); 68 static void vmbus_deinit_interrupts(struct vmbus_softc *); 69 static void vmbus_init_synic(void *, void *); 70 static void vmbus_deinit_synic(void *, void *); 71 72 static int vmbus_connect(struct vmbus_softc *); 73 static int vmbus_cmd(struct vmbus_softc *, void *, size_t, void *, size_t, 74 int); 75 static int vmbus_start(struct vmbus_softc *, struct vmbus_msg *, paddr_t); 76 static int vmbus_reply(struct vmbus_softc *, struct vmbus_msg *); 77 static void vmbus_wait(struct vmbus_softc *, 78 int (*done)(struct vmbus_softc *, struct vmbus_msg *), 79 struct vmbus_msg *, void *, const char *); 80 static uint16_t vmbus_intr_signal(struct vmbus_softc *, paddr_t); 81 static void vmbus_event_proc(void *, struct cpu_info *); 82 static void vmbus_event_proc_compat(void *, struct cpu_info *); 83 static void vmbus_message_proc(void *, struct cpu_info *); 84 static void vmbus_message_softintr(void *); 85 static void vmbus_channel_response(struct vmbus_softc *, 86 struct vmbus_chanmsg_hdr *); 87 static void vmbus_channel_offer(struct vmbus_softc *, 88 struct vmbus_chanmsg_hdr *); 89 static void vmbus_channel_rescind(struct vmbus_softc *, 90 struct vmbus_chanmsg_hdr *); 91 static void vmbus_channel_delivered(struct vmbus_softc *, 92 struct vmbus_chanmsg_hdr *); 93 static int vmbus_channel_scan(struct vmbus_softc *); 94 static void vmbus_channel_cpu_default(struct vmbus_channel *); 95 static void vmbus_process_offer(struct vmbus_softc *, struct vmbus_offer *); 96 static struct vmbus_channel * 97 vmbus_channel_lookup(struct vmbus_softc *, uint32_t); 98 static int vmbus_channel_ring_create(struct vmbus_channel *, uint32_t); 99 static void vmbus_channel_ring_destroy(struct vmbus_channel *); 100 static void vmbus_channel_pause(struct vmbus_channel *); 101 static uint32_t vmbus_channel_unpause(struct vmbus_channel *); 102 static uint32_t vmbus_channel_ready(struct vmbus_channel *); 103 static int vmbus_attach_icdevs(struct vmbus_softc *); 104 static int vmbus_attach_devices(struct vmbus_softc *); 105 106 static struct vmbus_softc *vmbus_sc; 107 108 static const struct { 109 int hmd_response; 110 int hmd_request; 111 void (*hmd_handler)(struct vmbus_softc *, 112 struct vmbus_chanmsg_hdr *); 113 } vmbus_msg_dispatch[] = { 114 { 0, 0, NULL }, 115 { VMBUS_CHANMSG_CHOFFER, 0, vmbus_channel_offer }, 116 { VMBUS_CHANMSG_CHRESCIND, 0, vmbus_channel_rescind }, 117 { VMBUS_CHANMSG_CHREQUEST, VMBUS_CHANMSG_CHOFFER, NULL }, 118 { VMBUS_CHANMSG_CHOFFER_DONE, 0, vmbus_channel_delivered }, 119 { VMBUS_CHANMSG_CHOPEN, 0, NULL }, 120 { VMBUS_CHANMSG_CHOPEN_RESP, VMBUS_CHANMSG_CHOPEN, 121 vmbus_channel_response }, 122 { VMBUS_CHANMSG_CHCLOSE, 0, NULL }, 123 { VMBUS_CHANMSG_GPADL_CONN, 0, NULL }, 124 { VMBUS_CHANMSG_GPADL_SUBCONN, 0, NULL }, 125 { VMBUS_CHANMSG_GPADL_CONNRESP, VMBUS_CHANMSG_GPADL_CONN, 126 vmbus_channel_response }, 127 { VMBUS_CHANMSG_GPADL_DISCONN, 0, NULL }, 128 { VMBUS_CHANMSG_GPADL_DISCONNRESP, VMBUS_CHANMSG_GPADL_DISCONN, 129 vmbus_channel_response }, 130 { VMBUS_CHANMSG_CHFREE, 0, NULL }, 131 { VMBUS_CHANMSG_CONNECT, 0, NULL }, 132 { VMBUS_CHANMSG_CONNECT_RESP, VMBUS_CHANMSG_CONNECT, 133 vmbus_channel_response }, 134 { VMBUS_CHANMSG_DISCONNECT, 0, NULL }, 135 }; 136 137 const struct hyperv_guid hyperv_guid_network = { 138 { 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, 139 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e } 140 }; 141 142 const struct hyperv_guid hyperv_guid_ide = { 143 { 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, 144 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 } 145 }; 146 147 const struct hyperv_guid hyperv_guid_scsi = { 148 { 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, 149 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f } 150 }; 151 152 const struct hyperv_guid hyperv_guid_shutdown = { 153 { 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, 154 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb } 155 }; 156 157 const struct hyperv_guid hyperv_guid_timesync = { 158 { 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, 159 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf } 160 }; 161 162 const struct hyperv_guid hyperv_guid_heartbeat = { 163 { 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, 164 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d } 165 }; 166 167 const struct hyperv_guid hyperv_guid_kvp = { 168 { 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, 169 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6 } 170 }; 171 172 const struct hyperv_guid hyperv_guid_vss = { 173 { 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42, 174 0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 } 175 }; 176 177 const struct hyperv_guid hyperv_guid_dynmem = { 178 { 0xdc, 0x74, 0x50, 0x52, 0x85, 0x89, 0xe2, 0x46, 179 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 } 180 }; 181 182 const struct hyperv_guid hyperv_guid_mouse = { 183 { 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, 184 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a } 185 }; 186 187 const struct hyperv_guid hyperv_guid_kbd = { 188 { 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48, 189 0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 } 190 }; 191 192 const struct hyperv_guid hyperv_guid_video = { 193 { 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, 194 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 } 195 }; 196 197 const struct hyperv_guid hyperv_guid_fc = { 198 { 0x4a, 0xcc, 0x9b, 0x2f, 0x69, 0x00, 0xf3, 0x4a, 199 0xb7, 0x6b, 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda } 200 }; 201 202 const struct hyperv_guid hyperv_guid_fcopy = { 203 { 0xe3, 0x4b, 0xd1, 0x34, 0xe4, 0xde, 0xc8, 0x41, 204 0x9a, 0xe7, 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92 } 205 }; 206 207 const struct hyperv_guid hyperv_guid_pcie = { 208 { 0x1d, 0xf6, 0xc4, 0x44, 0x44, 0x44, 0x00, 0x44, 209 0x9d, 0x52, 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f } 210 }; 211 212 const struct hyperv_guid hyperv_guid_netdir = { 213 { 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, 214 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 } 215 }; 216 217 const struct hyperv_guid hyperv_guid_rdesktop = { 218 { 0xf4, 0xac, 0x6a, 0x27, 0x15, 0xac, 0x6c, 0x42, 219 0x98, 0xdd, 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe } 220 }; 221 222 /* Automatic Virtual Machine Activation (AVMA) Services */ 223 const struct hyperv_guid hyperv_guid_avma1 = { 224 { 0x55, 0xb2, 0x87, 0x44, 0x8c, 0xb8, 0x3f, 0x40, 225 0xbb, 0x51, 0xd1, 0xf6, 0x9c, 0xf1, 0x7f, 0x87 } 226 }; 227 228 const struct hyperv_guid hyperv_guid_avma2 = { 229 { 0xf4, 0xba, 0x75, 0x33, 0x15, 0x9e, 0x30, 0x4b, 230 0xb7, 0x65, 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b } 231 }; 232 233 const struct hyperv_guid hyperv_guid_avma3 = { 234 { 0xa0, 0x1f, 0x22, 0x99, 0xad, 0x24, 0xe2, 0x11, 235 0xbe, 0x98, 0x00, 0x1a, 0xa0, 0x1b, 0xbf, 0x6e } 236 }; 237 238 const struct hyperv_guid hyperv_guid_avma4 = { 239 { 0x16, 0x57, 0xe6, 0xf8, 0xb3, 0x3c, 0x06, 0x4a, 240 0x9a, 0x60, 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5 } 241 }; 242 243 int 244 vmbus_match(device_t parent, cfdata_t cf, void *aux) 245 { 246 247 if (cf->cf_unit != 0 || 248 !hyperv_hypercall_enabled() || 249 !hyperv_synic_supported()) 250 return 0; 251 252 return 1; 253 } 254 255 int 256 vmbus_attach(struct vmbus_softc *sc) 257 { 258 259 aprint_naive("\n"); 260 aprint_normal(": Hyper-V VMBus\n"); 261 262 vmbus_sc = sc; 263 264 sc->sc_msgpool = pool_cache_init(sizeof(struct vmbus_msg), 8, 0, 0, 265 "hvmsg", NULL, IPL_NET, NULL, NULL, NULL); 266 hyperv_set_message_proc(vmbus_message_proc, sc); 267 268 if (vmbus_alloc_dma(sc)) 269 goto cleanup; 270 271 if (vmbus_init_interrupts(sc)) 272 goto cleanup; 273 274 if (vmbus_connect(sc)) 275 goto cleanup; 276 277 aprint_normal_dev(sc->sc_dev, "protocol %d.%d\n", 278 VMBUS_VERSION_MAJOR(sc->sc_proto), 279 VMBUS_VERSION_MINOR(sc->sc_proto)); 280 281 if (sc->sc_proto == VMBUS_VERSION_WS2008 || 282 sc->sc_proto == VMBUS_VERSION_WIN7) { 283 hyperv_set_event_proc(vmbus_event_proc_compat, sc); 284 sc->sc_channel_max = VMBUS_CHAN_MAX_COMPAT; 285 } else { 286 hyperv_set_event_proc(vmbus_event_proc, sc); 287 sc->sc_channel_max = VMBUS_CHAN_MAX; 288 } 289 290 if (vmbus_channel_scan(sc)) 291 goto cleanup; 292 293 /* Attach heartbeat, KVP and other "internal" services */ 294 vmbus_attach_icdevs(sc); 295 296 /* Attach devices with external drivers */ 297 vmbus_attach_devices(sc); 298 299 config_interrupts(sc->sc_dev, vmbus_attach_deferred); 300 301 return 0; 302 303 cleanup: 304 vmbus_deinit_interrupts(sc); 305 vmbus_free_dma(sc); 306 return -1; 307 } 308 309 static void 310 vmbus_attach_deferred(device_t self) 311 { 312 struct vmbus_softc *sc = device_private(self); 313 314 xc_wait(xc_broadcast(0, vmbus_init_synic, sc, NULL)); 315 } 316 317 int 318 vmbus_detach(struct vmbus_softc *sc, int flags) 319 { 320 321 vmbus_deinit_interrupts(sc); 322 vmbus_free_dma(sc); 323 324 return 0; 325 } 326 327 static int 328 vmbus_alloc_dma(struct vmbus_softc *sc) 329 { 330 CPU_INFO_ITERATOR cii; 331 struct cpu_info *ci; 332 struct vmbus_percpu_data *pd; 333 int i; 334 335 /* 336 * Per-CPU messages and event flags. 337 */ 338 for (CPU_INFO_FOREACH(cii, ci)) { 339 pd = &sc->sc_percpu[cpu_index(ci)]; 340 341 pd->simp = hyperv_dma_alloc(sc->sc_dmat, &pd->simp_dma, 342 PAGE_SIZE, PAGE_SIZE, 0, 1); 343 if (pd->simp == NULL) 344 return ENOMEM; 345 346 pd->siep = hyperv_dma_alloc(sc->sc_dmat, &pd->siep_dma, 347 PAGE_SIZE, PAGE_SIZE, 0, 1); 348 if (pd->siep == NULL) 349 return ENOMEM; 350 } 351 352 sc->sc_events = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_events_dma, 353 PAGE_SIZE, PAGE_SIZE, 0, 1); 354 if (sc->sc_events == NULL) 355 return ENOMEM; 356 sc->sc_wevents = (u_long *)sc->sc_events; 357 sc->sc_revents = (u_long *)((uint8_t *)sc->sc_events + (PAGE_SIZE / 2)); 358 359 for (i = 0; i < __arraycount(sc->sc_monitor); i++) { 360 sc->sc_monitor[i] = hyperv_dma_alloc(sc->sc_dmat, 361 &sc->sc_monitor_dma[i], PAGE_SIZE, PAGE_SIZE, 0, 1); 362 if (sc->sc_monitor[i] == NULL) 363 return ENOMEM; 364 } 365 366 return 0; 367 } 368 369 static void 370 vmbus_free_dma(struct vmbus_softc *sc) 371 { 372 CPU_INFO_ITERATOR cii; 373 struct cpu_info *ci; 374 int i; 375 376 if (sc->sc_events != NULL) { 377 sc->sc_events = sc->sc_wevents = sc->sc_revents = NULL; 378 hyperv_dma_free(sc->sc_dmat, &sc->sc_events_dma); 379 } 380 381 for (i = 0; i < __arraycount(sc->sc_monitor); i++) { 382 sc->sc_monitor[i] = NULL; 383 hyperv_dma_free(sc->sc_dmat, &sc->sc_monitor_dma[i]); 384 } 385 386 for (CPU_INFO_FOREACH(cii, ci)) { 387 struct vmbus_percpu_data *pd = &sc->sc_percpu[cpu_index(ci)]; 388 389 if (pd->simp != NULL) { 390 pd->simp = NULL; 391 hyperv_dma_free(sc->sc_dmat, &pd->simp_dma); 392 } 393 if (pd->siep != NULL) { 394 pd->siep = NULL; 395 hyperv_dma_free(sc->sc_dmat, &pd->siep_dma); 396 } 397 } 398 } 399 400 static int 401 vmbus_init_interrupts(struct vmbus_softc *sc) 402 { 403 404 TAILQ_INIT(&sc->sc_reqs); 405 mutex_init(&sc->sc_req_lock, MUTEX_DEFAULT, IPL_NET); 406 407 TAILQ_INIT(&sc->sc_rsps); 408 mutex_init(&sc->sc_rsp_lock, MUTEX_DEFAULT, IPL_NET); 409 410 sc->sc_proto = VMBUS_VERSION_WS2008; 411 412 /* XXX event_tq */ 413 414 sc->sc_msg_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 415 vmbus_message_softintr, sc); 416 if (sc->sc_msg_sih == NULL) 417 return -1; 418 419 vmbus_init_interrupts_md(sc); 420 421 kcpuset_create(&sc->sc_intr_cpuset, true); 422 if (cold) { 423 /* Initialize other CPUs later. */ 424 vmbus_init_synic(sc, NULL); 425 } else 426 xc_wait(xc_broadcast(0, vmbus_init_synic, sc, NULL)); 427 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_SYNIC); 428 429 return 0; 430 } 431 432 static void 433 vmbus_deinit_interrupts(struct vmbus_softc *sc) 434 { 435 436 if (ISSET(sc->sc_flags, VMBUS_SCFLAG_SYNIC)) { 437 if (cold) 438 vmbus_deinit_synic(sc, NULL); 439 else 440 xc_wait(xc_broadcast(0, vmbus_deinit_synic, sc, NULL)); 441 atomic_and_32(&sc->sc_flags, (uint32_t)~VMBUS_SCFLAG_SYNIC); 442 } 443 444 /* XXX event_tq */ 445 446 if (sc->sc_msg_sih != NULL) { 447 softint_disestablish(sc->sc_msg_sih); 448 sc->sc_msg_sih = NULL; 449 } 450 451 vmbus_deinit_interrupts_md(sc); 452 } 453 454 static void 455 vmbus_init_synic(void *arg1, void *arg2) 456 { 457 struct vmbus_softc *sc = arg1; 458 cpuid_t cpu; 459 int s; 460 461 s = splhigh(); 462 463 cpu = cpu_index(curcpu()); 464 if (!kcpuset_isset(sc->sc_intr_cpuset, cpu)) { 465 kcpuset_atomic_set(sc->sc_intr_cpuset, cpu); 466 vmbus_init_synic_md(sc, cpu); 467 } 468 469 splx(s); 470 } 471 472 static void 473 vmbus_deinit_synic(void *arg1, void *arg2) 474 { 475 struct vmbus_softc *sc = arg1; 476 cpuid_t cpu; 477 int s; 478 479 s = splhigh(); 480 481 cpu = cpu_index(curcpu()); 482 if (kcpuset_isset(sc->sc_intr_cpuset, cpu)) { 483 vmbus_deinit_synic_md(sc, cpu); 484 kcpuset_atomic_clear(sc->sc_intr_cpuset, cpu); 485 } 486 487 splx(s); 488 } 489 490 static int 491 vmbus_connect(struct vmbus_softc *sc) 492 { 493 static const uint32_t versions[] = { 494 VMBUS_VERSION_WIN8_1, 495 VMBUS_VERSION_WIN8, 496 VMBUS_VERSION_WIN7, 497 VMBUS_VERSION_WS2008 498 }; 499 struct vmbus_chanmsg_connect cmd; 500 struct vmbus_chanmsg_connect_resp rsp; 501 int i, rv; 502 503 memset(&cmd, 0, sizeof(cmd)); 504 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CONNECT; 505 cmd.chm_evtflags = hyperv_dma_get_paddr(&sc->sc_events_dma); 506 cmd.chm_mnf1 = hyperv_dma_get_paddr(&sc->sc_monitor_dma[0]); 507 cmd.chm_mnf2 = hyperv_dma_get_paddr(&sc->sc_monitor_dma[1]); 508 509 memset(&rsp, 0, sizeof(rsp)); 510 511 for (i = 0; i < __arraycount(versions); i++) { 512 cmd.chm_ver = versions[i]; 513 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), 514 cold ? HCF_NOSLEEP : HCF_SLEEPOK); 515 if (rv) { 516 DPRINTF("%s: CONNECT failed\n", 517 device_xname(sc->sc_dev)); 518 return rv; 519 } 520 if (rsp.chm_done) { 521 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_CONNECTED); 522 sc->sc_proto = versions[i]; 523 sc->sc_handle = VMBUS_GPADL_START; 524 break; 525 } 526 } 527 if (i == __arraycount(versions)) { 528 device_printf(sc->sc_dev, 529 "failed to negotiate protocol version\n"); 530 return ENXIO; 531 } 532 533 return 0; 534 } 535 536 static int 537 vmbus_cmd(struct vmbus_softc *sc, void *cmd, size_t cmdlen, void *rsp, 538 size_t rsplen, int flags) 539 { 540 const int prflags = cold ? PR_NOWAIT : PR_WAITOK; 541 struct vmbus_msg *msg; 542 paddr_t pa; 543 int rv; 544 545 if (cmdlen > VMBUS_MSG_DSIZE_MAX) { 546 device_printf(sc->sc_dev, "payload too large (%zu)\n", 547 cmdlen); 548 return EMSGSIZE; 549 } 550 551 msg = pool_cache_get_paddr(sc->sc_msgpool, prflags, &pa); 552 if (msg == NULL) { 553 device_printf(sc->sc_dev, "couldn't get msgpool\n"); 554 return ENOMEM; 555 } 556 memset(msg, 0, sizeof(*msg)); 557 msg->msg_req.hc_dsize = cmdlen; 558 memcpy(msg->msg_req.hc_data, cmd, cmdlen); 559 560 if (!(flags & HCF_NOREPLY)) { 561 msg->msg_rsp = rsp; 562 msg->msg_rsplen = rsplen; 563 } else 564 msg->msg_flags |= MSGF_NOQUEUE; 565 566 if (flags & HCF_NOSLEEP) 567 msg->msg_flags |= MSGF_NOSLEEP; 568 569 rv = vmbus_start(sc, msg, pa); 570 if (rv == 0) 571 rv = vmbus_reply(sc, msg); 572 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 573 return rv; 574 } 575 576 static int 577 vmbus_start(struct vmbus_softc *sc, struct vmbus_msg *msg, paddr_t msg_pa) 578 { 579 static const int delays[] = { 580 100, 100, 100, 500, 500, 5000, 5000, 5000 581 }; 582 const char *wchan = "hvstart"; 583 uint16_t status; 584 int i, s; 585 586 msg->msg_req.hc_connid = VMBUS_CONNID_MESSAGE; 587 msg->msg_req.hc_msgtype = 1; 588 589 if (!(msg->msg_flags & MSGF_NOQUEUE)) { 590 mutex_enter(&sc->sc_req_lock); 591 TAILQ_INSERT_TAIL(&sc->sc_reqs, msg, msg_entry); 592 mutex_exit(&sc->sc_req_lock); 593 } 594 595 for (i = 0; i < __arraycount(delays); i++) { 596 status = hyperv_hypercall_post_message( 597 msg_pa + offsetof(struct vmbus_msg, msg_req)); 598 if (status == HYPERCALL_STATUS_SUCCESS) 599 break; 600 601 if (msg->msg_flags & MSGF_NOSLEEP) { 602 delay(delays[i]); 603 s = splnet(); 604 hyperv_intr(); 605 splx(s); 606 } else 607 tsleep(wchan, PRIBIO, wchan, 1); 608 } 609 if (status != HYPERCALL_STATUS_SUCCESS) { 610 device_printf(sc->sc_dev, 611 "posting vmbus message failed with %d\n", status); 612 if (!(msg->msg_flags & MSGF_NOQUEUE)) { 613 mutex_enter(&sc->sc_req_lock); 614 TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry); 615 mutex_exit(&sc->sc_req_lock); 616 } 617 return EIO; 618 } 619 620 return 0; 621 } 622 623 static int 624 vmbus_reply_done(struct vmbus_softc *sc, struct vmbus_msg *msg) 625 { 626 struct vmbus_msg *m; 627 628 mutex_enter(&sc->sc_rsp_lock); 629 TAILQ_FOREACH(m, &sc->sc_rsps, msg_entry) { 630 if (m == msg) { 631 mutex_exit(&sc->sc_rsp_lock); 632 return 1; 633 } 634 } 635 mutex_exit(&sc->sc_rsp_lock); 636 return 0; 637 } 638 639 static int 640 vmbus_reply(struct vmbus_softc *sc, struct vmbus_msg *msg) 641 { 642 643 if (msg->msg_flags & MSGF_NOQUEUE) 644 return 0; 645 646 vmbus_wait(sc, vmbus_reply_done, msg, msg, "hvreply"); 647 648 mutex_enter(&sc->sc_rsp_lock); 649 TAILQ_REMOVE(&sc->sc_rsps, msg, msg_entry); 650 mutex_exit(&sc->sc_rsp_lock); 651 652 return 0; 653 } 654 655 static void 656 vmbus_wait(struct vmbus_softc *sc, 657 int (*cond)(struct vmbus_softc *, struct vmbus_msg *), 658 struct vmbus_msg *msg, void *wchan, const char *wmsg) 659 { 660 int s; 661 662 while (!cond(sc, msg)) { 663 if (msg->msg_flags & MSGF_NOSLEEP) { 664 delay(1000); 665 s = splnet(); 666 hyperv_intr(); 667 splx(s); 668 } else 669 tsleep(wchan, PRIBIO, wmsg ? wmsg : "hvwait", 1); 670 } 671 } 672 673 static uint16_t 674 vmbus_intr_signal(struct vmbus_softc *sc, paddr_t con_pa) 675 { 676 uint64_t status; 677 678 status = hyperv_hypercall_signal_event(con_pa); 679 return (uint16_t)status; 680 } 681 682 #if LONG_BIT == 64 683 #define ffsl(v) ffs64(v) 684 #elif LONG_BIT == 32 685 #define ffsl(v) ffs32(v) 686 #else 687 #error unsupport LONG_BIT 688 #endif /* LONG_BIT */ 689 690 static void 691 vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *revents, 692 int maxrow) 693 { 694 struct vmbus_channel *ch; 695 u_long pending; 696 uint32_t chanid, chanid_base; 697 int row, chanid_ofs; 698 699 for (row = 0; row < maxrow; row++) { 700 if (revents[row] == 0) 701 continue; 702 703 pending = atomic_swap_ulong(&revents[row], 0); 704 chanid_base = row * LONG_BIT; 705 706 while ((chanid_ofs = ffsl(pending)) != 0) { 707 chanid_ofs--; /* NOTE: ffs is 1-based */ 708 pending &= ~(1UL << chanid_ofs); 709 710 chanid = chanid_base + chanid_ofs; 711 /* vmbus channel protocol message */ 712 if (chanid == 0) 713 continue; 714 715 ch = vmbus_channel_lookup(sc, chanid); 716 if (ch == NULL) { 717 device_printf(sc->sc_dev, 718 "unhandled event on %d\n", chanid); 719 continue; 720 } 721 if (ch->ch_state != VMBUS_CHANSTATE_OPENED) { 722 device_printf(sc->sc_dev, 723 "channel %d is not active\n", chanid); 724 continue; 725 } 726 ch->ch_evcnt.ev_count++; 727 vmbus_channel_schedule(ch); 728 } 729 } 730 } 731 732 static void 733 vmbus_event_proc(void *arg, struct cpu_info *ci) 734 { 735 struct vmbus_softc *sc = arg; 736 struct vmbus_evtflags *evt; 737 738 /* 739 * On Host with Win8 or above, the event page can be 740 * checked directly to get the id of the channel 741 * that has the pending interrupt. 742 */ 743 evt = (struct vmbus_evtflags *)sc->sc_percpu[cpu_index(ci)].siep + 744 VMBUS_SINT_MESSAGE; 745 746 vmbus_event_flags_proc(sc, evt->evt_flags, 747 __arraycount(evt->evt_flags)); 748 } 749 750 static void 751 vmbus_event_proc_compat(void *arg, struct cpu_info *ci) 752 { 753 struct vmbus_softc *sc = arg; 754 struct vmbus_evtflags *evt; 755 756 evt = (struct vmbus_evtflags *)sc->sc_percpu[cpu_index(ci)].siep + 757 VMBUS_SINT_MESSAGE; 758 759 if (test_bit(0, &evt->evt_flags[0])) { 760 clear_bit(0, &evt->evt_flags[0]); 761 /* 762 * receive size is 1/2 page and divide that by 4 bytes 763 */ 764 vmbus_event_flags_proc(sc, sc->sc_revents, 765 VMBUS_CHAN_MAX_COMPAT / VMBUS_EVTFLAG_LEN); 766 } 767 } 768 769 static void 770 vmbus_message_proc(void *arg, struct cpu_info *ci) 771 { 772 struct vmbus_softc *sc = arg; 773 struct vmbus_message *msg; 774 775 msg = (struct vmbus_message *)sc->sc_percpu[cpu_index(ci)].simp + 776 VMBUS_SINT_MESSAGE; 777 if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) { 778 if (__predict_true(!cold)) 779 softint_schedule_cpu(sc->sc_msg_sih, ci); 780 else 781 vmbus_message_softintr(sc); 782 } 783 } 784 785 static void 786 vmbus_message_softintr(void *arg) 787 { 788 struct vmbus_softc *sc = arg; 789 struct vmbus_message *msg; 790 struct vmbus_chanmsg_hdr *hdr; 791 uint32_t type; 792 cpuid_t cpu; 793 794 cpu = cpu_index(curcpu()); 795 796 for (;;) { 797 msg = (struct vmbus_message *)sc->sc_percpu[cpu].simp + 798 VMBUS_SINT_MESSAGE; 799 if (msg->msg_type == HYPERV_MSGTYPE_NONE) 800 break; 801 802 hdr = (struct vmbus_chanmsg_hdr *)msg->msg_data; 803 type = hdr->chm_type; 804 if (type >= VMBUS_CHANMSG_COUNT) { 805 device_printf(sc->sc_dev, 806 "unhandled message type %u flags %#x\n", type, 807 msg->msg_flags); 808 } else { 809 if (vmbus_msg_dispatch[type].hmd_handler) { 810 vmbus_msg_dispatch[type].hmd_handler(sc, hdr); 811 } else { 812 device_printf(sc->sc_dev, 813 "unhandled message type %u\n", type); 814 } 815 } 816 817 msg->msg_type = HYPERV_MSGTYPE_NONE; 818 membar_sync(); 819 if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) 820 hyperv_send_eom(); 821 } 822 } 823 824 static void 825 vmbus_channel_response(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *rsphdr) 826 { 827 struct vmbus_msg *msg; 828 struct vmbus_chanmsg_hdr *reqhdr; 829 int req; 830 831 req = vmbus_msg_dispatch[rsphdr->chm_type].hmd_request; 832 mutex_enter(&sc->sc_req_lock); 833 TAILQ_FOREACH(msg, &sc->sc_reqs, msg_entry) { 834 reqhdr = (struct vmbus_chanmsg_hdr *)&msg->msg_req.hc_data; 835 if (reqhdr->chm_type == req) { 836 TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry); 837 break; 838 } 839 } 840 mutex_exit(&sc->sc_req_lock); 841 if (msg != NULL) { 842 memcpy(msg->msg_rsp, rsphdr, msg->msg_rsplen); 843 mutex_enter(&sc->sc_rsp_lock); 844 TAILQ_INSERT_TAIL(&sc->sc_rsps, msg, msg_entry); 845 mutex_exit(&sc->sc_rsp_lock); 846 wakeup(msg); 847 } 848 } 849 850 static void 851 vmbus_channel_offer(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr) 852 { 853 struct vmbus_offer *co; 854 855 co = kmem_intr_zalloc(sizeof(*co), KM_NOSLEEP); 856 if (co == NULL) { 857 device_printf(sc->sc_dev, "couldn't allocate offer\n"); 858 return; 859 } 860 861 memcpy(&co->co_chan, hdr, sizeof(co->co_chan)); 862 863 mutex_enter(&sc->sc_offer_lock); 864 SIMPLEQ_INSERT_TAIL(&sc->sc_offers, co, co_entry); 865 mutex_exit(&sc->sc_offer_lock); 866 } 867 868 static void 869 vmbus_channel_rescind(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr) 870 { 871 const struct vmbus_chanmsg_chrescind *cmd; 872 873 cmd = (const struct vmbus_chanmsg_chrescind *)hdr; 874 device_printf(sc->sc_dev, "revoking channel %u\n", cmd->chm_chanid); 875 } 876 877 static void 878 vmbus_channel_delivered(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr) 879 { 880 881 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_OFFERS_DELIVERED); 882 wakeup(&sc->sc_offers); 883 } 884 885 static void 886 hyperv_guid_sprint(struct hyperv_guid *guid, char *str, size_t size) 887 { 888 static const struct { 889 const struct hyperv_guid *guid; 890 const char *ident; 891 } map[] = { 892 { &hyperv_guid_network, "network" }, 893 { &hyperv_guid_ide, "ide" }, 894 { &hyperv_guid_scsi, "scsi" }, 895 { &hyperv_guid_shutdown, "shutdown" }, 896 { &hyperv_guid_timesync, "timesync" }, 897 { &hyperv_guid_heartbeat, "heartbeat" }, 898 { &hyperv_guid_kvp, "kvp" }, 899 { &hyperv_guid_vss, "vss" }, 900 { &hyperv_guid_dynmem, "dynamic-memory" }, 901 { &hyperv_guid_mouse, "mouse" }, 902 { &hyperv_guid_kbd, "keyboard" }, 903 { &hyperv_guid_video, "video" }, 904 { &hyperv_guid_fc, "fiber-channel" }, 905 { &hyperv_guid_fcopy, "file-copy" }, 906 { &hyperv_guid_pcie, "pcie-passthrough" }, 907 { &hyperv_guid_netdir, "network-direct" }, 908 { &hyperv_guid_rdesktop, "remote-desktop" }, 909 { &hyperv_guid_avma1, "avma-1" }, 910 { &hyperv_guid_avma2, "avma-2" }, 911 { &hyperv_guid_avma3, "avma-3" }, 912 { &hyperv_guid_avma4, "avma-4" }, 913 }; 914 int i; 915 916 for (i = 0; i < __arraycount(map); i++) { 917 if (memcmp(guid, map[i].guid, sizeof(*guid)) == 0) { 918 strlcpy(str, map[i].ident, size); 919 return; 920 } 921 } 922 hyperv_guid2str(guid, str, size); 923 } 924 925 static int 926 vmbus_channel_scan_done(struct vmbus_softc *sc, struct vmbus_msg *msg __unused) 927 { 928 929 return ISSET(sc->sc_flags, VMBUS_SCFLAG_OFFERS_DELIVERED); 930 } 931 932 static int 933 vmbus_channel_scan(struct vmbus_softc *sc) 934 { 935 struct vmbus_chanmsg_hdr hdr; 936 struct vmbus_chanmsg_choffer rsp; 937 struct vmbus_offer *co; 938 939 SIMPLEQ_INIT(&sc->sc_offers); 940 mutex_init(&sc->sc_offer_lock, MUTEX_DEFAULT, IPL_NET); 941 942 memset(&hdr, 0, sizeof(hdr)); 943 hdr.chm_type = VMBUS_CHANMSG_CHREQUEST; 944 945 if (vmbus_cmd(sc, &hdr, sizeof(hdr), &rsp, sizeof(rsp), 946 HCF_NOREPLY | (cold ? HCF_NOSLEEP : HCF_SLEEPOK))) { 947 DPRINTF("%s: CHREQUEST failed\n", device_xname(sc->sc_dev)); 948 return -1; 949 } 950 951 vmbus_wait(sc, vmbus_channel_scan_done, (struct vmbus_msg *)&hdr, 952 &sc->sc_offers, "hvscan"); 953 954 TAILQ_INIT(&sc->sc_channels); 955 mutex_init(&sc->sc_channel_lock, MUTEX_DEFAULT, IPL_NET); 956 957 mutex_enter(&sc->sc_offer_lock); 958 while (!SIMPLEQ_EMPTY(&sc->sc_offers)) { 959 co = SIMPLEQ_FIRST(&sc->sc_offers); 960 SIMPLEQ_REMOVE_HEAD(&sc->sc_offers, co_entry); 961 mutex_exit(&sc->sc_offer_lock); 962 963 vmbus_process_offer(sc, co); 964 kmem_free(co, sizeof(*co)); 965 966 mutex_enter(&sc->sc_offer_lock); 967 } 968 mutex_exit(&sc->sc_offer_lock); 969 970 return 0; 971 } 972 973 static struct vmbus_channel * 974 vmbus_channel_alloc(struct vmbus_softc *sc) 975 { 976 struct vmbus_channel *ch; 977 978 ch = kmem_zalloc(sizeof(*ch), cold ? KM_NOSLEEP : KM_SLEEP); 979 980 ch->ch_monprm = hyperv_dma_alloc(sc->sc_dmat, &ch->ch_monprm_dma, 981 sizeof(*ch->ch_monprm), 8, 0, 1); 982 if (ch->ch_monprm == NULL) { 983 device_printf(sc->sc_dev, "monprm alloc failed\n"); 984 kmem_free(ch, sizeof(*ch)); 985 return NULL; 986 } 987 memset(ch->ch_monprm, 0, sizeof(*ch->ch_monprm)); 988 989 ch->ch_refs = 1; 990 ch->ch_sc = sc; 991 mutex_init(&ch->ch_subchannel_lock, MUTEX_DEFAULT, IPL_NET); 992 TAILQ_INIT(&ch->ch_subchannels); 993 994 ch->ch_state = VMBUS_CHANSTATE_CLOSED; 995 996 return ch; 997 } 998 999 static void 1000 vmbus_channel_free(struct vmbus_channel *ch) 1001 { 1002 struct vmbus_softc *sc = ch->ch_sc; 1003 1004 KASSERTMSG(TAILQ_EMPTY(&ch->ch_subchannels) && 1005 ch->ch_subchannel_count == 0, "still owns sub-channels"); 1006 KASSERTMSG(ch->ch_state == 0 || ch->ch_state == VMBUS_CHANSTATE_CLOSED, 1007 "free busy channel"); 1008 KASSERTMSG(ch->ch_refs == 0, "channel %u: invalid refcnt %d", 1009 ch->ch_id, ch->ch_refs); 1010 1011 hyperv_dma_free(sc->sc_dmat, &ch->ch_monprm_dma); 1012 mutex_destroy(&ch->ch_subchannel_lock); 1013 /* XXX ch_evcnt */ 1014 softint_disestablish(ch->ch_taskq); 1015 kmem_free(ch, sizeof(*ch)); 1016 } 1017 1018 static int 1019 vmbus_channel_add(struct vmbus_channel *nch) 1020 { 1021 struct vmbus_softc *sc = nch->ch_sc; 1022 struct vmbus_channel *ch; 1023 u_int refs __diagused; 1024 1025 if (nch->ch_id == 0) { 1026 device_printf(sc->sc_dev, "got channel 0 offer, discard\n"); 1027 return EINVAL; 1028 } else if (nch->ch_id >= sc->sc_channel_max) { 1029 device_printf(sc->sc_dev, "invalid channel %u offer\n", 1030 nch->ch_id); 1031 return EINVAL; 1032 } 1033 1034 mutex_enter(&sc->sc_channel_lock); 1035 TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) { 1036 if (!memcmp(&ch->ch_type, &nch->ch_type, sizeof(ch->ch_type)) && 1037 !memcmp(&ch->ch_inst, &nch->ch_inst, sizeof(ch->ch_inst))) 1038 break; 1039 } 1040 if (VMBUS_CHAN_ISPRIMARY(nch)) { 1041 if (ch == NULL) { 1042 TAILQ_INSERT_TAIL(&sc->sc_channels, nch, ch_entry); 1043 mutex_exit(&sc->sc_channel_lock); 1044 goto done; 1045 } else { 1046 mutex_exit(&sc->sc_channel_lock); 1047 device_printf(sc->sc_dev, 1048 "duplicated primary channel%u\n", nch->ch_id); 1049 return EINVAL; 1050 } 1051 } else { 1052 if (ch == NULL) { 1053 mutex_exit(&sc->sc_channel_lock); 1054 device_printf(sc->sc_dev, "no primary channel%u\n", 1055 nch->ch_id); 1056 return EINVAL; 1057 } 1058 } 1059 mutex_exit(&sc->sc_channel_lock); 1060 1061 KASSERT(!VMBUS_CHAN_ISPRIMARY(nch)); 1062 KASSERT(ch != NULL); 1063 1064 refs = atomic_add_int_nv(&nch->ch_refs, 1); 1065 KASSERT(refs == 1); 1066 1067 nch->ch_primary_channel = ch; 1068 nch->ch_dev = ch->ch_dev; 1069 1070 mutex_enter(&ch->ch_subchannel_lock); 1071 TAILQ_INSERT_TAIL(&ch->ch_subchannels, nch, ch_subentry); 1072 ch->ch_subchannel_count++; 1073 mutex_exit(&ch->ch_subchannel_lock); 1074 wakeup(ch); 1075 1076 done: 1077 vmbus_channel_cpu_default(nch); 1078 1079 return 0; 1080 } 1081 1082 void 1083 vmbus_channel_cpu_set(struct vmbus_channel *ch, int cpu) 1084 { 1085 struct vmbus_softc *sc = ch->ch_sc; 1086 1087 KASSERTMSG(cpu >= 0 && cpu < ncpu, "invalid cpu %d", cpu); 1088 1089 if (sc->sc_proto == VMBUS_VERSION_WS2008 || 1090 sc->sc_proto == VMBUS_VERSION_WIN7) { 1091 /* Only cpu0 is supported */ 1092 cpu = 0; 1093 } 1094 1095 ch->ch_cpuid = cpu; 1096 ch->ch_vcpu = sc->sc_percpu[cpu].vcpuid; 1097 } 1098 1099 void 1100 vmbus_channel_cpu_rr(struct vmbus_channel *ch) 1101 { 1102 static uint32_t vmbus_channel_nextcpu; 1103 int cpu; 1104 1105 cpu = atomic_add_32_nv(&vmbus_channel_nextcpu, 1) % ncpu; 1106 vmbus_channel_cpu_set(ch, cpu); 1107 } 1108 1109 static void 1110 vmbus_channel_cpu_default(struct vmbus_channel *ch) 1111 { 1112 1113 /* 1114 * By default, pin the channel to cpu0. Devices having 1115 * special channel-cpu mapping requirement should call 1116 * vmbus_channel_cpu_{set,rr}(). 1117 */ 1118 vmbus_channel_cpu_set(ch, 0); 1119 } 1120 1121 static void 1122 vmbus_process_offer(struct vmbus_softc *sc, struct vmbus_offer *co) 1123 { 1124 struct vmbus_channel *ch; 1125 1126 ch = vmbus_channel_alloc(sc); 1127 if (ch == NULL) { 1128 device_printf(sc->sc_dev, "allocate channel %u failed\n", 1129 co->co_chan.chm_chanid); 1130 return; 1131 } 1132 1133 /* 1134 * By default we setup state to enable batched reading. 1135 * A specific service can choose to disable this prior 1136 * to opening the channel. 1137 */ 1138 ch->ch_flags |= CHF_BATCHED; 1139 1140 hyperv_guid_sprint(&co->co_chan.chm_chtype, ch->ch_ident, 1141 sizeof(ch->ch_ident)); 1142 1143 ch->ch_monprm->mp_connid = VMBUS_CONNID_EVENT; 1144 if (sc->sc_proto > VMBUS_VERSION_WS2008) 1145 ch->ch_monprm->mp_connid = co->co_chan.chm_connid; 1146 1147 if (co->co_chan.chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) { 1148 ch->ch_mgroup = co->co_chan.chm_montrig / VMBUS_MONTRIG_LEN; 1149 ch->ch_mindex = co->co_chan.chm_montrig % VMBUS_MONTRIG_LEN; 1150 ch->ch_flags |= CHF_MONITOR; 1151 } 1152 1153 ch->ch_id = co->co_chan.chm_chanid; 1154 ch->ch_subidx = co->co_chan.chm_subidx; 1155 1156 memcpy(&ch->ch_type, &co->co_chan.chm_chtype, sizeof(ch->ch_type)); 1157 memcpy(&ch->ch_inst, &co->co_chan.chm_chinst, sizeof(ch->ch_inst)); 1158 1159 if (VMBUS_CHAN_ISPRIMARY(ch)) { 1160 /* set primary channel mgmt wq */ 1161 } else { 1162 /* set sub channel mgmt wq */ 1163 } 1164 1165 if (vmbus_channel_add(ch) != 0) { 1166 vmbus_channel_free(ch); 1167 return; 1168 } 1169 1170 ch->ch_state = VMBUS_CHANSTATE_OFFERED; 1171 1172 #ifdef HYPERV_DEBUG 1173 printf("%s: channel %u: \"%s\"", device_xname(sc->sc_dev), ch->ch_id, 1174 ch->ch_ident); 1175 if (ch->ch_flags & CHF_MONITOR) 1176 printf(", monitor %u\n", co->co_chan.chm_montrig); 1177 else 1178 printf("\n"); 1179 #endif 1180 } 1181 1182 static int 1183 vmbus_channel_release(struct vmbus_channel *ch) 1184 { 1185 struct vmbus_softc *sc = ch->ch_sc; 1186 struct vmbus_chanmsg_chfree cmd; 1187 int rv; 1188 1189 memset(&cmd, 0, sizeof(cmd)); 1190 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHFREE; 1191 cmd.chm_chanid = ch->ch_id; 1192 1193 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0, 1194 HCF_NOREPLY | (cold ? HCF_NOSLEEP : HCF_SLEEPOK)); 1195 if (rv) { 1196 DPRINTF("%s: CHFREE failed with %d\n", device_xname(sc->sc_dev), 1197 rv); 1198 } 1199 return rv; 1200 } 1201 1202 struct vmbus_channel ** 1203 vmbus_subchannel_get(struct vmbus_channel *prich, int cnt) 1204 { 1205 struct vmbus_channel **ret, *ch; 1206 int i; 1207 1208 KASSERT(cnt > 0); 1209 1210 ret = kmem_alloc(sizeof(struct vmbus_channel *) * cnt, 1211 cold ? KM_NOSLEEP : KM_SLEEP); 1212 1213 mutex_enter(&prich->ch_subchannel_lock); 1214 1215 while (prich->ch_subchannel_count < cnt) 1216 /* XXX use condvar(9) instead of mtsleep */ 1217 mtsleep(prich, PRIBIO, "hvvmsubch", 0, 1218 &prich->ch_subchannel_lock); 1219 1220 i = 0; 1221 TAILQ_FOREACH(ch, &prich->ch_subchannels, ch_subentry) { 1222 ret[i] = ch; /* XXX inc refs */ 1223 1224 if (++i == cnt) 1225 break; 1226 } 1227 1228 mutex_exit(&prich->ch_subchannel_lock); 1229 1230 return ret; 1231 } 1232 1233 void 1234 vmbus_subchannel_put(struct vmbus_channel **subch, int cnt) 1235 { 1236 1237 kmem_free(subch, sizeof(struct vmbus_channel *) * cnt); 1238 } 1239 1240 static struct vmbus_channel * 1241 vmbus_channel_lookup(struct vmbus_softc *sc, uint32_t relid) 1242 { 1243 struct vmbus_channel *ch; 1244 1245 TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) { 1246 if (ch->ch_id == relid) 1247 return ch; 1248 } 1249 return NULL; 1250 } 1251 1252 static int 1253 vmbus_channel_ring_create(struct vmbus_channel *ch, uint32_t buflen) 1254 { 1255 struct vmbus_softc *sc = ch->ch_sc; 1256 1257 buflen = roundup(buflen, PAGE_SIZE) + sizeof(struct vmbus_bufring); 1258 ch->ch_ring_size = 2 * buflen; 1259 ch->ch_ring = hyperv_dma_alloc(sc->sc_dmat, &ch->ch_ring_dma, 1260 ch->ch_ring_size, PAGE_SIZE, 0, 1); /* page aligned memory */ 1261 if (ch->ch_ring == NULL) { 1262 device_printf(sc->sc_dev, 1263 "failed to allocate channel ring\n"); 1264 return ENOMEM; 1265 } 1266 memset(ch->ch_ring, 0, ch->ch_ring_size); 1267 1268 memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd)); 1269 ch->ch_wrd.rd_ring = (struct vmbus_bufring *)ch->ch_ring; 1270 ch->ch_wrd.rd_size = buflen; 1271 ch->ch_wrd.rd_dsize = buflen - sizeof(struct vmbus_bufring); 1272 mutex_init(&ch->ch_wrd.rd_lock, MUTEX_DEFAULT, IPL_NET); 1273 1274 memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd)); 1275 ch->ch_rrd.rd_ring = (struct vmbus_bufring *)((uint8_t *)ch->ch_ring + 1276 buflen); 1277 ch->ch_rrd.rd_size = buflen; 1278 ch->ch_rrd.rd_dsize = buflen - sizeof(struct vmbus_bufring); 1279 mutex_init(&ch->ch_rrd.rd_lock, MUTEX_DEFAULT, IPL_NET); 1280 1281 if (vmbus_handle_alloc(ch, &ch->ch_ring_dma, ch->ch_ring_size, 1282 &ch->ch_ring_gpadl)) { 1283 device_printf(sc->sc_dev, 1284 "failed to obtain a PA handle for the ring\n"); 1285 vmbus_channel_ring_destroy(ch); 1286 return ENOMEM; 1287 } 1288 1289 return 0; 1290 } 1291 1292 static void 1293 vmbus_channel_ring_destroy(struct vmbus_channel *ch) 1294 { 1295 struct vmbus_softc *sc = ch->ch_sc; 1296 1297 hyperv_dma_free(sc->sc_dmat, &ch->ch_ring_dma); 1298 ch->ch_ring = NULL; 1299 vmbus_handle_free(ch, ch->ch_ring_gpadl); 1300 1301 mutex_destroy(&ch->ch_wrd.rd_lock); 1302 memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd)); 1303 mutex_destroy(&ch->ch_rrd.rd_lock); 1304 memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd)); 1305 } 1306 1307 int 1308 vmbus_channel_open(struct vmbus_channel *ch, size_t buflen, void *udata, 1309 size_t udatalen, void (*handler)(void *), void *arg) 1310 { 1311 struct vmbus_softc *sc = ch->ch_sc; 1312 struct vmbus_chanmsg_chopen cmd; 1313 struct vmbus_chanmsg_chopen_resp rsp; 1314 int rv = EINVAL; 1315 1316 if (ch->ch_ring == NULL && 1317 (rv = vmbus_channel_ring_create(ch, buflen))) { 1318 DPRINTF("%s: failed to create channel ring\n", 1319 device_xname(sc->sc_dev)); 1320 return rv; 1321 } 1322 1323 memset(&cmd, 0, sizeof(cmd)); 1324 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHOPEN; 1325 cmd.chm_openid = ch->ch_id; 1326 cmd.chm_chanid = ch->ch_id; 1327 cmd.chm_gpadl = ch->ch_ring_gpadl; 1328 cmd.chm_txbr_pgcnt = atop(ch->ch_wrd.rd_size); 1329 cmd.chm_vcpuid = ch->ch_vcpu; 1330 if (udata && udatalen > 0) 1331 memcpy(cmd.chm_udata, udata, udatalen); 1332 1333 memset(&rsp, 0, sizeof(rsp)); 1334 1335 ch->ch_handler = handler; 1336 ch->ch_ctx = arg; 1337 ch->ch_state = VMBUS_CHANSTATE_OPENED; 1338 1339 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), 1340 cold ? HCF_NOSLEEP : HCF_SLEEPOK); 1341 if (rv) { 1342 vmbus_channel_ring_destroy(ch); 1343 DPRINTF("%s: CHOPEN failed with %d\n", device_xname(sc->sc_dev), 1344 rv); 1345 ch->ch_handler = NULL; 1346 ch->ch_ctx = NULL; 1347 ch->ch_state = VMBUS_CHANSTATE_OFFERED; 1348 return rv; 1349 } 1350 return 0; 1351 } 1352 1353 static void 1354 vmbus_channel_detach(struct vmbus_channel *ch) 1355 { 1356 u_int refs; 1357 1358 refs = atomic_add_int_nv(&ch->ch_refs, -1); 1359 if (refs == 1) { 1360 /* XXX on workqueue? */ 1361 if (VMBUS_CHAN_ISPRIMARY(ch)) { 1362 vmbus_channel_release(ch); 1363 vmbus_channel_free(ch); 1364 } else { 1365 struct vmbus_channel *prich = ch->ch_primary_channel; 1366 1367 vmbus_channel_release(ch); 1368 1369 mutex_enter(&prich->ch_subchannel_lock); 1370 TAILQ_REMOVE(&prich->ch_subchannels, ch, ch_subentry); 1371 prich->ch_subchannel_count--; 1372 mutex_exit(&prich->ch_subchannel_lock); 1373 wakeup(prich); 1374 1375 vmbus_channel_free(ch); 1376 } 1377 } 1378 } 1379 1380 static int 1381 vmbus_channel_close_internal(struct vmbus_channel *ch) 1382 { 1383 struct vmbus_softc *sc = ch->ch_sc; 1384 struct vmbus_chanmsg_chclose cmd; 1385 int rv; 1386 1387 memset(&cmd, 0, sizeof(cmd)); 1388 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHCLOSE; 1389 cmd.chm_chanid = ch->ch_id; 1390 1391 ch->ch_state = VMBUS_CHANSTATE_CLOSING; 1392 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0, 1393 HCF_NOREPLY | (cold ? HCF_NOSLEEP : HCF_SLEEPOK)); 1394 if (rv) { 1395 DPRINTF("%s: CHCLOSE failed with %d\n", 1396 device_xname(sc->sc_dev), rv); 1397 return rv; 1398 } 1399 ch->ch_state = VMBUS_CHANSTATE_CLOSED; 1400 vmbus_channel_ring_destroy(ch); 1401 return 0; 1402 } 1403 1404 int 1405 vmbus_channel_close_direct(struct vmbus_channel *ch) 1406 { 1407 int rv; 1408 1409 rv = vmbus_channel_close_internal(ch); 1410 if (!VMBUS_CHAN_ISPRIMARY(ch)) 1411 vmbus_channel_detach(ch); 1412 return rv; 1413 } 1414 1415 int 1416 vmbus_channel_close(struct vmbus_channel *ch) 1417 { 1418 struct vmbus_channel **subch; 1419 int i, cnt, rv; 1420 1421 if (!VMBUS_CHAN_ISPRIMARY(ch)) 1422 return 0; 1423 1424 cnt = ch->ch_subchannel_count; 1425 if (cnt > 0) { 1426 subch = vmbus_subchannel_get(ch, cnt); 1427 for (i = 0; i < ch->ch_subchannel_count; i++) { 1428 rv = vmbus_channel_close_internal(subch[i]); 1429 (void) rv; /* XXX */ 1430 vmbus_channel_detach(ch); 1431 } 1432 vmbus_subchannel_put(subch, cnt); 1433 } 1434 1435 return vmbus_channel_close_internal(ch); 1436 } 1437 1438 static inline void 1439 vmbus_channel_setevent(struct vmbus_softc *sc, struct vmbus_channel *ch) 1440 { 1441 struct vmbus_mon_trig *mtg; 1442 1443 /* Each uint32_t represents 32 channels */ 1444 set_bit(ch->ch_id, sc->sc_wevents); 1445 if (ch->ch_flags & CHF_MONITOR) { 1446 mtg = &sc->sc_monitor[1]->mnf_trigs[ch->ch_mgroup]; 1447 set_bit(ch->ch_mindex, &mtg->mt_pending); 1448 } else 1449 vmbus_intr_signal(sc, hyperv_dma_get_paddr(&ch->ch_monprm_dma)); 1450 } 1451 1452 static void 1453 vmbus_channel_intr(void *arg) 1454 { 1455 struct vmbus_channel *ch = arg; 1456 1457 if (vmbus_channel_ready(ch)) 1458 ch->ch_handler(ch->ch_ctx); 1459 1460 if (vmbus_channel_unpause(ch) == 0) 1461 return; 1462 1463 vmbus_channel_pause(ch); 1464 vmbus_channel_schedule(ch); 1465 } 1466 1467 int 1468 vmbus_channel_setdeferred(struct vmbus_channel *ch, const char *name) 1469 { 1470 1471 ch->ch_taskq = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 1472 vmbus_channel_intr, ch); 1473 if (ch->ch_taskq == NULL) 1474 return -1; 1475 return 0; 1476 } 1477 1478 void 1479 vmbus_channel_schedule(struct vmbus_channel *ch) 1480 { 1481 1482 if (ch->ch_handler) { 1483 if (!cold && (ch->ch_flags & CHF_BATCHED)) { 1484 vmbus_channel_pause(ch); 1485 softint_schedule(ch->ch_taskq); 1486 } else 1487 ch->ch_handler(ch->ch_ctx); 1488 } 1489 } 1490 1491 static __inline void 1492 vmbus_ring_put(struct vmbus_ring_data *wrd, uint8_t *data, uint32_t datalen) 1493 { 1494 int left = MIN(datalen, wrd->rd_dsize - wrd->rd_prod); 1495 1496 memcpy(&wrd->rd_ring->br_data[wrd->rd_prod], data, left); 1497 memcpy(&wrd->rd_ring->br_data[0], data + left, datalen - left); 1498 wrd->rd_prod += datalen; 1499 if (wrd->rd_prod >= wrd->rd_dsize) 1500 wrd->rd_prod -= wrd->rd_dsize; 1501 } 1502 1503 static inline void 1504 vmbus_ring_get(struct vmbus_ring_data *rrd, uint8_t *data, uint32_t datalen, 1505 int peek) 1506 { 1507 int left = MIN(datalen, rrd->rd_dsize - rrd->rd_cons); 1508 1509 memcpy(data, &rrd->rd_ring->br_data[rrd->rd_cons], left); 1510 memcpy(data + left, &rrd->rd_ring->br_data[0], datalen - left); 1511 if (!peek) { 1512 rrd->rd_cons += datalen; 1513 if (rrd->rd_cons >= rrd->rd_dsize) 1514 rrd->rd_cons -= rrd->rd_dsize; 1515 } 1516 } 1517 1518 static __inline void 1519 vmbus_ring_avail(struct vmbus_ring_data *rd, uint32_t *towrite, 1520 uint32_t *toread) 1521 { 1522 uint32_t ridx = rd->rd_ring->br_rindex; 1523 uint32_t widx = rd->rd_ring->br_windex; 1524 uint32_t r, w; 1525 1526 if (widx >= ridx) 1527 w = rd->rd_dsize - (widx - ridx); 1528 else 1529 w = ridx - widx; 1530 r = rd->rd_dsize - w; 1531 if (towrite) 1532 *towrite = w; 1533 if (toread) 1534 *toread = r; 1535 } 1536 1537 static int 1538 vmbus_ring_write(struct vmbus_ring_data *wrd, struct iovec *iov, int iov_cnt, 1539 int *needsig) 1540 { 1541 uint64_t indices = 0; 1542 uint32_t avail, oprod, datalen = sizeof(indices); 1543 int i; 1544 1545 for (i = 0; i < iov_cnt; i++) 1546 datalen += iov[i].iov_len; 1547 1548 KASSERT(datalen <= wrd->rd_dsize); 1549 1550 vmbus_ring_avail(wrd, &avail, NULL); 1551 if (avail <= datalen) { 1552 DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen); 1553 return EAGAIN; 1554 } 1555 1556 oprod = wrd->rd_prod; 1557 1558 for (i = 0; i < iov_cnt; i++) 1559 vmbus_ring_put(wrd, iov[i].iov_base, iov[i].iov_len); 1560 1561 indices = (uint64_t)oprod << 32; 1562 vmbus_ring_put(wrd, (uint8_t *)&indices, sizeof(indices)); 1563 1564 membar_sync(); 1565 wrd->rd_ring->br_windex = wrd->rd_prod; 1566 membar_sync(); 1567 1568 /* Signal when the ring transitions from being empty to non-empty */ 1569 if (wrd->rd_ring->br_imask == 0 && 1570 wrd->rd_ring->br_rindex == oprod) 1571 *needsig = 1; 1572 else 1573 *needsig = 0; 1574 1575 return 0; 1576 } 1577 1578 int 1579 vmbus_channel_send(struct vmbus_channel *ch, void *data, uint32_t datalen, 1580 uint64_t rid, int type, uint32_t flags) 1581 { 1582 struct vmbus_softc *sc = ch->ch_sc; 1583 struct vmbus_chanpkt cp; 1584 struct iovec iov[3]; 1585 uint32_t pktlen, pktlen_aligned; 1586 uint64_t zeropad = 0; 1587 int rv, needsig = 0; 1588 1589 pktlen = sizeof(cp) + datalen; 1590 pktlen_aligned = roundup(pktlen, sizeof(uint64_t)); 1591 1592 cp.cp_hdr.cph_type = type; 1593 cp.cp_hdr.cph_flags = flags; 1594 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp)); 1595 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned); 1596 cp.cp_hdr.cph_tid = rid; 1597 1598 iov[0].iov_base = &cp; 1599 iov[0].iov_len = sizeof(cp); 1600 1601 iov[1].iov_base = data; 1602 iov[1].iov_len = datalen; 1603 1604 iov[2].iov_base = &zeropad; 1605 iov[2].iov_len = pktlen_aligned - pktlen; 1606 1607 mutex_enter(&ch->ch_wrd.rd_lock); 1608 rv = vmbus_ring_write(&ch->ch_wrd, iov, 3, &needsig); 1609 mutex_exit(&ch->ch_wrd.rd_lock); 1610 if (rv == 0 && needsig) 1611 vmbus_channel_setevent(sc, ch); 1612 1613 return rv; 1614 } 1615 1616 int 1617 vmbus_channel_send_sgl(struct vmbus_channel *ch, struct vmbus_gpa *sgl, 1618 uint32_t nsge, void *data, uint32_t datalen, uint64_t rid) 1619 { 1620 struct vmbus_softc *sc = ch->ch_sc; 1621 struct vmbus_chanpkt_sglist cp; 1622 struct iovec iov[4]; 1623 uint32_t buflen, pktlen, pktlen_aligned; 1624 uint64_t zeropad = 0; 1625 int rv, needsig = 0; 1626 1627 buflen = sizeof(struct vmbus_gpa) * nsge; 1628 pktlen = sizeof(cp) + datalen + buflen; 1629 pktlen_aligned = roundup(pktlen, sizeof(uint64_t)); 1630 1631 cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA; 1632 cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC; 1633 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen); 1634 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned); 1635 cp.cp_hdr.cph_tid = rid; 1636 cp.cp_gpa_cnt = nsge; 1637 cp.cp_rsvd = 0; 1638 1639 iov[0].iov_base = &cp; 1640 iov[0].iov_len = sizeof(cp); 1641 1642 iov[1].iov_base = sgl; 1643 iov[1].iov_len = buflen; 1644 1645 iov[2].iov_base = data; 1646 iov[2].iov_len = datalen; 1647 1648 iov[3].iov_base = &zeropad; 1649 iov[3].iov_len = pktlen_aligned - pktlen; 1650 1651 mutex_enter(&ch->ch_wrd.rd_lock); 1652 rv = vmbus_ring_write(&ch->ch_wrd, iov, 4, &needsig); 1653 mutex_exit(&ch->ch_wrd.rd_lock); 1654 if (rv == 0 && needsig) 1655 vmbus_channel_setevent(sc, ch); 1656 1657 return rv; 1658 } 1659 1660 int 1661 vmbus_channel_send_prpl(struct vmbus_channel *ch, struct vmbus_gpa_range *prpl, 1662 uint32_t nprp, void *data, uint32_t datalen, uint64_t rid) 1663 { 1664 struct vmbus_softc *sc = ch->ch_sc; 1665 struct vmbus_chanpkt_prplist cp; 1666 struct iovec iov[4]; 1667 uint32_t buflen, pktlen, pktlen_aligned; 1668 uint64_t zeropad = 0; 1669 int rv, needsig = 0; 1670 1671 buflen = sizeof(struct vmbus_gpa_range) * (nprp + 1); 1672 pktlen = sizeof(cp) + datalen + buflen; 1673 pktlen_aligned = roundup(pktlen, sizeof(uint64_t)); 1674 1675 cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA; 1676 cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC; 1677 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen); 1678 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned); 1679 cp.cp_hdr.cph_tid = rid; 1680 cp.cp_range_cnt = 1; 1681 cp.cp_rsvd = 0; 1682 1683 iov[0].iov_base = &cp; 1684 iov[0].iov_len = sizeof(cp); 1685 1686 iov[1].iov_base = prpl; 1687 iov[1].iov_len = buflen; 1688 1689 iov[2].iov_base = data; 1690 iov[2].iov_len = datalen; 1691 1692 iov[3].iov_base = &zeropad; 1693 iov[3].iov_len = pktlen_aligned - pktlen; 1694 1695 mutex_enter(&ch->ch_wrd.rd_lock); 1696 rv = vmbus_ring_write(&ch->ch_wrd, iov, 4, &needsig); 1697 mutex_exit(&ch->ch_wrd.rd_lock); 1698 if (rv == 0 && needsig) 1699 vmbus_channel_setevent(sc, ch); 1700 1701 return rv; 1702 } 1703 1704 static int 1705 vmbus_ring_peek(struct vmbus_ring_data *rrd, void *data, uint32_t datalen) 1706 { 1707 uint32_t avail; 1708 1709 KASSERT(datalen <= rrd->rd_dsize); 1710 1711 vmbus_ring_avail(rrd, NULL, &avail); 1712 if (avail < datalen) 1713 return EAGAIN; 1714 1715 vmbus_ring_get(rrd, (uint8_t *)data, datalen, 1); 1716 return 0; 1717 } 1718 1719 static int 1720 vmbus_ring_read(struct vmbus_ring_data *rrd, void *data, uint32_t datalen, 1721 uint32_t offset) 1722 { 1723 uint64_t indices; 1724 uint32_t avail; 1725 1726 KASSERT(datalen <= rrd->rd_dsize); 1727 1728 vmbus_ring_avail(rrd, NULL, &avail); 1729 if (avail < datalen) { 1730 DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen); 1731 return EAGAIN; 1732 } 1733 1734 if (offset) { 1735 rrd->rd_cons += offset; 1736 if (rrd->rd_cons >= rrd->rd_dsize) 1737 rrd->rd_cons -= rrd->rd_dsize; 1738 } 1739 1740 vmbus_ring_get(rrd, (uint8_t *)data, datalen, 0); 1741 vmbus_ring_get(rrd, (uint8_t *)&indices, sizeof(indices), 0); 1742 1743 membar_sync(); 1744 rrd->rd_ring->br_rindex = rrd->rd_cons; 1745 1746 return 0; 1747 } 1748 1749 int 1750 vmbus_channel_recv(struct vmbus_channel *ch, void *data, uint32_t datalen, 1751 uint32_t *rlen, uint64_t *rid, int raw) 1752 { 1753 struct vmbus_softc *sc = ch->ch_sc; 1754 struct vmbus_chanpkt_hdr cph; 1755 uint32_t offset, pktlen; 1756 int rv; 1757 1758 *rlen = 0; 1759 1760 mutex_enter(&ch->ch_rrd.rd_lock); 1761 1762 if ((rv = vmbus_ring_peek(&ch->ch_rrd, &cph, sizeof(cph))) != 0) { 1763 mutex_exit(&ch->ch_rrd.rd_lock); 1764 return rv; 1765 } 1766 1767 offset = raw ? 0 : VMBUS_CHANPKT_GETLEN(cph.cph_hlen); 1768 pktlen = VMBUS_CHANPKT_GETLEN(cph.cph_tlen) - offset; 1769 if (pktlen > datalen) { 1770 mutex_exit(&ch->ch_rrd.rd_lock); 1771 device_printf(sc->sc_dev, "%s: pktlen %u datalen %u\n", 1772 __func__, pktlen, datalen); 1773 return EINVAL; 1774 } 1775 1776 rv = vmbus_ring_read(&ch->ch_rrd, data, pktlen, offset); 1777 if (rv == 0) { 1778 *rlen = pktlen; 1779 *rid = cph.cph_tid; 1780 } 1781 1782 mutex_exit(&ch->ch_rrd.rd_lock); 1783 1784 return rv; 1785 } 1786 1787 static inline void 1788 vmbus_ring_mask(struct vmbus_ring_data *rd) 1789 { 1790 1791 membar_sync(); 1792 rd->rd_ring->br_imask = 1; 1793 membar_sync(); 1794 } 1795 1796 static inline void 1797 vmbus_ring_unmask(struct vmbus_ring_data *rd) 1798 { 1799 1800 membar_sync(); 1801 rd->rd_ring->br_imask = 0; 1802 membar_sync(); 1803 } 1804 1805 static void 1806 vmbus_channel_pause(struct vmbus_channel *ch) 1807 { 1808 1809 vmbus_ring_mask(&ch->ch_rrd); 1810 } 1811 1812 static uint32_t 1813 vmbus_channel_unpause(struct vmbus_channel *ch) 1814 { 1815 uint32_t avail; 1816 1817 vmbus_ring_unmask(&ch->ch_rrd); 1818 vmbus_ring_avail(&ch->ch_rrd, NULL, &avail); 1819 1820 return avail; 1821 } 1822 1823 static uint32_t 1824 vmbus_channel_ready(struct vmbus_channel *ch) 1825 { 1826 uint32_t avail; 1827 1828 vmbus_ring_avail(&ch->ch_rrd, NULL, &avail); 1829 1830 return avail; 1831 } 1832 1833 /* How many PFNs can be referenced by the header */ 1834 #define VMBUS_NPFNHDR ((VMBUS_MSG_DSIZE_MAX - \ 1835 sizeof(struct vmbus_chanmsg_gpadl_conn)) / sizeof(uint64_t)) 1836 1837 /* How many PFNs can be referenced by the body */ 1838 #define VMBUS_NPFNBODY ((VMBUS_MSG_DSIZE_MAX - \ 1839 sizeof(struct vmbus_chanmsg_gpadl_subconn)) / sizeof(uint64_t)) 1840 1841 int 1842 vmbus_handle_alloc(struct vmbus_channel *ch, const struct hyperv_dma *dma, 1843 uint32_t buflen, uint32_t *handle) 1844 { 1845 const int prflags = cold ? PR_NOWAIT : PR_WAITOK; 1846 const int kmemflags = cold ? KM_NOSLEEP : KM_SLEEP; 1847 const int msgflags = cold ? MSGF_NOSLEEP : 0; 1848 const int hcflags = cold ? HCF_NOSLEEP : HCF_SLEEPOK; 1849 struct vmbus_softc *sc = ch->ch_sc; 1850 struct vmbus_chanmsg_gpadl_conn *hdr; 1851 struct vmbus_chanmsg_gpadl_subconn *cmd; 1852 struct vmbus_chanmsg_gpadl_connresp rsp; 1853 struct vmbus_msg *msg; 1854 int i, j, last, left, rv; 1855 int bodylen = 0, ncmds = 0, pfn = 0; 1856 uint64_t *frames; 1857 paddr_t pa; 1858 uint8_t *body; 1859 /* Total number of pages to reference */ 1860 int total = atop(buflen); 1861 /* Number of pages that will fit the header */ 1862 int inhdr = MIN(total, VMBUS_NPFNHDR); 1863 1864 KASSERT((buflen & PAGE_MASK) == 0); 1865 KASSERT(buflen == (uint32_t)dma->map->dm_mapsize); 1866 1867 msg = pool_cache_get_paddr(sc->sc_msgpool, prflags, &pa); 1868 if (msg == NULL) 1869 return ENOMEM; 1870 1871 /* Prepare array of frame addresses */ 1872 frames = kmem_zalloc(total * sizeof(*frames), kmemflags); 1873 if (frames == NULL) { 1874 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 1875 return ENOMEM; 1876 } 1877 for (i = 0, j = 0; i < dma->map->dm_nsegs && j < total; i++) { 1878 bus_dma_segment_t *seg = &dma->map->dm_segs[i]; 1879 bus_addr_t addr = seg->ds_addr; 1880 1881 KASSERT((addr & PAGE_MASK) == 0); 1882 KASSERT((seg->ds_len & PAGE_MASK) == 0); 1883 1884 while (addr < seg->ds_addr + seg->ds_len && j < total) { 1885 frames[j++] = atop(addr); 1886 addr += PAGE_SIZE; 1887 } 1888 } 1889 1890 memset(msg, 0, sizeof(*msg)); 1891 msg->msg_req.hc_dsize = sizeof(struct vmbus_chanmsg_gpadl_conn) + 1892 inhdr * sizeof(uint64_t); 1893 hdr = (struct vmbus_chanmsg_gpadl_conn *)msg->msg_req.hc_data; 1894 msg->msg_rsp = &rsp; 1895 msg->msg_rsplen = sizeof(rsp); 1896 msg->msg_flags = msgflags; 1897 1898 left = total - inhdr; 1899 1900 /* Allocate additional gpadl_body structures if required */ 1901 if (left > 0) { 1902 ncmds = MAX(1, left / VMBUS_NPFNBODY + left % VMBUS_NPFNBODY); 1903 bodylen = ncmds * VMBUS_MSG_DSIZE_MAX; 1904 body = kmem_zalloc(bodylen, kmemflags); 1905 if (body == NULL) { 1906 kmem_free(frames, total * sizeof(*frames)); 1907 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 1908 return ENOMEM; 1909 } 1910 } 1911 1912 *handle = atomic_add_int_nv(&sc->sc_handle, 1); 1913 1914 hdr->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_CONN; 1915 hdr->chm_chanid = ch->ch_id; 1916 hdr->chm_gpadl = *handle; 1917 1918 /* Single range for a contiguous buffer */ 1919 hdr->chm_range_cnt = 1; 1920 hdr->chm_range_len = sizeof(struct vmbus_gpa_range) + total * 1921 sizeof(uint64_t); 1922 hdr->chm_range.gpa_ofs = 0; 1923 hdr->chm_range.gpa_len = buflen; 1924 1925 /* Fit as many pages as possible into the header */ 1926 for (i = 0; i < inhdr; i++) 1927 hdr->chm_range.gpa_page[i] = frames[pfn++]; 1928 1929 for (i = 0; i < ncmds; i++) { 1930 cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body + 1931 VMBUS_MSG_DSIZE_MAX * i); 1932 cmd->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_SUBCONN; 1933 cmd->chm_gpadl = *handle; 1934 last = MIN(left, VMBUS_NPFNBODY); 1935 for (j = 0; j < last; j++) 1936 cmd->chm_gpa_page[j] = frames[pfn++]; 1937 left -= last; 1938 } 1939 1940 rv = vmbus_start(sc, msg, pa); 1941 if (rv != 0) { 1942 DPRINTF("%s: GPADL_CONN failed\n", device_xname(sc->sc_dev)); 1943 goto out; 1944 } 1945 for (i = 0; i < ncmds; i++) { 1946 int cmdlen = sizeof(*cmd); 1947 cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body + 1948 VMBUS_MSG_DSIZE_MAX * i); 1949 /* Last element can be short */ 1950 if (i == ncmds - 1) 1951 cmdlen += last * sizeof(uint64_t); 1952 else 1953 cmdlen += VMBUS_NPFNBODY * sizeof(uint64_t); 1954 rv = vmbus_cmd(sc, cmd, cmdlen, NULL, 0, HCF_NOREPLY | hcflags); 1955 if (rv != 0) { 1956 DPRINTF("%s: GPADL_SUBCONN (iteration %d/%d) failed " 1957 "with %d\n", device_xname(sc->sc_dev), i, ncmds, 1958 rv); 1959 goto out; 1960 } 1961 } 1962 rv = vmbus_reply(sc, msg); 1963 if (rv != 0) { 1964 DPRINTF("%s: GPADL allocation failed with %d\n", 1965 device_xname(sc->sc_dev), rv); 1966 } 1967 1968 out: 1969 if (bodylen > 0) 1970 kmem_free(body, bodylen); 1971 kmem_free(frames, total * sizeof(*frames)); 1972 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 1973 if (rv) 1974 return rv; 1975 1976 KASSERT(*handle == rsp.chm_gpadl); 1977 1978 return 0; 1979 } 1980 1981 void 1982 vmbus_handle_free(struct vmbus_channel *ch, uint32_t handle) 1983 { 1984 struct vmbus_softc *sc = ch->ch_sc; 1985 struct vmbus_chanmsg_gpadl_disconn cmd; 1986 struct vmbus_chanmsg_gpadl_disconn rsp; 1987 int rv; 1988 1989 memset(&cmd, 0, sizeof(cmd)); 1990 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_DISCONN; 1991 cmd.chm_chanid = ch->ch_id; 1992 cmd.chm_gpadl = handle; 1993 1994 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), 1995 cold ? HCF_NOSLEEP : HCF_SLEEPOK); 1996 if (rv) { 1997 DPRINTF("%s: GPADL_DISCONN failed with %d\n", 1998 device_xname(sc->sc_dev), rv); 1999 } 2000 } 2001 2002 static int 2003 vmbus_attach_print(void *aux, const char *name) 2004 { 2005 struct vmbus_attach_args *aa = aux; 2006 2007 if (name) 2008 printf("\"%s\" at %s", aa->aa_ident, name); 2009 2010 return UNCONF; 2011 } 2012 2013 static int 2014 vmbus_attach_icdevs(struct vmbus_softc *sc) 2015 { 2016 struct vmbus_dev *dv; 2017 struct vmbus_channel *ch; 2018 2019 SLIST_INIT(&sc->sc_icdevs); 2020 mutex_init(&sc->sc_icdev_lock, MUTEX_DEFAULT, IPL_NET); 2021 2022 TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) { 2023 if (ch->ch_state != VMBUS_CHANSTATE_OFFERED) 2024 continue; 2025 if (ch->ch_flags & CHF_MONITOR) 2026 continue; 2027 2028 dv = kmem_zalloc(sizeof(*dv), cold ? KM_NOSLEEP : KM_SLEEP); 2029 if (dv == NULL) { 2030 device_printf(sc->sc_dev, 2031 "failed to allocate ic device object\n"); 2032 return ENOMEM; 2033 } 2034 dv->dv_aa.aa_type = &ch->ch_type; 2035 dv->dv_aa.aa_inst = &ch->ch_inst; 2036 dv->dv_aa.aa_ident = ch->ch_ident; 2037 dv->dv_aa.aa_chan = ch; 2038 dv->dv_aa.aa_iot = sc->sc_iot; 2039 dv->dv_aa.aa_memt = sc->sc_memt; 2040 mutex_enter(&sc->sc_icdev_lock); 2041 SLIST_INSERT_HEAD(&sc->sc_icdevs, dv, dv_entry); 2042 mutex_exit(&sc->sc_icdev_lock); 2043 ch->ch_dev = config_found_ia(sc->sc_dev, "hypervvmbus", 2044 &dv->dv_aa, vmbus_attach_print); 2045 } 2046 return 0; 2047 } 2048 2049 static int 2050 vmbus_attach_devices(struct vmbus_softc *sc) 2051 { 2052 struct vmbus_dev *dv; 2053 struct vmbus_channel *ch; 2054 2055 SLIST_INIT(&sc->sc_devs); 2056 mutex_init(&sc->sc_dev_lock, MUTEX_DEFAULT, IPL_NET); 2057 2058 TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) { 2059 if (ch->ch_state != VMBUS_CHANSTATE_OFFERED) 2060 continue; 2061 if (!(ch->ch_flags & CHF_MONITOR)) 2062 continue; 2063 2064 dv = kmem_zalloc(sizeof(*dv), cold ? KM_NOSLEEP : KM_SLEEP); 2065 if (dv == NULL) { 2066 device_printf(sc->sc_dev, 2067 "failed to allocate device object\n"); 2068 return ENOMEM; 2069 } 2070 dv->dv_aa.aa_type = &ch->ch_type; 2071 dv->dv_aa.aa_inst = &ch->ch_inst; 2072 dv->dv_aa.aa_ident = ch->ch_ident; 2073 dv->dv_aa.aa_chan = ch; 2074 dv->dv_aa.aa_iot = sc->sc_iot; 2075 dv->dv_aa.aa_memt = sc->sc_memt; 2076 mutex_enter(&sc->sc_dev_lock); 2077 SLIST_INSERT_HEAD(&sc->sc_devs, dv, dv_entry); 2078 mutex_exit(&sc->sc_dev_lock); 2079 ch->ch_dev = config_found_ia(sc->sc_dev, "hypervvmbus", 2080 &dv->dv_aa, vmbus_attach_print); 2081 } 2082 return 0; 2083 } 2084 2085 MODULE(MODULE_CLASS_DRIVER, vmbus, "hyperv"); 2086 2087 #ifdef _MODULE 2088 #include "ioconf.c" 2089 #endif 2090 2091 static int 2092 vmbus_modcmd(modcmd_t cmd, void *aux) 2093 { 2094 int rv = 0; 2095 2096 switch (cmd) { 2097 case MODULE_CMD_INIT: 2098 #ifdef _MODULE 2099 rv = config_init_component(cfdriver_ioconf_vmbus, 2100 cfattach_ioconf_vmbus, cfdata_ioconf_vmbus); 2101 #endif 2102 break; 2103 2104 case MODULE_CMD_FINI: 2105 #ifdef _MODULE 2106 rv = config_fini_component(cfdriver_ioconf_vmbus, 2107 cfattach_ioconf_vmbus, cfdata_ioconf_vmbus); 2108 #endif 2109 break; 2110 2111 default: 2112 rv = ENOTTY; 2113 break; 2114 } 2115 2116 return rv; 2117 } 2118