/*	$NetBSD: if_hvn.c,v 1.20 2021/01/29 04:38:49 nonaka Exp $	*/
/*	$OpenBSD: if_hvn.c,v 1.39 2018/03/11 14:31:34 mikeb Exp $	*/

/*-
 * Copyright (c) 2009-2012,2016 Microsoft Corp.
 * Copyright (c) 2010-2012 Citrix Inc.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_hvn.c,v 1.20 2021/01/29 04:38:49 nonaka Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <sys/kmem.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <dev/ic/ndisreg.h>
#include <dev/ic/rndisreg.h>

#include <dev/hyperv/vmbusvar.h>
#include <dev/hyperv/if_hvnreg.h>

#ifndef EVL_PRIO_BITS
#define EVL_PRIO_BITS		13
#endif
#ifndef EVL_CFI_BITS
#define EVL_CFI_BITS		12
#endif

#define HVN_NVS_MSGSIZE		32
#define HVN_NVS_BUFSIZE		PAGE_SIZE

/*
 * RNDIS control interface
 */
#define HVN_RNDIS_CTLREQS	4
#define HVN_RNDIS_BUFSIZE	512

struct rndis_cmd {
	uint32_t		rc_id;
	struct hvn_nvs_rndis	rc_msg;
	void			*rc_req;
	bus_dmamap_t		rc_dmap;
	bus_dma_segment_t	rc_segs;
	int			rc_nsegs;
	uint64_t		rc_gpa;
	struct rndis_packet_msg	rc_cmp;
	uint32_t		rc_cmplen;
	uint8_t			rc_cmpbuf[HVN_RNDIS_BUFSIZE];
	int			rc_done;
	TAILQ_ENTRY(rndis_cmd)	rc_entry;
};
TAILQ_HEAD(rndis_queue, rndis_cmd);

#define HVN_MAXMTU		(9 * 1024)

#define HVN_RNDIS_XFER_SIZE	2048

/*
 * Tx ring
 */
#define HVN_TX_DESC		256
#define HVN_TX_FRAGS		15	/* 31 is the max */
#define HVN_TX_FRAG_SIZE	PAGE_SIZE
#define HVN_TX_PKT_SIZE		16384

#define HVN_RNDIS_PKT_LEN					\
	(sizeof(struct rndis_packet_msg) +			\
	 sizeof(struct rndis_pktinfo) + NDIS_VLAN_INFO_SIZE +	\
	 sizeof(struct rndis_pktinfo) + NDIS_TXCSUM_INFO_SIZE)
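
/*
 * Per-packet Tx state.  txd_sgl[0] always points at the pre-built RNDIS
 * packet message for this descriptor (txd_gpa/txd_req); the remaining
 * slots are filled from the DMA segments of the mbuf chain in hvn_encap().
 */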
struct hvn_tx_desc {
	uint32_t		txd_id;
	int			txd_ready;
	struct vmbus_gpa	txd_sgl[HVN_TX_FRAGS + 1];
	int			txd_nsge;
	struct mbuf		*txd_buf;
	bus_dmamap_t		txd_dmap;
	struct vmbus_gpa	txd_gpa;
	struct rndis_packet_msg	*txd_req;
};

struct hvn_softc {
	device_t		sc_dev;

	struct vmbus_softc	*sc_vmbus;
	struct vmbus_channel	*sc_chan;
	bus_dma_tag_t		sc_dmat;

	struct ethercom		sc_ec;
	struct ifmedia		sc_media;
	kmutex_t		sc_media_lock;	/* XXX */
	struct if_percpuq	*sc_ipq;
	int			sc_link_state;
	int			sc_promisc;

	uint32_t		sc_flags;
#define HVN_SCF_ATTACHED	__BIT(0)

	/* NVS protocol */
	int			sc_proto;
	uint32_t		sc_nvstid;
	uint8_t			sc_nvsrsp[HVN_NVS_MSGSIZE];
	uint8_t			*sc_nvsbuf;
	int			sc_nvsdone;

	/* RNDIS protocol */
	int			sc_ndisver;
	uint32_t		sc_rndisrid;
	struct rndis_queue	sc_cntl_sq;	/* submission queue */
	kmutex_t		sc_cntl_sqlck;
	struct rndis_queue	sc_cntl_cq;	/* completion queue */
	kmutex_t		sc_cntl_cqlck;
	struct rndis_queue	sc_cntl_fq;	/* free queue */
	kmutex_t		sc_cntl_fqlck;
	struct rndis_cmd	sc_cntl_msgs[HVN_RNDIS_CTLREQS];
	struct hvn_nvs_rndis	sc_data_msg;

	/* Rx ring */
	uint8_t			*sc_rx_ring;
	int			sc_rx_size;
	uint32_t		sc_rx_hndl;
	struct hyperv_dma	sc_rx_dma;

	/* Tx ring */
	uint32_t		sc_tx_next;
	uint32_t		sc_tx_avail;
	struct hvn_tx_desc	sc_tx_desc[HVN_TX_DESC];
	bus_dmamap_t		sc_tx_rmap;
	uint8_t			*sc_tx_msgs;
	bus_dma_segment_t	sc_tx_mseg;
};

#define SC2IFP(_sc_)	(&(_sc_)->sc_ec.ec_if)
#define IFP2SC(_ifp_)	((_ifp_)->if_softc)

static int	hvn_match(device_t, cfdata_t, void *);
static void	hvn_attach(device_t, device_t, void *);
static int	hvn_detach(device_t, int);

CFATTACH_DECL_NEW(hvn, sizeof(struct hvn_softc),
    hvn_match, hvn_attach, hvn_detach, NULL);

static int	hvn_ioctl(struct ifnet *, u_long, void *);
static int	hvn_media_change(struct ifnet *);
static void	hvn_media_status(struct ifnet *, struct ifmediareq *);
static int	hvn_iff(struct hvn_softc *);
static int	hvn_init(struct ifnet *);
static void	hvn_stop(struct ifnet *, int);
static void	hvn_start(struct ifnet *);
static int	hvn_encap(struct hvn_softc *, struct mbuf *,
		    struct hvn_tx_desc **);
static void	hvn_decap(struct hvn_softc *, struct hvn_tx_desc *);
static void	hvn_txeof(struct hvn_softc *, uint64_t);
static int	hvn_rx_ring_create(struct hvn_softc *);
static int	hvn_rx_ring_destroy(struct hvn_softc *);
static int	hvn_tx_ring_create(struct hvn_softc *);
static void	hvn_tx_ring_destroy(struct hvn_softc *);
static int	hvn_set_capabilities(struct hvn_softc *);
static int	hvn_get_lladdr(struct hvn_softc *, uint8_t *);
static void	hvn_get_link_status(struct hvn_softc *);

/* NVSP */
static int	hvn_nvs_attach(struct hvn_softc *);
static void	hvn_nvs_intr(void *);
static int	hvn_nvs_cmd(struct hvn_softc *, void *, size_t, uint64_t, int);
static int	hvn_nvs_ack(struct hvn_softc *, uint64_t);
static void	hvn_nvs_detach(struct hvn_softc *);

/* RNDIS */
static int	hvn_rndis_attach(struct hvn_softc *);
static int	hvn_rndis_cmd(struct hvn_softc *, struct rndis_cmd *, int);
static void	hvn_rndis_input(struct hvn_softc *, uint64_t, void *);
static void	hvn_rxeof(struct hvn_softc *, uint8_t *, uint32_t);
static void	hvn_rndis_complete(struct hvn_softc *, uint8_t *, uint32_t);
static int	hvn_rndis_output(struct hvn_softc *, struct hvn_tx_desc *);
static void	hvn_rndis_status(struct hvn_softc *, uint8_t *, uint32_t);
static int	hvn_rndis_query(struct hvn_softc *, uint32_t, void *, size_t *);
static int	hvn_rndis_set(struct hvn_softc *, uint32_t, void *, size_t);
static int	hvn_rndis_open(struct hvn_softc *);
static int	hvn_rndis_close(struct hvn_softc *);
static void	hvn_rndis_detach(struct hvn_softc *);

static int
hvn_match(device_t parent, cfdata_t match, void *aux)
{
	struct vmbus_attach_args *aa = aux;

	if (memcmp(aa->aa_type, &hyperv_guid_network, sizeof(*aa->aa_type)))
		return 0;
	return 1;
}

static void
hvn_attach(device_t parent, device_t self, void *aux)
{
	struct hvn_softc *sc = device_private(self);
	struct vmbus_attach_args *aa = aux;
	struct ifnet *ifp = SC2IFP(sc);
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error;

	sc->sc_dev = self;
	sc->sc_vmbus = (struct vmbus_softc *)device_private(parent);
	sc->sc_chan = aa->aa_chan;
	sc->sc_dmat = sc->sc_vmbus->sc_dmat;

	aprint_naive("\n");
	aprint_normal(": Hyper-V NetVSC\n");

	if (hvn_nvs_attach(sc)) {
		aprint_error_dev(self, "failed to init NVSP\n");
		return;
	}

	if (hvn_rx_ring_create(sc)) {
		aprint_error_dev(self, "failed to create Rx ring\n");
		goto fail1;
	}

	if (hvn_tx_ring_create(sc)) {
		aprint_error_dev(self, "failed to create Tx ring\n");
		goto fail2;
	}

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = hvn_ioctl;
	ifp->if_start = hvn_start;
	ifp->if_init = hvn_init;
	ifp->if_stop = hvn_stop;
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx;
	if (sc->sc_ndisver > NDIS_VERSION_6_30) {
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
	}
	if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
		sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
		sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
		sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
	}

	IFQ_SET_MAXLEN(&ifp->if_snd, HVN_TX_DESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures. */
	sc->sc_ec.ec_ifmedia = &sc->sc_media;
	/* XXX media locking needs revisiting */
	mutex_init(&sc->sc_media_lock, MUTEX_DEFAULT, IPL_SOFTNET);
	ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK,
	    hvn_media_change, hvn_media_status, &sc->sc_media_lock);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);

	error = if_initialize(ifp);
	if (error) {
		aprint_error_dev(self, "if_initialize failed(%d)\n", error);
		goto fail3;
	}
	sc->sc_ipq = if_percpuq_create(ifp);
	if_deferred_start_init(ifp, NULL);

	if (hvn_rndis_attach(sc)) {
		aprint_error_dev(self, "failed to init RNDIS\n");
		goto fail3;
	}

	aprint_normal_dev(self, "NVS %d.%d NDIS %d.%d\n",
	    sc->sc_proto >> 16, sc->sc_proto & 0xffff,
	    sc->sc_ndisver >> 16, sc->sc_ndisver & 0xffff);

	if (hvn_set_capabilities(sc)) {
		aprint_error_dev(self, "failed to setup offloading\n");
		goto fail4;
	}

	if (hvn_get_lladdr(sc, enaddr)) {
		aprint_error_dev(self,
		    "failed to obtain an ethernet address\n");
		goto fail4;
	}
	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));

	ether_ifattach(ifp, enaddr);
	if_register(ifp);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	SET(sc->sc_flags, HVN_SCF_ATTACHED);
	return;

fail4:	hvn_rndis_detach(sc);
	if_percpuq_destroy(sc->sc_ipq);
fail3:	ifmedia_fini(&sc->sc_media);
	mutex_destroy(&sc->sc_media_lock);
	hvn_tx_ring_destroy(sc);
fail2:	hvn_rx_ring_destroy(sc);
fail1:	hvn_nvs_detach(sc);
}

static int
hvn_detach(device_t self, int flags)
{
	struct hvn_softc *sc = device_private(self);
	struct ifnet *ifp = SC2IFP(sc);

	if (!ISSET(sc->sc_flags, HVN_SCF_ATTACHED))
		return 0;

	if (ifp->if_flags & IFF_RUNNING)
		hvn_stop(ifp, 1);

	pmf_device_deregister(self);

	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_fini(&sc->sc_media);
	mutex_destroy(&sc->sc_media_lock);
	if_percpuq_destroy(sc->sc_ipq);

	hvn_rndis_detach(sc);
	hvn_rx_ring_destroy(sc);
	hvn_tx_ring_destroy(sc);
	hvn_nvs_detach(sc);

	return 0;
}

static int
hvn_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct hvn_softc *sc = IFP2SC(ifp);
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, command, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			hvn_iff(sc);
		error = 0;
	}

	splx(s);

	return error;
}

static int
hvn_media_change(struct ifnet *ifp)
{

	return 0;
}

static void
hvn_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hvn_softc *sc = IFP2SC(ifp);
	int link_state;

	link_state = sc->sc_link_state;
	hvn_get_link_status(sc);
	if (link_state != sc->sc_link_state)
		if_link_state_change(ifp, sc->sc_link_state);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
	if (sc->sc_link_state == LINK_STATE_UP)
		ifmr->ifm_status |= IFM_ACTIVE;
}

static int
hvn_iff(struct hvn_softc *sc)
{

	/* XXX */
	sc->sc_promisc = 0;

	return 0;
}

static int
hvn_init(struct ifnet *ifp)
{
	struct hvn_softc *sc = IFP2SC(ifp);
	int error;
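
	/*
	 * Bring the interface to a known state: close the RNDIS filter,
	 * recompute the RX filter state and then reopen the data path.
	 */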
	hvn_stop(ifp, 0);

	error = hvn_iff(sc);
	if (error)
		return error;

	error = hvn_rndis_open(sc);
	if (error == 0) {
		ifp->if_flags |= IFF_RUNNING;
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	return error;
}

static void
hvn_stop(struct ifnet *ifp, int disable)
{
	struct hvn_softc *sc = IFP2SC(ifp);

	hvn_rndis_close(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

static void
hvn_start(struct ifnet *ifp)
{
	struct hvn_softc *sc = IFP2SC(ifp);
	struct hvn_tx_desc *txd;
	struct mbuf *m;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		if (!sc->sc_tx_avail) {
			/* transient */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (hvn_encap(sc, m, &txd)) {
			/* the chain is too large */
			if_statinc(ifp, if_oerrors);
			m_freem(m);
			continue;
		}

		bpf_mtap(ifp, m, BPF_D_OUT);

		if (hvn_rndis_output(sc, txd)) {
			hvn_decap(sc, txd);
			if_statinc(ifp, if_oerrors);
			m_freem(m);
			continue;
		}

		sc->sc_tx_next++;
	}
}

static inline char *
hvn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize,
    size_t datalen, uint32_t type)
{
	struct rndis_pktinfo *pi;
	size_t pi_size = sizeof(*pi) + datalen;
	char *cp;

	KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <=
	    pktsize);

	cp = (char *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
	pi = (struct rndis_pktinfo *)cp;
	pi->rm_size = pi_size;
	pi->rm_type = type;
	pi->rm_pktinfooffset = sizeof(*pi);
	pkt->rm_pktinfolen += pi_size;
	pkt->rm_dataoffset += pi_size;
	pkt->rm_len += pi_size;

	return (char *)pi->rm_data;
}

static int
hvn_encap(struct hvn_softc *sc, struct mbuf *m, struct hvn_tx_desc **txd0)
{
	struct hvn_tx_desc *txd;
	struct rndis_packet_msg *pkt;
	bus_dma_segment_t *seg;
	size_t pktlen;
	int i, rv;

	do {
		txd = &sc->sc_tx_desc[sc->sc_tx_next % HVN_TX_DESC];
		sc->sc_tx_next++;
	} while (!txd->txd_ready);
	txd->txd_ready = 0;

	pkt = txd->txd_req;
	memset(pkt, 0, HVN_RNDIS_PKT_LEN);
	pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
	pkt->rm_len = sizeof(*pkt) + m->m_pkthdr.len;
	pkt->rm_dataoffset = RNDIS_DATA_OFFSET;
	pkt->rm_datalen = m->m_pkthdr.len;
	pkt->rm_pktinfooffset = sizeof(*pkt);	/* adjusted below */
	pkt->rm_pktinfolen = 0;

	rv = bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m, BUS_DMA_READ |
	    BUS_DMA_NOWAIT);
	switch (rv) {
	case 0:
		break;
	case EFBIG:
		if (m_defrag(m, M_NOWAIT) != NULL &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m,
		      BUS_DMA_READ | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		DPRINTF("%s: failed to load mbuf\n", device_xname(sc->sc_dev));
		return -1;
	}
	txd->txd_buf = m;

	if (vlan_has_tag(m)) {
		uint32_t vlan;
		char *cp;
		uint16_t tag;

		tag = vlan_get_tag(m);
		vlan = NDIS_VLAN_INFO_MAKE(EVL_VLANOFTAG(tag),
		    EVL_PRIOFTAG(tag), EVL_CFIOFTAG(tag));
		cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
		    NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
		memcpy(cp, &vlan, NDIS_VLAN_INFO_SIZE);
	}
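
	/*
	 * If the stack asked for checksum offload, append a CSUM pktinfo
	 * so that the host fills in the IPv4/TCP/UDP checksums.
	 */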
	if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_UDPv4 |
	    M_CSUM_TCPv4)) {
		uint32_t csum = NDIS_TXCSUM_INFO_IPV4;
		char *cp;

		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csum |= NDIS_TXCSUM_INFO_IPCS;
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			csum |= NDIS_TXCSUM_INFO_TCPCS;
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			csum |= NDIS_TXCSUM_INFO_UDPCS;
		cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
		    NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
		memcpy(cp, &csum, NDIS_TXCSUM_INFO_SIZE);
	}

	pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
	pkt->rm_pktinfooffset -= RNDIS_HEADER_OFFSET;

	/* Attach an RNDIS message to the first slot */
	txd->txd_sgl[0].gpa_page = txd->txd_gpa.gpa_page;
	txd->txd_sgl[0].gpa_ofs = txd->txd_gpa.gpa_ofs;
	txd->txd_sgl[0].gpa_len = pktlen;
	txd->txd_nsge = txd->txd_dmap->dm_nsegs + 1;

	for (i = 0; i < txd->txd_dmap->dm_nsegs; i++) {
		seg = &txd->txd_dmap->dm_segs[i];
		txd->txd_sgl[1 + i].gpa_page = atop(seg->ds_addr);
		txd->txd_sgl[1 + i].gpa_ofs = seg->ds_addr & PAGE_MASK;
		txd->txd_sgl[1 + i].gpa_len = seg->ds_len;
	}

	*txd0 = txd;

	atomic_dec_uint(&sc->sc_tx_avail);

	return 0;
}

static void
hvn_decap(struct hvn_softc *sc, struct hvn_tx_desc *txd)
{
	struct ifnet *ifp = SC2IFP(sc);

	bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
	    0, txd->txd_dmap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
	txd->txd_buf = NULL;
	txd->txd_nsge = 0;
	txd->txd_ready = 1;
	atomic_inc_uint(&sc->sc_tx_avail);
	ifp->if_flags &= ~IFF_OACTIVE;
}

static void
hvn_txeof(struct hvn_softc *sc, uint64_t tid)
{
	struct ifnet *ifp = SC2IFP(sc);
	struct hvn_tx_desc *txd;
	struct mbuf *m;
	uint32_t id = tid >> 32;

	if ((tid & 0xffffffffU) != 0)
		return;

	id -= HVN_NVS_CHIM_SIG;
	if (id >= HVN_TX_DESC) {
		device_printf(sc->sc_dev, "tx packet index too large: %u", id);
		return;
	}

	txd = &sc->sc_tx_desc[id];

	if ((m = txd->txd_buf) == NULL) {
		device_printf(sc->sc_dev, "no mbuf @%u\n", id);
		return;
	}
	txd->txd_buf = NULL;

	bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
	    0, txd->txd_dmap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
	m_freem(m);
	if_statinc(ifp, if_opackets);

	txd->txd_ready = 1;

	atomic_inc_uint(&sc->sc_tx_avail);
	ifp->if_flags &= ~IFF_OACTIVE;
}

static int
hvn_rx_ring_create(struct hvn_softc *sc)
{
	struct hvn_nvs_rxbuf_conn cmd;
	struct hvn_nvs_rxbuf_conn_resp *rsp;
	uint64_t tid;

	if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_2)
		sc->sc_rx_size = 15 * 1024 * 1024;	/* 15MB */
	else
		sc->sc_rx_size = 16 * 1024 * 1024;	/* 16MB */
	sc->sc_rx_ring = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_rx_dma,
	    sc->sc_rx_size, PAGE_SIZE, PAGE_SIZE, sc->sc_rx_size / PAGE_SIZE,
	    HYPERV_DMA_SLEEPOK);
	if (sc->sc_rx_ring == NULL) {
		DPRINTF("%s: failed to allocate Rx ring buffer\n",
		    device_xname(sc->sc_dev));
		return -1;
	}
	if (vmbus_handle_alloc(sc->sc_chan, &sc->sc_rx_dma, sc->sc_rx_size,
	    &sc->sc_rx_hndl)) {
		DPRINTF("%s: failed to obtain a PA handle\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvs_type = HVN_NVS_TYPE_RXBUF_CONN;
	cmd.nvs_gpadl = sc->sc_rx_hndl;
	cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
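
	/*
	 * Connect the receive buffer GPADL to the channel; the RXBUF_CONN
	 * response is copied into sc_nvsrsp by hvn_nvs_intr().
	 */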
	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
	if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
		goto errout;

	rsp = (struct hvn_nvs_rxbuf_conn_resp *)&sc->sc_nvsrsp;
	if (rsp->nvs_status != HVN_NVS_STATUS_OK) {
		DPRINTF("%s: failed to set up the Rx ring\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}
	if (rsp->nvs_nsect > 1) {
		DPRINTF("%s: invalid number of Rx ring sections: %u\n",
		    device_xname(sc->sc_dev), rsp->nvs_nsect);
		hvn_rx_ring_destroy(sc);
		return -1;
	}
	return 0;

errout:
	if (sc->sc_rx_hndl) {
		vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
		sc->sc_rx_hndl = 0;
	}
	if (sc->sc_rx_ring) {
		hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
		sc->sc_rx_ring = NULL;
	}
	return -1;
}

static int
hvn_rx_ring_destroy(struct hvn_softc *sc)
{
	struct hvn_nvs_rxbuf_disconn cmd;
	uint64_t tid;

	if (sc->sc_rx_ring == NULL)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvs_type = HVN_NVS_TYPE_RXBUF_DISCONN;
	cmd.nvs_sig = HVN_NVS_RXBUF_SIG;

	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
	if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 0))
		return -1;

	delay(100);

	vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
	sc->sc_rx_hndl = 0;

	hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
	sc->sc_rx_ring = NULL;

	return 0;
}

static int
hvn_tx_ring_create(struct hvn_softc *sc)
{
	const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
	struct hvn_tx_desc *txd;
	bus_dma_segment_t *seg;
	size_t msgsize;
	int i, rsegs;
	paddr_t pa;

	msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);

	/* Allocate memory to store RNDIS messages */
	if (bus_dmamem_alloc(sc->sc_dmat, msgsize * HVN_TX_DESC, PAGE_SIZE, 0,
	    &sc->sc_tx_mseg, 1, &rsegs, dmaflags)) {
		DPRINTF("%s: failed to allocate memory for RNDIS messages\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_mseg, 1, msgsize *
	    HVN_TX_DESC, (void **)&sc->sc_tx_msgs, dmaflags)) {
		DPRINTF("%s: failed to establish mapping for RNDIS messages\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}
	memset(sc->sc_tx_msgs, 0, msgsize * HVN_TX_DESC);
	if (bus_dmamap_create(sc->sc_dmat, msgsize * HVN_TX_DESC, 1,
	    msgsize * HVN_TX_DESC, 0, dmaflags, &sc->sc_tx_rmap)) {
		DPRINTF("%s: failed to create map for RNDIS messages\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_msgs,
	    msgsize * HVN_TX_DESC, NULL, dmaflags)) {
		DPRINTF("%s: failed to load map for RNDIS messages\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}

	for (i = 0; i < HVN_TX_DESC; i++) {
		txd = &sc->sc_tx_desc[i];
		if (bus_dmamap_create(sc->sc_dmat, HVN_TX_PKT_SIZE,
		    HVN_TX_FRAGS, HVN_TX_FRAG_SIZE, PAGE_SIZE, dmaflags,
		    &txd->txd_dmap)) {
			DPRINTF("%s: failed to create map for TX descriptors\n",
			    device_xname(sc->sc_dev));
			goto errout;
		}
		seg = &sc->sc_tx_rmap->dm_segs[0];
		pa = seg->ds_addr + (msgsize * i);
		txd->txd_gpa.gpa_page = atop(pa);
		txd->txd_gpa.gpa_ofs = pa & PAGE_MASK;
		txd->txd_gpa.gpa_len = msgsize;
		txd->txd_req = (void *)(sc->sc_tx_msgs + (msgsize * i));
		txd->txd_id = i + HVN_NVS_CHIM_SIG;
		txd->txd_ready = 1;
	}
	sc->sc_tx_avail = HVN_TX_DESC;

	return 0;

errout:
	hvn_tx_ring_destroy(sc);
	return -1;
}
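
/*
 * Tear down the per-descriptor DMA maps, free any mbufs still attached
 * and release the shared RNDIS message area.
 */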
static void
hvn_tx_ring_destroy(struct hvn_softc *sc)
{
	struct hvn_tx_desc *txd;
	int i;

	for (i = 0; i < HVN_TX_DESC; i++) {
		txd = &sc->sc_tx_desc[i];
		if (txd->txd_dmap == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
		    0, txd->txd_dmap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
		bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmap);
		txd->txd_dmap = NULL;
		if (txd->txd_buf == NULL)
			continue;
		m_freem(txd->txd_buf);
		txd->txd_buf = NULL;
	}
	if (sc->sc_tx_rmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap,
		    0, sc->sc_tx_rmap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
		sc->sc_tx_rmap = NULL;
	}
	if (sc->sc_tx_msgs != NULL) {
		size_t msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);

		bus_dmamem_unmap(sc->sc_dmat, sc->sc_tx_msgs,
		    msgsize * HVN_TX_DESC);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_mseg, 1);
		sc->sc_tx_msgs = NULL;
	}
}

static int
hvn_get_lladdr(struct hvn_softc *sc, uint8_t *enaddr)
{
	size_t addrlen = ETHER_ADDR_LEN;
	int rv;

	rv = hvn_rndis_query(sc, OID_802_3_PERMANENT_ADDRESS, enaddr, &addrlen);
	if (rv == 0 && addrlen != ETHER_ADDR_LEN)
		rv = -1;
	return rv;
}

static void
hvn_get_link_status(struct hvn_softc *sc)
{
	uint32_t state;
	size_t len = sizeof(state);

	if (hvn_rndis_query(sc, OID_GEN_MEDIA_CONNECT_STATUS,
	    &state, &len) == 0)
		sc->sc_link_state = (state == NDIS_MEDIA_STATE_CONNECTED) ?
		    LINK_STATE_UP : LINK_STATE_DOWN;
}

static int
hvn_nvs_attach(struct hvn_softc *sc)
{
	static const uint32_t protos[] = {
		HVN_NVS_PROTO_VERSION_5,
		HVN_NVS_PROTO_VERSION_4,
		HVN_NVS_PROTO_VERSION_2,
		HVN_NVS_PROTO_VERSION_1
	};
	struct hvn_nvs_init cmd;
	struct hvn_nvs_init_resp *rsp;
	struct hvn_nvs_ndis_init ncmd;
	struct hvn_nvs_ndis_conf ccmd;
	uint32_t ndisver, ringsize;
	uint64_t tid;
	int i;

	sc->sc_nvsbuf = kmem_zalloc(HVN_NVS_BUFSIZE, KM_SLEEP);

	/* We need to be able to fit all RNDIS control and data messages */
	ringsize = HVN_RNDIS_CTLREQS *
	    (sizeof(struct hvn_nvs_rndis) + sizeof(struct vmbus_gpa)) +
	    HVN_TX_DESC * (sizeof(struct hvn_nvs_rndis) +
	    (HVN_TX_FRAGS + 1) * sizeof(struct vmbus_gpa));

	sc->sc_chan->ch_flags &= ~CHF_BATCHED;

	/* Associate our interrupt handler with the channel */
	if (vmbus_channel_open(sc->sc_chan, ringsize, NULL, 0,
	    hvn_nvs_intr, sc)) {
		DPRINTF("%s: failed to open channel\n",
		    device_xname(sc->sc_dev));
		kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
		return -1;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvs_type = HVN_NVS_TYPE_INIT;
	for (i = 0; i < __arraycount(protos); i++) {
		cmd.nvs_ver_min = cmd.nvs_ver_max = protos[i];
		tid = atomic_inc_uint_nv(&sc->sc_nvstid);
		if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
			return -1;

		rsp = (struct hvn_nvs_init_resp *)&sc->sc_nvsrsp;
		if (rsp->nvs_status == HVN_NVS_STATUS_OK) {
			sc->sc_proto = protos[i];
			break;
		}
	}
	if (i == __arraycount(protos)) {
		DPRINTF("%s: failed to negotiate NVSP version\n",
		    device_xname(sc->sc_dev));
		return -1;
	}

	if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
		memset(&ccmd, 0, sizeof(ccmd));
		ccmd.nvs_type = HVN_NVS_TYPE_NDIS_CONF;
		ccmd.nvs_mtu = HVN_MAXMTU;
		ccmd.nvs_caps = HVN_NVS_NDIS_CONF_VLAN;

		tid = atomic_inc_uint_nv(&sc->sc_nvstid);
		if (hvn_nvs_cmd(sc, &ccmd, sizeof(ccmd), tid, 100))
			return -1;
	}

	memset(&ncmd, 0, sizeof(ncmd));
	ncmd.nvs_type = HVN_NVS_TYPE_NDIS_INIT;
	if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_4)
		ndisver = NDIS_VERSION_6_1;
	else
		ndisver = NDIS_VERSION_6_30;
	ncmd.nvs_ndis_major = (ndisver & 0xffff0000) >> 16;
	ncmd.nvs_ndis_minor = ndisver & 0x0000ffff;

	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
	if (hvn_nvs_cmd(sc, &ncmd, sizeof(ncmd), tid, 100))
		return -1;

	sc->sc_ndisver = ndisver;

	return 0;
}

static void
hvn_nvs_intr(void *arg)
{
	struct hvn_softc *sc = arg;
	struct ifnet *ifp = SC2IFP(sc);
	struct vmbus_chanpkt_hdr *cph;
	const struct hvn_nvs_hdr *nvs;
	uint64_t rid;
	uint32_t rlen;
	int rv;
	bool dotx = false;

	for (;;) {
		rv = vmbus_channel_recv(sc->sc_chan, sc->sc_nvsbuf,
		    HVN_NVS_BUFSIZE, &rlen, &rid, 1);
		if (rv != 0 || rlen == 0) {
			if (rv != EAGAIN)
				device_printf(sc->sc_dev,
				    "failed to receive an NVSP packet\n");
			break;
		}
		cph = (struct vmbus_chanpkt_hdr *)sc->sc_nvsbuf;
		nvs = (const struct hvn_nvs_hdr *)VMBUS_CHANPKT_CONST_DATA(cph);

		if (cph->cph_type == VMBUS_CHANPKT_TYPE_COMP) {
			switch (nvs->nvs_type) {
			case HVN_NVS_TYPE_INIT_RESP:
			case HVN_NVS_TYPE_RXBUF_CONNRESP:
			case HVN_NVS_TYPE_CHIM_CONNRESP:
			case HVN_NVS_TYPE_SUBCH_RESP:
				/* copy the response back */
				memcpy(&sc->sc_nvsrsp, nvs, HVN_NVS_MSGSIZE);
				sc->sc_nvsdone = 1;
				wakeup(&sc->sc_nvsrsp);
				break;
			case HVN_NVS_TYPE_RNDIS_ACK:
				dotx = true;
				hvn_txeof(sc, cph->cph_tid);
				break;
			default:
				device_printf(sc->sc_dev,
				    "unhandled NVSP packet type %u "
				    "on completion\n", nvs->nvs_type);
				break;
			}
		} else if (cph->cph_type == VMBUS_CHANPKT_TYPE_RXBUF) {
			switch (nvs->nvs_type) {
			case HVN_NVS_TYPE_RNDIS:
				hvn_rndis_input(sc, cph->cph_tid, cph);
				break;
			default:
				device_printf(sc->sc_dev,
				    "unhandled NVSP packet type %u "
				    "on receive\n", nvs->nvs_type);
				break;
			}
		} else if (cph->cph_type == VMBUS_CHANPKT_TYPE_INBAND) {
			switch (nvs->nvs_type) {
			case HVN_NVS_TYPE_TXTBL_NOTE:
				/* Useless; ignore */
				break;
			default:
				device_printf(sc->sc_dev,
				    "got notify, nvs type %u\n", nvs->nvs_type);
				break;
			}
		} else
			device_printf(sc->sc_dev,
			    "unknown NVSP packet type %u\n", cph->cph_type);
	}

	if (dotx)
		if_schedule_deferred_start(ifp);
}

static int
hvn_nvs_cmd(struct hvn_softc *sc, void *cmd, size_t cmdsize, uint64_t tid,
    int timo)
{
	struct hvn_nvs_hdr *hdr = cmd;
	int tries = 10;
	int rv, s;

	sc->sc_nvsdone = 0;

	do {
		rv = vmbus_channel_send(sc->sc_chan, cmd, cmdsize,
		    tid, VMBUS_CHANPKT_TYPE_INBAND,
		    timo ? VMBUS_CHANPKT_FLAG_RC : 0);
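		/*
		 * Retry for a while if the channel cannot take the
		 * packet right now (EAGAIN).
		 */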
		if (rv == EAGAIN) {
			if (cold)
				delay(1000);
			else
				tsleep(cmd, PRIBIO, "nvsout",
				    uimax(1, mstohz(1)));
		} else if (rv) {
			DPRINTF("%s: NVSP operation %u send error %d\n",
			    device_xname(sc->sc_dev), hdr->nvs_type, rv);
			return rv;
		}
	} while (rv != 0 && --tries > 0);

	if (tries == 0 && rv != 0) {
		device_printf(sc->sc_dev,
		    "NVSP operation %u send error %d\n", hdr->nvs_type, rv);
		return rv;
	}

	if (timo == 0)
		return 0;

	do {
		if (cold) {
			delay(1000);
			s = splnet();
			hvn_nvs_intr(sc);
			splx(s);
		} else
			tsleep(sc->sc_nvsrsp, PRIBIO | PCATCH, "nvscmd",
			    uimax(1, mstohz(1)));
	} while (--timo > 0 && sc->sc_nvsdone != 1);

	if (timo == 0 && sc->sc_nvsdone != 1) {
		device_printf(sc->sc_dev, "NVSP operation %u timed out\n",
		    hdr->nvs_type);
		return ETIMEDOUT;
	}
	return 0;
}

static int
hvn_nvs_ack(struct hvn_softc *sc, uint64_t tid)
{
	struct hvn_nvs_rndis_ack cmd;
	int tries = 5;
	int rv;

	cmd.nvs_type = HVN_NVS_TYPE_RNDIS_ACK;
	cmd.nvs_status = HVN_NVS_STATUS_OK;
	do {
		rv = vmbus_channel_send(sc->sc_chan, &cmd, sizeof(cmd),
		    tid, VMBUS_CHANPKT_TYPE_COMP, 0);
		if (rv == EAGAIN)
			delay(10);
		else if (rv) {
			DPRINTF("%s: NVSP acknowledgement error %d\n",
			    device_xname(sc->sc_dev), rv);
			return rv;
		}
	} while (rv != 0 && --tries > 0);
	return rv;
}

static void
hvn_nvs_detach(struct hvn_softc *sc)
{

	if (vmbus_channel_close(sc->sc_chan) == 0) {
		kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
		sc->sc_nvsbuf = NULL;
	}
}

static inline struct rndis_cmd *
hvn_alloc_cmd(struct hvn_softc *sc)
{
	struct rndis_cmd *rc;

	mutex_enter(&sc->sc_cntl_fqlck);
	while ((rc = TAILQ_FIRST(&sc->sc_cntl_fq)) == NULL)
		/* XXX use condvar(9) instead of mtsleep */
		mtsleep(&sc->sc_cntl_fq, PRIBIO, "nvsalloc", 1,
		    &sc->sc_cntl_fqlck);
	TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_fqlck);
	return rc;
}

static inline void
hvn_submit_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{

	mutex_enter(&sc->sc_cntl_sqlck);
	TAILQ_INSERT_TAIL(&sc->sc_cntl_sq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_sqlck);
}

static inline struct rndis_cmd *
hvn_complete_cmd(struct hvn_softc *sc, uint32_t id)
{
	struct rndis_cmd *rc;

	mutex_enter(&sc->sc_cntl_sqlck);
	TAILQ_FOREACH(rc, &sc->sc_cntl_sq, rc_entry) {
		if (rc->rc_id == id) {
			TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
			break;
		}
	}
	mutex_exit(&sc->sc_cntl_sqlck);
	if (rc != NULL) {
		mutex_enter(&sc->sc_cntl_cqlck);
		TAILQ_INSERT_TAIL(&sc->sc_cntl_cq, rc, rc_entry);
		mutex_exit(&sc->sc_cntl_cqlck);
	}
	return rc;
}

static inline void
hvn_release_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{

	mutex_enter(&sc->sc_cntl_cqlck);
	TAILQ_REMOVE(&sc->sc_cntl_cq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_cqlck);
}

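/*
 * Take a request back off the submission queue when no completion is
 * expected anymore.  Returns 0 if the request was still queued and -1 if
 * it had already been claimed by hvn_complete_cmd().
 */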
static inline int
hvn_rollback_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{
	struct rndis_cmd *rn;

	mutex_enter(&sc->sc_cntl_sqlck);
	TAILQ_FOREACH(rn, &sc->sc_cntl_sq, rc_entry) {
		if (rn == rc) {
			TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
			mutex_exit(&sc->sc_cntl_sqlck);
			return 0;
		}
	}
	mutex_exit(&sc->sc_cntl_sqlck);
	return -1;
}

static inline void
hvn_free_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{

	memset(rc->rc_req, 0, sizeof(struct rndis_packet_msg));
	memset(&rc->rc_cmp, 0, sizeof(rc->rc_cmp));
	memset(&rc->rc_msg, 0, sizeof(rc->rc_msg));
	mutex_enter(&sc->sc_cntl_fqlck);
	TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_fqlck);
	wakeup(&sc->sc_cntl_fq);
}

static int
hvn_rndis_attach(struct hvn_softc *sc)
{
	const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
	struct rndis_init_req *req;
	struct rndis_init_comp *cmp;
	struct rndis_cmd *rc;
	int i, rv;

	/* RNDIS control message queues */
	TAILQ_INIT(&sc->sc_cntl_sq);
	TAILQ_INIT(&sc->sc_cntl_cq);
	TAILQ_INIT(&sc->sc_cntl_fq);
	mutex_init(&sc->sc_cntl_sqlck, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_cntl_cqlck, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_cntl_fqlck, MUTEX_DEFAULT, IPL_NET);

	for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
		rc = &sc->sc_cntl_msgs[i];
		if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
		    dmaflags, &rc->rc_dmap)) {
			DPRINTF("%s: failed to create RNDIS command map\n",
			    device_xname(sc->sc_dev));
			goto errout;
		}
		if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
		    0, &rc->rc_segs, 1, &rc->rc_nsegs, dmaflags)) {
			DPRINTF("%s: failed to allocate RNDIS command\n",
			    device_xname(sc->sc_dev));
			bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
			goto errout;
		}
		if (bus_dmamem_map(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs,
		    PAGE_SIZE, (void **)&rc->rc_req, dmaflags)) {
			DPRINTF("%s: failed to map RNDIS command\n",
			    device_xname(sc->sc_dev));
			bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
			    rc->rc_nsegs);
			bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
			goto errout;
		}
		memset(rc->rc_req, 0, PAGE_SIZE);
		if (bus_dmamap_load(sc->sc_dmat, rc->rc_dmap, rc->rc_req,
		    PAGE_SIZE, NULL, dmaflags)) {
			DPRINTF("%s: failed to load RNDIS command map\n",
			    device_xname(sc->sc_dev));
			bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
			    rc->rc_nsegs);
			bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
			goto errout;
		}
		rc->rc_gpa = atop(rc->rc_dmap->dm_segs[0].ds_addr);
		TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
	}

	rc = hvn_alloc_cmd(sc);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);

	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);

	req = rc->rc_req;
	req->rm_type = REMOTE_NDIS_INITIALIZE_MSG;
	req->rm_len = sizeof(*req);
	req->rm_rid = rc->rc_id;
	req->rm_ver_major = RNDIS_VERSION_MAJOR;
	req->rm_ver_minor = RNDIS_VERSION_MINOR;
	req->rm_max_xfersz = HVN_RNDIS_XFER_SIZE;

	rc->rc_cmplen = sizeof(*cmp);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREWRITE);

	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
		DPRINTF("%s: INITIALIZE_MSG failed, error %d\n",
		    device_xname(sc->sc_dev), rv);
		hvn_free_cmd(sc, rc);
		goto errout;
	}
	cmp = (struct rndis_init_comp *)&rc->rc_cmp;
	if (cmp->rm_status != RNDIS_STATUS_SUCCESS) {
		DPRINTF("%s: failed to init RNDIS, error %#x\n",
		    device_xname(sc->sc_dev), cmp->rm_status);
		hvn_free_cmd(sc, rc);
		goto errout;
	}

	hvn_free_cmd(sc, rc);

	/* Initialize RNDIS Data command */
	memset(&sc->sc_data_msg, 0, sizeof(sc->sc_data_msg));
	sc->sc_data_msg.nvs_type = HVN_NVS_TYPE_RNDIS;
	sc->sc_data_msg.nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_DATA;
	sc->sc_data_msg.nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;

	return 0;

errout:
	for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
		rc = &sc->sc_cntl_msgs[i];
		if (rc->rc_req == NULL)
			continue;
		TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
		bus_dmamem_free(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs);
		rc->rc_req = NULL;
		bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
	}
	return -1;
}

static int
hvn_set_capabilities(struct hvn_softc *sc)
{
	struct ndis_offload_params params;
	size_t len = sizeof(params);

	memset(&params, 0, sizeof(params));

	params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
	if (sc->sc_ndisver < NDIS_VERSION_6_30) {
		params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
		len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
	} else {
		params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
		len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
	}

	params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TXRX;
	params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TXRX;
	params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TXRX;
	if (sc->sc_ndisver >= NDIS_VERSION_6_30) {
		params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TXRX;
		params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TXRX;
	}

	return hvn_rndis_set(sc, OID_TCP_OFFLOAD_PARAMETERS, &params, len);
}

static int
hvn_rndis_cmd(struct hvn_softc *sc, struct rndis_cmd *rc, int timo)
{
	struct hvn_nvs_rndis *msg = &rc->rc_msg;
	struct rndis_msghdr *hdr = rc->rc_req;
	struct vmbus_gpa sgl[1];
	int tries = 10;
	int rv, s;

	KASSERT(timo > 0);

	msg->nvs_type = HVN_NVS_TYPE_RNDIS;
	msg->nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_CTRL;
	msg->nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;

	sgl[0].gpa_page = rc->rc_gpa;
	sgl[0].gpa_len = hdr->rm_len;
	sgl[0].gpa_ofs = 0;

	rc->rc_done = 0;

	hvn_submit_cmd(sc, rc);

	do {
		rv = vmbus_channel_send_sgl(sc->sc_chan, sgl, 1, &rc->rc_msg,
		    sizeof(*msg), rc->rc_id);
		if (rv == EAGAIN) {
			if (cold)
				delay(1000);
			else
				tsleep(rc, PRIBIO, "rndisout",
				    uimax(1, mstohz(1)));
		} else if (rv) {
			DPRINTF("%s: RNDIS operation %u send error %d\n",
			    device_xname(sc->sc_dev), hdr->rm_type, rv);
			hvn_rollback_cmd(sc, rc);
			return rv;
		}
	} while (rv != 0 && --tries > 0);

	if (tries == 0 && rv != 0) {
		device_printf(sc->sc_dev,
		    "RNDIS operation %u send error %d\n", hdr->rm_type, rv);
		return rv;
	}
	if (vmbus_channel_is_revoked(sc->sc_chan)) {
		/* No response */
		return 0;
	}

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_POSTWRITE);

	do {
		if (cold) {
			delay(1000);
			s = splnet();
			hvn_nvs_intr(sc);
			splx(s);
		} else
			tsleep(rc, PRIBIO | PCATCH, "rndiscmd",
			    uimax(1, mstohz(1)));
	} while (--timo > 0 && rc->rc_done != 1);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_POSTREAD);

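	/*
	 * No completion was recorded: try to take the request back off the
	 * submission queue.  If it is already gone, the completion raced
	 * with the timeout and the command is treated as successful.
	 */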
	if (rc->rc_done != 1) {
		rv = timo == 0 ? ETIMEDOUT : EINTR;
		if (hvn_rollback_cmd(sc, rc)) {
			hvn_release_cmd(sc, rc);
			rv = 0;
		} else if (rv == ETIMEDOUT) {
			device_printf(sc->sc_dev,
			    "RNDIS operation %u timed out\n", hdr->rm_type);
		}
		return rv;
	}

	hvn_release_cmd(sc, rc);
	return 0;
}

static void
hvn_rndis_input(struct hvn_softc *sc, uint64_t tid, void *arg)
{
	struct vmbus_chanpkt_prplist *cp = arg;
	uint32_t off, len, type;
	int i;

	if (sc->sc_rx_ring == NULL) {
		DPRINTF("%s: invalid rx ring\n", device_xname(sc->sc_dev));
		return;
	}

	for (i = 0; i < cp->cp_range_cnt; i++) {
		off = cp->cp_range[i].gpa_ofs;
		len = cp->cp_range[i].gpa_len;

		KASSERT(off + len <= sc->sc_rx_size);
		KASSERT(len >= RNDIS_HEADER_OFFSET + 4);

		memcpy(&type, sc->sc_rx_ring + off, sizeof(type));
		switch (type) {
		/* data message */
		case REMOTE_NDIS_PACKET_MSG:
			hvn_rxeof(sc, sc->sc_rx_ring + off, len);
			break;
		/* completion messages */
		case REMOTE_NDIS_INITIALIZE_CMPLT:
		case REMOTE_NDIS_QUERY_CMPLT:
		case REMOTE_NDIS_SET_CMPLT:
		case REMOTE_NDIS_RESET_CMPLT:
		case REMOTE_NDIS_KEEPALIVE_CMPLT:
			hvn_rndis_complete(sc, sc->sc_rx_ring + off, len);
			break;
		/* notification message */
		case REMOTE_NDIS_INDICATE_STATUS_MSG:
			hvn_rndis_status(sc, sc->sc_rx_ring + off, len);
			break;
		default:
			device_printf(sc->sc_dev,
			    "unhandled RNDIS message type %u\n", type);
			break;
		}
	}

	hvn_nvs_ack(sc, tid);
}

static inline struct mbuf *
hvn_devget(struct hvn_softc *sc, void *buf, uint32_t len)
{
	struct ifnet *ifp = SC2IFP(sc);
	struct mbuf *m;
	size_t size = len + ETHER_ALIGN;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return NULL;

	if (size > MHLEN) {
		if (size <= MCLBYTES)
			MCLGET(m, M_NOWAIT);
		else
			MEXTMALLOC(m, size, M_NOWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}

	m->m_len = m->m_pkthdr.len = size;
	m_adj(m, ETHER_ALIGN);
	m_copyback(m, 0, len, buf);
	m_set_rcvif(m, ifp);
	return m;
}

static void
hvn_rxeof(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
{
	struct ifnet *ifp = SC2IFP(sc);
	struct rndis_packet_msg *pkt;
	struct rndis_pktinfo *pi;
	uint32_t csum, vlan;
	struct mbuf *m;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (len < sizeof(*pkt)) {
		device_printf(sc->sc_dev, "data packet too short: %u\n",
		    len);
		return;
	}

	pkt = (struct rndis_packet_msg *)buf;
	if (pkt->rm_dataoffset + pkt->rm_datalen > len) {
		device_printf(sc->sc_dev,
		    "data packet out of bounds: %u@%u\n", pkt->rm_dataoffset,
		    pkt->rm_datalen);
		return;
	}

	if ((m = hvn_devget(sc, buf + RNDIS_HEADER_OFFSET + pkt->rm_dataoffset,
	    pkt->rm_datalen)) == NULL) {
		if_statinc(ifp, if_ierrors);
		return;
	}

	if (pkt->rm_pktinfooffset + pkt->rm_pktinfolen > len) {
		device_printf(sc->sc_dev,
		    "pktinfo is out of bounds: %u@%u vs %u\n",
		    pkt->rm_pktinfolen, pkt->rm_pktinfooffset, len);
		goto done;
	}

	pi = (struct rndis_pktinfo *)(buf + RNDIS_HEADER_OFFSET +
	    pkt->rm_pktinfooffset);
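	/*
	 * Walk the pktinfo elements to pick up the RX checksum results
	 * and the 802.1Q tag, if present.
	 */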
	while (pkt->rm_pktinfolen > 0) {
		if (pi->rm_size > pkt->rm_pktinfolen) {
			device_printf(sc->sc_dev,
			    "invalid pktinfo size: %u/%u\n", pi->rm_size,
			    pkt->rm_pktinfolen);
			break;
		}

		switch (pi->rm_type) {
		case NDIS_PKTINFO_TYPE_CSUM:
			memcpy(&csum, pi->rm_data, sizeof(csum));
			if (csum & NDIS_RXCSUM_INFO_IPCS_OK)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (csum & NDIS_RXCSUM_INFO_TCPCS_OK)
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			if (csum & NDIS_RXCSUM_INFO_UDPCS_OK)
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			break;
		case NDIS_PKTINFO_TYPE_VLAN:
			memcpy(&vlan, pi->rm_data, sizeof(vlan));
			if (vlan != 0xffffffff) {
				uint16_t t = NDIS_VLAN_INFO_ID(vlan);
				t |= NDIS_VLAN_INFO_PRI(vlan) << EVL_PRIO_BITS;
				t |= NDIS_VLAN_INFO_CFI(vlan) << EVL_CFI_BITS;
				vlan_set_tag(m, t);
			}
			break;
		default:
			DPRINTF("%s: unhandled pktinfo type %u\n",
			    device_xname(sc->sc_dev), pi->rm_type);
			break;
		}

		pkt->rm_pktinfolen -= pi->rm_size;
		pi = (struct rndis_pktinfo *)((char *)pi + pi->rm_size);
	}

done:
	if_percpuq_enqueue(sc->sc_ipq, m);
}

static void
hvn_rndis_complete(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
{
	struct rndis_cmd *rc;
	uint32_t id;

	memcpy(&id, buf + RNDIS_HEADER_OFFSET, sizeof(id));
	if ((rc = hvn_complete_cmd(sc, id)) != NULL) {
		if (len < rc->rc_cmplen)
			device_printf(sc->sc_dev,
			    "RNDIS response %u too short: %u\n", id, len);
		else
			memcpy(&rc->rc_cmp, buf, rc->rc_cmplen);
		if (len > rc->rc_cmplen &&
		    len - rc->rc_cmplen > HVN_RNDIS_BUFSIZE)
			device_printf(sc->sc_dev,
			    "RNDIS response %u too large: %u\n", id, len);
		else if (len > rc->rc_cmplen)
			memcpy(&rc->rc_cmpbuf, buf + rc->rc_cmplen,
			    len - rc->rc_cmplen);
		rc->rc_done = 1;
		wakeup(rc);
	} else {
		DPRINTF("%s: failed to complete RNDIS request id %u\n",
		    device_xname(sc->sc_dev), id);
	}
}

static int
hvn_rndis_output(struct hvn_softc *sc, struct hvn_tx_desc *txd)
{
	uint64_t rid = (uint64_t)txd->txd_id << 32;
	int rv;

	rv = vmbus_channel_send_sgl(sc->sc_chan, txd->txd_sgl, txd->txd_nsge,
	    &sc->sc_data_msg, sizeof(sc->sc_data_msg), rid);
	if (rv) {
		DPRINTF("%s: RNDIS data send error %d\n",
		    device_xname(sc->sc_dev), rv);
		return rv;
	}
	return 0;
}

static void
hvn_rndis_status(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
{
	struct ifnet *ifp = SC2IFP(sc);
	uint32_t status;
	int link_state = sc->sc_link_state;

	memcpy(&status, buf + RNDIS_HEADER_OFFSET, sizeof(status));
	switch (status) {
	case RNDIS_STATUS_MEDIA_CONNECT:
		sc->sc_link_state = LINK_STATE_UP;
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		sc->sc_link_state = LINK_STATE_DOWN;
		break;
	/* Ignore these */
	case RNDIS_STATUS_OFFLOAD_CURRENT_CONFIG:
		return;
	default:
		DPRINTF("%s: unhandled status %#x\n", device_xname(sc->sc_dev),
		    status);
		return;
	}
	if (link_state != sc->sc_link_state)
		if_link_state_change(ifp, sc->sc_link_state);
}

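/*
 * Synchronous RNDIS QUERY of a single OID.  On success the completion's
 * info buffer is copied into 'res' and *length is updated; EINVAL is
 * returned if the response does not fit into the caller's buffer.
 */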
static int
hvn_rndis_query(struct hvn_softc *sc, uint32_t oid, void *res, size_t *length)
{
	struct rndis_cmd *rc;
	struct rndis_query_req *req;
	struct rndis_query_comp *cmp;
	size_t olength = *length;
	int rv;

	rc = hvn_alloc_cmd(sc);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);

	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);

	req = rc->rc_req;
	req->rm_type = REMOTE_NDIS_QUERY_MSG;
	req->rm_len = sizeof(*req);
	req->rm_rid = rc->rc_id;
	req->rm_oid = oid;
	req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;

	rc->rc_cmplen = sizeof(*cmp);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREWRITE);

	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
		DPRINTF("%s: QUERY_MSG failed, error %d\n",
		    device_xname(sc->sc_dev), rv);
		hvn_free_cmd(sc, rc);
		return rv;
	}

	cmp = (struct rndis_query_comp *)&rc->rc_cmp;
	switch (cmp->rm_status) {
	case RNDIS_STATUS_SUCCESS:
		if (cmp->rm_infobuflen > olength) {
			rv = EINVAL;
			break;
		}
		memcpy(res, rc->rc_cmpbuf, cmp->rm_infobuflen);
		*length = cmp->rm_infobuflen;
		break;
	default:
		*length = 0;
		rv = EIO;
		break;
	}

	hvn_free_cmd(sc, rc);
	return rv;
}

static int
hvn_rndis_set(struct hvn_softc *sc, uint32_t oid, void *data, size_t length)
{
	struct rndis_cmd *rc;
	struct rndis_set_req *req;
	struct rndis_set_comp *cmp;
	int rv;

	rc = hvn_alloc_cmd(sc);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);

	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);

	req = rc->rc_req;
	req->rm_type = REMOTE_NDIS_SET_MSG;
	req->rm_len = sizeof(*req) + length;
	req->rm_rid = rc->rc_id;
	req->rm_oid = oid;
	req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;

	rc->rc_cmplen = sizeof(*cmp);

	if (length > 0) {
		KASSERT(sizeof(*req) + length < PAGE_SIZE);
		req->rm_infobuflen = length;
		memcpy(req + 1, data, length);
	}

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREWRITE);

	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
		DPRINTF("%s: SET_MSG failed, error %d\n",
		    device_xname(sc->sc_dev), rv);
		hvn_free_cmd(sc, rc);
		return rv;
	}

	cmp = (struct rndis_set_comp *)&rc->rc_cmp;
	if (cmp->rm_status != RNDIS_STATUS_SUCCESS)
		rv = EIO;

	hvn_free_cmd(sc, rc);
	return rv;
}

static int
hvn_rndis_open(struct hvn_softc *sc)
{
	uint32_t filter;
	int rv;

	if (sc->sc_promisc)
		filter = RNDIS_PACKET_TYPE_PROMISCUOUS;
	else
		filter = RNDIS_PACKET_TYPE_BROADCAST |
		    RNDIS_PACKET_TYPE_ALL_MULTICAST |
		    RNDIS_PACKET_TYPE_DIRECTED;

	rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
	    &filter, sizeof(filter));
	if (rv) {
		DPRINTF("%s: failed to set RNDIS filter to %#x\n",
		    device_xname(sc->sc_dev), filter);
	}
	return rv;
}

static int
hvn_rndis_close(struct hvn_softc *sc)
{
	uint32_t filter = 0;
	int rv;

	rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
	    &filter, sizeof(filter));
	if (rv) {
		DPRINTF("%s: failed to clear RNDIS filter\n",
		    device_xname(sc->sc_dev));
	}
	return rv;
}

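/*
 * Halt the RNDIS device and destroy the control queue locks.
 */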
static void
hvn_rndis_detach(struct hvn_softc *sc)
{
	struct rndis_cmd *rc;
	struct rndis_halt_req *req;
	int rv;

	rc = hvn_alloc_cmd(sc);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);

	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);

	req = rc->rc_req;
	req->rm_type = REMOTE_NDIS_HALT_MSG;
	req->rm_len = sizeof(*req);
	req->rm_rid = rc->rc_id;

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREWRITE);

	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
		DPRINTF("%s: HALT_MSG failed, error %d\n",
		    device_xname(sc->sc_dev), rv);
	}
	hvn_free_cmd(sc, rc);

	mutex_destroy(&sc->sc_cntl_sqlck);
	mutex_destroy(&sc->sc_cntl_cqlck);
	mutex_destroy(&sc->sc_cntl_fqlck);
}