/*	$NetBSD: if_vioif.c,v 1.2 2011/11/19 12:32:54 jmcneill Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.2 2011/11/19 12:32:54 jmcneill Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/sockio.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>


/*
 * if_vioifreg.h:
 */
/* Configuration registers */
#define VIRTIO_NET_CONFIG_MAC		0 /* 8bit x 6byte */
#define VIRTIO_NET_CONFIG_STATUS	6 /* 16bit */

/* Feature bits */
#define VIRTIO_NET_F_CSUM	(1<<0)
#define VIRTIO_NET_F_GUEST_CSUM	(1<<1)
#define VIRTIO_NET_F_MAC	(1<<5)
#define VIRTIO_NET_F_GSO	(1<<6)
#define VIRTIO_NET_F_GUEST_TSO4	(1<<7)
#define VIRTIO_NET_F_GUEST_TSO6	(1<<8)
#define VIRTIO_NET_F_GUEST_ECN	(1<<9)
#define VIRTIO_NET_F_GUEST_UFO	(1<<10)
#define VIRTIO_NET_F_HOST_TSO4	(1<<11)
#define VIRTIO_NET_F_HOST_TSO6	(1<<12)
#define VIRTIO_NET_F_HOST_ECN	(1<<13)
#define VIRTIO_NET_F_HOST_UFO	(1<<14)
#define VIRTIO_NET_F_MRG_RXBUF	(1<<15)
#define VIRTIO_NET_F_STATUS	(1<<16)
#define VIRTIO_NET_F_CTRL_VQ	(1<<17)
#define VIRTIO_NET_F_CTRL_RX	(1<<18)
#define VIRTIO_NET_F_CTRL_VLAN	(1<<19)

/* Status */
#define VIRTIO_NET_S_LINK_UP	1

/* Packet header structure */
struct virtio_net_hdr {
	uint8_t		flags;
	uint8_t		gso_type;
	uint16_t	hdr_len;
	uint16_t	gso_size;
	uint16_t	csum_start;
	uint16_t	csum_offset;
#if 0
	uint16_t	num_buffers;	/* if VIRTIO_NET_F_MRG_RXBUF enabled */
#endif
} __packed;

#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* flags */
#define VIRTIO_NET_HDR_GSO_NONE		0	/* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* gso_type */
#define VIRTIO_NET_HDR_GSO_UDP		3	/* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV6	4	/* gso_type */
#define VIRTIO_NET_HDR_GSO_ECN		0x80	/* gso_type, |'ed */

#define VIRTIO_NET_MAX_GSO_LEN		(65536+ETHER_HDR_LEN)
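
/*
 * Every frame on the rx/tx virtqueues is preceded by a struct
 * virtio_net_hdr carried in its own descriptor.  This driver
 * negotiates none of the checksum/GSO offload features, so the
 * headers it transmits stay zero-filled and the headers it
 * receives are ignored.
 */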

/* Control virtqueue */
struct virtio_net_ctrl_cmd {
	uint8_t	class;
	uint8_t	command;
} __packed;
#define VIRTIO_NET_CTRL_RX		0
# define VIRTIO_NET_CTRL_RX_PROMISC	0
# define VIRTIO_NET_CTRL_RX_ALLMULTI	1

#define VIRTIO_NET_CTRL_MAC		1
# define VIRTIO_NET_CTRL_MAC_TABLE_SET	0

#define VIRTIO_NET_CTRL_VLAN		2
# define VIRTIO_NET_CTRL_VLAN_ADD	0
# define VIRTIO_NET_CTRL_VLAN_DEL	1

struct virtio_net_ctrl_status {
	uint8_t	ack;
} __packed;
#define VIRTIO_NET_OK			0
#define VIRTIO_NET_ERR			1

struct virtio_net_ctrl_rx {
	uint8_t	onoff;
} __packed;

struct virtio_net_ctrl_mac_tbl {
	uint32_t nentries;
	uint8_t macs[][ETHER_ADDR_LEN];
} __packed;

struct virtio_net_ctrl_vlan {
	uint16_t id;
} __packed;


/*
 * if_vioifvar.h:
 */
struct vioif_softc {
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[3];

	uint8_t			sc_mac[ETHER_ADDR_LEN];
	struct ethercom		sc_ethercom;
	uint32_t		sc_features;
	short			sc_ifflags;

	/* bus_dmamem */
	bus_dma_segment_t	sc_hdr_segs[1];
	struct virtio_net_hdr	*sc_hdrs;
#define sc_rx_hdrs	sc_hdrs
	struct virtio_net_hdr	*sc_tx_hdrs;
	struct virtio_net_ctrl_cmd *sc_ctrl_cmd;
	struct virtio_net_ctrl_status *sc_ctrl_status;
	struct virtio_net_ctrl_rx *sc_ctrl_rx;
	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_uc;
	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_mc;

	/* kmem */
	bus_dmamap_t		*sc_arrays;
#define sc_rxhdr_dmamaps sc_arrays
	bus_dmamap_t		*sc_txhdr_dmamaps;
	bus_dmamap_t		*sc_rx_dmamaps;
	bus_dmamap_t		*sc_tx_dmamaps;
	struct mbuf		**sc_rx_mbufs;
	struct mbuf		**sc_tx_mbufs;

	bus_dmamap_t		sc_ctrl_cmd_dmamap;
	bus_dmamap_t		sc_ctrl_status_dmamap;
	bus_dmamap_t		sc_ctrl_rx_dmamap;
	bus_dmamap_t		sc_ctrl_tbl_uc_dmamap;
	bus_dmamap_t		sc_ctrl_tbl_mc_dmamap;

	void			*sc_rx_softint;

	enum {
		FREE, INUSE, DONE
	}			sc_ctrl_inuse;
	kcondvar_t		sc_ctrl_wait;
	kmutex_t		sc_ctrl_wait_lock;
};
#define VIRTIO_NET_TX_MAXNSEGS		(16) /* XXX */
#define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64) /* XXX */
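
/*
 * Virtqueue assignment: sc_vq[0] is the receive queue and sc_vq[1]
 * the transmit queue; sc_vq[2] is the control queue, allocated only
 * when the device offers both VIRTIO_NET_F_CTRL_VQ and
 * VIRTIO_NET_F_CTRL_RX (in which case vsc->sc_nvqs becomes 3).
 */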

/* cfattach interface functions */
static int	vioif_match(device_t, cfdata_t, void *);
static void	vioif_attach(device_t, device_t, void *);
static void	vioif_deferred_init(device_t);

/* ifnet interface functions */
static int	vioif_init(struct ifnet *);
static void	vioif_stop(struct ifnet *, int);
static void	vioif_start(struct ifnet *);
static int	vioif_ioctl(struct ifnet *, u_long, void *);
static void	vioif_watchdog(struct ifnet *);

/* rx */
static int	vioif_add_rx_mbuf(struct vioif_softc *, int);
static void	vioif_free_rx_mbuf(struct vioif_softc *, int);
static void	vioif_populate_rx_mbufs(struct vioif_softc *);
static int	vioif_rx_deq(struct vioif_softc *);
static int	vioif_rx_vq_done(struct virtqueue *);
static void	vioif_rx_softint(void *);
static void	vioif_rx_drain(struct vioif_softc *);

/* tx */
static int	vioif_tx_vq_done(struct virtqueue *);
static void	vioif_tx_drain(struct vioif_softc *);

/* other control */
static int	vioif_updown(struct vioif_softc *, bool);
static int	vioif_ctrl_rx(struct vioif_softc *, int, bool);
static int	vioif_set_promisc(struct vioif_softc *, bool);
static int	vioif_set_allmulti(struct vioif_softc *, bool);
static int	vioif_set_rx_filter(struct vioif_softc *);
static int	vioif_rx_filter(struct vioif_softc *);
static int	vioif_ctrl_vq_done(struct virtqueue *);

CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
		  vioif_match, vioif_attach, NULL, NULL);

static int
vioif_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_softc *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
		return 1;

	return 0;
}

/* allocate memory */
/*
 * dma memory is used for:
 *   sc_rx_hdrs[slot]:	 metadata array for received frames (READ)
 *   sc_tx_hdrs[slot]:	 metadata array for frames to be sent (WRITE)
 *   sc_ctrl_cmd:	 command to be sent via ctrl vq (WRITE)
 *   sc_ctrl_status:	 return value for a command via ctrl vq (READ)
 *   sc_ctrl_rx:	 parameter for a VIRTIO_NET_CTRL_RX class command
 *			 (WRITE)
 *   sc_ctrl_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 *   sc_ctrl_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 * sc_ctrl_* structures are allocated only one each; they are protected by
 * the sc_ctrl_inuse variable and the sc_ctrl_wait condvar.
 */
/*
 * dynamically allocated memory is used for:
 *   sc_rxhdr_dmamaps[slot]:	bus_dmamap_t array for sc_rx_hdrs[slot]
 *   sc_txhdr_dmamaps[slot]:	bus_dmamap_t array for sc_tx_hdrs[slot]
 *   sc_rx_dmamaps[slot]:	bus_dmamap_t array for received payload
 *   sc_tx_dmamaps[slot]:	bus_dmamap_t array for sent payload
 *   sc_rx_mbufs[slot]:		mbuf pointer array for received frames
 *   sc_tx_mbufs[slot]:		mbuf pointer array for sent frames
 */
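/*
 * All the fixed-size blocks above come from a single contiguous
 * bus_dmamem allocation (sc_hdr_segs[0]); vioif_alloc_mems() carves
 * that region up with the P() macro, so one DMA segment covers all
 * of the headers and control structures.
 */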
static int
vioif_alloc_mems(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int allocsize, allocsize2, r, rsegs, i;
	void *vaddr;
	intptr_t p;
	int rxqsize, txqsize;

	rxqsize = vsc->sc_vqs[0].vq_num;
	txqsize = vsc->sc_vqs[1].vq_num;

	allocsize = sizeof(struct virtio_net_hdr) * rxqsize;
	allocsize += sizeof(struct virtio_net_hdr) * txqsize;
	if (vsc->sc_nvqs == 3) {
		allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
			+ sizeof(struct virtio_net_ctrl_mac_tbl)
			+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
	}
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
			     &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory allocation failed, size %d, "
				 "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(vsc->sc_dmat,
			   &sc->sc_hdr_segs[0], 1, allocsize,
			   &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory map failed, "
				 "error code %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_hdrs = vaddr;
	memset(vaddr, 0, allocsize);
	p = (intptr_t) vaddr;
	p += sizeof(struct virtio_net_hdr) * rxqsize;
#define P(name,size)	do { sc->sc_ ##name = (void*) p;	\
			     p += size; } while (0)
	P(tx_hdrs, sizeof(struct virtio_net_hdr) * txqsize);
	if (vsc->sc_nvqs == 3) {
		P(ctrl_cmd, sizeof(struct virtio_net_ctrl_cmd));
		P(ctrl_status, sizeof(struct virtio_net_ctrl_status));
		P(ctrl_rx, sizeof(struct virtio_net_ctrl_rx));
		P(ctrl_mac_tbl_uc, sizeof(struct virtio_net_ctrl_mac_tbl));
		P(ctrl_mac_tbl_mc,
		  (sizeof(struct virtio_net_ctrl_mac_tbl)
		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES));
	}
#undef P

	allocsize2 = sizeof(bus_dmamap_t) * (rxqsize + txqsize);
	allocsize2 += sizeof(bus_dmamap_t) * (rxqsize + txqsize);
	allocsize2 += sizeof(struct mbuf*) * (rxqsize + txqsize);
	sc->sc_arrays = kmem_zalloc(allocsize2, KM_SLEEP);
	if (sc->sc_arrays == NULL)
		goto err_dmamem_map;
	sc->sc_txhdr_dmamaps = sc->sc_arrays + rxqsize;
	sc->sc_rx_dmamaps = sc->sc_txhdr_dmamaps + txqsize;
	sc->sc_tx_dmamaps = sc->sc_rx_dmamaps + rxqsize;
	sc->sc_rx_mbufs = (void*) (sc->sc_tx_dmamaps + txqsize);
	sc->sc_tx_mbufs = sc->sc_rx_mbufs + rxqsize;
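
	/*
	 * C() only creates a dmamap; C_L1() additionally loads it with
	 * the address of an object embedded in the softc DMA region
	 * (&sc->sc_<buf>), and C_L2() loads the buffer a softc member
	 * points at (sc->sc_<buf>).  Maps created with bare C() are
	 * loaded later, once the buffer or its length is known (mbufs,
	 * variable-length MAC tables).
	 */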

#define C(map, buf, size, nsegs, rw, usage)				\
	do {								\
		r = bus_dmamap_create(vsc->sc_dmat, size, nsegs, size, 0, \
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,	\
				      &sc->sc_ ##map);			\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
					 usage " dmamap creation failed, " \
					 "error code %d\n", r);		\
			goto err_reqs;					\
		}							\
	} while (0)
#define C_L1(map, buf, size, nsegs, rw, usage)				\
	C(map, buf, size, nsegs, rw, usage);				\
	do {								\
		r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ ##map,	\
				    &sc->sc_ ##buf, size, NULL,		\
				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
					 usage " dmamap load failed, "	\
					 "error code %d\n", r);		\
			goto err_reqs;					\
		}							\
	} while (0)
#define C_L2(map, buf, size, nsegs, rw, usage)				\
	C(map, buf, size, nsegs, rw, usage);				\
	do {								\
		r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ ##map,	\
				    sc->sc_ ##buf, size, NULL,		\
				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
					 usage " dmamap load failed, "	\
					 "error code %d\n", r);		\
			goto err_reqs;					\
		}							\
	} while (0)
	for (i = 0; i < rxqsize; i++) {
		C_L1(rxhdr_dmamaps[i], rx_hdrs[i],
		    sizeof(struct virtio_net_hdr), 1,
		    READ, "rx header");
		C(rx_dmamaps[i], NULL, MCLBYTES, 1, 0, "rx payload");
	}

	for (i = 0; i < txqsize; i++) {
		/* map the slot's own tx header, which vioif_start() fills */
		C_L1(txhdr_dmamaps[i], tx_hdrs[i],
		    sizeof(struct virtio_net_hdr), 1,
		    WRITE, "tx header");
		C(tx_dmamaps[i], NULL, ETHER_MAX_LEN, 256 /* XXX */, 0,
		  "tx payload");
	}

	if (vsc->sc_nvqs == 3) {
		/* control vq class & command */
		C_L2(ctrl_cmd_dmamap, ctrl_cmd,
		    sizeof(struct virtio_net_ctrl_cmd), 1, WRITE,
		    "control command");

		/* control vq status */
		C_L2(ctrl_status_dmamap, ctrl_status,
		    sizeof(struct virtio_net_ctrl_status), 1, READ,
		    "control status");

		/* control vq rx mode command parameter */
		C_L2(ctrl_rx_dmamap, ctrl_rx,
		    sizeof(struct virtio_net_ctrl_rx), 1, WRITE,
		    "rx mode control command");

		/* control vq MAC filter table for unicast */
		/* do not load now since its length is variable */
		C(ctrl_tbl_uc_dmamap, NULL,
		  sizeof(struct virtio_net_ctrl_mac_tbl) + 0, 1, WRITE,
		  "unicast MAC address filter command");

		/* control vq MAC filter table for multicast */
		C(ctrl_tbl_mc_dmamap, NULL,
		  (sizeof(struct virtio_net_ctrl_mac_tbl)
		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES),
		  1, WRITE, "multicast MAC address filter command");
	}
#undef C_L2
#undef C_L1
#undef C

	return 0;

err_reqs:
#define D(map)								\
	do {								\
		if (sc->sc_ ##map) {					\
			bus_dmamap_destroy(vsc->sc_dmat, sc->sc_ ##map); \
			sc->sc_ ##map = NULL;				\
		}							\
	} while (0)
	D(ctrl_tbl_mc_dmamap);
	D(ctrl_tbl_uc_dmamap);
	D(ctrl_rx_dmamap);
	D(ctrl_status_dmamap);
	D(ctrl_cmd_dmamap);
	for (i = 0; i < txqsize; i++) {
		D(tx_dmamaps[i]);
		D(txhdr_dmamaps[i]);
	}
	for (i = 0; i < rxqsize; i++) {
		D(rx_dmamaps[i]);
		D(rxhdr_dmamaps[i]);
	}
#undef D
	if (sc->sc_arrays) {
		kmem_free(sc->sc_arrays, allocsize2);
		sc->sc_arrays = 0;
	}
err_dmamem_map:
	bus_dmamem_unmap(vsc->sc_dmat, sc->sc_hdrs, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(vsc->sc_dmat, &sc->sc_hdr_segs[0], 1);
err_none:
	return -1;
}
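
/*
 * Attach: negotiate features, read (or synthesize) the MAC address,
 * allocate the virtqueues in rx/tx/ctrl order, and register the
 * interface.  Turning promiscuous mode back off requires a working
 * control queue interrupt, so that step is deferred to
 * vioif_deferred_init() via config_interrupts().
 */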
static void
vioif_attach(device_t parent, device_t self, void *aux)
{
	struct vioif_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	uint32_t features;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (vsc->sc_child != NULL) {
		aprint_normal(": child already attached for %s; "
			      "something wrong...\n",
			      device_xname(parent));
		return;
	}

	sc->sc_dev = self;
	sc->sc_virtio = vsc;

	vsc->sc_child = self;
	vsc->sc_ipl = IPL_NET;
	vsc->sc_vqs = &sc->sc_vq[0];
	vsc->sc_config_change = 0;
	vsc->sc_intrhand = virtio_vq_intr;

	features = virtio_negotiate_features(vsc,
					     (VIRTIO_NET_F_MAC |
					      VIRTIO_NET_F_STATUS |
					      VIRTIO_NET_F_CTRL_VQ |
					      VIRTIO_NET_F_CTRL_RX |
					      VIRTIO_F_NOTIFY_ON_EMPTY));
	/* remember the negotiated set; vioif_stop() re-negotiates with it */
	sc->sc_features = features;
	if (features & VIRTIO_NET_F_MAC) {
		sc->sc_mac[0] = virtio_read_device_config_1(vsc,
						VIRTIO_NET_CONFIG_MAC+0);
		sc->sc_mac[1] = virtio_read_device_config_1(vsc,
						VIRTIO_NET_CONFIG_MAC+1);
		sc->sc_mac[2] = virtio_read_device_config_1(vsc,
						VIRTIO_NET_CONFIG_MAC+2);
		sc->sc_mac[3] = virtio_read_device_config_1(vsc,
						VIRTIO_NET_CONFIG_MAC+3);
		sc->sc_mac[4] = virtio_read_device_config_1(vsc,
						VIRTIO_NET_CONFIG_MAC+4);
		sc->sc_mac[5] = virtio_read_device_config_1(vsc,
						VIRTIO_NET_CONFIG_MAC+5);
	} else {
		/* code stolen from sys/net/if_tap.c */
		struct timeval tv;
		uint32_t ui;
		getmicrouptime(&tv);
		ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
		memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+0,
					     sc->sc_mac[0]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+1,
					     sc->sc_mac[1]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+2,
					     sc->sc_mac[2]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+3,
					     sc->sc_mac[3]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+4,
					     sc->sc_mac[4]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+5,
					     sc->sc_mac[5]);
	}
	aprint_normal(": Ethernet address %s\n", ether_sprintf(sc->sc_mac));
	aprint_naive("\n");

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0,
			    MCLBYTES+sizeof(struct virtio_net_hdr), 2,
			    "rx") != 0) {
		goto err;
	}
	vsc->sc_nvqs = 1;
	sc->sc_vq[0].vq_done = vioif_rx_vq_done;
	if (virtio_alloc_vq(vsc, &sc->sc_vq[1], 1,
			    (sizeof(struct virtio_net_hdr)
			     + (ETHER_MAX_LEN - ETHER_HDR_LEN)),
			    VIRTIO_NET_TX_MAXNSEGS + 1,
			    "tx") != 0) {
		goto err;
	}
	vsc->sc_nvqs = 2;
	sc->sc_vq[1].vq_done = vioif_tx_vq_done;
	virtio_start_vq_intr(vsc, &sc->sc_vq[0]);
	virtio_stop_vq_intr(vsc, &sc->sc_vq[1]); /* not urgent; do it later */
	if ((features & VIRTIO_NET_F_CTRL_VQ)
	    && (features & VIRTIO_NET_F_CTRL_RX)) {
		if (virtio_alloc_vq(vsc, &sc->sc_vq[2], 2,
				    NBPG, 1, "control") == 0) {
			sc->sc_vq[2].vq_done = vioif_ctrl_vq_done;
			cv_init(&sc->sc_ctrl_wait, "ctrl_vq");
			mutex_init(&sc->sc_ctrl_wait_lock,
				   MUTEX_DEFAULT, IPL_NET);
			sc->sc_ctrl_inuse = FREE;
			virtio_start_vq_intr(vsc, &sc->sc_vq[2]);
			vsc->sc_nvqs = 3;
		}
	}

	sc->sc_rx_softint = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
					      vioif_rx_softint, sc);
	if (sc->sc_rx_softint == NULL) {
		aprint_error_dev(self, "cannot establish softint\n");
		goto err;
	}

	if (vioif_alloc_mems(sc) < 0)
		goto err;
	if (vsc->sc_nvqs == 3)
		config_interrupts(self, vioif_deferred_init);

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = vioif_start;
	ifp->if_ioctl = vioif_ioctl;
	ifp->if_init = vioif_init;
	ifp->if_stop = vioif_stop;
	ifp->if_capabilities = 0;
	ifp->if_watchdog = vioif_watchdog;

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_mac);

	return;

err:
	if (vsc->sc_nvqs == 3) {
		virtio_free_vq(vsc, &sc->sc_vq[2]);
		cv_destroy(&sc->sc_ctrl_wait);
		mutex_destroy(&sc->sc_ctrl_wait_lock);
		vsc->sc_nvqs = 2;
	}
	if (vsc->sc_nvqs == 2) {
		virtio_free_vq(vsc, &sc->sc_vq[1]);
		vsc->sc_nvqs = 1;
	}
	if (vsc->sc_nvqs == 1) {
		virtio_free_vq(vsc, &sc->sc_vq[0]);
		vsc->sc_nvqs = 0;
	}
	vsc->sc_child = (void*)1;
	return;
}

/* we need interrupts to turn promiscuous mode off */
static void
vioif_deferred_init(device_t self)
{
	struct vioif_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int r;

	r = vioif_set_promisc(sc, false);
	if (r != 0)
		aprint_error_dev(self, "resetting promisc mode failed, "
				 "error code %d\n", r);
	else
		ifp->if_flags &= ~IFF_PROMISC;
}
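
/*
 * Lifecycle note: if_init rebuilds the rx ring and brings the link up,
 * and if_stop tears everything down.  Virtio offers no way to stop DMA
 * short of a full device reset, so vioif_stop() resets the device and
 * then re-initializes it via virtio_reinit_start()/_end().
 */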

/*
 * Interface functions for ifnet
 */
static int
vioif_init(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;

	vioif_stop(ifp, 0);
	vioif_populate_rx_mbufs(sc);
	vioif_updown(sc, true);
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	vioif_rx_filter(sc);

	return 0;
}

static void
vioif_stop(struct ifnet *ifp, int disable)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;

	/* only way to stop I/O and DMA is resetting... */
	virtio_reset(vsc);
	vioif_rx_deq(sc);
	vioif_tx_drain(sc);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	if (disable)
		vioif_rx_drain(sc);

	virtio_reinit_start(vsc);
	virtio_negotiate_features(vsc, sc->sc_features);
	virtio_start_vq_intr(vsc, &sc->sc_vq[0]);
	virtio_stop_vq_intr(vsc, &sc->sc_vq[1]);
	if (vsc->sc_nvqs >= 3)
		virtio_start_vq_intr(vsc, &sc->sc_vq[2]);
	virtio_reinit_end(vsc);
	vioif_updown(sc, false);
}

static void
vioif_start(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[1]; /* tx vq */
	struct mbuf *m;
	int queued = 0, retry = 0;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		int slot, r;

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN) {
			ifp->if_flags |= IFF_OACTIVE;
			vioif_tx_vq_done(vq);
			if (retry++ == 0)
				continue;
			else
				break;
		}
		if (r != 0)
			panic("enqueue_prep for a tx buffer");
		r = bus_dmamap_load_mbuf(vsc->sc_dmat,
					 sc->sc_tx_dmamaps[slot],
					 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (r != 0) {
			virtio_enqueue_abort(vsc, vq, slot);
			printf("%s: tx dmamap load failed, error code %d\n",
			       device_xname(sc->sc_dev), r);
			break;
		}
		r = virtio_enqueue_reserve(vsc, vq, slot,
					sc->sc_tx_dmamaps[slot]->dm_nsegs + 1);
		if (r != 0) {
			bus_dmamap_unload(vsc->sc_dmat,
					  sc->sc_tx_dmamaps[slot]);
			ifp->if_flags |= IFF_OACTIVE;
			vioif_tx_vq_done(vq);
			if (retry++ == 0)
				continue;
			else
				break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m);
		sc->sc_tx_mbufs[slot] = m;

		memset(&sc->sc_tx_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot],
				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_txhdr_dmamaps[slot],
				0, sc->sc_txhdr_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_PREWRITE);
		virtio_enqueue(vsc, vq, slot, sc->sc_txhdr_dmamaps[slot], true);
		virtio_enqueue(vsc, vq, slot, sc->sc_tx_dmamaps[slot], true);
		virtio_enqueue_commit(vsc, vq, slot, false);
		queued++;
		bpf_mtap(ifp, m);
	}

	if (queued > 0) {
		virtio_enqueue_commit(vsc, vq, -1, true);
		ifp->if_timer = 5;
	}
}

static int
vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, r;

	s = splnet();

	r = ether_ioctl(ifp, cmd, data);
	if ((r == 0 && cmd == SIOCSIFFLAGS) ||
	    (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI))) {
		if (ifp->if_flags & IFF_RUNNING)
			r = vioif_rx_filter(ifp->if_softc);
		else
			r = 0;
	}

	splx(s);

	return r;
}

static void
vioif_watchdog(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		vioif_tx_vq_done(&sc->sc_vq[1]);
}


/*
 * Receive implementation
 */
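/*
 * Each receive slot is posted to the device as a two-descriptor chain:
 * the slot's virtio_net_hdr dmamap followed by an mbuf-cluster dmamap.
 * The hard interrupt only dequeues completed frames; refilling the
 * ring happens later in the sc_rx_softint softint.
 */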
/* allocate and initialize an mbuf for receive */
static int
vioif_add_rx_mbuf(struct vioif_softc *sc, int i)
{
	struct mbuf *m;
	int r;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}
	sc->sc_rx_mbufs[i] = m;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	r = bus_dmamap_load_mbuf(sc->sc_virtio->sc_dmat,
				 sc->sc_rx_dmamaps[i],
				 m, BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (r) {
		m_freem(m);
		sc->sc_rx_mbufs[i] = 0;
		return r;
	}

	return 0;
}

/* free an mbuf for receive */
static void
vioif_free_rx_mbuf(struct vioif_softc *sc, int i)
{
	bus_dmamap_unload(sc->sc_virtio->sc_dmat, sc->sc_rx_dmamaps[i]);
	m_freem(sc->sc_rx_mbufs[i]);
	sc->sc_rx_mbufs[i] = NULL;
}

/* add mbufs for all the empty receive slots */
static void
vioif_populate_rx_mbufs(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int i, r, ndone = 0;
	struct virtqueue *vq = &sc->sc_vq[0]; /* rx vq */

	for (i = 0; i < vq->vq_num; i++) {
		int slot;
		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN)
			break;
		if (r != 0)
			panic("enqueue_prep for rx buffers");
		if (sc->sc_rx_mbufs[slot] == NULL) {
			r = vioif_add_rx_mbuf(sc, slot);
			if (r != 0) {
				printf("%s: rx mbuf allocation failed, "
				       "error code %d\n",
				       device_xname(sc->sc_dev), r);
				break;
			}
		}
		r = virtio_enqueue_reserve(vsc, vq, slot,
					sc->sc_rx_dmamaps[slot]->dm_nsegs + 1);
		if (r != 0) {
			vioif_free_rx_mbuf(sc, slot);
			break;
		}
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[slot],
			0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot],
			0, MCLBYTES, BUS_DMASYNC_PREREAD);
		virtio_enqueue(vsc, vq, slot, sc->sc_rxhdr_dmamaps[slot], false);
		virtio_enqueue(vsc, vq, slot, sc->sc_rx_dmamaps[slot], false);
		virtio_enqueue_commit(vsc, vq, slot, false);
		ndone++;
	}
	if (ndone > 0)
		virtio_enqueue_commit(vsc, vq, -1, true);
}

/* dequeue received packets */
static int
vioif_rx_deq(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int r = 0;
	int slot, len;

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		len -= sizeof(struct virtio_net_hdr);
		r = 1;
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[slot],
				0, sizeof(struct virtio_net_hdr),
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot],
				0, MCLBYTES,
				BUS_DMASYNC_POSTREAD);
		m = sc->sc_rx_mbufs[slot];
		KASSERT(m != NULL);
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_rx_dmamaps[slot]);
		sc->sc_rx_mbufs[slot] = 0;
		virtio_dequeue_commit(vsc, vq, slot);
		m->m_pkthdr.rcvif = ifp;
		m->m_len = m->m_pkthdr.len = len;
		ifp->if_ipackets++;
		bpf_mtap(ifp, m);
		(*ifp->if_input)(ifp, m);
	}

	return r;
}

/* rx interrupt; call _dequeue above and schedule a softint */
static int
vioif_rx_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(vsc->sc_child);
	int r;

	r = vioif_rx_deq(sc);
	if (r)
		softint_schedule(sc->sc_rx_softint);

	return r;
}

/* softint: enqueue receive requests for new incoming packets */
static void
vioif_rx_softint(void *arg)
{
	struct vioif_softc *sc = arg;

	vioif_populate_rx_mbufs(sc);
}
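
/*
 * vioif_rx_drain() below runs only from vioif_stop(ifp, 1), after the
 * device has been reset, so the slots can be reclaimed without racing
 * against device DMA.
 */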
/* free all the mbufs; called from if_stop(disable) */
static void
vioif_rx_drain(struct vioif_softc *sc)
{
	struct virtqueue *vq = &sc->sc_vq[0];
	int i;

	for (i = 0; i < vq->vq_num; i++) {
		if (sc->sc_rx_mbufs[i] == NULL)
			continue;
		vioif_free_rx_mbuf(sc, i);
	}
}


/*
 * Transmission implementation
 */
/* actual transmission is done in if_start */
/* tx interrupt; dequeue and free mbufs */
/*
 * tx interrupt is actually disabled; this should be called upon
 * tx vq full and watchdog
 */
static int
vioif_tx_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(vsc->sc_child);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int r = 0;
	int slot, len;

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		r++;
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_txhdr_dmamaps[slot],
				0, sizeof(struct virtio_net_hdr),
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot],
				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		m = sc->sc_tx_mbufs[slot];
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[slot]);
		sc->sc_tx_mbufs[slot] = 0;
		virtio_dequeue_commit(vsc, vq, slot);
		ifp->if_opackets++;
		m_freem(m);
	}

	if (r)
		ifp->if_flags &= ~IFF_OACTIVE;
	return r;
}

/* free all the mbufs already put on vq; called from if_stop(disable) */
static void
vioif_tx_drain(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[1];
	int i;

	for (i = 0; i < vq->vq_num; i++) {
		if (sc->sc_tx_mbufs[i] == NULL)
			continue;
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[i]);
		m_freem(sc->sc_tx_mbufs[i]);
		sc->sc_tx_mbufs[i] = NULL;
	}
}

/*
 * Control vq
 */
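/*
 * There is a single set of control-queue buffers, serialized by the
 * sc_ctrl_inuse state machine: an issuer sleeps on sc_ctrl_wait until
 * the state is FREE, marks it INUSE, submits the command, then sleeps
 * again until vioif_ctrl_vq_done() sets the state to DONE.  Setting it
 * back to FREE wakes the next waiter.
 */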
/* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */
static int
vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[2];
	int r, slot;

	if (vsc->sc_nvqs < 3)
		return ENOTSUP;

	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != FREE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = INUSE;
	mutex_exit(&sc->sc_ctrl_wait_lock);

	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_RX;
	sc->sc_ctrl_cmd->command = cmd;
	sc->sc_ctrl_rx->onoff = onoff;

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap,
			0, sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_rx_dmamap,
			0, sizeof(struct virtio_net_ctrl_rx),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap,
			0, sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_PREREAD);

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, 3);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_rx_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* wait for done */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != DONE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	mutex_exit(&sc->sc_ctrl_wait_lock);
	/* already dequeued */

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap, 0,
			sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_rx_dmamap, 0,
			sizeof(struct virtio_net_ctrl_rx),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap, 0,
			sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_POSTREAD);

	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		printf("%s: failed setting rx mode\n",
		       device_xname(sc->sc_dev));
		r = EIO;
	}

	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = FREE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return r;
}

static int
vioif_set_promisc(struct vioif_softc *sc, bool onoff)
{
	int r;

	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);

	return r;
}

static int
vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
{
	int r;

	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);

	return r;
}
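
/*
 * A MAC_TABLE_SET request is a four-descriptor chain: the class/command
 * header, the unicast table, the multicast table and the status byte.
 * The two table dmamaps are loaded per call because their lengths
 * depend on the current nentries values.
 */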
/* issue VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for completion */
static int
vioif_set_rx_filter(struct vioif_softc *sc)
{
	/* filter already set in sc_ctrl_mac_tbl */
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[2];
	int r, slot;

	if (vsc->sc_nvqs < 3)
		return ENOTSUP;

	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != FREE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = INUSE;
	mutex_exit(&sc->sc_ctrl_wait_lock);

	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_MAC;
	sc->sc_ctrl_cmd->command = VIRTIO_NET_CTRL_MAC_TABLE_SET;

	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap,
			    sc->sc_ctrl_mac_tbl_uc,
			    (sizeof(struct virtio_net_ctrl_mac_tbl)
			  + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r) {
		printf("%s: control command dmamap load failed, "
		       "error code %d\n", device_xname(sc->sc_dev), r);
		goto out;
	}
	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap,
			    sc->sc_ctrl_mac_tbl_mc,
			    (sizeof(struct virtio_net_ctrl_mac_tbl)
			  + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r) {
		printf("%s: control command dmamap load failed, "
		       "error code %d\n", device_xname(sc->sc_dev), r);
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap);
		goto out;
	}

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap,
			0, sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap,
			0, sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_PREREAD);

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, 4);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_uc_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_mc_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* wait for done */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != DONE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	mutex_exit(&sc->sc_ctrl_wait_lock);
	/* already dequeued */

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap, 0,
			sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap, 0,
			sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap);
	bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap);

	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		printf("%s: failed setting rx filter\n",
		       device_xname(sc->sc_dev));
		r = EIO;
	}

out:
	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = FREE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return r;
}

/* ctrl vq interrupt; wake up the command issuer */
static int
vioif_ctrl_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(vsc->sc_child);
	int r, slot;

	r = virtio_dequeue(vsc, vq, &slot, NULL);
	if (r == ENOENT)
		return 0;
	virtio_dequeue_commit(vsc, vq, slot);

	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = DONE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return 1;
}

/*
 * If IFF_PROMISC is requested, enable promiscuous mode.
 * Otherwise, if the multicast filter is small enough (<= MAXENTRIES),
 * program the rx filter; if it is too large, use ALLMULTI instead.
 * If setting the rx filter fails, fall back to ALLMULTI;
 * if ALLMULTI fails, fall back to PROMISC.
 */
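/*
 * Note the comma expression in the loop condition below: nentries
 * starts at -1 and is incremented at the top of each pass, so inside
 * the body it indexes the entry being copied, and when the loop exits
 * it holds the total entry count.
 */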
static int
vioif_rx_filter(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int nentries;
	int promisc = 0, allmulti = 0, rxfilter = 0;
	int r;

	if (vsc->sc_nvqs < 3) {	/* no ctrl vq; always promisc */
		ifp->if_flags |= IFF_PROMISC;
		return 0;
	}

	if (ifp->if_flags & IFF_PROMISC) {
		promisc = 1;
		goto set;
	}

	nentries = -1;
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (nentries++, enm != NULL) {
		if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
			allmulti = 1;
			goto set;
		}
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			   ETHER_ADDR_LEN)) {
			allmulti = 1;
			goto set;
		}
		memcpy(sc->sc_ctrl_mac_tbl_mc->macs[nentries],
		       enm->enm_addrlo, ETHER_ADDR_LEN);
		ETHER_NEXT_MULTI(step, enm);
	}
	rxfilter = 1;

set:
	if (rxfilter) {
		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
		sc->sc_ctrl_mac_tbl_mc->nentries = nentries;
		r = vioif_set_rx_filter(sc);
		if (r != 0) {
			rxfilter = 0;
			allmulti = 1; /* fallback */
		}
	} else {
		/* remove rx filter */
		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
		sc->sc_ctrl_mac_tbl_mc->nentries = 0;
		r = vioif_set_rx_filter(sc);
		/* what to do on failure? */
	}
	if (allmulti) {
		r = vioif_set_allmulti(sc, true);
		if (r != 0) {
			allmulti = 0;
			promisc = 1; /* fallback */
		}
	} else {
		r = vioif_set_allmulti(sc, false);
		/* what to do on failure? */
	}
	if (promisc) {
		r = vioif_set_promisc(sc, true);
	} else {
		r = vioif_set_promisc(sc, false);
	}

	return r;
}

/* change link status */
static int
vioif_updown(struct vioif_softc *sc, bool isup)
{
	struct virtio_softc *vsc = sc->sc_virtio;

	if (!(vsc->sc_features & VIRTIO_NET_F_STATUS))
		return ENODEV;
	virtio_write_device_config_1(vsc,
				     VIRTIO_NET_CONFIG_STATUS,
				     isup ? VIRTIO_NET_S_LINK_UP : 0);
	return 0;
}