/*	$NetBSD: if_vioif.c,v 1.1 2011/10/30 12:12:21 hannken Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.1 2011/10/30 12:12:21 hannken Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/sockio.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>


/*
 * if_vioifreg.h:
 */
/* Configuration registers */
#define VIRTIO_NET_CONFIG_MAC		0 /* 8bit x 6byte */
#define VIRTIO_NET_CONFIG_STATUS	6 /* 16bit */

/* Feature bits */
#define VIRTIO_NET_F_CSUM	(1<<0)
#define VIRTIO_NET_F_GUEST_CSUM	(1<<1)
#define VIRTIO_NET_F_MAC	(1<<5)
#define VIRTIO_NET_F_GSO	(1<<6)
#define VIRTIO_NET_F_GUEST_TSO4	(1<<7)
#define VIRTIO_NET_F_GUEST_TSO6	(1<<8)
#define VIRTIO_NET_F_GUEST_ECN	(1<<9)
#define VIRTIO_NET_F_GUEST_UFO	(1<<10)
#define VIRTIO_NET_F_HOST_TSO4	(1<<11)
#define VIRTIO_NET_F_HOST_TSO6	(1<<12)
#define VIRTIO_NET_F_HOST_ECN	(1<<13)
#define VIRTIO_NET_F_HOST_UFO	(1<<14)
#define VIRTIO_NET_F_MRG_RXBUF	(1<<15)
#define VIRTIO_NET_F_STATUS	(1<<16)
#define VIRTIO_NET_F_CTRL_VQ	(1<<17)
#define VIRTIO_NET_F_CTRL_RX	(1<<18)
#define VIRTIO_NET_F_CTRL_VLAN	(1<<19)

/* Status */
#define VIRTIO_NET_S_LINK_UP	1

/* Packet header structure */
struct virtio_net_hdr {
	uint8_t		flags;
	uint8_t		gso_type;
	uint16_t	hdr_len;
	uint16_t	gso_size;
	uint16_t	csum_start;
	uint16_t	csum_offset;
#if 0
	uint16_t	num_buffers;	/* if VIRTIO_NET_F_MRG_RXBUF enabled */
#endif
} __packed;

#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* flags */
#define VIRTIO_NET_HDR_GSO_NONE		0	/* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* gso_type */
#define VIRTIO_NET_HDR_GSO_UDP		3	/* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV6	4	/* gso_type */
#define VIRTIO_NET_HDR_GSO_ECN		0x80	/* gso_type, |'ed */

#define VIRTIO_NET_MAX_GSO_LEN		(65536+ETHER_HDR_LEN)
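
/*
 * Illustrative sketch only: a driver that had negotiated
 * VIRTIO_NET_F_CSUM could ask the host to complete a partial checksum
 * by filling the header of an outgoing frame roughly as follows
 * (ip_hlen standing for the parsed IP header length):
 *
 *	hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 *	hdr->csum_start = ETHER_HDR_LEN + ip_hlen;
 *	hdr->csum_offset = offsetof(struct tcphdr, th_sum);
 *
 * This driver negotiates no offload features (if_capabilities is set to
 * zero in vioif_attach()), so vioif_start() always sends an all-zero
 * header, i.e. flags == 0 and gso_type == VIRTIO_NET_HDR_GSO_NONE.
 */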

/* Control virtqueue */
struct virtio_net_ctrl_cmd {
	uint8_t	class;
	uint8_t	command;
} __packed;
#define VIRTIO_NET_CTRL_RX		0
# define VIRTIO_NET_CTRL_RX_PROMISC	0
# define VIRTIO_NET_CTRL_RX_ALLMULTI	1

#define VIRTIO_NET_CTRL_MAC		1
# define VIRTIO_NET_CTRL_MAC_TABLE_SET	0

#define VIRTIO_NET_CTRL_VLAN		2
# define VIRTIO_NET_CTRL_VLAN_ADD	0
# define VIRTIO_NET_CTRL_VLAN_DEL	1

struct virtio_net_ctrl_status {
	uint8_t	ack;
} __packed;
#define VIRTIO_NET_OK			0
#define VIRTIO_NET_ERR			1

struct virtio_net_ctrl_rx {
	uint8_t	onoff;
} __packed;

struct virtio_net_ctrl_mac_tbl {
	uint32_t nentries;
	uint8_t macs[][ETHER_ADDR_LEN];
} __packed;

struct virtio_net_ctrl_vlan {
	uint16_t id;
} __packed;


/*
 * if_vioifvar.h:
 */
struct vioif_softc {
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[3];

	uint8_t			sc_mac[ETHER_ADDR_LEN];
	struct ethercom		sc_ethercom;
	uint32_t		sc_features;
	short			sc_ifflags;

	/* bus_dmamem */
	bus_dma_segment_t	sc_hdr_segs[1];
	struct virtio_net_hdr	*sc_hdrs;
#define sc_rx_hdrs	sc_hdrs
	struct virtio_net_hdr	*sc_tx_hdrs;
	struct virtio_net_ctrl_cmd *sc_ctrl_cmd;
	struct virtio_net_ctrl_status *sc_ctrl_status;
	struct virtio_net_ctrl_rx *sc_ctrl_rx;
	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_uc;
	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_mc;

	/* kmem */
	bus_dmamap_t		*sc_arrays;
#define sc_rxhdr_dmamaps sc_arrays
	bus_dmamap_t		*sc_txhdr_dmamaps;
	bus_dmamap_t		*sc_rx_dmamaps;
	bus_dmamap_t		*sc_tx_dmamaps;
	struct mbuf		**sc_rx_mbufs;
	struct mbuf		**sc_tx_mbufs;

	bus_dmamap_t		sc_ctrl_cmd_dmamap;
	bus_dmamap_t		sc_ctrl_status_dmamap;
	bus_dmamap_t		sc_ctrl_rx_dmamap;
	bus_dmamap_t		sc_ctrl_tbl_uc_dmamap;
	bus_dmamap_t		sc_ctrl_tbl_mc_dmamap;

	void			*sc_rx_softint;

	enum {
		FREE, INUSE, DONE
	}			sc_ctrl_inuse;
	kcondvar_t		sc_ctrl_wait;
	kmutex_t		sc_ctrl_wait_lock;
};
#define VIRTIO_NET_TX_MAXNSEGS		(16)	/* XXX */
#define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64)	/* XXX */
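
/*
 * Virtqueue assignment, fixed in vioif_attach():
 *	sc_vq[0]: rx
 *	sc_vq[1]: tx
 *	sc_vq[2]: control, allocated only when both VIRTIO_NET_F_CTRL_VQ
 *		  and VIRTIO_NET_F_CTRL_RX were negotiated (sc_nvqs == 3)
 */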

/* cfattach interface functions */
static int	vioif_match(device_t, cfdata_t, void *);
static void	vioif_attach(device_t, device_t, void *);
static void	vioif_deferred_init(device_t);

/* ifnet interface functions */
static int	vioif_init(struct ifnet *);
static void	vioif_stop(struct ifnet *, int);
static void	vioif_start(struct ifnet *);
static int	vioif_ioctl(struct ifnet *, u_long, void *);
static void	vioif_watchdog(struct ifnet *);

/* rx */
static int	vioif_add_rx_mbuf(struct vioif_softc *, int);
static void	vioif_free_rx_mbuf(struct vioif_softc *, int);
static void	vioif_populate_rx_mbufs(struct vioif_softc *);
static int	vioif_rx_deq(struct vioif_softc *);
static int	vioif_rx_vq_done(struct virtqueue *);
static void	vioif_rx_softint(void *);
static void	vioif_rx_drain(struct vioif_softc *);

/* tx */
static int	vioif_tx_vq_done(struct virtqueue *);
static void	vioif_tx_drain(struct vioif_softc *);

/* other control */
static int	vioif_updown(struct vioif_softc *, bool);
static int	vioif_ctrl_rx(struct vioif_softc *, int, bool);
static int	vioif_set_promisc(struct vioif_softc *, bool);
static int	vioif_set_allmulti(struct vioif_softc *, bool);
static int	vioif_set_rx_filter(struct vioif_softc *);
static int	vioif_rx_filter(struct vioif_softc *);
static int	vioif_ctrl_vq_done(struct virtqueue *);

CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
		  vioif_match, vioif_attach, NULL, NULL);

static int
vioif_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_softc *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
		return 1;

	return 0;
}

/* allocate memory */
/*
 * dma memory is used for:
 *   sc_rx_hdrs[slot]:	 metadata array for received frames (READ)
 *   sc_tx_hdrs[slot]:	 metadata array for frames to be sent (WRITE)
 *   sc_ctrl_cmd:	 command to be sent via ctrl vq (WRITE)
 *   sc_ctrl_status:	 return value for a command via ctrl vq (READ)
 *   sc_ctrl_rx:	 parameter for a VIRTIO_NET_CTRL_RX class command
 *			 (WRITE)
 *   sc_ctrl_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 *   sc_ctrl_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 * Only one of each sc_ctrl_* structure is allocated; they are protected by
 * the sc_ctrl_inuse variable and the sc_ctrl_wait condvar.
 */
/*
 * dynamically allocated memory is used for:
 *   sc_rxhdr_dmamaps[slot]:	bus_dmamap_t array for sc_rx_hdrs[slot]
 *   sc_txhdr_dmamaps[slot]:	bus_dmamap_t array for sc_tx_hdrs[slot]
 *   sc_rx_dmamaps[slot]:	bus_dmamap_t array for received payload
 *   sc_tx_dmamaps[slot]:	bus_dmamap_t array for sent payload
 *   sc_rx_mbufs[slot]:		mbuf pointer array for received frames
 *   sc_tx_mbufs[slot]:		mbuf pointer array for sent frames
 */
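/*
 * Layout of the single bus_dmamem region carved up by vioif_alloc_mems(),
 * in order of increasing offset (the ctrl_* parts exist only when the
 * control vq is in use, i.e. sc_nvqs == 3):
 *
 *	virtio_net_hdr x rxqsize	sc_rx_hdrs
 *	virtio_net_hdr x txqsize	sc_tx_hdrs
 *	virtio_net_ctrl_cmd		sc_ctrl_cmd
 *	virtio_net_ctrl_status		sc_ctrl_status
 *	virtio_net_ctrl_rx		sc_ctrl_rx
 *	virtio_net_ctrl_mac_tbl		sc_ctrl_mac_tbl_uc (0 entries)
 *	virtio_net_ctrl_mac_tbl		sc_ctrl_mac_tbl_mc
 *					(VIRTIO_NET_CTRL_MAC_MAXENTRIES entries)
 */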
static int
vioif_alloc_mems(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int allocsize, allocsize2, r, rsegs, i;
	void *vaddr;
	intptr_t p;
	int rxqsize, txqsize;

	rxqsize = vsc->sc_vqs[0].vq_num;
	txqsize = vsc->sc_vqs[1].vq_num;

	allocsize = sizeof(struct virtio_net_hdr) * rxqsize;
	allocsize += sizeof(struct virtio_net_hdr) * txqsize;
	if (vsc->sc_nvqs == 3) {
		allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
			+ sizeof(struct virtio_net_ctrl_mac_tbl)
			+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
	}
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
			     &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory allocation failed, size %d, "
				 "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(vsc->sc_dmat,
			   &sc->sc_hdr_segs[0], 1, allocsize,
			   &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory map failed, "
				 "error code %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_hdrs = vaddr;
	memset(vaddr, 0, allocsize);
	p = (intptr_t) vaddr;
	p += sizeof(struct virtio_net_hdr) * rxqsize;
#define P(name,size)	do { sc->sc_ ##name = (void*) p;	\
			     p += size; } while (0)
	P(tx_hdrs, sizeof(struct virtio_net_hdr) * txqsize);
	if (vsc->sc_nvqs == 3) {
		P(ctrl_cmd, sizeof(struct virtio_net_ctrl_cmd));
		P(ctrl_status, sizeof(struct virtio_net_ctrl_status));
		P(ctrl_rx, sizeof(struct virtio_net_ctrl_rx));
		P(ctrl_mac_tbl_uc, sizeof(struct virtio_net_ctrl_mac_tbl));
		P(ctrl_mac_tbl_mc,
		  (sizeof(struct virtio_net_ctrl_mac_tbl)
		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES));
	}
#undef P

	allocsize2 = sizeof(bus_dmamap_t) * (rxqsize + txqsize);
	allocsize2 += sizeof(bus_dmamap_t) * (rxqsize + txqsize);
	allocsize2 += sizeof(struct mbuf*) * (rxqsize + txqsize);
	sc->sc_arrays = kmem_zalloc(allocsize2, KM_SLEEP);
	if (sc->sc_arrays == NULL)
		goto err_dmamem_map;
	sc->sc_txhdr_dmamaps = sc->sc_arrays + rxqsize;
	sc->sc_rx_dmamaps = sc->sc_txhdr_dmamaps + txqsize;
	sc->sc_tx_dmamaps = sc->sc_rx_dmamaps + rxqsize;
	sc->sc_rx_mbufs = (void*) (sc->sc_tx_dmamaps + txqsize);
	sc->sc_tx_mbufs = sc->sc_rx_mbufs + rxqsize;

#define C(map, buf, size, nsegs, rw, usage)				\
	do {								\
		r = bus_dmamap_create(vsc->sc_dmat, size, nsegs, size, 0, \
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,	\
				      &sc->sc_ ##map);			\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
					 usage " dmamap creation failed, " \
					 "error code %d\n", r);		\
			goto err_reqs;					\
		}							\
	} while (0)
#define C_L1(map, buf, size, nsegs, rw, usage)				\
	C(map, buf, size, nsegs, rw, usage);				\
	do {								\
		r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ ##map,	\
				    &sc->sc_ ##buf, size, NULL,		\
				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
					 usage " dmamap load failed, "	\
					 "error code %d\n", r);		\
			goto err_reqs;					\
		}							\
	} while (0)
#define C_L2(map, buf, size, nsegs, rw, usage)				\
	C(map, buf, size, nsegs, rw, usage);				\
	do {								\
		r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ ##map,	\
				    sc->sc_ ##buf, size, NULL,		\
				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
					 usage " dmamap load failed, "	\
					 "error code %d\n", r);		\
			goto err_reqs;					\
		}							\
	} while (0)
	for (i = 0; i < rxqsize; i++) {
		C_L1(rxhdr_dmamaps[i], rx_hdrs[i],
		     sizeof(struct virtio_net_hdr), 1,
		     READ, "rx header");
		C(rx_dmamaps[i], NULL, MCLBYTES, 1, 0, "rx payload");
	}

	for (i = 0; i < txqsize; i++) {
		C_L1(txhdr_dmamaps[i], tx_hdrs[i],
		     sizeof(struct virtio_net_hdr), 1,
		     WRITE, "tx header");
		C(tx_dmamaps[i], NULL, ETHER_MAX_LEN, 256 /* XXX */, 0,
		  "tx payload");
	}

	if (vsc->sc_nvqs == 3) {
		/* control vq class & command */
		C_L2(ctrl_cmd_dmamap, ctrl_cmd,
		     sizeof(struct virtio_net_ctrl_cmd), 1, WRITE,
		     "control command");

		/* control vq status */
		C_L2(ctrl_status_dmamap, ctrl_status,
		     sizeof(struct virtio_net_ctrl_status), 1, READ,
		     "control status");

		/* control vq rx mode command parameter */
		C_L2(ctrl_rx_dmamap, ctrl_rx,
		     sizeof(struct virtio_net_ctrl_rx), 1, WRITE,
		     "rx mode control command");

		/* control vq MAC filter table for unicast */
		/* do not load now since its length is variable */
		C(ctrl_tbl_uc_dmamap, NULL,
		  sizeof(struct virtio_net_ctrl_mac_tbl) + 0, 1, WRITE,
		  "unicast MAC address filter command");

		/* control vq MAC filter table for multicast */
		C(ctrl_tbl_mc_dmamap, NULL,
		  (sizeof(struct virtio_net_ctrl_mac_tbl)
		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES),
		  1, WRITE, "multicast MAC address filter command");
	}
#undef C_L2
#undef C_L1
#undef C

	return 0;

err_reqs:
#define D(map)								\
	do {								\
		if (sc->sc_ ##map) {					\
			bus_dmamap_destroy(vsc->sc_dmat, sc->sc_ ##map); \
			sc->sc_ ##map = NULL;				\
		}							\
	} while (0)
	D(ctrl_tbl_mc_dmamap);
	D(ctrl_tbl_uc_dmamap);
	D(ctrl_rx_dmamap);
	D(ctrl_status_dmamap);
	D(ctrl_cmd_dmamap);
	for (i = 0; i < txqsize; i++) {
		D(tx_dmamaps[i]);
		D(txhdr_dmamaps[i]);
	}
	for (i = 0; i < rxqsize; i++) {
		D(rx_dmamaps[i]);
		D(rxhdr_dmamaps[i]);
	}
#undef D
	if (sc->sc_arrays) {
		kmem_free(sc->sc_arrays, allocsize2);
		sc->sc_arrays = 0;
	}
err_dmamem_map:
	bus_dmamem_unmap(vsc->sc_dmat, sc->sc_hdrs, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(vsc->sc_dmat, &sc->sc_hdr_segs[0], 1);
err_none:
	return -1;
}
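
/*
 * For reference, C_L1(rxhdr_dmamaps[i], rx_hdrs[i], ...) above expands
 * roughly to the usual create-then-load pair (error handling omitted):
 *
 *	bus_dmamap_create(vsc->sc_dmat, sizeof(struct virtio_net_hdr), 1,
 *	    sizeof(struct virtio_net_hdr), 0,
 *	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &sc->sc_rxhdr_dmamaps[i]);
 *	bus_dmamap_load(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[i],
 *	    &sc->sc_rx_hdrs[i], sizeof(struct virtio_net_hdr), NULL,
 *	    BUS_DMA_READ | BUS_DMA_NOWAIT);
 */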
static void
vioif_attach(device_t parent, device_t self, void *aux)
{
	struct vioif_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	uint32_t features;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (vsc->sc_child != NULL) {
		aprint_normal(": child already attached for %s; "
			      "something is wrong...\n",
			      device_xname(parent));
		return;
	}

	sc->sc_dev = self;
	sc->sc_virtio = vsc;

	vsc->sc_child = self;
	vsc->sc_ipl = IPL_NET;
	vsc->sc_vqs = &sc->sc_vq[0];
	vsc->sc_config_change = 0;
	vsc->sc_intrhand = virtio_vq_intr;

	features = virtio_negotiate_features(vsc,
					     (VIRTIO_NET_F_MAC |
					      VIRTIO_NET_F_STATUS |
					      VIRTIO_NET_F_CTRL_VQ |
					      VIRTIO_NET_F_CTRL_RX |
					      VIRTIO_F_NOTIFY_ON_EMPTY));
	sc->sc_features = features;	/* remembered for vioif_stop() */
	if (features & VIRTIO_NET_F_MAC) {
		sc->sc_mac[0] = virtio_read_device_config_1(vsc,
						VIRTIO_NET_CONFIG_MAC+0);
		sc->sc_mac[1] = virtio_read_device_config_1(vsc,
						VIRTIO_NET_CONFIG_MAC+1);
		sc->sc_mac[2] = virtio_read_device_config_1(vsc,
						VIRTIO_NET_CONFIG_MAC+2);
		sc->sc_mac[3] = virtio_read_device_config_1(vsc,
						VIRTIO_NET_CONFIG_MAC+3);
		sc->sc_mac[4] = virtio_read_device_config_1(vsc,
						VIRTIO_NET_CONFIG_MAC+4);
		sc->sc_mac[5] = virtio_read_device_config_1(vsc,
						VIRTIO_NET_CONFIG_MAC+5);
	} else {
		/* code stolen from sys/net/if_tap.c */
		struct timeval tv;
		uint32_t ui;
		getmicrouptime(&tv);
		ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
		memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+0,
					     sc->sc_mac[0]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+1,
					     sc->sc_mac[1]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+2,
					     sc->sc_mac[2]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+3,
					     sc->sc_mac[3]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+4,
					     sc->sc_mac[4]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+5,
					     sc->sc_mac[5]);
	}
	aprint_normal(": Ethernet address %s\n", ether_sprintf(sc->sc_mac));
	aprint_naive("\n");

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0,
			    MCLBYTES+sizeof(struct virtio_net_hdr), 2,
			    "rx") != 0) {
		goto err;
	}
	vsc->sc_nvqs = 1;
	sc->sc_vq[0].vq_done = vioif_rx_vq_done;
	if (virtio_alloc_vq(vsc, &sc->sc_vq[1], 1,
			    (sizeof(struct virtio_net_hdr)
			     + (ETHER_MAX_LEN - ETHER_HDR_LEN)),
			    VIRTIO_NET_TX_MAXNSEGS + 1,
			    "tx") != 0) {
		goto err;
	}
	vsc->sc_nvqs = 2;
	sc->sc_vq[1].vq_done = vioif_tx_vq_done;
	virtio_start_vq_intr(vsc, &sc->sc_vq[0]);
	virtio_stop_vq_intr(vsc, &sc->sc_vq[1]); /* not urgent; do it later */
	if ((features & VIRTIO_NET_F_CTRL_VQ)
	    && (features & VIRTIO_NET_F_CTRL_RX)) {
		if (virtio_alloc_vq(vsc, &sc->sc_vq[2], 2,
				    NBPG, 1, "control") == 0) {
			sc->sc_vq[2].vq_done = vioif_ctrl_vq_done;
			cv_init(&sc->sc_ctrl_wait, "ctrl_vq");
			mutex_init(&sc->sc_ctrl_wait_lock,
				   MUTEX_DEFAULT, IPL_NET);
			sc->sc_ctrl_inuse = FREE;
			virtio_start_vq_intr(vsc, &sc->sc_vq[2]);
			vsc->sc_nvqs = 3;
		}
	}

	sc->sc_rx_softint = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
					      vioif_rx_softint, sc);
	if (sc->sc_rx_softint == NULL) {
		aprint_error_dev(self, "cannot establish softint\n");
		goto err;
	}

	if (vioif_alloc_mems(sc) < 0)
		goto err;
	if (vsc->sc_nvqs == 3)
		config_interrupts(self, vioif_deferred_init);

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = vioif_start;
	ifp->if_ioctl = vioif_ioctl;
	ifp->if_init = vioif_init;
	ifp->if_stop = vioif_stop;
	ifp->if_capabilities = 0;
	ifp->if_watchdog = vioif_watchdog;

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_mac);

	return;

err:
	if (vsc->sc_nvqs == 3) {
		virtio_free_vq(vsc, &sc->sc_vq[2]);
		cv_destroy(&sc->sc_ctrl_wait);
		mutex_destroy(&sc->sc_ctrl_wait_lock);
		vsc->sc_nvqs = 2;
	}
	if (vsc->sc_nvqs == 2) {
		virtio_free_vq(vsc, &sc->sc_vq[1]);
		vsc->sc_nvqs = 1;
	}
	if (vsc->sc_nvqs == 1) {
		virtio_free_vq(vsc, &sc->sc_vq[0]);
		vsc->sc_nvqs = 0;
	}
	vsc->sc_child = (void*)1;
	return;
}

/* we need interrupts to turn promiscuous mode off */
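/*
 * vioif_ctrl_rx() sleeps on sc_ctrl_wait until the command-completion
 * interrupt (vioif_ctrl_vq_done()) wakes it up, so it must not be called
 * from attach context where interrupts are not yet running; hence
 * vioif_attach() defers this step with config_interrupts().
 */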
static void
vioif_deferred_init(device_t self)
{
	struct vioif_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int r;

	r = vioif_set_promisc(sc, false);
	if (r != 0)
		aprint_error_dev(self, "resetting promisc mode failed, "
				 "error code %d\n", r);
	else
		ifp->if_flags &= ~IFF_PROMISC;
}

/*
 * Interface functions for ifnet
 */
static int
vioif_init(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;

	vioif_stop(ifp, 0);
	vioif_populate_rx_mbufs(sc);
	vioif_updown(sc, true);
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	vioif_rx_filter(sc);

	return 0;
}

static void
vioif_stop(struct ifnet *ifp, int disable)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;

	/* the only way to stop I/O and DMA is resetting... */
	virtio_reset(vsc);
	vioif_rx_deq(sc);
	vioif_tx_drain(sc);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	if (disable)
		vioif_rx_drain(sc);

	virtio_reinit_start(vsc);
	virtio_negotiate_features(vsc, sc->sc_features);
	virtio_start_vq_intr(vsc, &sc->sc_vq[0]);
	virtio_stop_vq_intr(vsc, &sc->sc_vq[1]);
	if (vsc->sc_nvqs >= 3)
		virtio_start_vq_intr(vsc, &sc->sc_vq[2]);
	virtio_reinit_end(vsc);
	vioif_updown(sc, false);
}

static void
vioif_start(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[1]; /* tx vq */
	struct mbuf *m;
	int queued = 0, retry = 0;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	while (IFQ_POLL(&ifp->if_snd, m), m != NULL) {
		int slot, r;

		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN) {
			ifp->if_flags |= IFF_OACTIVE;
			vioif_tx_vq_done(vq);
			if (retry++ == 0)
				continue;
			else
				break;
		}
		if (r != 0)
			panic("enqueue_prep for a tx buffer");
		r = bus_dmamap_load_mbuf(vsc->sc_dmat,
					 sc->sc_tx_dmamaps[slot],
					 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (r != 0) {
			virtio_enqueue_abort(vsc, vq, slot);
			printf("%s: tx dmamap load failed, error code %d\n",
			       device_xname(sc->sc_dev), r);
			break;
		}
		r = virtio_enqueue_reserve(vsc, vq, slot,
					   sc->sc_tx_dmamaps[slot]->dm_nsegs + 1);
		if (r != 0) {
			bus_dmamap_unload(vsc->sc_dmat,
					  sc->sc_tx_dmamaps[slot]);
			ifp->if_flags |= IFF_OACTIVE;
			vioif_tx_vq_done(vq);
			if (retry++ == 0)
				continue;
			else
				break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m);
		sc->sc_tx_mbufs[slot] = m;

		memset(&sc->sc_tx_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot],
				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_txhdr_dmamaps[slot],
				0, sc->sc_txhdr_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_PREWRITE);
		virtio_enqueue(vsc, vq, slot, sc->sc_txhdr_dmamaps[slot], true);
		virtio_enqueue(vsc, vq, slot, sc->sc_tx_dmamaps[slot], true);
		virtio_enqueue_commit(vsc, vq, slot, false);
		queued++;
		bpf_mtap(ifp, m);
	}

	if (queued > 0) {
		virtio_enqueue_commit(vsc, vq, -1, true);
		ifp->if_timer = 5;
	}
}
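
/*
 * Each frame sent above occupies one slot with dm_nsegs + 1 descriptors:
 * a device-readable virtio_net_hdr (always zeroed here, no offloads)
 * followed by the payload segments.  EAGAIN from enqueue_prep or a failed
 * reserve means the ring is full: IFF_OACTIVE is set, finished slots are
 * reclaimed once via vioif_tx_vq_done(), and the frame is retried a
 * single time.
 */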
static int
vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, r;

	s = splnet();

	r = ether_ioctl(ifp, cmd, data);
	if ((r == 0 && cmd == SIOCSIFFLAGS) ||
	    (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI))) {
		if (ifp->if_flags & IFF_RUNNING)
			r = vioif_rx_filter(ifp->if_softc);
		else
			r = 0;
	}

	splx(s);

	return r;
}

static void
vioif_watchdog(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		vioif_tx_vq_done(&sc->sc_vq[1]);
}


/*
 * Receive implementation
 */
/* allocate and initialize an mbuf for receive */
static int
vioif_add_rx_mbuf(struct vioif_softc *sc, int i)
{
	struct mbuf *m;
	int r;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}
	sc->sc_rx_mbufs[i] = m;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	r = bus_dmamap_load_mbuf(sc->sc_virtio->sc_dmat,
				 sc->sc_rx_dmamaps[i],
				 m, BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (r) {
		m_freem(m);
		sc->sc_rx_mbufs[i] = 0;
		return r;
	}

	return 0;
}

/* free an mbuf used for receive */
static void
vioif_free_rx_mbuf(struct vioif_softc *sc, int i)
{
	bus_dmamap_unload(sc->sc_virtio->sc_dmat, sc->sc_rx_dmamaps[i]);
	m_freem(sc->sc_rx_mbufs[i]);
	sc->sc_rx_mbufs[i] = NULL;
}

/* add mbufs for all the empty receive slots */
static void
vioif_populate_rx_mbufs(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int i, r, ndone = 0;
	struct virtqueue *vq = &sc->sc_vq[0]; /* rx vq */

	for (i = 0; i < vq->vq_num; i++) {
		int slot;
		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN)
			break;
		if (r != 0)
			panic("enqueue_prep for rx buffers");
		if (sc->sc_rx_mbufs[slot] == NULL) {
			r = vioif_add_rx_mbuf(sc, slot);
			if (r != 0) {
				printf("%s: rx mbuf allocation failed, "
				       "error code %d\n",
				       device_xname(sc->sc_dev), r);
				break;
			}
		}
		r = virtio_enqueue_reserve(vsc, vq, slot,
					   sc->sc_rx_dmamaps[slot]->dm_nsegs + 1);
		if (r != 0) {
			vioif_free_rx_mbuf(sc, slot);
			break;
		}
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[slot],
				0, sizeof(struct virtio_net_hdr),
				BUS_DMASYNC_PREREAD);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot],
				0, MCLBYTES, BUS_DMASYNC_PREREAD);
		virtio_enqueue(vsc, vq, slot, sc->sc_rxhdr_dmamaps[slot],
			       false);
		virtio_enqueue(vsc, vq, slot, sc->sc_rx_dmamaps[slot], false);
		virtio_enqueue_commit(vsc, vq, slot, false);
		ndone++;
	}
	if (ndone > 0)
		virtio_enqueue_commit(vsc, vq, -1, true);
}

/* dequeue received packets */
static int
vioif_rx_deq(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int r = 0;
	int slot, len;

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		len -= sizeof(struct virtio_net_hdr);
		r = 1;
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[slot],
				0, sizeof(struct virtio_net_hdr),
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot],
				0, MCLBYTES,
				BUS_DMASYNC_POSTREAD);
		m = sc->sc_rx_mbufs[slot];
		KASSERT(m != NULL);
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_rx_dmamaps[slot]);
		sc->sc_rx_mbufs[slot] = 0;
		virtio_dequeue_commit(vsc, vq, slot);
		m->m_pkthdr.rcvif = ifp;
		m->m_len = m->m_pkthdr.len = len;
		ifp->if_ipackets++;
		bpf_mtap(ifp, m);
		(*ifp->if_input)(ifp, m);
	}

	return r;
}
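
/*
 * Receive is split between two contexts: vioif_rx_vq_done() below runs
 * at interrupt level and only dequeues and inputs finished frames, while
 * refilling the ring, which may allocate mbuf clusters, is deferred to
 * the softint.  Each rx slot uses two descriptors, one for the
 * virtio_net_hdr and one for an MCLBYTES cluster, matching the segment
 * count of 2 passed to virtio_alloc_vq() in vioif_attach().
 */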
/* rx interrupt; call _dequeue above and schedule a softint */
static int
vioif_rx_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(vsc->sc_child);
	int r;

	r = vioif_rx_deq(sc);
	if (r)
		softint_schedule(sc->sc_rx_softint);

	return r;
}

/* softint: enqueue receive requests for new incoming packets */
static void
vioif_rx_softint(void *arg)
{
	struct vioif_softc *sc = arg;

	vioif_populate_rx_mbufs(sc);
}

/* free all the mbufs; called from if_stop(disable) */
static void
vioif_rx_drain(struct vioif_softc *sc)
{
	struct virtqueue *vq = &sc->sc_vq[0];
	int i;

	for (i = 0; i < vq->vq_num; i++) {
		if (sc->sc_rx_mbufs[i] == NULL)
			continue;
		vioif_free_rx_mbuf(sc, i);
	}
}


/*
 * Transmission implementation
 */
/* actual transmission is done in if_start */
/* tx interrupt; dequeue and free mbufs */
/*
 * the tx interrupt is normally disabled (see vioif_attach()); this is
 * called when the tx vq fills up and from the watchdog
 */
static int
vioif_tx_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(vsc->sc_child);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int r = 0;
	int slot, len;

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		r++;
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_txhdr_dmamaps[slot],
				0, sizeof(struct virtio_net_hdr),
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot],
				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		m = sc->sc_tx_mbufs[slot];
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[slot]);
		sc->sc_tx_mbufs[slot] = 0;
		virtio_dequeue_commit(vsc, vq, slot);
		ifp->if_opackets++;
		m_freem(m);
	}

	if (r)
		ifp->if_flags &= ~IFF_OACTIVE;
	return r;
}

/* free all the mbufs already put on vq; called from if_stop(disable) */
static void
vioif_tx_drain(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[1];
	int i;

	for (i = 0; i < vq->vq_num; i++) {
		if (sc->sc_tx_mbufs[i] == NULL)
			continue;
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[i]);
		m_freem(sc->sc_tx_mbufs[i]);
		sc->sc_tx_mbufs[i] = NULL;
	}
}

/*
 * Control vq
 */
/* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */
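/*
 * A control command is a three-descriptor chain on sc_vq[2]: the
 * class/command pair (device-readable), the command parameter
 * (device-readable), and the one-byte status (device-writable).  The
 * single set of command buffers is serialized by sc_ctrl_inuse:
 * FREE -> INUSE when a caller claims it, INUSE -> DONE in
 * vioif_ctrl_vq_done(), DONE -> FREE once the caller has read the
 * status back; every transition is signalled on sc_ctrl_wait.
 */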
static int
vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[2];
	int r, slot;

	if (vsc->sc_nvqs < 3)
		return ENOTSUP;

	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != FREE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = INUSE;
	mutex_exit(&sc->sc_ctrl_wait_lock);

	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_RX;
	sc->sc_ctrl_cmd->command = cmd;
	sc->sc_ctrl_rx->onoff = onoff;

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap,
			0, sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_rx_dmamap,
			0, sizeof(struct virtio_net_ctrl_rx),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap,
			0, sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_PREREAD);

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, 3);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_rx_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* wait for done */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != DONE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	mutex_exit(&sc->sc_ctrl_wait_lock);
	/* already dequeued */

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap, 0,
			sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_rx_dmamap, 0,
			sizeof(struct virtio_net_ctrl_rx),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap, 0,
			sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_POSTREAD);

	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		printf("%s: failed setting rx mode\n",
		       device_xname(sc->sc_dev));
		r = EIO;
	}

	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = FREE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return r;
}

static int
vioif_set_promisc(struct vioif_softc *sc, bool onoff)
{
	int r;

	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);

	return r;
}

static int
vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
{
	int r;

	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);

	return r;
}
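
/*
 * VIRTIO_NET_CTRL_MAC_TABLE_SET carries two variable-length tables in a
 * single four-descriptor chain: cmd, unicast table, multicast table and
 * status.  This driver always sends an empty unicast table (the
 * interface's own address is presumably still matched by the host) and
 * at most VIRTIO_NET_CTRL_MAC_MAXENTRIES multicast entries, so the table
 * dmamaps are loaded per call, sized from the nentries fields that
 * vioif_rx_filter() fills in.
 */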
/* issue VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for completion */
static int
vioif_set_rx_filter(struct vioif_softc *sc)
{
	/* filter already set in sc_ctrl_mac_tbl */
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[2];
	int r, slot;

	if (vsc->sc_nvqs < 3)
		return ENOTSUP;

	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != FREE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = INUSE;
	mutex_exit(&sc->sc_ctrl_wait_lock);

	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_MAC;
	sc->sc_ctrl_cmd->command = VIRTIO_NET_CTRL_MAC_TABLE_SET;

	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap,
			    sc->sc_ctrl_mac_tbl_uc,
			    (sizeof(struct virtio_net_ctrl_mac_tbl)
			     + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r) {
		printf("%s: control command dmamap load failed, "
		       "error code %d\n", device_xname(sc->sc_dev), r);
		goto out;
	}
	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap,
			    sc->sc_ctrl_mac_tbl_mc,
			    (sizeof(struct virtio_net_ctrl_mac_tbl)
			     + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r) {
		printf("%s: control command dmamap load failed, "
		       "error code %d\n", device_xname(sc->sc_dev), r);
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap);
		goto out;
	}

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap,
			0, sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap,
			0, sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_PREREAD);

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, 4);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_uc_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_mc_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* wait for done */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != DONE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	mutex_exit(&sc->sc_ctrl_wait_lock);
	/* already dequeued */

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap, 0,
			sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap, 0,
			sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap);
	bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap);

	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		printf("%s: failed setting rx filter\n",
		       device_xname(sc->sc_dev));
		r = EIO;
	}

out:
	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = FREE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return r;
}

/* ctrl vq interrupt; wake up the command issuer */
static int
vioif_ctrl_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(vsc->sc_child);
	int r, slot;

	r = virtio_dequeue(vsc, vq, &slot, NULL);
	if (r == ENOENT)
		return 0;
	virtio_dequeue_commit(vsc, vq, slot);

	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = DONE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return 1;
}

/*
 * If IFF_PROMISC is requested, set the device promiscuous.
 * If the multicast filter is small enough (<= MAXENTRIES), program the
 * MAC table.  If the multicast filter is too large, use ALLMULTI.
 */
/*
 * If programming the MAC table fails, fall back to ALLMULTI.
 * If ALLMULTI fails, fall back to PROMISC.
 */
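/*
 * Resulting rx modes, from most to least selective:
 *	MAC table:	empty unicast table plus at most
 *			VIRTIO_NET_CTRL_MAC_MAXENTRIES exact multicast
 *			entries
 *	ALLMULTI:	too many multicast entries, a multicast address
 *			range, or a failed table set
 *	PROMISC:	IFF_PROMISC requested, ALLMULTI failed, or no
 *			control vq available at all
 */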
static int
vioif_rx_filter(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int nentries;
	int promisc = 0, allmulti = 0, rxfilter = 0;
	int r;

	if (vsc->sc_nvqs < 3) {	/* no ctrl vq; always promisc */
		ifp->if_flags |= IFF_PROMISC;
		return 0;
	}

	if (ifp->if_flags & IFF_PROMISC) {
		promisc = 1;
		goto set;
	}

	nentries = -1;
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (nentries++, enm != NULL) {
		if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
			allmulti = 1;
			goto set;
		}
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			   ETHER_ADDR_LEN)) {
			allmulti = 1;
			goto set;
		}
		memcpy(sc->sc_ctrl_mac_tbl_mc->macs[nentries],
		       enm->enm_addrlo, ETHER_ADDR_LEN);
		ETHER_NEXT_MULTI(step, enm);
	}
	rxfilter = 1;

set:
	if (rxfilter) {
		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
		sc->sc_ctrl_mac_tbl_mc->nentries = nentries;
		r = vioif_set_rx_filter(sc);
		if (r != 0) {
			rxfilter = 0;
			allmulti = 1; /* fallback */
		}
	} else {
		/* remove rx filter */
		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
		sc->sc_ctrl_mac_tbl_mc->nentries = 0;
		r = vioif_set_rx_filter(sc);
		/* what to do on failure? */
	}
	if (allmulti) {
		r = vioif_set_allmulti(sc, true);
		if (r != 0) {
			allmulti = 0;
			promisc = 1; /* fallback */
		}
	} else {
		r = vioif_set_allmulti(sc, false);
		/* what to do on failure? */
	}
	if (promisc) {
		r = vioif_set_promisc(sc, true);
	} else {
		r = vioif_set_promisc(sc, false);
	}

	return r;
}

/* change link status */
static int
vioif_updown(struct vioif_softc *sc, bool isup)
{
	struct virtio_softc *vsc = sc->sc_virtio;

	if (!(vsc->sc_features & VIRTIO_NET_F_STATUS))
		return ENODEV;
	virtio_write_device_config_1(vsc,
				     VIRTIO_NET_CONFIG_STATUS,
				     isup ? VIRTIO_NET_S_LINK_UP : 0);
	return 0;
}