/*	$NetBSD: if_vioif.c,v 1.16 2015/05/05 10:56:13 ozaki-r Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.16 2015/05/05 10:56:13 ozaki-r Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/cpu.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>


#ifdef NET_MPSAFE
#define VIOIF_MPSAFE	1
#endif

#ifdef SOFTINT_INTR
#define VIOIF_SOFTINT_INTR	1
#endif

/*
 * if_vioifreg.h:
 */
/* Configuration registers */
#define VIRTIO_NET_CONFIG_MAC		0 /* 8bit x 6byte */
#define VIRTIO_NET_CONFIG_STATUS	6 /* 16bit */

/* Feature bits */
#define VIRTIO_NET_F_CSUM	(1<<0)
#define VIRTIO_NET_F_GUEST_CSUM	(1<<1)
#define VIRTIO_NET_F_MAC	(1<<5)
#define VIRTIO_NET_F_GSO	(1<<6)
#define VIRTIO_NET_F_GUEST_TSO4	(1<<7)
#define VIRTIO_NET_F_GUEST_TSO6	(1<<8)
#define VIRTIO_NET_F_GUEST_ECN	(1<<9)
#define VIRTIO_NET_F_GUEST_UFO	(1<<10)
#define VIRTIO_NET_F_HOST_TSO4	(1<<11)
#define VIRTIO_NET_F_HOST_TSO6	(1<<12)
#define VIRTIO_NET_F_HOST_ECN	(1<<13)
#define VIRTIO_NET_F_HOST_UFO	(1<<14)
#define VIRTIO_NET_F_MRG_RXBUF	(1<<15)
#define VIRTIO_NET_F_STATUS	(1<<16)
#define VIRTIO_NET_F_CTRL_VQ	(1<<17)
#define VIRTIO_NET_F_CTRL_RX	(1<<18)
#define VIRTIO_NET_F_CTRL_VLAN	(1<<19)

/* Status */
#define VIRTIO_NET_S_LINK_UP	1

/* Packet header structure */
struct virtio_net_hdr {
	uint8_t		flags;
	uint8_t		gso_type;
	uint16_t	hdr_len;
	uint16_t	gso_size;
	uint16_t	csum_start;
	uint16_t	csum_offset;
#if 0
	uint16_t	num_buffers;	/* if VIRTIO_NET_F_MRG_RXBUF enabled */
#endif
} __packed;
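/*
 * Illustrative note (not from the original sources): every frame on the
 * rx and tx virtqueues is preceded by one struct virtio_net_hdr.  Since
 * this driver negotiates no checksum/GSO offload features, it zeroes the
 * header on transmit and ignores it on receive; only the header's slot
 * on the ring matters.  Rough per-slot layout:
 *
 *	descriptor 0: struct virtio_net_hdr (10 bytes, packed, see above)
 *	descriptor 1: frame payload (an mbuf or mbuf cluster)
 */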

#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1 /* flags */
#define VIRTIO_NET_HDR_GSO_NONE		0 /* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV4	1 /* gso_type */
#define VIRTIO_NET_HDR_GSO_UDP		3 /* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV6	4 /* gso_type */
#define VIRTIO_NET_HDR_GSO_ECN		0x80 /* gso_type, |'ed */

#define VIRTIO_NET_MAX_GSO_LEN		(65536+ETHER_HDR_LEN)

/* Control virtqueue */
struct virtio_net_ctrl_cmd {
	uint8_t	class;
	uint8_t	command;
} __packed;
#define VIRTIO_NET_CTRL_RX		0
# define VIRTIO_NET_CTRL_RX_PROMISC	0
# define VIRTIO_NET_CTRL_RX_ALLMULTI	1

#define VIRTIO_NET_CTRL_MAC		1
# define VIRTIO_NET_CTRL_MAC_TABLE_SET	0

#define VIRTIO_NET_CTRL_VLAN		2
# define VIRTIO_NET_CTRL_VLAN_ADD	0
# define VIRTIO_NET_CTRL_VLAN_DEL	1

struct virtio_net_ctrl_status {
	uint8_t	ack;
} __packed;
#define VIRTIO_NET_OK			0
#define VIRTIO_NET_ERR			1

struct virtio_net_ctrl_rx {
	uint8_t	onoff;
} __packed;

struct virtio_net_ctrl_mac_tbl {
	uint32_t nentries;
	uint8_t macs[][ETHER_ADDR_LEN];
} __packed;

struct virtio_net_ctrl_vlan {
	uint16_t id;
} __packed;


/*
 * if_vioifvar.h:
 */
struct vioif_softc {
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[3];

	uint8_t			sc_mac[ETHER_ADDR_LEN];
	struct ethercom		sc_ethercom;
	short			sc_deferred_init_done;

	/* bus_dmamem */
	bus_dma_segment_t	sc_hdr_segs[1];
	struct virtio_net_hdr	*sc_hdrs;
#define sc_rx_hdrs	sc_hdrs
	struct virtio_net_hdr	*sc_tx_hdrs;
	struct virtio_net_ctrl_cmd *sc_ctrl_cmd;
	struct virtio_net_ctrl_status *sc_ctrl_status;
	struct virtio_net_ctrl_rx *sc_ctrl_rx;
	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_uc;
	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_mc;

	/* kmem */
	bus_dmamap_t		*sc_arrays;
#define sc_rxhdr_dmamaps sc_arrays
	bus_dmamap_t		*sc_txhdr_dmamaps;
	bus_dmamap_t		*sc_rx_dmamaps;
	bus_dmamap_t		*sc_tx_dmamaps;
	struct mbuf		**sc_rx_mbufs;
	struct mbuf		**sc_tx_mbufs;

	bus_dmamap_t		sc_ctrl_cmd_dmamap;
	bus_dmamap_t		sc_ctrl_status_dmamap;
	bus_dmamap_t		sc_ctrl_rx_dmamap;
	bus_dmamap_t		sc_ctrl_tbl_uc_dmamap;
	bus_dmamap_t		sc_ctrl_tbl_mc_dmamap;

	void			*sc_rx_softint;

	enum {
		FREE, INUSE, DONE
	}			sc_ctrl_inuse;
	kcondvar_t		sc_ctrl_wait;
	kmutex_t		sc_ctrl_wait_lock;
	kmutex_t		*sc_tx_lock;
	kmutex_t		*sc_rx_lock;
	bool			sc_stopping;
};
#define VIRTIO_NET_TX_MAXNSEGS		(16) /* XXX */
#define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64) /* XXX */

#define VIOIF_TX_LOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define VIOIF_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define VIOIF_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define VIOIF_RX_LOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define VIOIF_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define VIOIF_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
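
/*
 * Usage sketch for the macros above: with VIOIF_MPSAFE the tx/rx locks
 * are real mutexes allocated in vioif_attach(); without it they stay
 * NULL, the LOCK/UNLOCK macros become no-ops, and the LOCKED predicates
 * are always true.  A typical pattern is:
 *
 *	VIOIF_TX_LOCK(sc);
 *	if (!sc->sc_stopping)
 *		... manipulate the tx ring state ...
 *	VIOIF_TX_UNLOCK(sc);
 */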

/* cfattach interface functions */
static int	vioif_match(device_t, cfdata_t, void *);
static void	vioif_attach(device_t, device_t, void *);
static void	vioif_deferred_init(device_t);

/* ifnet interface functions */
static int	vioif_init(struct ifnet *);
static void	vioif_stop(struct ifnet *, int);
static void	vioif_start(struct ifnet *);
static int	vioif_ioctl(struct ifnet *, u_long, void *);
static void	vioif_watchdog(struct ifnet *);

/* rx */
static int	vioif_add_rx_mbuf(struct vioif_softc *, int);
static void	vioif_free_rx_mbuf(struct vioif_softc *, int);
static void	vioif_populate_rx_mbufs(struct vioif_softc *);
static void	vioif_populate_rx_mbufs_locked(struct vioif_softc *);
static int	vioif_rx_deq(struct vioif_softc *);
static int	vioif_rx_deq_locked(struct vioif_softc *);
static int	vioif_rx_vq_done(struct virtqueue *);
static void	vioif_rx_softint(void *);
static void	vioif_rx_drain(struct vioif_softc *);

/* tx */
static int	vioif_tx_vq_done(struct virtqueue *);
static int	vioif_tx_vq_done_locked(struct virtqueue *);
static void	vioif_tx_drain(struct vioif_softc *);

/* other control */
static int	vioif_updown(struct vioif_softc *, bool);
static int	vioif_ctrl_rx(struct vioif_softc *, int, bool);
static int	vioif_set_promisc(struct vioif_softc *, bool);
static int	vioif_set_allmulti(struct vioif_softc *, bool);
static int	vioif_set_rx_filter(struct vioif_softc *);
static int	vioif_rx_filter(struct vioif_softc *);
static int	vioif_ctrl_vq_done(struct virtqueue *);

CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
		  vioif_match, vioif_attach, NULL, NULL);

static int
vioif_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_softc *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
		return 1;

	return 0;
}

/* allocate memory */
/*
 * dma memory is used for:
 *   sc_rx_hdrs[slot]:	 metadata array for received frames (READ)
 *   sc_tx_hdrs[slot]:	 metadata array for frames to be sent (WRITE)
 *   sc_ctrl_cmd:	 command to be sent via ctrl vq (WRITE)
 *   sc_ctrl_status:	 return value for a command via ctrl vq (READ)
 *   sc_ctrl_rx:	 parameter for a VIRTIO_NET_CTRL_RX class command
 *			 (WRITE)
 *   sc_ctrl_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 *   sc_ctrl_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 * Only one of each sc_ctrl_* structure is allocated; they are protected by
 * the sc_ctrl_inuse variable and the sc_ctrl_wait condvar.
 */
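/*
 * Worked example (hypothetical ring sizes, for illustration only): with
 * 256-entry rx and tx rings and the control vq present, the DMA memory
 * laid out by vioif_alloc_mems() below is
 *
 *	256 * sizeof(struct virtio_net_hdr)	 rx headers   (2560 bytes)
 *	256 * sizeof(struct virtio_net_hdr)	 tx headers   (2560 bytes)
 *	sizeof(struct virtio_net_ctrl_cmd)	 (2 bytes)
 *	sizeof(struct virtio_net_ctrl_status)	 (1 byte)
 *	sizeof(struct virtio_net_ctrl_rx)	 (1 byte)
 *	sizeof(struct virtio_net_ctrl_mac_tbl)	 empty uc table (4 bytes)
 *	sizeof(struct virtio_net_ctrl_mac_tbl)
 *	    + 64 * ETHER_ADDR_LEN		 mc table (388 bytes)
 *
 * for an allocsize of 5516 bytes in a single bus_dmamem segment.
 */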
/*
 * dynamically allocated memory is used for:
 *   sc_rxhdr_dmamaps[slot]:	bus_dmamap_t array for sc_rx_hdrs[slot]
 *   sc_txhdr_dmamaps[slot]:	bus_dmamap_t array for sc_tx_hdrs[slot]
 *   sc_rx_dmamaps[slot]:	bus_dmamap_t array for received payload
 *   sc_tx_dmamaps[slot]:	bus_dmamap_t array for sent payload
 *   sc_rx_mbufs[slot]:		mbuf pointer array for received frames
 *   sc_tx_mbufs[slot]:		mbuf pointer array for sent frames
 */
static int
vioif_alloc_mems(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int allocsize, allocsize2, r, rsegs, i;
	void *vaddr;
	intptr_t p;
	int rxqsize, txqsize;

	rxqsize = vsc->sc_vqs[0].vq_num;
	txqsize = vsc->sc_vqs[1].vq_num;

	allocsize = sizeof(struct virtio_net_hdr) * rxqsize;
	allocsize += sizeof(struct virtio_net_hdr) * txqsize;
	if (vsc->sc_nvqs == 3) {
		allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
			+ sizeof(struct virtio_net_ctrl_mac_tbl)
			+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
	}
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
			     &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory allocation failed, size %d, "
				 "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(vsc->sc_dmat,
			   &sc->sc_hdr_segs[0], 1, allocsize,
			   &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory map failed, "
				 "error code %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_hdrs = vaddr;
	memset(vaddr, 0, allocsize);
	p = (intptr_t) vaddr;
	p += sizeof(struct virtio_net_hdr) * rxqsize;
#define P(name,size)	do { sc->sc_ ##name = (void*) p;	\
			     p += size; } while (0)
	P(tx_hdrs, sizeof(struct virtio_net_hdr) * txqsize);
	if (vsc->sc_nvqs == 3) {
		P(ctrl_cmd, sizeof(struct virtio_net_ctrl_cmd));
		P(ctrl_status, sizeof(struct virtio_net_ctrl_status));
		P(ctrl_rx, sizeof(struct virtio_net_ctrl_rx));
		P(ctrl_mac_tbl_uc, sizeof(struct virtio_net_ctrl_mac_tbl));
		P(ctrl_mac_tbl_mc,
		  (sizeof(struct virtio_net_ctrl_mac_tbl)
		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES));
	}
#undef P

	allocsize2 = sizeof(bus_dmamap_t) * (rxqsize + txqsize);
	allocsize2 += sizeof(bus_dmamap_t) * (rxqsize + txqsize);
	allocsize2 += sizeof(struct mbuf*) * (rxqsize + txqsize);
	sc->sc_arrays = kmem_zalloc(allocsize2, KM_SLEEP);
	if (sc->sc_arrays == NULL)
		goto err_dmamem_map;
	sc->sc_txhdr_dmamaps = sc->sc_arrays + rxqsize;
	sc->sc_rx_dmamaps = sc->sc_txhdr_dmamaps + txqsize;
	sc->sc_tx_dmamaps = sc->sc_rx_dmamaps + rxqsize;
	sc->sc_rx_mbufs = (void*) (sc->sc_tx_dmamaps + txqsize);
	sc->sc_tx_mbufs = sc->sc_rx_mbufs + rxqsize;
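
/*
 * The macros below (C, C_L1, C_L2) wrap bus_dmamap_create() and
 * bus_dmamap_load().  As an illustration, C_L1(rxhdr_dmamaps[i],
 * rx_hdrs[i], size, 1, READ, "rx header") expands roughly to
 *
 *	bus_dmamap_create(vsc->sc_dmat, size, 1, size, 0,
 *	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &sc->sc_rxhdr_dmamaps[i]);
 *	bus_dmamap_load(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[i],
 *	    &sc->sc_rx_hdrs[i], size, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
 *
 * C_L1 takes the address of the named softc member (the headers are
 * embedded in the DMA memory above), while C_L2 passes the member's
 * value (the sc_ctrl_* members are pointers into that memory).
 */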
#define C(map, buf, size, nsegs, rw, usage)				\
	do {								\
		r = bus_dmamap_create(vsc->sc_dmat, size, nsegs, size, 0, \
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,	\
				      &sc->sc_ ##map);			\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
					 usage " dmamap creation failed, " \
					 "error code %d\n", r);		\
			goto err_reqs;					\
		}							\
	} while (0)
#define C_L1(map, buf, size, nsegs, rw, usage)				\
	C(map, buf, size, nsegs, rw, usage);				\
	do {								\
		r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ ##map,	\
				    &sc->sc_ ##buf, size, NULL,		\
				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
					 usage " dmamap load failed, "	\
					 "error code %d\n", r);		\
			goto err_reqs;					\
		}							\
	} while (0)
#define C_L2(map, buf, size, nsegs, rw, usage)				\
	C(map, buf, size, nsegs, rw, usage);				\
	do {								\
		r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ ##map,	\
				    sc->sc_ ##buf, size, NULL,		\
				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
					 usage " dmamap load failed, "	\
					 "error code %d\n", r);		\
			goto err_reqs;					\
		}							\
	} while (0)
	for (i = 0; i < rxqsize; i++) {
		C_L1(rxhdr_dmamaps[i], rx_hdrs[i],
		    sizeof(struct virtio_net_hdr), 1,
		    READ, "rx header");
		C(rx_dmamaps[i], NULL, MCLBYTES, 1, 0, "rx payload");
	}

	for (i = 0; i < txqsize; i++) {
		C_L1(txhdr_dmamaps[i], tx_hdrs[i],
		    sizeof(struct virtio_net_hdr), 1,
		    WRITE, "tx header");
		C(tx_dmamaps[i], NULL, ETHER_MAX_LEN, 256 /* XXX */, 0,
		    "tx payload");
	}

	if (vsc->sc_nvqs == 3) {
		/* control vq class & command */
		C_L2(ctrl_cmd_dmamap, ctrl_cmd,
		    sizeof(struct virtio_net_ctrl_cmd), 1, WRITE,
		    "control command");

		/* control vq status */
		C_L2(ctrl_status_dmamap, ctrl_status,
		    sizeof(struct virtio_net_ctrl_status), 1, READ,
		    "control status");

		/* control vq rx mode command parameter */
		C_L2(ctrl_rx_dmamap, ctrl_rx,
		    sizeof(struct virtio_net_ctrl_rx), 1, WRITE,
		    "rx mode control command");

		/* control vq MAC filter table for unicast */
		/* do not load now since its length is variable */
		C(ctrl_tbl_uc_dmamap, NULL,
		    sizeof(struct virtio_net_ctrl_mac_tbl) + 0, 1, WRITE,
		    "unicast MAC address filter command");

		/* control vq MAC filter table for multicast */
		C(ctrl_tbl_mc_dmamap, NULL,
		    (sizeof(struct virtio_net_ctrl_mac_tbl)
		     + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES),
		    1, WRITE, "multicast MAC address filter command");
	}
#undef C_L2
#undef C_L1
#undef C

	return 0;

err_reqs:
#define D(map)								\
	do {								\
		if (sc->sc_ ##map) {					\
			bus_dmamap_destroy(vsc->sc_dmat, sc->sc_ ##map); \
			sc->sc_ ##map = NULL;				\
		}							\
	} while (0)
	D(ctrl_tbl_mc_dmamap);
	D(ctrl_tbl_uc_dmamap);
	D(ctrl_rx_dmamap);
	D(ctrl_status_dmamap);
	D(ctrl_cmd_dmamap);
	for (i = 0; i < txqsize; i++) {
		D(tx_dmamaps[i]);
		D(txhdr_dmamaps[i]);
	}
	for (i = 0; i < rxqsize; i++) {
		D(rx_dmamaps[i]);
		D(rxhdr_dmamaps[i]);
	}
#undef D
	if (sc->sc_arrays) {
		kmem_free(sc->sc_arrays, allocsize2);
		sc->sc_arrays = 0;
	}
err_dmamem_map:
	bus_dmamem_unmap(vsc->sc_dmat, sc->sc_hdrs, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(vsc->sc_dmat, &sc->sc_hdr_segs[0], 1);
err_none:
	return -1;
}
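
/*
 * Attach outline (summary, for orientation): negotiate features, obtain
 * or invent a MAC address, allocate the rx/tx (and optionally control)
 * virtqueues, then call vioif_alloc_mems() above (the DMA memory is
 * sized from the ring sizes the host reports, so it must come after
 * virtio_alloc_vq()), and finally attach the ifnet.
 */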
static void
vioif_attach(device_t parent, device_t self, void *aux)
{
	struct vioif_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	uint32_t features;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int flags;

	if (vsc->sc_child != NULL) {
		aprint_normal(": child already attached for %s; "
			      "something wrong...\n",
			      device_xname(parent));
		return;
	}

	sc->sc_dev = self;
	sc->sc_virtio = vsc;

	vsc->sc_child = self;
	vsc->sc_ipl = IPL_NET;
	vsc->sc_vqs = &sc->sc_vq[0];
	vsc->sc_config_change = NULL;
	vsc->sc_intrhand = virtio_vq_intr;
	vsc->sc_flags = 0;

#ifdef VIOIF_MPSAFE
	vsc->sc_flags |= VIRTIO_F_PCI_INTR_MPSAFE;
#endif
#ifdef VIOIF_SOFTINT_INTR
	vsc->sc_flags |= VIRTIO_F_PCI_INTR_SOFTINT;
#endif

	features = virtio_negotiate_features(vsc,
					     (VIRTIO_NET_F_MAC |
					      VIRTIO_NET_F_STATUS |
					      VIRTIO_NET_F_CTRL_VQ |
					      VIRTIO_NET_F_CTRL_RX |
					      VIRTIO_F_NOTIFY_ON_EMPTY));
	if (features & VIRTIO_NET_F_MAC) {
		sc->sc_mac[0] = virtio_read_device_config_1(vsc,
						    VIRTIO_NET_CONFIG_MAC+0);
		sc->sc_mac[1] = virtio_read_device_config_1(vsc,
						    VIRTIO_NET_CONFIG_MAC+1);
		sc->sc_mac[2] = virtio_read_device_config_1(vsc,
						    VIRTIO_NET_CONFIG_MAC+2);
		sc->sc_mac[3] = virtio_read_device_config_1(vsc,
						    VIRTIO_NET_CONFIG_MAC+3);
		sc->sc_mac[4] = virtio_read_device_config_1(vsc,
						    VIRTIO_NET_CONFIG_MAC+4);
		sc->sc_mac[5] = virtio_read_device_config_1(vsc,
						    VIRTIO_NET_CONFIG_MAC+5);
	} else {
		/* code stolen from sys/net/if_tap.c */
		struct timeval tv;
		uint32_t ui;
		getmicrouptime(&tv);
		ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
		memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+0,
					     sc->sc_mac[0]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+1,
					     sc->sc_mac[1]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+2,
					     sc->sc_mac[2]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+3,
					     sc->sc_mac[3]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+4,
					     sc->sc_mac[4]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+5,
					     sc->sc_mac[5]);
	}
	aprint_normal(": Ethernet address %s\n", ether_sprintf(sc->sc_mac));
	aprint_naive("\n");
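
	/*
	 * Virtqueue sizing (summary): an rx slot is a two-descriptor chain
	 * of header plus one mbuf cluster, hence maxsegs = 2 for the rx vq
	 * below; a tx packet may span up to VIRTIO_NET_TX_MAXNSEGS payload
	 * segments plus one header descriptor.
	 */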
	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0,
			    MCLBYTES+sizeof(struct virtio_net_hdr), 2,
			    "rx") != 0) {
		goto err;
	}
	vsc->sc_nvqs = 1;
	sc->sc_vq[0].vq_done = vioif_rx_vq_done;
	if (virtio_alloc_vq(vsc, &sc->sc_vq[1], 1,
			    (sizeof(struct virtio_net_hdr)
			     + (ETHER_MAX_LEN - ETHER_HDR_LEN)),
			    VIRTIO_NET_TX_MAXNSEGS + 1,
			    "tx") != 0) {
		goto err;
	}

#ifdef VIOIF_MPSAFE
	sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
#else
	sc->sc_tx_lock = NULL;
	sc->sc_rx_lock = NULL;
#endif
	sc->sc_stopping = false;

	vsc->sc_nvqs = 2;
	sc->sc_vq[1].vq_done = vioif_tx_vq_done;
	virtio_start_vq_intr(vsc, &sc->sc_vq[0]);
	virtio_stop_vq_intr(vsc, &sc->sc_vq[1]); /* not urgent; do it later */
	if ((features & VIRTIO_NET_F_CTRL_VQ)
	    && (features & VIRTIO_NET_F_CTRL_RX)) {
		if (virtio_alloc_vq(vsc, &sc->sc_vq[2], 2,
				    NBPG, 1, "control") == 0) {
			sc->sc_vq[2].vq_done = vioif_ctrl_vq_done;
			cv_init(&sc->sc_ctrl_wait, "ctrl_vq");
			mutex_init(&sc->sc_ctrl_wait_lock,
				   MUTEX_DEFAULT, IPL_NET);
			sc->sc_ctrl_inuse = FREE;
			virtio_start_vq_intr(vsc, &sc->sc_vq[2]);
			vsc->sc_nvqs = 3;
		}
	}

#ifdef VIOIF_MPSAFE
	flags = SOFTINT_NET | SOFTINT_MPSAFE;
#else
	flags = SOFTINT_NET;
#endif
	sc->sc_rx_softint = softint_establish(flags, vioif_rx_softint, sc);
	if (sc->sc_rx_softint == NULL) {
		aprint_error_dev(self, "cannot establish softint\n");
		goto err;
	}

	if (vioif_alloc_mems(sc) < 0)
		goto err;

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = vioif_start;
	ifp->if_ioctl = vioif_ioctl;
	ifp->if_init = vioif_init;
	ifp->if_stop = vioif_stop;
	ifp->if_capabilities = 0;
	ifp->if_watchdog = vioif_watchdog;

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_mac);

	return;

err:
	if (sc->sc_tx_lock)
		mutex_obj_free(sc->sc_tx_lock);
	if (sc->sc_rx_lock)
		mutex_obj_free(sc->sc_rx_lock);

	if (vsc->sc_nvqs == 3) {
		virtio_free_vq(vsc, &sc->sc_vq[2]);
		cv_destroy(&sc->sc_ctrl_wait);
		mutex_destroy(&sc->sc_ctrl_wait_lock);
		vsc->sc_nvqs = 2;
	}
	if (vsc->sc_nvqs == 2) {
		virtio_free_vq(vsc, &sc->sc_vq[1]);
		vsc->sc_nvqs = 1;
	}
	if (vsc->sc_nvqs == 1) {
		virtio_free_vq(vsc, &sc->sc_vq[0]);
		vsc->sc_nvqs = 0;
	}
	vsc->sc_child = (void*)1;
	return;
}

/* we need interrupts working to turn promiscuous mode off */
static void
vioif_deferred_init(device_t self)
{
	struct vioif_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int r;

	if (ifp->if_flags & IFF_PROMISC)
		return;

	r = vioif_set_promisc(sc, false);
	if (r != 0)
		aprint_error_dev(self, "resetting promisc mode failed, "
				 "error code %d\n", r);
}

/*
 * Interface functions for ifnet
 */
static int
vioif_init(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;

	vioif_stop(ifp, 0);

	if (!sc->sc_deferred_init_done) {
		struct virtio_softc *vsc = sc->sc_virtio;

		sc->sc_deferred_init_done = 1;
		if (vsc->sc_nvqs == 3)
			vioif_deferred_init(sc->sc_dev);
	}

	/* sc_stopping must be false before vioif_populate_rx_mbufs is called */
	sc->sc_stopping = false;

	vioif_populate_rx_mbufs(sc);

	vioif_updown(sc, true);
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	vioif_rx_filter(sc);

	return 0;
}
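
/*
 * Stop sequence (summary): raise sc_stopping under both locks so that
 * running tx/rx handlers drain out, reset the device (the only way to
 * stop its I/O and DMA), reclaim queued buffers, and reinitialize the
 * device so that a subsequent vioif_init() can restart it.
 */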
static void
vioif_stop(struct ifnet *ifp, int disable)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;

	/* Take the locks to ensure that ongoing TX/RX finish */
	VIOIF_TX_LOCK(sc);
	VIOIF_RX_LOCK(sc);
	sc->sc_stopping = true;
	VIOIF_RX_UNLOCK(sc);
	VIOIF_TX_UNLOCK(sc);

	/* only way to stop I/O and DMA is resetting... */
	virtio_reset(vsc);
	vioif_rx_deq(sc);
	vioif_tx_drain(sc);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	if (disable)
		vioif_rx_drain(sc);

	virtio_reinit_start(vsc);
	virtio_negotiate_features(vsc, vsc->sc_features);
	virtio_start_vq_intr(vsc, &sc->sc_vq[0]);
	virtio_stop_vq_intr(vsc, &sc->sc_vq[1]);
	if (vsc->sc_nvqs >= 3)
		virtio_start_vq_intr(vsc, &sc->sc_vq[2]);
	virtio_reinit_end(vsc);
	vioif_updown(sc, false);
}

static void
vioif_start(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[1]; /* tx vq */
	struct mbuf *m;
	int queued = 0, retry = 0;

	VIOIF_TX_LOCK(sc);

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		goto out;

	if (sc->sc_stopping)
		goto out;

	for (;;) {
		int slot, r;

		IFQ_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

	retry:
		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN) {
			ifp->if_flags |= IFF_OACTIVE;
			vioif_tx_vq_done_locked(vq);
			if (retry++ == 0)
				goto retry;
			else
				break;
		}
		if (r != 0)
			panic("enqueue_prep for a tx buffer");
		r = bus_dmamap_load_mbuf(vsc->sc_dmat,
					 sc->sc_tx_dmamaps[slot],
					 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (r != 0) {
			virtio_enqueue_abort(vsc, vq, slot);
			printf("%s: tx dmamap load failed, error code %d\n",
			       device_xname(sc->sc_dev), r);
			break;
		}
		r = virtio_enqueue_reserve(vsc, vq, slot,
					   sc->sc_tx_dmamaps[slot]->dm_nsegs + 1);
		if (r != 0) {
			bus_dmamap_unload(vsc->sc_dmat,
					  sc->sc_tx_dmamaps[slot]);
			ifp->if_flags |= IFF_OACTIVE;
			vioif_tx_vq_done_locked(vq);
			if (retry++ == 0)
				goto retry;
			else
				break;
		}

		sc->sc_tx_mbufs[slot] = m;

		memset(&sc->sc_tx_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot],
				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_txhdr_dmamaps[slot],
				0, sc->sc_txhdr_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_PREWRITE);
		virtio_enqueue(vsc, vq, slot, sc->sc_txhdr_dmamaps[slot], true);
		virtio_enqueue(vsc, vq, slot, sc->sc_tx_dmamaps[slot], true);
		virtio_enqueue_commit(vsc, vq, slot, false);
		queued++;
		bpf_mtap(ifp, m);
	}

	if (m != NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		m_freem(m);
	}

	if (queued > 0) {
		virtio_enqueue_commit(vsc, vq, -1, true);
		ifp->if_timer = 5;
	}

out:
	VIOIF_TX_UNLOCK(sc);
}
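
/*
 * Note on the tx path above: each packet occupies one slot and is posted
 * as a chain of two dmamaps, sc_txhdr_dmamaps[slot] for the (all-zero,
 * since no offloads are negotiated) virtio_net_hdr and sc_tx_dmamaps[slot]
 * for the mbuf chain; the final virtio_enqueue_commit(..., -1, true)
 * notifies the host once for the whole batch.
 */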
static int
vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, r;

	s = splnet();

	r = ether_ioctl(ifp, cmd, data);
	if ((r == 0 && cmd == SIOCSIFFLAGS) ||
	    (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI))) {
		if (ifp->if_flags & IFF_RUNNING)
			r = vioif_rx_filter(ifp->if_softc);
		else
			r = 0;
	}

	splx(s);

	return r;
}

static void
vioif_watchdog(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		vioif_tx_vq_done(&sc->sc_vq[1]);
}


/*
 * Receive implementation
 */
/* allocate and initialize an mbuf for receive */
static int
vioif_add_rx_mbuf(struct vioif_softc *sc, int i)
{
	struct mbuf *m;
	int r;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}
	sc->sc_rx_mbufs[i] = m;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	r = bus_dmamap_load_mbuf(sc->sc_virtio->sc_dmat,
				 sc->sc_rx_dmamaps[i],
				 m, BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (r) {
		m_freem(m);
		sc->sc_rx_mbufs[i] = 0;
		return r;
	}

	return 0;
}

/* free an mbuf for receive */
static void
vioif_free_rx_mbuf(struct vioif_softc *sc, int i)
{
	bus_dmamap_unload(sc->sc_virtio->sc_dmat, sc->sc_rx_dmamaps[i]);
	m_freem(sc->sc_rx_mbufs[i]);
	sc->sc_rx_mbufs[i] = NULL;
}

/* add mbufs for all the empty receive slots */
static void
vioif_populate_rx_mbufs(struct vioif_softc *sc)
{
	VIOIF_RX_LOCK(sc);
	vioif_populate_rx_mbufs_locked(sc);
	VIOIF_RX_UNLOCK(sc);
}

static void
vioif_populate_rx_mbufs_locked(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int i, r, ndone = 0;
	struct virtqueue *vq = &sc->sc_vq[0]; /* rx vq */

	KASSERT(VIOIF_RX_LOCKED(sc));

	if (sc->sc_stopping)
		return;

	for (i = 0; i < vq->vq_num; i++) {
		int slot;
		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN)
			break;
		if (r != 0)
			panic("enqueue_prep for rx buffers");
		if (sc->sc_rx_mbufs[slot] == NULL) {
			r = vioif_add_rx_mbuf(sc, slot);
			if (r != 0) {
				printf("%s: rx mbuf allocation failed, "
				       "error code %d\n",
				       device_xname(sc->sc_dev), r);
				break;
			}
		}
		r = virtio_enqueue_reserve(vsc, vq, slot,
					   sc->sc_rx_dmamaps[slot]->dm_nsegs + 1);
		if (r != 0) {
			vioif_free_rx_mbuf(sc, slot);
			break;
		}
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[slot],
				0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot],
				0, MCLBYTES, BUS_DMASYNC_PREREAD);
		virtio_enqueue(vsc, vq, slot, sc->sc_rxhdr_dmamaps[slot], false);
		virtio_enqueue(vsc, vq, slot, sc->sc_rx_dmamaps[slot], false);
		virtio_enqueue_commit(vsc, vq, slot, false);
		ndone++;
	}
	if (ndone > 0)
		virtio_enqueue_commit(vsc, vq, -1, true);
}
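
/*
 * Dequeue note for the functions below: the length reported by
 * virtio_dequeue() covers the virtio_net_hdr as well, so it is
 * subtracted before the mbuf is passed up.  The rx lock is dropped
 * around if_input() so the network stack never runs with the driver
 * lock held.
 */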
/* dequeue received packets */
static int
vioif_rx_deq(struct vioif_softc *sc)
{
	int r;

	KASSERT(sc->sc_stopping);

	VIOIF_RX_LOCK(sc);
	r = vioif_rx_deq_locked(sc);
	VIOIF_RX_UNLOCK(sc);

	return r;
}

/* dequeue received packets */
static int
vioif_rx_deq_locked(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int r = 0;
	int slot, len;

	KASSERT(VIOIF_RX_LOCKED(sc));

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		len -= sizeof(struct virtio_net_hdr);
		r = 1;
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[slot],
				0, sizeof(struct virtio_net_hdr),
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot],
				0, MCLBYTES,
				BUS_DMASYNC_POSTREAD);
		m = sc->sc_rx_mbufs[slot];
		KASSERT(m != NULL);
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_rx_dmamaps[slot]);
		sc->sc_rx_mbufs[slot] = 0;
		virtio_dequeue_commit(vsc, vq, slot);
		m->m_pkthdr.rcvif = ifp;
		m->m_len = m->m_pkthdr.len = len;
		ifp->if_ipackets++;
		bpf_mtap(ifp, m);

		VIOIF_RX_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VIOIF_RX_LOCK(sc);

		if (sc->sc_stopping)
			break;
	}

	return r;
}

/* rx interrupt; call _dequeue above and schedule a softint */
static int
vioif_rx_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(vsc->sc_child);
	int r = 0;

#ifdef VIOIF_SOFTINT_INTR
	KASSERT(!cpu_intr_p());
#endif

	VIOIF_RX_LOCK(sc);

	if (sc->sc_stopping)
		goto out;

	r = vioif_rx_deq_locked(sc);
	if (r)
#ifdef VIOIF_SOFTINT_INTR
		vioif_populate_rx_mbufs_locked(sc);
#else
		softint_schedule(sc->sc_rx_softint);
#endif

out:
	VIOIF_RX_UNLOCK(sc);
	return r;
}

/* softint: enqueue receive requests for new incoming packets */
static void
vioif_rx_softint(void *arg)
{
	struct vioif_softc *sc = arg;

	vioif_populate_rx_mbufs(sc);
}

/* free all the mbufs; called from if_stop(disable) */
static void
vioif_rx_drain(struct vioif_softc *sc)
{
	struct virtqueue *vq = &sc->sc_vq[0];
	int i;

	for (i = 0; i < vq->vq_num; i++) {
		if (sc->sc_rx_mbufs[i] == NULL)
			continue;
		vioif_free_rx_mbuf(sc, i);
	}
}


/*
 * Transmission implementation
 */
/* actual transmission is done in if_start */
/* tx interrupt; dequeue and free mbufs */
/*
 * tx interrupt is actually disabled; this should be called upon
 * tx vq full and watchdog
 */
static int
vioif_tx_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(vsc->sc_child);
	int r = 0;

	VIOIF_TX_LOCK(sc);

	if (sc->sc_stopping)
		goto out;

	r = vioif_tx_vq_done_locked(vq);

out:
	VIOIF_TX_UNLOCK(sc);
	return r;
}

static int
vioif_tx_vq_done_locked(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(vsc->sc_child);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int r = 0;
	int slot, len;

	KASSERT(VIOIF_TX_LOCKED(sc));

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		r++;
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_txhdr_dmamaps[slot],
				0, sizeof(struct virtio_net_hdr),
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot],
				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		m = sc->sc_tx_mbufs[slot];
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[slot]);
		sc->sc_tx_mbufs[slot] = 0;
		virtio_dequeue_commit(vsc, vq, slot);
		ifp->if_opackets++;
		m_freem(m);
	}

	if (r)
		ifp->if_flags &= ~IFF_OACTIVE;
	return r;
}

/* free all the mbufs already put on vq; called from if_stop(disable) */
static void
vioif_tx_drain(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[1];
	int i;

	KASSERT(sc->sc_stopping);

	for (i = 0; i < vq->vq_num; i++) {
		if (sc->sc_tx_mbufs[i] == NULL)
			continue;
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[i]);
		m_freem(sc->sc_tx_mbufs[i]);
		sc->sc_tx_mbufs[i] = NULL;
	}
}

/*
 * Control vq
 */
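/*
 * Concurrency sketch: there is a single set of command/status buffers,
 * guarded by sc_ctrl_inuse.  An issuer moves it FREE -> INUSE under
 * sc_ctrl_wait_lock, posts the command, and sleeps on sc_ctrl_wait until
 * vioif_ctrl_vq_done() marks it DONE from interrupt context; the issuer
 * then reads the status, sets the state back to FREE and signals any
 * other waiter.
 */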
/* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */
static int
vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[2];
	int r, slot;

	if (vsc->sc_nvqs < 3)
		return ENOTSUP;

	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != FREE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = INUSE;
	mutex_exit(&sc->sc_ctrl_wait_lock);

	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_RX;
	sc->sc_ctrl_cmd->command = cmd;
	sc->sc_ctrl_rx->onoff = onoff;

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap,
			0, sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_rx_dmamap,
			0, sizeof(struct virtio_net_ctrl_rx),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap,
			0, sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_PREREAD);

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, 3);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_rx_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* wait for done */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != DONE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	mutex_exit(&sc->sc_ctrl_wait_lock);
	/* already dequeued */

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap, 0,
			sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_rx_dmamap, 0,
			sizeof(struct virtio_net_ctrl_rx),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap, 0,
			sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_POSTREAD);

	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		printf("%s: failed setting rx mode\n",
		       device_xname(sc->sc_dev));
		r = EIO;
	}

	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = FREE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return r;
}

static int
vioif_set_promisc(struct vioif_softc *sc, bool onoff)
{
	int r;

	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);

	return r;
}

static int
vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
{
	int r;

	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);

	return r;
}
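
/*
 * The VIRTIO_NET_CTRL_MAC_TABLE_SET command below carries two
 * variable-length tables, unicast first and multicast second.  This
 * driver always sends an empty unicast table; the multicast table holds
 * at most VIRTIO_NET_CTRL_MAC_MAXENTRIES addresses, collected by
 * vioif_rx_filter().
 */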
/* issue VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for completion */
static int
vioif_set_rx_filter(struct vioif_softc *sc)
{
	/* filter already set in sc_ctrl_mac_tbl */
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[2];
	int r, slot;

	if (vsc->sc_nvqs < 3)
		return ENOTSUP;

	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != FREE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = INUSE;
	mutex_exit(&sc->sc_ctrl_wait_lock);

	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_MAC;
	sc->sc_ctrl_cmd->command = VIRTIO_NET_CTRL_MAC_TABLE_SET;

	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap,
			    sc->sc_ctrl_mac_tbl_uc,
			    (sizeof(struct virtio_net_ctrl_mac_tbl)
			     + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r) {
		printf("%s: control command dmamap load failed, "
		       "error code %d\n", device_xname(sc->sc_dev), r);
		goto out;
	}
	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap,
			    sc->sc_ctrl_mac_tbl_mc,
			    (sizeof(struct virtio_net_ctrl_mac_tbl)
			     + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r) {
		printf("%s: control command dmamap load failed, "
		       "error code %d\n", device_xname(sc->sc_dev), r);
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap);
		goto out;
	}

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap,
			0, sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap,
			0, sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_PREREAD);

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, 4);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_uc_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_mc_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* wait for done */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != DONE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	mutex_exit(&sc->sc_ctrl_wait_lock);
	/* already dequeued */

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap, 0,
			sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap, 0,
			sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap);
	bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap);

	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		printf("%s: failed setting rx filter\n",
		       device_xname(sc->sc_dev));
		r = EIO;
	}

out:
	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = FREE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return r;
}

/* ctrl vq interrupt; wake up the command issuer */
static int
vioif_ctrl_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(vsc->sc_child);
	int r, slot;

	r = virtio_dequeue(vsc, vq, &slot, NULL);
	if (r == ENOENT)
		return 0;
	virtio_dequeue_commit(vsc, vq, slot);

	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = DONE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return 1;
}

/*
 * If IFF_PROMISC is requested, set the interface promiscuous.
 * If the multicast filter is small enough (<= MAXENTRIES), program the
 * rx filter; if the multicast filter is too large, use ALLMULTI.
 */
/*
 * If setting the rx filter fails, fall back to ALLMULTI.
 * If ALLMULTI fails, fall back to PROMISC.
 */
static int
vioif_rx_filter(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int nentries;
	int promisc = 0, allmulti = 0, rxfilter = 0;
	int r;

	if (vsc->sc_nvqs < 3) {	/* no ctrl vq; always promisc */
		ifp->if_flags |= IFF_PROMISC;
		return 0;
	}

	if (ifp->if_flags & IFF_PROMISC) {
		promisc = 1;
		goto set;
	}

	nentries = -1;
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (nentries++, enm != NULL) {
		if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
			allmulti = 1;
			goto set;
		}
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			   ETHER_ADDR_LEN)) {
			allmulti = 1;
			goto set;
		}
		memcpy(sc->sc_ctrl_mac_tbl_mc->macs[nentries],
		       enm->enm_addrlo, ETHER_ADDR_LEN);
		ETHER_NEXT_MULTI(step, enm);
	}
	rxfilter = 1;

set:
	if (rxfilter) {
		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
		sc->sc_ctrl_mac_tbl_mc->nentries = nentries;
		r = vioif_set_rx_filter(sc);
		if (r != 0) {
			rxfilter = 0;
			allmulti = 1; /* fallback */
		}
	} else {
		/* remove rx filter */
		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
		sc->sc_ctrl_mac_tbl_mc->nentries = 0;
		r = vioif_set_rx_filter(sc);
		/* what to do on failure? */
	}
	if (allmulti) {
		r = vioif_set_allmulti(sc, true);
		if (r != 0) {
			allmulti = 0;
			promisc = 1; /* fallback */
		}
	} else {
		r = vioif_set_allmulti(sc, false);
		/* what to do on failure? */
	}
	if (promisc) {
		r = vioif_set_promisc(sc, true);
	} else {
		r = vioif_set_promisc(sc, false);
	}

	return r;
}

/* change link status */
static int
vioif_updown(struct vioif_softc *sc, bool isup)
{
	struct virtio_softc *vsc = sc->sc_virtio;

	if (!(vsc->sc_features & VIRTIO_NET_F_STATUS))
		return ENODEV;
	virtio_write_device_config_1(vsc,
				     VIRTIO_NET_CONFIG_STATUS,
				     isup?VIRTIO_NET_S_LINK_UP:0);
	return 0;
}