/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/md_var.h>
#include <machine/cothread.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/ifq_var.h>

#include <netinet/in_var.h>

#include <sys/stat.h>
#include <net/tap/if_tap.h>
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#define VKE_DEVNAME	"vke"

#define VKE_CHUNK	8	/* number of mbufs to queue before interrupting */

#define NETFIFOSIZE	256
#define NETFIFOMASK	(NETFIFOSIZE - 1)
#define NETFIFOINDEX(u)	((u) & NETFIFOMASK)

#define VKE_COTD_RUN	0
#define VKE_COTD_EXIT	1
#define VKE_COTD_DEAD	2

struct vke_fifo {
	struct mbuf	*array[NETFIFOSIZE];
	int		rindex;
	int		windex;
};
typedef struct vke_fifo *fifo_t;

struct vke_softc {
	struct arpcom		arpcom;
	int			sc_fd;
	int			sc_unit;

	cothread_t		cotd_tx;
	cothread_t		cotd_rx;

	int			cotd_tx_exit;
	int			cotd_rx_exit;

	void			*sc_txbuf;
	int			sc_txbuf_len;

	fifo_t			sc_txfifo;
	fifo_t			sc_txfifo_done;
	fifo_t			sc_rxfifo;

	struct sysctl_ctx_list	sc_sysctl_ctx;
	struct sysctl_oid	*sc_sysctl_tree;

	int			sc_tap_unit;	/* unit of backend tap(4) */
	in_addr_t		sc_addr;	/* address */
	in_addr_t		sc_mask;	/* netmask */
};
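
/*
 * A note on the FIFOs: each vke_fifo is a single-producer/single-consumer
 * ring.  sc_txfifo carries outbound mbufs from the kernel to the transmit
 * cothread, sc_txfifo_done carries transmitted mbufs back to the kernel to
 * be freed, and sc_rxfifo carries received packets from the receive
 * cothread to the kernel, which refills the consumed slots with empty
 * mbufs.  Only the producer advances windex and only the consumer advances
 * rindex, so no lock is required; the cpu_sfence()/cpu_lfence() calls in
 * the enqueue/dequeue routines below keep the array contents and the
 * indices consistent between the two sides.
 */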

static void	vke_start(struct ifnet *);
static void	vke_init(void *);
static int	vke_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static int	vke_attach(const struct vknetif_info *, int);
static int	vke_stop(struct vke_softc *);
static int	vke_init_addr(struct ifnet *, in_addr_t, in_addr_t);
static void	vke_tx_intr(cothread_t cotd);
static void	vke_tx_thread(cothread_t cotd);
static void	vke_rx_intr(cothread_t cotd);
static void	vke_rx_thread(cothread_t cotd);

static int	vke_txfifo_enqueue(struct vke_softc *sc, struct mbuf *m);
static struct mbuf *vke_txfifo_dequeue(struct vke_softc *sc);

static int	vke_txfifo_done_enqueue(struct vke_softc *sc, struct mbuf *m);
static struct mbuf *vke_txfifo_done_dequeue(struct vke_softc *sc, struct mbuf *nm);

static struct mbuf *vke_rxfifo_dequeue(struct vke_softc *sc, struct mbuf *nm);
static struct mbuf *vke_rxfifo_sniff(struct vke_softc *sc);

static void
vke_sysinit(void *arg __unused)
{
	int i, unit;

	KASSERT(NetifNum <= VKNETIF_MAX, ("too many netifs: %d", NetifNum));

	unit = 0;
	for (i = 0; i < NetifNum; ++i) {
		if (vke_attach(&NetifInfo[i], unit) == 0)
			++unit;
	}
}
SYSINIT(vke, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, vke_sysinit, NULL);

/*
 * vke_txfifo_done_enqueue() - Add an mbuf to the transmit done fifo.  Since
 * the cothread cannot free transmit mbufs after processing, we put them on
 * the done fifo so the kernel can free them.
 */
static int
vke_txfifo_done_enqueue(struct vke_softc *sc, struct mbuf *m)
{
	fifo_t fifo = sc->sc_txfifo_done;

	while (NETFIFOINDEX(fifo->windex + 1) == NETFIFOINDEX(fifo->rindex)) {
		usleep(20000);
	}

	fifo->array[NETFIFOINDEX(fifo->windex)] = m;
	cpu_sfence();
	++fifo->windex;
	return (0);
}

/*
 * vke_txfifo_done_dequeue() - Remove an mbuf from the transmit done fifo.
 */
static struct mbuf *
vke_txfifo_done_dequeue(struct vke_softc *sc, struct mbuf *nm)
{
	fifo_t fifo = sc->sc_txfifo_done;
	struct mbuf *m;

	if (NETFIFOINDEX(fifo->rindex) == NETFIFOINDEX(fifo->windex))
		return (NULL);

	m = fifo->array[NETFIFOINDEX(fifo->rindex)];
	fifo->array[NETFIFOINDEX(fifo->rindex)] = nm;
	cpu_lfence();
	++fifo->rindex;
	return (m);
}

/*
 * vke_txfifo_enqueue() - Add an mbuf to the transmit fifo.
 */
static int
vke_txfifo_enqueue(struct vke_softc *sc, struct mbuf *m)
{
	fifo_t fifo = sc->sc_txfifo;

	if (NETFIFOINDEX(fifo->windex + 1) == NETFIFOINDEX(fifo->rindex))
		return (-1);

	fifo->array[NETFIFOINDEX(fifo->windex)] = m;
	cpu_sfence();
	++fifo->windex;

	return (0);
}

/*
 * vke_txfifo_dequeue() - Return the next mbuf on the transmit fifo if one
 * exists.
 */
static struct mbuf *
vke_txfifo_dequeue(struct vke_softc *sc)
{
	fifo_t fifo = sc->sc_txfifo;
	struct mbuf *m;

	if (NETFIFOINDEX(fifo->rindex) == NETFIFOINDEX(fifo->windex))
		return (NULL);

	m = fifo->array[NETFIFOINDEX(fifo->rindex)];
	fifo->array[NETFIFOINDEX(fifo->rindex)] = NULL;

	cpu_lfence();
	++fifo->rindex;
	return (m);
}

static int
vke_txfifo_empty(struct vke_softc *sc)
{
	fifo_t fifo = sc->sc_txfifo;

	if (NETFIFOINDEX(fifo->rindex) == NETFIFOINDEX(fifo->windex))
		return (1);
	return (0);
}
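
/*
 * Ring-index math, by way of example: rindex and windex increment without
 * bound and are reduced modulo NETFIFOSIZE (a power of 2) only when used
 * as array subscripts.  With NETFIFOSIZE 256, rindex == 255 and
 * windex == 258 mean slots 255, 0 and 1 hold mbufs, and NETFIFOINDEX(258)
 * == 2 is the next write position.  A fifo is empty when both indices map
 * to the same slot and full when the slot after windex maps to rindex, so
 * each fifo holds at most NETFIFOSIZE - 1 (255) mbufs at a time.
 */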

/*
 * vke_rxfifo_dequeue() - Return the next mbuf on the receive fifo if one
 * exists, replacing it with newm, which should point to a newly allocated
 * mbuf.
 */
static struct mbuf *
vke_rxfifo_dequeue(struct vke_softc *sc, struct mbuf *newm)
{
	fifo_t fifo = sc->sc_rxfifo;
	struct mbuf *m;

	if (NETFIFOINDEX(fifo->rindex) == NETFIFOINDEX(fifo->windex))
		return (NULL);

	m = fifo->array[NETFIFOINDEX(fifo->rindex)];
	fifo->array[NETFIFOINDEX(fifo->rindex)] = newm;
	cpu_lfence();
	++fifo->rindex;
	return (m);
}

/*
 * Return the next mbuf if available but do NOT remove it from the FIFO.
 */
static struct mbuf *
vke_rxfifo_sniff(struct vke_softc *sc)
{
	fifo_t fifo = sc->sc_rxfifo;
	struct mbuf *m;

	if (NETFIFOINDEX(fifo->rindex) == NETFIFOINDEX(fifo->windex))
		return (NULL);

	m = fifo->array[NETFIFOINDEX(fifo->rindex)];
	cpu_lfence();
	return (m);
}

static void
vke_init(void *xsc)
{
	struct vke_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	vke_stop(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	sc->sc_txfifo = kmalloc(sizeof(*sc->sc_txfifo), M_DEVBUF, M_WAITOK);
	sc->sc_txfifo_done = kmalloc(sizeof(*sc->sc_txfifo_done),
	    M_DEVBUF, M_WAITOK);

	sc->sc_rxfifo = kmalloc(sizeof(*sc->sc_rxfifo), M_DEVBUF, M_WAITOK);
	for (i = 0; i < NETFIFOSIZE; i++) {
		sc->sc_rxfifo->array[i] = m_getcl(MB_WAIT, MT_DATA, M_PKTHDR);
		sc->sc_txfifo->array[i] = NULL;
		sc->sc_txfifo_done->array[i] = NULL;
	}

	sc->cotd_tx_exit = sc->cotd_rx_exit = VKE_COTD_RUN;
	sc->cotd_tx = cothread_create(vke_tx_thread, vke_tx_intr, sc, "vke_tx");
	sc->cotd_rx = cothread_create(vke_rx_thread, vke_rx_intr, sc, "vke_rx");

	if (sc->sc_addr != 0) {
		in_addr_t addr, mask;

		addr = sc->sc_addr;
		mask = sc->sc_mask;

		/*
		 * Make sure the vkernel-assigned address will not
		 * be added again.
		 */
		sc->sc_addr = 0;
		sc->sc_mask = 0;

		vke_init_addr(ifp, addr, mask);
	}
}
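
/*
 * Note that vke_init() preloads the entire receive ring with cluster
 * mbufs before the cothreads are created, so vke_rx_thread() has empty
 * buffers to read() into from the start.  Each slot is a single cluster,
 * which caps a received frame at MCLBYTES bytes.
 */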

/*
 * Called from kernel.
 *
 * NOTE: We can't make any kernel callbacks while holding the cothread
 *	 lock because the cothread lock is not governed by the kernel
 *	 scheduler (so mplock, tokens, etc will not be released).
 */
static void
vke_start(struct ifnet *ifp)
{
	struct vke_softc *sc = ifp->if_softc;
	struct mbuf *m;
	cothread_t cotd = sc->cotd_tx;
	int count;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	count = 0;
	while ((m = ifq_dequeue(&ifp->if_snd, NULL)) != NULL) {
		if (vke_txfifo_enqueue(sc, m) != -1) {
			if (count++ == VKE_CHUNK) {
				cothread_lock(cotd, 0);
				cothread_signal(cotd);
				cothread_unlock(cotd, 0);
				count = 0;
			}
		} else {
			m_freem(m);
		}
	}
	if (count) {
		cothread_lock(cotd, 0);
		cothread_signal(cotd);
		cothread_unlock(cotd, 0);
	}
}
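
/*
 * The VKE_CHUNK batching above keeps signalling overhead down: the
 * transmit cothread is woken up roughly once per VKE_CHUNK (8) mbufs
 * queued, with one final wakeup for any remainder, rather than once per
 * packet.  The receive interrupt and both cothread bodies below use the
 * same pattern.
 */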

static int
vke_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct vke_softc *sc = ifp->if_softc;
	int error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0)
				vke_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vke_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = EOPNOTSUPP;
		/* TODO */
		break;
	case SIOCGIFSTATUS: {
		struct ifstat *ifs = (struct ifstat *)data;
		int len;

		len = strlen(ifs->ascii);
		if (len < sizeof(ifs->ascii)) {
			ksnprintf(ifs->ascii + len, sizeof(ifs->ascii) - len,
			    "\tBacked by tap%d\n", sc->sc_tap_unit);
		}
		break;
	}
	case SIOCSIFADDR:
		if (((struct ifaddr *)data)->ifa_addr->sa_family == AF_INET) {
			/*
			 * If we are explicitly asked to change the
			 * address, invalidate the address/netmask
			 * passed in from the vkernel command line.
			 */
			sc->sc_addr = 0;
			sc->sc_mask = 0;
		}
		/* FALL THROUGH */
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

static int
vke_stop(struct vke_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	if (sc) {
		if (sc->cotd_tx) {
			cothread_lock(sc->cotd_tx, 0);
			if (sc->cotd_tx_exit == VKE_COTD_RUN)
				sc->cotd_tx_exit = VKE_COTD_EXIT;
			cothread_signal(sc->cotd_tx);
			cothread_unlock(sc->cotd_tx, 0);
			cothread_delete(&sc->cotd_tx);
		}
		if (sc->cotd_rx) {
			cothread_lock(sc->cotd_rx, 0);
			if (sc->cotd_rx_exit == VKE_COTD_RUN)
				sc->cotd_rx_exit = VKE_COTD_EXIT;
			cothread_signal(sc->cotd_rx);
			cothread_unlock(sc->cotd_rx, 0);
			cothread_delete(&sc->cotd_rx);
		}

		for (i = 0; i < NETFIFOSIZE; i++) {
			if (sc->sc_rxfifo && sc->sc_rxfifo->array[i]) {
				m_freem(sc->sc_rxfifo->array[i]);
				sc->sc_rxfifo->array[i] = NULL;
			}
			if (sc->sc_txfifo && sc->sc_txfifo->array[i]) {
				m_freem(sc->sc_txfifo->array[i]);
				sc->sc_txfifo->array[i] = NULL;
			}
			if (sc->sc_txfifo_done && sc->sc_txfifo_done->array[i]) {
				m_freem(sc->sc_txfifo_done->array[i]);
				sc->sc_txfifo_done->array[i] = NULL;
			}
		}

		if (sc->sc_txfifo) {
			kfree(sc->sc_txfifo, M_DEVBUF);
			sc->sc_txfifo = NULL;
		}
		if (sc->sc_txfifo_done) {
			kfree(sc->sc_txfifo_done, M_DEVBUF);
			sc->sc_txfifo_done = NULL;
		}
		if (sc->sc_rxfifo) {
			kfree(sc->sc_rxfifo, M_DEVBUF);
			sc->sc_rxfifo = NULL;
		}
	}

	return 0;
}

/*
 * vke_rx_intr() is the interrupt function for the receive cothread.
 */
static void
vke_rx_intr(cothread_t cotd)
{
	struct mbuf *m;
	struct mbuf *nm;
	struct vke_softc *sc = cotd->arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int count = 0;

	ifnet_serialize_all(ifp);
	cothread_lock(cotd, 0);

	if (sc->cotd_rx_exit != VKE_COTD_RUN) {
		cothread_unlock(cotd, 0);
		ifnet_deserialize_all(ifp);
		return;
	}
	cothread_unlock(cotd, 0);

	while ((m = vke_rxfifo_sniff(sc)) != NULL) {
		nm = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if (nm) {
			vke_rxfifo_dequeue(sc, nm);
			ifp->if_input(ifp, m);
			if (count++ == VKE_CHUNK) {
				cothread_lock(cotd, 0);
				cothread_signal(cotd);
				cothread_unlock(cotd, 0);
				count = 0;
			}
		} else {
			vke_rxfifo_dequeue(sc, m);
		}
	}

	if (count) {
		cothread_lock(cotd, 0);
		cothread_signal(cotd);
		cothread_unlock(cotd, 0);
	}
	ifnet_deserialize_all(ifp);
}
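
/*
 * The sniff-then-dequeue pattern above is deliberate: a packet is only
 * consumed from the receive ring once a replacement cluster has been
 * allocated.  If m_getcl() fails, the packet's own mbuf is recycled back
 * into its slot (dropping that packet), so the ring never loses a buffer.
 */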

/*
 * vke_tx_intr() is the interrupt function for the transmit cothread.
 * Calls vke_start() to handle processing transmit mbufs.
 */
static void
vke_tx_intr(cothread_t cotd)
{
	struct vke_softc *sc = cotd->arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;

	ifnet_serialize_all(ifp);
	cothread_lock(cotd, 0);
	if (sc->cotd_tx_exit != VKE_COTD_RUN) {
		cothread_unlock(cotd, 0);
		ifnet_deserialize_all(ifp);
		return;
	}
	cothread_unlock(cotd, 0);

	/*
	 * Free TX mbufs that have been processed before starting new
	 * ones, to be pipeline friendly.
	 */
	while ((m = vke_txfifo_done_dequeue(sc, NULL)) != NULL) {
		m_freem(m);
	}

	if (ifp->if_flags & IFF_RUNNING)
		ifp->if_start(ifp);

	ifnet_deserialize_all(ifp);
}

/*
 * vke_rx_thread() is the body of the receive cothread.
 */
static void
vke_rx_thread(cothread_t cotd)
{
	struct mbuf *m;
	struct vke_softc *sc = cotd->arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	fifo_t fifo = sc->sc_rxfifo;
	fd_set fdset;
	struct timeval tv;
	int count;
	int n;

	/*
	 * The select timeout cannot be infinite since we need to check
	 * for the exit flag sc->cotd_rx_exit.
	 */
	tv.tv_sec = 0;
	tv.tv_usec = 500000;

	FD_ZERO(&fdset);
	count = 0;

	while (sc->cotd_rx_exit == VKE_COTD_RUN) {
		/*
		 * Wait for the RX FIFO to be loaded with
		 * empty mbufs.
		 */
		if (NETFIFOINDEX(fifo->windex + 1) ==
		    NETFIFOINDEX(fifo->rindex)) {
			usleep(20000);
			continue;
		}

		/*
		 * Load data into the rx fifo
		 */
		m = fifo->array[NETFIFOINDEX(fifo->windex)];
		if (m == NULL)
			continue;
		n = read(sc->sc_fd, mtod(m, void *), MCLBYTES);
		if (n > 0) {
			ifp->if_ipackets++;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = n;
			cpu_sfence();
			++fifo->windex;
			if (count++ == VKE_CHUNK) {
				cothread_intr(cotd);
				count = 0;
			}
		} else {
			if (count) {
				cothread_intr(cotd);
				count = 0;
			}
			FD_SET(sc->sc_fd, &fdset);

			if (select(sc->sc_fd + 1, &fdset, NULL, NULL,
			    &tv) == -1) {
				kprintf(VKE_DEVNAME "%d: select failed for "
				    "TAP device\n", sc->sc_unit);
				usleep(1000000);
			}
		}
	}
	cpu_sfence();
	sc->cotd_rx_exit = VKE_COTD_DEAD;
}

/*
 * vke_tx_thread() is the body of the transmit cothread.
 */
static void
vke_tx_thread(cothread_t cotd)
{
	struct mbuf *m;
	struct vke_softc *sc = cotd->arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int count = 0;

	while (sc->cotd_tx_exit == VKE_COTD_RUN) {
		/*
		 * Write outgoing packets to the TAP interface
		 */
		m = vke_txfifo_dequeue(sc);
		if (m) {
			if (m->m_pkthdr.len <= MCLBYTES) {
				m_copydata(m, 0, m->m_pkthdr.len,
				    sc->sc_txbuf);
				sc->sc_txbuf_len = m->m_pkthdr.len;

				if (write(sc->sc_fd, sc->sc_txbuf,
				    sc->sc_txbuf_len) < 0) {
					ifp->if_oerrors++;
				} else {
					ifp->if_opackets++;
				}
			}
			if (count++ == VKE_CHUNK) {
				cothread_intr(cotd);
				count = 0;
			}
			vke_txfifo_done_enqueue(sc, m);
		} else {
			if (count) {
				cothread_intr(cotd);
				count = 0;
			}
			cothread_lock(cotd, 1);
			if (vke_txfifo_empty(sc))
				cothread_wait(cotd);
			cothread_unlock(cotd, 1);
		}
	}
	cpu_sfence();
	sc->cotd_tx_exit = VKE_COTD_DEAD;
}
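
/*
 * Note that vke_tx_thread() bounces every packet through sc_txbuf: an
 * mbuf chain is not necessarily contiguous, while write(2) on the tap
 * descriptor wants a single flat buffer, so m_copydata() linearizes the
 * chain first.  Chains longer than MCLBYTES are dropped without being
 * written, but are still pushed onto the done fifo so the kernel frees
 * them.
 */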

static int
vke_attach(const struct vknetif_info *info, int unit)
{
	struct vke_softc *sc;
	struct ifnet *ifp;
	struct tapinfo tapinfo;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int fd;

	KKASSERT(info->tap_fd >= 0);
	fd = info->tap_fd;

	/*
	 * This is only a TAP device if tap_unit is non-negative.  If
	 * connecting to a virtual socket we generate a unique MAC.
	 */
	if (info->tap_unit >= 0) {
		if (ioctl(fd, TAPGIFINFO, &tapinfo) < 0) {
			kprintf(VKE_DEVNAME "%d: ioctl(TAPGIFINFO) "
			    "failed: %s\n", unit, strerror(errno));
			return ENXIO;
		}

		if (ioctl(fd, SIOCGIFADDR, enaddr) < 0) {
			kprintf(VKE_DEVNAME "%d: ioctl(SIOCGIFADDR) "
			    "failed: %s\n", unit, strerror(errno));
			return ENXIO;
		}
	} else {
		int urandom_fd;

		/*
		 * Start from a zeroed address so the leading bytes are
		 * deterministic (multicast bit clear), then randomize
		 * the tail.
		 */
		bzero(enaddr, sizeof(enaddr));
		urandom_fd = open("/dev/urandom", O_RDONLY);
		if (urandom_fd >= 0) {
			read(urandom_fd, enaddr + 2, 4);
			close(urandom_fd);
		}
		enaddr[4] = (int)getpid() >> 8;
		enaddr[5] = (int)getpid() & 255;

		/* No backing tap(4); supply sane interface defaults */
		tapinfo.mtu = ETHERMTU;
		tapinfo.baudrate = 0;
	}
	enaddr[1] += 1;

	sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);

	sc->sc_txbuf = kmalloc(MCLBYTES, M_DEVBUF, M_WAITOK);
	sc->sc_fd = fd;
	sc->sc_unit = unit;
	sc->sc_tap_unit = info->tap_unit;
	sc->sc_addr = info->netif_addr;
	sc->sc_mask = info->netif_mask;

	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, VKE_DEVNAME, sc->sc_unit);

	/* NB: after if_initname() */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO, ifp->if_xname,
	    CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		kprintf(VKE_DEVNAME "%d: can't add sysctl node\n", unit);
	} else {
		SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sc_sysctl_tree),
		    OID_AUTO, "tap_unit",
		    CTLFLAG_RD, &sc->sc_tap_unit, 0,
		    "Backend tap(4) unit");
	}

	ifp->if_softc = sc;
	ifp->if_ioctl = vke_ioctl;
	ifp->if_start = vke_start;
	ifp->if_init = vke_init;
	ifp->if_mtu = tapinfo.mtu;
	ifp->if_baudrate = tapinfo.baudrate;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	/* TODO: if_media */

	ether_ifattach(ifp, enaddr, NULL);

	if (bootverbose && sc->sc_addr != 0) {
		if_printf(ifp, "pre-configured "
		    "address 0x%08x, netmask 0x%08x\n",
		    ntohl(sc->sc_addr), ntohl(sc->sc_mask));
	}

	return 0;
}

static int
vke_init_addr(struct ifnet *ifp, in_addr_t addr, in_addr_t mask)
{
	struct ifaliasreq ifra;
	struct sockaddr_in *sin;
	int ret;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (bootverbose) {
		if_printf(ifp, "add pre-configured "
		    "address 0x%08x, netmask 0x%08x\n",
		    ntohl(addr), ntohl(mask));
	}

	bzero(&ifra, sizeof(ifra));

	/* NB: no need to set ifaliasreq.ifra_name */

	sin = (struct sockaddr_in *)&ifra.ifra_addr;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr.s_addr = addr;

	if (mask != 0) {
		sin = (struct sockaddr_in *)&ifra.ifra_mask;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr.s_addr = mask;
	}

	/*
	 * Temporarily release the serializer; in_control() will hold
	 * it again before calling ifnet.if_ioctl().
	 */
	ifnet_deserialize_all(ifp);
	ret = in_control(NULL, SIOCAIFADDR, (caddr_t)&ifra, ifp, NULL);
	ifnet_serialize_all(ifp);

	return ret;
}