/*	$NetBSD: if_tap.c,v 1.92 2016/08/15 05:10:33 christos Exp $	*/

/*
 * Copyright (c) 2003, 2004, 2008, 2009 The NetBSD Foundation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * tap(4) is a virtual Ethernet interface.  It appears as a real Ethernet
 * device to the system, but can also be accessed by userland through a
 * character device interface, which allows reading and injecting frames.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tap.c,v 1.92 2016/08/15 05:10:33 christos Exp $");

#if defined(_KERNEL_OPT)

#include "opt_modular.h"
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/cprng.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/select.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/intr.h>
#include <sys/stat.h>
#include <sys/module.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/if_tap.h>
#include <net/bpf.h>

#include <compat/sys/sockio.h>

#include "ioconf.h"

/*
 * sysctl node management
 *
 * It's not really possible to use a SYSCTL_SETUP block with
 * the current module implementation, so it is easier to just define
 * our own function.
 *
 * The handler function is a "helper" in Andrew Brown's sysctl
 * framework terminology.  It is used as a gateway for sysctl
 * requests over the nodes.
 *
 * tap_sysctl_clog allows the module to log node creations and
 * destroy them all at once using sysctl_teardown.
 */
static int	tap_node;
static int	tap_sysctl_handler(SYSCTLFN_PROTO);
static void	sysctl_tap_setup(struct sysctllog **);

/*
 * Since we're an Ethernet device, we need the following two
 * components: a struct ethercom and a struct ifmedia, the latter
 * because we don't attach a PHY to ourselves.
 * We could emulate one, but there's no real point.
 */

struct tap_softc {
	device_t	sc_dev;
	struct ifmedia	sc_im;
	struct ethercom	sc_ec;
	int		sc_flags;
#define	TAP_INUSE	0x00000001	/* tap device can only be opened once */
#define	TAP_ASYNCIO	0x00000002	/* user is using async I/O (SIGIO) on the device */
#define	TAP_NBIO	0x00000004	/* user wants calls to avoid blocking */
#define	TAP_GOING	0x00000008	/* interface is being destroyed */
	struct selinfo	sc_rsel;
	pid_t		sc_pgid;	/* For async I/O */
	kmutex_t	sc_rdlock;
	kmutex_t	sc_kqlock;
	void		*sc_sih;
	struct timespec sc_atime;
	struct timespec sc_mtime;
	struct timespec sc_btime;
};

/* autoconf(9) glue */

static int	tap_match(device_t, cfdata_t, void *);
static void	tap_attach(device_t, device_t, void *);
static int	tap_detach(device_t, int);

CFATTACH_DECL_NEW(tap, sizeof(struct tap_softc),
    tap_match, tap_attach, tap_detach, NULL);
extern struct cfdriver tap_cd;

/* Real device access routines */
static int	tap_dev_close(struct tap_softc *);
static int	tap_dev_read(int, struct uio *, int);
static int	tap_dev_write(int, struct uio *, int);
static int	tap_dev_ioctl(int, u_long, void *, struct lwp *);
static int	tap_dev_poll(int, int, struct lwp *);
static int	tap_dev_kqfilter(int, struct knote *);

/* Fileops access routines */
static int	tap_fops_close(file_t *);
static int	tap_fops_read(file_t *, off_t *, struct uio *,
    kauth_cred_t, int);
static int	tap_fops_write(file_t *, off_t *, struct uio *,
    kauth_cred_t, int);
static int	tap_fops_ioctl(file_t *, u_long, void *);
static int	tap_fops_poll(file_t *, int);
static int	tap_fops_stat(file_t *, struct stat *);
static int	tap_fops_kqfilter(file_t *, struct knote *);

static const struct fileops tap_fileops = {
	.fo_read = tap_fops_read,
	.fo_write = tap_fops_write,
	.fo_ioctl = tap_fops_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = tap_fops_poll,
	.fo_stat = tap_fops_stat,
	.fo_close = tap_fops_close,
	.fo_kqfilter = tap_fops_kqfilter,
	.fo_restart = fnullop_restart,
};

/* Helper for cloning open() */
static int	tap_dev_cloner(struct lwp *);

/* Character device routines */
static int	tap_cdev_open(dev_t, int, int, struct lwp *);
static int	tap_cdev_close(dev_t, int, int, struct lwp *);
static int	tap_cdev_read(dev_t, struct uio *, int);
static int	tap_cdev_write(dev_t, struct uio *, int);
static int	tap_cdev_ioctl(dev_t, u_long, void *, int, struct lwp *);
static int	tap_cdev_poll(dev_t, int, struct lwp *);
static int	tap_cdev_kqfilter(dev_t, struct knote *);

const struct cdevsw tap_cdevsw = {
	.d_open = tap_cdev_open,
	.d_close = tap_cdev_close,
	.d_read = tap_cdev_read,
	.d_write = tap_cdev_write,
	.d_ioctl = tap_cdev_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = tap_cdev_poll,
	.d_mmap = nommap,
	.d_kqfilter = tap_cdev_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

#define	TAP_CLONER	0xfffff		/* Maximal minor value */

/* kqueue-related routines */
static void	tap_kqdetach(struct knote *);
static int	tap_kqread(struct knote *, long);

/*
 * These are needed by the if_media interface.
 */

static int	tap_mediachange(struct ifnet *);
static void	tap_mediastatus(struct ifnet *, struct ifmediareq *);

/*
 * These are needed by the ifnet interface, and would typically be
 * there for any network interface driver.
 * Some other routines are optional: watchdog and drain.
 */

static void	tap_start(struct ifnet *);
static void	tap_stop(struct ifnet *, int);
static int	tap_init(struct ifnet *);
static int	tap_ioctl(struct ifnet *, u_long, void *);

/* Internal functions */
static int	tap_lifaddr(struct ifnet *, u_long, struct ifaliasreq *);
static void	tap_softintr(void *);

/*
 * tap is a clonable interface, although cloning is highly unrealistic
 * for a real Ethernet device.
 *
 * Here are the bits needed for a clonable interface.
 */
static int	tap_clone_create(struct if_clone *, int);
static int	tap_clone_destroy(struct ifnet *);

struct if_clone tap_cloners = IF_CLONE_INITIALIZER("tap",
					tap_clone_create,
					tap_clone_destroy);

/* Helper functions shared by the two cloning code paths */
static struct tap_softc *tap_clone_creator(int);
int	tap_clone_destroyer(device_t);

static struct sysctllog *tap_sysctl_clog;

#ifdef _MODULE
devmajor_t tap_bmajor = -1, tap_cmajor = -1;
#endif

static u_int tap_count;

void
tapattach(int n)
{

	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in tapinit() below.
	 */
}

static void
tapinit(void)
{
	int error = config_cfattach_attach(tap_cd.cd_name, &tap_ca);

	if (error) {
		aprint_error("%s: unable to register cfattach\n",
		    tap_cd.cd_name);
		(void)config_cfdriver_detach(&tap_cd);
		return;
	}

	if_clone_attach(&tap_cloners);
	sysctl_tap_setup(&tap_sysctl_clog);
#ifdef _MODULE
	devsw_attach("tap", NULL, &tap_bmajor, &tap_cdevsw, &tap_cmajor);
#endif
}

static int
tapdetach(void)
{
	int error = 0;

	if (tap_count != 0)
		return EBUSY;

#ifdef _MODULE
	if (error == 0)
		error = devsw_detach(NULL, &tap_cdevsw);
#endif
	if (error == 0)
		sysctl_teardown(&tap_sysctl_clog);
	if (error == 0)
		if_clone_detach(&tap_cloners);

	if (error == 0)
		error = config_cfattach_detach(tap_cd.cd_name, &tap_ca);

	return error;
}

/* Pretty much useless for a pseudo-device */
static int
tap_match(device_t parent, cfdata_t cfdata, void *arg)
{

	return (1);
}

static void
tap_attach(device_t parent, device_t self, void *aux)
{
	struct tap_softc *sc = device_private(self);
	struct ifnet *ifp;
	const struct sysctlnode *node;
	int error;
	uint8_t enaddr[ETHER_ADDR_LEN] =
	    { 0xf2, 0x0b, 0xa4, 0xff, 0xff, 0xff };
	char enaddrstr[3 * ETHER_ADDR_LEN];

	sc->sc_dev = self;
	sc->sc_sih = NULL;
	getnanotime(&sc->sc_btime);
	sc->sc_atime = sc->sc_mtime = sc->sc_btime;
	sc->sc_flags = 0;
	selinit(&sc->sc_rsel);

	/*
	 * Initialize the two locks for the device.
	 *
	 * We need a lock here because even though the tap device can be
	 * opened only once, the file descriptor might be passed to another
	 * process, say a fork(2)ed child.
	 *
	 * The Giant lock saves us from most of the hassle, but since the
	 * read operation can sleep, we don't want two processes to wake up
	 * at the same moment and both try to dequeue a single packet.
	 *
	 * The queue for event listeners (used by kqueue(9), see below) has
	 * to be protected too, so use a spin lock.
	 */
	mutex_init(&sc->sc_rdlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_kqlock, MUTEX_DEFAULT, IPL_VM);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	/*
	 * In order to obtain a unique initial Ethernet address on a host,
	 * do some randomisation.  It's not meant for anything but avoiding
	 * hard-coding an address.
	 */
	cprng_fast(&enaddr[3], 3);

	aprint_verbose_dev(self, "Ethernet address %s\n",
	    ether_snprintf(enaddrstr, sizeof(enaddrstr), enaddr));

	/*
	 * Why 1000baseT? Why not? You can add more.
	 *
	 * Note that there are 3 steps: init, one or several additions to
	 * the list of supported media, and in the end, the selection of
	 * one of them.
	 */
	ifmedia_init(&sc->sc_im, 0, tap_mediachange, tap_mediastatus);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_AUTO);

	/*
	 * One should note that an interface must do multicast in order
	 * to support IPv6.
	 */
	ifp = &sc->sc_ec.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = tap_ioctl;
	ifp->if_start = tap_start;
	ifp->if_stop = tap_stop;
	ifp->if_init = tap_init;
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	/* These steps are mandatory for an Ethernet driver. */
	if_initialize(ifp);
	ether_ifattach(ifp, enaddr);
	if_register(ifp);

	/*
	 * Add a sysctl node for that interface.
	 *
	 * The pointer transmitted is not a string, but instead a pointer to
	 * the softc structure, which we can use to build the string value on
	 * the fly in the helper function of the node.  See the comments for
	 * tap_sysctl_handler for details.
	 *
	 * Usually sysctl_createv is called with CTL_CREATE as the
	 * second-to-last component.  However, we can allocate a number
	 * ourselves, as we are the only consumer of the net.link.<iface>
	 * node.  In this case, the unit number is conveniently used to
	 * number the node.  CTL_CREATE would just work, too.
	 */
	if ((error = sysctl_createv(NULL, 0, NULL,
	    &node, CTLFLAG_READWRITE,
	    CTLTYPE_STRING, device_xname(self), NULL,
	    tap_sysctl_handler, 0, (void *)sc, 18,
	    CTL_NET, AF_LINK, tap_node, device_unit(sc->sc_dev),
	    CTL_EOL)) != 0)
		aprint_error_dev(self, "sysctl_createv returned %d, ignoring\n",
		    error);
}

/*
 * When detaching, we do the inverse of what is done in the attach
 * routine, in reverse order.
 */
static int
tap_detach(device_t self, int flags)
{
	struct tap_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int error;
	int s;

	sc->sc_flags |= TAP_GOING;
	s = splnet();
	tap_stop(ifp, 1);
	if_down(ifp);
	splx(s);

	if (sc->sc_sih != NULL) {
		softint_disestablish(sc->sc_sih);
		sc->sc_sih = NULL;
	}

	/*
	 * Destroying a single leaf is a very straightforward operation using
	 * sysctl_destroyv.  One should be sure to always end the path with
	 * CTL_EOL.
	 */
	if ((error = sysctl_destroyv(NULL, CTL_NET, AF_LINK, tap_node,
	    device_unit(sc->sc_dev), CTL_EOL)) != 0)
		aprint_error_dev(self,
		    "sysctl_destroyv returned %d, ignoring\n", error);
	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_delete_instance(&sc->sc_im, IFM_INST_ANY);
	seldestroy(&sc->sc_rsel);
	mutex_destroy(&sc->sc_rdlock);
	mutex_destroy(&sc->sc_kqlock);

	pmf_device_deregister(self);

	return (0);
}

/*
 * This function is called by the ifmedia layer to notify the driver
 * that the user requested a media change.  A real driver would
 * reconfigure the hardware.
 */
static int
tap_mediachange(struct ifnet *ifp)
{
	return (0);
}

/*
 * Here the user asks for the currently used media.
 */
static void
tap_mediastatus(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;

	imr->ifm_active = sc->sc_im.ifm_cur->ifm_media;
}

/*
 * This is the function where we SEND packets.
 *
 * There is no 'receive' equivalent.  A typical driver will get
 * interrupts from the hardware, and from there will inject new packets
 * into the network stack.
 *
 * Once handled, a packet must be freed.  A real driver might not be able
 * to fit all the pending packets into the hardware, and is allowed to
 * return before having sent all the packets.  It should then use the
 * if_flags flag IFF_OACTIVE to notify the upper layer.
 *
 * There are also other flags one should check, such as IFF_PAUSE.
 *
 * It is our duty to make packets available to BPF listeners.
 *
 * You should be aware that this function is called by the Ethernet layer
 * at splnet().
 *
 * When the device is opened, we have to pass the packet(s) to the
 * userland.  For that we stay in OACTIVE mode while the userland gets
 * the packets, and we send a signal to the processes waiting to read.
 *
 * wakeup(sc) is the counterpart to the tsleep call in
 * tap_dev_read, while selnotify() is used for kevent(2) and
 * poll(2) (which includes select(2)) listeners.
 */
static void
tap_start(struct ifnet *ifp)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
	struct mbuf *m0;

	if ((sc->sc_flags & TAP_INUSE) == 0) {
		/* Simply drop packets */
		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			if (m0 == NULL)
				return;

			ifp->if_opackets++;
			bpf_mtap(ifp, m0);

			m_freem(m0);
		}
	} else if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		ifp->if_flags |= IFF_OACTIVE;
		wakeup(sc);
		selnotify(&sc->sc_rsel, 0, 1);
		if (sc->sc_flags & TAP_ASYNCIO)
			softint_schedule(sc->sc_sih);
	}
}

static void
tap_softintr(void *cookie)
{
	struct tap_softc *sc;
	struct ifnet *ifp;
	int a, b;

	sc = cookie;

	if (sc->sc_flags & TAP_ASYNCIO) {
		ifp = &sc->sc_ec.ec_if;
		if (ifp->if_flags & IFF_RUNNING) {
			a = POLL_IN;
			b = POLLIN|POLLRDNORM;
		} else {
			a = POLL_HUP;
			b = 0;
		}
		fownsignal(sc->sc_pgid, SIGIO, a, b, NULL);
	}
}

/*
 * A typical driver will only contain the following handlers for
 * ioctl calls, except SIOCSIFPHYADDR.
 * The latter is a hack I used to set the Ethernet address of the
 * faked device.
 *
 * Note that both ifmedia_ioctl() and ether_ioctl() have to be
 * called under splnet().
 */
static int
tap_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
#ifdef OSIOCSIFMEDIA
	case OSIOCSIFMEDIA:
#endif
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_im, cmd);
		break;
	case SIOCSIFPHYADDR:
		error = tap_lifaddr(ifp, cmd, (struct ifaliasreq *)data);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET)
			error = 0;
		break;
	}

	splx(s);

	return (error);
}

/*
 * Helper function to set the Ethernet address.  This has been replaced
 * by the generic SIOCALIFADDR ioctl on a PF_LINK socket.
 */
static int
tap_lifaddr(struct ifnet *ifp, u_long cmd, struct ifaliasreq *ifra)
{
	const struct sockaddr *sa = &ifra->ifra_addr;

	if (sa->sa_family != AF_LINK)
		return (EINVAL);

	if_set_sadl(ifp, sa->sa_data, ETHER_ADDR_LEN, false);

	return (0);
}

/*
 * _init() would typically be called when an interface goes up,
 * meaning it should configure itself into the state in which it
 * can send packets.
 */
static int
tap_init(struct ifnet *ifp)
{
	ifp->if_flags |= IFF_RUNNING;

	tap_start(ifp);

	return (0);
}

/*
 * _stop() is called when an interface goes down.  It is our
 * responsibility to validate that state by clearing the
 * IFF_RUNNING flag.
 *
 * We have to wake up all the sleeping processes to have the pending
 * read requests cancelled.
 */
static void
tap_stop(struct ifnet *ifp, int disable)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;

	ifp->if_flags &= ~IFF_RUNNING;
	wakeup(sc);
	selnotify(&sc->sc_rsel, 0, 1);
	if (sc->sc_flags & TAP_ASYNCIO)
		softint_schedule(sc->sc_sih);
}

/*
 * The 'create' command of ifconfig can be used to create
 * any numbered instance of a given device.  Thus we have to
 * make sure we have enough room in cd_devs to create the
 * user-specified instance.
 * config_attach_pseudo will do this for us.
 */
static int
tap_clone_create(struct if_clone *ifc, int unit)
{
	if (tap_clone_creator(unit) == NULL) {
		aprint_error("%s%d: unable to attach an instance\n",
		    tap_cd.cd_name, unit);
		return (ENXIO);
	}
	atomic_inc_uint(&tap_count);
	return (0);
}

/*
 * tap(4) can be cloned in two ways:  by using 'ifconfig tap0 create',
 * which goes through the network interface cloning API and calls
 * tap_clone_create above, or by opening the cloning device node,
 * whose minor number is TAP_CLONER.  See below for an explanation of
 * how that part works.
 */
static struct tap_softc *
tap_clone_creator(int unit)
{
	struct cfdata *cf;

	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
	cf->cf_name = tap_cd.cd_name;
	cf->cf_atname = tap_ca.ca_name;
	if (unit == -1) {
		/* let autoconf find the first free one */
		cf->cf_unit = 0;
		cf->cf_fstate = FSTATE_STAR;
	} else {
		cf->cf_unit = unit;
		cf->cf_fstate = FSTATE_NOTFOUND;
	}

	return device_private(config_attach_pseudo(cf));
}

/*
 * The clean design of if_clone and autoconf(9) makes that part
 * really straightforward.  The second argument of config_detach
 * means neither QUIET nor FORCED.
 */
static int
tap_clone_destroy(struct ifnet *ifp)
{
	struct tap_softc *sc = ifp->if_softc;
	int error = tap_clone_destroyer(sc->sc_dev);

	if (error == 0)
		atomic_dec_uint(&tap_count);
	return error;
}

int
tap_clone_destroyer(device_t dev)
{
	cfdata_t cf = device_cfdata(dev);
	int error;

	if ((error = config_detach(dev, 0)) != 0)
		aprint_error_dev(dev, "unable to detach instance\n");
	free(cf, M_DEVBUF);

	return (error);
}

/*
 * tap(4) is a bit of a hybrid device.  It can be used in two different
 * ways:
 * 1. ifconfig tapN create, then use /dev/tapN to read/write off it.
 * 2. open /dev/tap, get a new interface created and read/write off it.
 *    That interface is destroyed when the process that had it created
 *    exits.
 *
 * The first way is managed by the cdevsw structure, and you access
 * interfaces through a (major, minor) mapping:  tap4 is obtained by the
 * minor number 4.  The entry points for the cdevsw interface are
 * prefixed by tap_cdev_.
 *
 * The second way is the so-called "cloning" device.  It's a special
 * minor number (chosen as the maximal number, to allow as many tap
 * devices as possible).  The user first opens the cloner (e.g.,
 * /dev/tap), and that call ends up in tap_cdev_open.  The actual place
 * where it is handled is tap_dev_cloner.
 *
 * A tap device cannot be opened more than once at a time, so the cdevsw
 * part of open() does nothing but note that the interface is in use and
 * hence ready to actually handle packets.
 */

static int
tap_cdev_open(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct tap_softc *sc;

	if (minor(dev) == TAP_CLONER)
		return tap_dev_cloner(l);

	sc = device_lookup_private(&tap_cd, minor(dev));
	if (sc == NULL)
		return (ENXIO);

	/* The device can only be opened once */
	if (sc->sc_flags & TAP_INUSE)
		return (EBUSY);
	sc->sc_flags |= TAP_INUSE;
	return (0);
}
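/*
 * Illustrative sketch (not part of the driver):  from userland, the
 * cloning path described above is typically used like this.  Error
 * handling is omitted and the buffer size is arbitrary.
 *
 *	int fd = open("/dev/tap", O_RDWR);
 *	struct ifreq ifr;
 *	char buf[4096];
 *
 *	ioctl(fd, TAPGIFNAME, &ifr);	// name of the freshly created tapN
 *	read(fd, buf, sizeof(buf));	// one full Ethernet frame per read
 *	write(fd, buf, sizeof(buf));	// inject one full Ethernet frame
 *
 * Closing fd destroys the interface again, as explained below.
 */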
/*
 * There are several kinds of cloning devices, and the simplest is the
 * one tap(4) uses.  What it does is replace the file descriptor with a
 * new one, with its own fileops structure (which maps to the various
 * read, write, ioctl functions).  It starts by allocating a new file
 * descriptor with fd_allocfile, then actually creates the new tap
 * device.
 *
 * Once those two steps are successful, we can re-wire the existing file
 * descriptor to its new self.  This is done with fd_clone():  it fills
 * the fp structure as needed (notably f_devunit gets filled with the
 * fifth parameter passed, the unit of the tap device, which will allow
 * us to identify the device later), and returns EMOVEFD.
 *
 * That magic value is interpreted by sys_open() which then replaces the
 * current file descriptor with the new one (through a magic member of
 * struct lwp, l_dupfd).
 *
 * The tap device is flagged as being busy since it otherwise could be
 * externally accessed through the corresponding device node with the
 * cdevsw interface.
 */

static int
tap_dev_cloner(struct lwp *l)
{
	struct tap_softc *sc;
	file_t *fp;
	int error, fd;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return (error);

	if ((sc = tap_clone_creator(-1)) == NULL) {
		fd_abort(curproc, fp, fd);
		return (ENXIO);
	}

	sc->sc_flags |= TAP_INUSE;

	return fd_clone(fp, fd, FREAD|FWRITE, &tap_fileops,
	    (void *)(intptr_t)device_unit(sc->sc_dev));
}

/*
 * While all other operations (read, write, ioctl, poll and kqfilter) are
 * really the same whether we are in cdevsw or fileops mode, the close()
 * function is slightly different in the two cases.
 *
 * As for the others, the core of it is shared in tap_dev_close.  What
 * it does is sufficient for the cdevsw interface, but the cloning
 * interface needs one more thing:  the interface is destroyed when the
 * process that created it closes it.
 */
static int
tap_cdev_close(dev_t dev, int flags, int fmt,
    struct lwp *l)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, minor(dev));

	if (sc == NULL)
		return (ENXIO);

	return tap_dev_close(sc);
}

/*
 * It might happen that the administrator used ifconfig to externally
 * destroy the interface.  In that case, tap_fops_close will be called
 * while tap_detach is already happening.  If we called it again from
 * here, we would deadlock.  TAP_GOING ensures that this situation
 * doesn't happen.
 */
static int
tap_fops_close(file_t *fp)
{
	int unit = fp->f_devunit;
	struct tap_softc *sc;
	int error;

	sc = device_lookup_private(&tap_cd, unit);
	if (sc == NULL)
		return (ENXIO);

	/* tap_dev_close currently always succeeds, but that might not
	 * always be the case. */
	KERNEL_LOCK(1, NULL);
	if ((error = tap_dev_close(sc)) != 0) {
		KERNEL_UNLOCK_ONE(NULL);
		return (error);
	}

	/* Destroy the device now that it is no longer useful,
	 * unless it's already being destroyed. */
	if ((sc->sc_flags & TAP_GOING) != 0) {
		KERNEL_UNLOCK_ONE(NULL);
		return (0);
	}

	error = tap_clone_destroyer(sc->sc_dev);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}

static int
tap_dev_close(struct tap_softc *sc)
{
	struct ifnet *ifp;
	int s;

	s = splnet();
	/* Let tap_start handle packets again */
	ifp = &sc->sc_ec.ec_if;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Purge output queue */
	if (!(IFQ_IS_EMPTY(&ifp->if_snd))) {
		struct mbuf *m;

		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;

			ifp->if_opackets++;
			bpf_mtap(ifp, m);
			m_freem(m);
		}
	}
	splx(s);

	if (sc->sc_sih != NULL) {
		softint_disestablish(sc->sc_sih);
		sc->sc_sih = NULL;
	}
	sc->sc_flags &= ~(TAP_INUSE | TAP_ASYNCIO);

	return (0);
}

static int
tap_cdev_read(dev_t dev, struct uio *uio, int flags)
{
	return tap_dev_read(minor(dev), uio, flags);
}

static int
tap_fops_read(file_t *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	int error;

	KERNEL_LOCK(1, NULL);
	error = tap_dev_read(fp->f_devunit, uio, flags);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}

static int
tap_dev_read(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc = device_lookup_private(&tap_cd, unit);
	struct ifnet *ifp;
	struct mbuf *m, *n;
	int error = 0, s;

	if (sc == NULL)
		return (ENXIO);

	getnanotime(&sc->sc_atime);

	ifp = &sc->sc_ec.ec_if;
	if ((ifp->if_flags & IFF_UP) == 0)
		return (EHOSTDOWN);

	/*
	 * In the TAP_NBIO case, we have to make sure we won't be sleeping.
	 */
	if ((sc->sc_flags & TAP_NBIO) != 0) {
		if (!mutex_tryenter(&sc->sc_rdlock))
			return (EWOULDBLOCK);
	} else {
		mutex_enter(&sc->sc_rdlock);
	}

	s = splnet();
	if (IFQ_IS_EMPTY(&ifp->if_snd)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/*
		 * We must release the lock before sleeping, and re-acquire
		 * it after.
		 */
		mutex_exit(&sc->sc_rdlock);
		if (sc->sc_flags & TAP_NBIO)
			error = EWOULDBLOCK;
		else
			error = tsleep(sc, PSOCK|PCATCH, "tap", 0);
		splx(s);

		if (error != 0)
			return (error);
		/* The device might have been downed */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (EHOSTDOWN);
		if ((sc->sc_flags & TAP_NBIO)) {
			if (!mutex_tryenter(&sc->sc_rdlock))
				return (EWOULDBLOCK);
		} else {
			mutex_enter(&sc->sc_rdlock);
		}
		s = splnet();
	}

	IFQ_DEQUEUE(&ifp->if_snd, m);
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
	if (m == NULL) {
		error = 0;
		goto out;
	}

	ifp->if_opackets++;
	bpf_mtap(ifp, m);

	/*
	 * One read is one packet.
	 */
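	/*
	 * If the user buffer is smaller than the packet, the leftover
	 * mbufs are simply freed below, so a reader should supply a
	 * buffer at least as large as the interface MTU plus the
	 * Ethernet header.
	 */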
	do {
		error = uiomove(mtod(m, void *),
		    min(m->m_len, uio->uio_resid), uio);
		MFREE(m, n);
		m = n;
	} while (m != NULL && uio->uio_resid > 0 && error == 0);

	if (m != NULL)
		m_freem(m);

out:
	mutex_exit(&sc->sc_rdlock);
	return (error);
}

static int
tap_fops_stat(file_t *fp, struct stat *st)
{
	int error = 0;
	struct tap_softc *sc;
	int unit = fp->f_devunit;

	(void)memset(st, 0, sizeof(*st));

	KERNEL_LOCK(1, NULL);
	sc = device_lookup_private(&tap_cd, unit);
	if (sc == NULL) {
		error = ENXIO;
		goto out;
	}

	st->st_dev = makedev(cdevsw_lookup_major(&tap_cdevsw), unit);
	st->st_atimespec = sc->sc_atime;
	st->st_mtimespec = sc->sc_mtime;
	st->st_ctimespec = st->st_birthtimespec = sc->sc_btime;
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
out:
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}

static int
tap_cdev_write(dev_t dev, struct uio *uio, int flags)
{
	return tap_dev_write(minor(dev), uio, flags);
}

static int
tap_fops_write(file_t *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	int error;

	KERNEL_LOCK(1, NULL);
	error = tap_dev_write(fp->f_devunit, uio, flags);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}

static int
tap_dev_write(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);
	struct ifnet *ifp;
	struct mbuf *m, **mp;
	int error = 0;
	int s;

	if (sc == NULL)
		return (ENXIO);

	getnanotime(&sc->sc_mtime);
	ifp = &sc->sc_ec.ec_if;

	/* One write, one packet, that's the rule */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		ifp->if_ierrors++;
		return (ENOBUFS);
	}
	m->m_pkthdr.len = uio->uio_resid;

	mp = &m;
	while (error == 0 && uio->uio_resid > 0) {
		if (*mp != m) {
			MGET(*mp, M_DONTWAIT, MT_DATA);
			if (*mp == NULL) {
				error = ENOBUFS;
				break;
			}
		}
		(*mp)->m_len = min(MHLEN, uio->uio_resid);
		error = uiomove(mtod(*mp, void *), (*mp)->m_len, uio);
		mp = &(*mp)->m_next;
	}
	if (error) {
		ifp->if_ierrors++;
		m_freem(m);
		return (error);
	}

	ifp->if_ipackets++;
	m_set_rcvif(m, ifp);

	bpf_mtap(ifp, m);
	s = splnet();
	if_input(ifp, m);
	splx(s);

	return (0);
}
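/*
 * Note that the mbuf chain handed to if_input() above is passed to the
 * Ethernet input path as-is, so what userland writes must be a complete
 * Ethernet frame:  destination and source addresses and the EtherType,
 * followed by the payload.
 */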
static int
tap_cdev_ioctl(dev_t dev, u_long cmd, void *data, int flags,
    struct lwp *l)
{
	return tap_dev_ioctl(minor(dev), cmd, data, l);
}

static int
tap_fops_ioctl(file_t *fp, u_long cmd, void *data)
{
	return tap_dev_ioctl(fp->f_devunit, cmd, data, curlwp);
}

static int
tap_dev_ioctl(int unit, u_long cmd, void *data, struct lwp *l)
{
	struct tap_softc *sc = device_lookup_private(&tap_cd, unit);

	if (sc == NULL)
		return ENXIO;

	switch (cmd) {
	case FIONREAD:
		{
			struct ifnet *ifp = &sc->sc_ec.ec_if;
			struct mbuf *m;
			int s;

			s = splnet();
			IFQ_POLL(&ifp->if_snd, m);

			if (m == NULL)
				*(int *)data = 0;
			else
				*(int *)data = m->m_pkthdr.len;
			splx(s);
			return 0;
		}
	case TIOCSPGRP:
	case FIOSETOWN:
		return fsetown(&sc->sc_pgid, cmd, data);
	case TIOCGPGRP:
	case FIOGETOWN:
		return fgetown(sc->sc_pgid, cmd, data);
	case FIOASYNC:
		if (*(int *)data) {
			if (sc->sc_sih == NULL) {
				sc->sc_sih = softint_establish(SOFTINT_CLOCK,
				    tap_softintr, sc);
				if (sc->sc_sih == NULL)
					return EBUSY; /* XXX */
			}
			sc->sc_flags |= TAP_ASYNCIO;
		} else {
			sc->sc_flags &= ~TAP_ASYNCIO;
			if (sc->sc_sih != NULL) {
				softint_disestablish(sc->sc_sih);
				sc->sc_sih = NULL;
			}
		}
		return 0;
	case FIONBIO:
		if (*(int *)data)
			sc->sc_flags |= TAP_NBIO;
		else
			sc->sc_flags &= ~TAP_NBIO;
		return 0;
#ifdef OTAPGIFNAME
	case OTAPGIFNAME:
#endif
	case TAPGIFNAME:
		{
			struct ifreq *ifr = (struct ifreq *)data;
			struct ifnet *ifp = &sc->sc_ec.ec_if;

			strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
			return 0;
		}
	default:
		return ENOTTY;
	}
}

static int
tap_cdev_poll(dev_t dev, int events, struct lwp *l)
{
	return tap_dev_poll(minor(dev), events, l);
}

static int
tap_fops_poll(file_t *fp, int events)
{
	return tap_dev_poll(fp->f_devunit, events, curlwp);
}

static int
tap_dev_poll(int unit, int events, struct lwp *l)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);
	int revents = 0;

	if (sc == NULL)
		return POLLERR;

	if (events & (POLLIN|POLLRDNORM)) {
		struct ifnet *ifp = &sc->sc_ec.ec_if;
		struct mbuf *m;
		int s;

		s = splnet();
		IFQ_POLL(&ifp->if_snd, m);

		if (m != NULL)
			revents |= events & (POLLIN|POLLRDNORM);
		else {
			mutex_spin_enter(&sc->sc_kqlock);
			selrecord(l, &sc->sc_rsel);
			mutex_spin_exit(&sc->sc_kqlock);
		}
		splx(s);
	}
	revents |= events & (POLLOUT|POLLWRNORM);

	return (revents);
}

static struct filterops tap_read_filterops = { 1, NULL, tap_kqdetach,
	tap_kqread };
static struct filterops tap_seltrue_filterops = { 1, NULL, tap_kqdetach,
	filt_seltrue };

static int
tap_cdev_kqfilter(dev_t dev, struct knote *kn)
{
	return tap_dev_kqfilter(minor(dev), kn);
}

static int
tap_fops_kqfilter(file_t *fp, struct knote *kn)
{
	return tap_dev_kqfilter(fp->f_devunit, kn);
}

static int
tap_dev_kqfilter(int unit, struct knote *kn)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);

	if (sc == NULL)
		return (ENXIO);

	KERNEL_LOCK(1, NULL);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tap_read_filterops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &tap_seltrue_filterops;
		break;
	default:
		KERNEL_UNLOCK_ONE(NULL);
		return (EINVAL);
	}

	kn->kn_hook = sc;
	mutex_spin_enter(&sc->sc_kqlock);
	SLIST_INSERT_HEAD(&sc->sc_rsel.sel_klist, kn, kn_selnext);
	mutex_spin_exit(&sc->sc_kqlock);
	KERNEL_UNLOCK_ONE(NULL);
	return (0);
}

static void
tap_kqdetach(struct knote *kn)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;

	KERNEL_LOCK(1, NULL);
	mutex_spin_enter(&sc->sc_kqlock);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	mutex_spin_exit(&sc->sc_kqlock);
	KERNEL_UNLOCK_ONE(NULL);
}
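/*
 * EVFILT_READ event handler:  the knote is active whenever a packet is
 * queued on the interface's send queue; kn_data is set to the length of
 * the packet at the head of the queue.
 */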
static int
tap_kqread(struct knote *kn, long hint)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int s, rv;

	KERNEL_LOCK(1, NULL);
	s = splnet();
	IFQ_POLL(&ifp->if_snd, m);

	if (m == NULL)
		kn->kn_data = 0;
	else
		kn->kn_data = m->m_pkthdr.len;
	splx(s);
	rv = (kn->kn_data != 0 ? 1 : 0);
	KERNEL_UNLOCK_ONE(NULL);
	return rv;
}

/*
 * sysctl management routines
 * You can set the address of an interface through:
 * net.link.tap.tap<number>
 *
 * Note the consistent use of tap_sysctl_clog in order to use
 * sysctl_teardown at unload time.
 *
 * In the kernel you will find a lot of SYSCTL_SETUP blocks.  Those
 * blocks register a function in a special section of the kernel
 * (called a link set) which is used at init_sysctl() time to cycle
 * through all those functions to create the kernel's sysctl tree.
 *
 * It is not possible to use link sets in a module, so the easiest
 * thing is to simply call our own setup routine at load time.
 *
 * In the SYSCTL_SETUP blocks you find in the kernel, nodes have the
 * CTLFLAG_PERMANENT flag, meaning they cannot be removed.  Once the
 * whole kernel sysctl tree is built, it is not possible to add any
 * permanent node.
 *
 * It should be noted that we're not saving the sysctlnode pointer
 * we are returned when creating the "tap" node.  That structure
 * cannot be trusted once out of the calling function, as it might
 * get reused.  So we just save the MIB number, and always give the
 * full path starting from the root for later calls to sysctl_createv
 * and sysctl_destroyv.
 */
static void
sysctl_tap_setup(struct sysctllog **clog)
{
	const struct sysctlnode *node;
	int error = 0;

	if ((error = sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "link", NULL,
	    NULL, 0, NULL, 0,
	    CTL_NET, AF_LINK, CTL_EOL)) != 0)
		return;

	/*
	 * The first four parameters of sysctl_createv are for management.
	 *
	 * The four that follow, starting with the flags (CTLFLAG_PERMANENT
	 * here), describe the node.
	 *
	 * The next series of four set its value, through various possible
	 * means.
	 *
	 * Last but not least, the path to the node is described.  That path
	 * is relative to the given root (third argument).  Here we're
	 * starting from the root.
	 */
	if ((error = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "tap", NULL,
	    NULL, 0, NULL, 0,
	    CTL_NET, AF_LINK, CTL_CREATE, CTL_EOL)) != 0)
		return;
	tap_node = node->sysctl_num;
}
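/*
 * Illustrative sketch (not part of the driver):  the per-interface node
 * created under net.link.tap can be read or written from userland,
 * either with sysctl(8):
 *
 *	sysctl -w net.link.tap.tap0=f2:0b:a4:00:00:01
 *
 * or programmatically (error handling omitted, "tap0" hypothetical):
 *
 *	const char *new = "f2:0b:a4:00:00:01";
 *	char old[18];
 *	size_t oldlen = sizeof(old);
 *
 *	sysctlbyname("net.link.tap.tap0", old, &oldlen,
 *	    new, strlen(new) + 1);
 *
 * The helper below is what turns those reads and writes into the
 * interface's link-level address.
 */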
/*
 * The helper functions make Andrew Brown's interface really
 * shine.  They make it possible to create values on the fly, whether
 * the sysctl value is read or written.
 *
 * As shown in the example in the man page, the first step is to
 * create a copy of the node to have sysctl_lookup work on it.
 *
 * Here, we have more work to do than just a copy, since we have
 * to create the string.  The first step is to collect the actual
 * value of the node, which is a convenient pointer to the softc
 * of the interface.  From there we create the string and use it
 * as the value, but only for the *copy* of the node.
 *
 * Then we let sysctl_lookup do the magic, which consists in
 * setting oldp and newp as required by the operation.  When the
 * value is read, that means that the string will be copied to
 * the user, and when it is written, the new value will be copied
 * over into the addr array.
 *
 * If newp is NULL, the user was reading the value, so we don't
 * have anything else to do.  If a new value was written, we
 * have to check it.
 *
 * If it is incorrect, we can return an error and leave 'node' as
 * it is:  since it is a copy of the actual node, the change will
 * be forgotten.
 *
 * Upon a correct input, we commit the change to the ifnet
 * structure of our interface.
 */
static int
tap_sysctl_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct tap_softc *sc;
	struct ifnet *ifp;
	int error;
	size_t len;
	char addr[3 * ETHER_ADDR_LEN];
	uint8_t enaddr[ETHER_ADDR_LEN];

	node = *rnode;
	sc = node.sysctl_data;
	ifp = &sc->sc_ec.ec_if;
	(void)ether_snprintf(addr, sizeof(addr), CLLADDR(ifp->if_sadl));
	node.sysctl_data = addr;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	len = strlen(addr);
	if (len < 11 || len > 17)
		return (EINVAL);

	/* Commit change */
	if (ether_aton_r(enaddr, sizeof(enaddr), addr) != 0)
		return (EINVAL);
	if_set_sadl(ifp, enaddr, ETHER_ADDR_LEN, false);
	return (error);
}

/*
 * Module infrastructure
 */
#include "if_module.h"

IF_MODULE(MODULE_CLASS_DRIVER, tap, "")