/*	$NetBSD: if_tap.c,v 1.113 2019/05/29 10:07:30 msaitoh Exp $	*/

/*
 * Copyright (c) 2003, 2004, 2008, 2009 The NetBSD Foundation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * tap(4) is a virtual Ethernet interface.  It appears as a real Ethernet
 * device to the system, but can also be accessed by userland through a
 * character device interface, which allows reading and injecting frames.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tap.c,v 1.113 2019/05/29 10:07:30 msaitoh Exp $");

#if defined(_KERNEL_OPT)

#include "opt_modular.h"
#endif

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/cprng.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/intr.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/select.h>
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/if_tap.h>
#include <net/bpf.h>

#include "ioconf.h"

/*
 * sysctl node management
 *
 * It's not really possible to use a SYSCTL_SETUP block with the
 * current module implementation, so it is easier to just define
 * our own function.
 *
 * The handler function is a "helper" in Andrew Brown's sysctl
 * framework terminology.  It is used as a gateway for sysctl
 * requests over the nodes.
 *
 * The sysctl log (tap_sysctl_clog) allows the module to log creations
 * of nodes and destroy them all at once using sysctl_teardown.
 */
static int	tap_node;
static int	tap_sysctl_handler(SYSCTLFN_PROTO);
static void	sysctl_tap_setup(struct sysctllog **);

/*
 * Since we're an Ethernet device, we need the following two
 * components: a struct ethercom and a struct ifmedia,
 * since we don't attach a PHY to ourselves.
 * We could emulate one, but there's no real point.
 */

struct tap_softc {
	device_t	sc_dev;
	struct ifmedia	sc_im;
	struct ethercom	sc_ec;
	int		sc_flags;
#define	TAP_INUSE	0x00000001	/* tap device can only be opened once */
#define	TAP_ASYNCIO	0x00000002	/* user is using async I/O (SIGIO) on the device */
#define	TAP_NBIO	0x00000004	/* user wants calls to avoid blocking */
#define	TAP_GOING	0x00000008	/* interface is being destroyed */
	struct selinfo	sc_rsel;
	pid_t		sc_pgid;	/* For async. IO */
	kmutex_t	sc_lock;
	kcondvar_t	sc_cv;
	void		*sc_sih;
	struct timespec sc_atime;
	struct timespec sc_mtime;
	struct timespec sc_btime;
};

/* autoconf(9) glue */

static int	tap_match(device_t, cfdata_t, void *);
static void	tap_attach(device_t, device_t, void *);
static int	tap_detach(device_t, int);

CFATTACH_DECL_NEW(tap, sizeof(struct tap_softc),
    tap_match, tap_attach, tap_detach, NULL);
extern struct cfdriver tap_cd;

/* Real device access routines */
static int	tap_dev_close(struct tap_softc *);
static int	tap_dev_read(int, struct uio *, int);
static int	tap_dev_write(int, struct uio *, int);
static int	tap_dev_ioctl(int, u_long, void *, struct lwp *);
static int	tap_dev_poll(int, int, struct lwp *);
static int	tap_dev_kqfilter(int, struct knote *);

/* Fileops access routines */
static int	tap_fops_close(file_t *);
static int	tap_fops_read(file_t *, off_t *, struct uio *,
    kauth_cred_t, int);
static int	tap_fops_write(file_t *, off_t *, struct uio *,
    kauth_cred_t, int);
static int	tap_fops_ioctl(file_t *, u_long, void *);
static int	tap_fops_poll(file_t *, int);
static int	tap_fops_stat(file_t *, struct stat *);
static int	tap_fops_kqfilter(file_t *, struct knote *);

static const struct fileops tap_fileops = {
	.fo_name = "tap",
	.fo_read = tap_fops_read,
	.fo_write = tap_fops_write,
	.fo_ioctl = tap_fops_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = tap_fops_poll,
	.fo_stat = tap_fops_stat,
	.fo_close = tap_fops_close,
	.fo_kqfilter = tap_fops_kqfilter,
	.fo_restart = fnullop_restart,
};

/* Helper for cloning open() */
static int	tap_dev_cloner(struct lwp *);

/* Character device routines */
static int	tap_cdev_open(dev_t, int, int, struct lwp *);
static int	tap_cdev_close(dev_t, int, int, struct lwp *);
static int	tap_cdev_read(dev_t, struct uio *, int);
static int	tap_cdev_write(dev_t, struct uio *, int);
static int	tap_cdev_ioctl(dev_t, u_long, void *, int, struct lwp *);
static int	tap_cdev_poll(dev_t, int, struct lwp *);
static int	tap_cdev_kqfilter(dev_t, struct knote *);

const struct cdevsw tap_cdevsw = {
	.d_open = tap_cdev_open,
	.d_close = tap_cdev_close,
	.d_read = tap_cdev_read,
	.d_write = tap_cdev_write,
	.d_ioctl = tap_cdev_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = tap_cdev_poll,
	.d_mmap = nommap,
	.d_kqfilter = tap_cdev_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

#define	TAP_CLONER	0xfffff		/* Maximal minor value */

/* kqueue-related routines */
static void	tap_kqdetach(struct knote *);
static int	tap_kqread(struct knote *, long);

/*
 * Those are needed by the if_media interface.
 */

static int	tap_mediachange(struct ifnet *);
static void	tap_mediastatus(struct ifnet *, struct ifmediareq *);

/*
 * Those are needed by the ifnet interface, and would typically be
 * there for any network interface driver.
 * Some other routines are optional: watchdog and drain.
 */

static void	tap_start(struct ifnet *);
static void	tap_stop(struct ifnet *, int);
static int	tap_init(struct ifnet *);
static int	tap_ioctl(struct ifnet *, u_long, void *);

/* Internal functions */
static int	tap_lifaddr(struct ifnet *, u_long, struct ifaliasreq *);
static void	tap_softintr(void *);

/*
 * tap is a clonable interface, although it is highly unrealistic for
 * an Ethernet device.
 *
 * Here are the bits needed for a clonable interface.
 */
static int	tap_clone_create(struct if_clone *, int);
static int	tap_clone_destroy(struct ifnet *);

struct if_clone tap_cloners = IF_CLONE_INITIALIZER("tap",
    tap_clone_create, tap_clone_destroy);

/* Helper functions shared by the two cloning code paths */
static struct tap_softc *	tap_clone_creator(int);
int	tap_clone_destroyer(device_t);

static struct sysctllog *tap_sysctl_clog;

#ifdef _MODULE
devmajor_t tap_bmajor = -1, tap_cmajor = -1;
#endif

static u_int tap_count;

void
tapattach(int n)
{

	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in tapinit() below.
	 */
}

static void
tapinit(void)
{
	int error = config_cfattach_attach(tap_cd.cd_name, &tap_ca);

	if (error) {
		aprint_error("%s: unable to register cfattach\n",
		    tap_cd.cd_name);
		(void)config_cfdriver_detach(&tap_cd);
		return;
	}

	if_clone_attach(&tap_cloners);
	sysctl_tap_setup(&tap_sysctl_clog);
#ifdef _MODULE
	devsw_attach("tap", NULL, &tap_bmajor, &tap_cdevsw, &tap_cmajor);
#endif
}

static int
tapdetach(void)
{
	int error = 0;

	if_clone_detach(&tap_cloners);
#ifdef _MODULE
	error = devsw_detach(NULL, &tap_cdevsw);
	if (error != 0)
		goto out2;
#endif

	if (tap_count != 0) {
		error = EBUSY;
		goto out1;
	}

	error = config_cfattach_detach(tap_cd.cd_name, &tap_ca);
	if (error != 0)
		goto out1;

	sysctl_teardown(&tap_sysctl_clog);

	return 0;

 out1:
#ifdef _MODULE
	devsw_attach("tap", NULL, &tap_bmajor, &tap_cdevsw, &tap_cmajor);
 out2:
#endif
	if_clone_attach(&tap_cloners);

	return error;
}

/* Pretty much useless for a pseudo-device */
static int
tap_match(device_t parent, cfdata_t cfdata, void *arg)
{

	return 1;
}

void
tap_attach(device_t parent, device_t self, void *aux)
{
	struct tap_softc *sc = device_private(self);
	struct ifnet *ifp;
	const struct sysctlnode *node;
	int error;
	uint8_t enaddr[ETHER_ADDR_LEN] =
	    { 0xf2, 0x0b, 0xa4, 0xff, 0xff, 0xff };
	char enaddrstr[3 * ETHER_ADDR_LEN];

	sc->sc_dev = self;
	sc->sc_sih = NULL;
	getnanotime(&sc->sc_btime);
	sc->sc_atime = sc->sc_mtime = sc->sc_btime;
	sc->sc_flags = 0;
	selinit(&sc->sc_rsel);

	cv_init(&sc->sc_cv, "tapread");
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	/*
	 * In order to obtain a unique initial Ethernet address on a host,
	 * do some randomisation.  It's not meant for anything but avoiding
	 * hard-coding an address.
	 */
	cprng_fast(&enaddr[3], 3);

	aprint_verbose_dev(self, "Ethernet address %s\n",
	    ether_snprintf(enaddrstr, sizeof(enaddrstr), enaddr));

	/*
	 * Why 1000baseT?  Why not?  You can add more.
	 *
	 * Note that there are 3 steps: init, one or several additions to
	 * the list of supported media, and in the end, the selection of
	 * one of them.
	 */
	sc->sc_ec.ec_ifmedia = &sc->sc_im;
	ifmedia_init(&sc->sc_im, 0, tap_mediachange, tap_mediastatus);
	ifmedia_add(&sc->sc_im, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_im, IFM_ETHER | IFM_AUTO);

	/*
	 * One should note that an interface must do multicast in order
	 * to support IPv6.
	 */
	ifp = &sc->sc_ec.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
#ifdef NET_MPSAFE
	ifp->if_extflags |= IFEF_MPSAFE;
#endif
	ifp->if_ioctl = tap_ioctl;
	ifp->if_start = tap_start;
	ifp->if_stop = tap_stop;
	ifp->if_init = tap_init;
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	/* Those steps are mandatory for an Ethernet driver. */
	error = if_initialize(ifp);
	if (error != 0) {
		aprint_error_dev(self, "if_initialize failed(%d)\n", error);
		ifmedia_removeall(&sc->sc_im);
		pmf_device_deregister(self);
		mutex_destroy(&sc->sc_lock);
		seldestroy(&sc->sc_rsel);

		return;	/* Error */
	}
	ifp->if_percpuq = if_percpuq_create(ifp);
	ether_ifattach(ifp, enaddr);
	if_register(ifp);

	/*
	 * Add a sysctl node for that interface.
	 *
	 * The pointer transmitted is not a string, but instead a pointer to
	 * the softc structure, which we can use to build the string value on
	 * the fly in the helper function of the node.  See the comments for
	 * tap_sysctl_handler for details.
	 *
	 * Usually sysctl_createv is called with CTL_CREATE as the
	 * next-to-last component.  However, we can allocate a number
	 * ourselves, as we are the only consumer of the net.link.<iface>
	 * node.  In this case, the unit number is conveniently used to
	 * number the node.  CTL_CREATE would just work, too.
	 */
	if ((error = sysctl_createv(NULL, 0, NULL,
	    &node, CTLFLAG_READWRITE,
	    CTLTYPE_STRING, device_xname(self), NULL,
	    tap_sysctl_handler, 0, (void *)sc, 18,
	    CTL_NET, AF_LINK, tap_node, device_unit(sc->sc_dev),
	    CTL_EOL)) != 0)
		aprint_error_dev(self,
		    "sysctl_createv returned %d, ignoring\n", error);
}

/*
 * When detaching, we do the inverse of what is done in the attach
 * routine, in reverse order.
 */
static int
tap_detach(device_t self, int flags)
{
	struct tap_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int error;

	sc->sc_flags |= TAP_GOING;
	tap_stop(ifp, 1);
	if_down(ifp);

	if (sc->sc_sih != NULL) {
		softint_disestablish(sc->sc_sih);
		sc->sc_sih = NULL;
	}

	/*
	 * Destroying a single leaf is a very straightforward operation using
	 * sysctl_destroyv.  One should be sure to always end the path with
	 * CTL_EOL.
	 */
	if ((error = sysctl_destroyv(NULL, CTL_NET, AF_LINK, tap_node,
	    device_unit(sc->sc_dev), CTL_EOL)) != 0)
		aprint_error_dev(self,
		    "sysctl_destroyv returned %d, ignoring\n", error);
	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_removeall(&sc->sc_im);
	seldestroy(&sc->sc_rsel);
	mutex_destroy(&sc->sc_lock);
	cv_destroy(&sc->sc_cv);

	pmf_device_deregister(self);

	return 0;
}

/*
 * This function is called by the ifmedia layer to notify the driver
 * that the user requested a media change.  A real driver would
 * reconfigure the hardware.
 */
static int
tap_mediachange(struct ifnet *ifp)
{
	return 0;
}

/*
 * Here the user asks for the currently used media.
 */
static void
tap_mediastatus(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;

	imr->ifm_active = sc->sc_im.ifm_cur->ifm_media;
}

/*
 * This is the function where we SEND packets.
 *
 * There is no 'receive' equivalent.  A typical driver will get
 * interrupts from the hardware, and from there will inject new packets
 * into the network stack.
 *
 * Once handled, a packet must be freed.  A real driver might not be able
 * to fit all the pending packets into the hardware, and is allowed to
 * return before having sent all the packets.  It should then use the
 * if_flags flag IFF_OACTIVE to notify the upper layer.
 *
 * There are also other flags one should check, such as IFF_PAUSE.
 *
 * It is our duty to make packets available to BPF listeners.
 *
 * You should be aware that this function is called by the Ethernet layer
 * at splnet().
 *
 * When the device is opened, we have to pass the packet(s) to the
 * userland.  For that we stay in OACTIVE mode while the userland gets
 * the packets, and we send a signal to the processes waiting to read.
 *
 * cv_broadcast(&sc->sc_cv) is the counterpart to the cv_wait_sig() call
 * in tap_dev_read, while selnotify() is used for kevent(2) and
 * poll(2) (which includes select(2)) listeners.
 */
static void
tap_start(struct ifnet *ifp)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
	struct mbuf *m0;

	mutex_enter(&sc->sc_lock);
	if ((sc->sc_flags & TAP_INUSE) == 0) {
		/* Simply drop packets */
		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			if (m0 == NULL)
				goto done;

			ifp->if_opackets++;
			bpf_mtap(ifp, m0, BPF_D_OUT);

			m_freem(m0);
		}
	} else if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		ifp->if_flags |= IFF_OACTIVE;
		cv_broadcast(&sc->sc_cv);
		selnotify(&sc->sc_rsel, 0, 1);
		if (sc->sc_flags & TAP_ASYNCIO)
			softint_schedule(sc->sc_sih);
	}
done:
	mutex_exit(&sc->sc_lock);
}

static void
tap_softintr(void *cookie)
{
	struct tap_softc *sc;
	struct ifnet *ifp;
	int a, b;

	sc = cookie;

	if (sc->sc_flags & TAP_ASYNCIO) {
		ifp = &sc->sc_ec.ec_if;
		if (ifp->if_flags & IFF_RUNNING) {
			a = POLL_IN;
			b = POLLIN | POLLRDNORM;
		} else {
			a = POLL_HUP;
			b = 0;
		}
		fownsignal(sc->sc_pgid, SIGIO, a, b, NULL);
	}
}

/*
 * A typical driver will only contain the following handlers for
 * ioctl calls, except SIOCSIFPHYADDR.
 * The latter is a hack I used to set the Ethernet address of the
 * faked device.
 *
 * Note that both ifmedia_ioctl() and ether_ioctl() have to be
 * called under splnet().
 */
static int
tap_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFPHYADDR:
		error = tap_lifaddr(ifp, cmd, (struct ifaliasreq *)data);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET)
			error = 0;
		break;
	}

	splx(s);

	return error;
}

/*
 * Helper function to set the Ethernet address.  This has been replaced
 * by the generic SIOCALIFADDR ioctl on a PF_LINK socket.
 */
static int
tap_lifaddr(struct ifnet *ifp, u_long cmd, struct ifaliasreq *ifra)
{
	const struct sockaddr *sa = &ifra->ifra_addr;

	if (sa->sa_family != AF_LINK)
		return EINVAL;

	if_set_sadl(ifp, sa->sa_data, ETHER_ADDR_LEN, false);

	return 0;
}

/*
 * _init() would typically be called when an interface goes up,
 * meaning it should configure itself into the state in which it
 * can send packets.
 */
static int
tap_init(struct ifnet *ifp)
{
	ifp->if_flags |= IFF_RUNNING;

	tap_start(ifp);

	return 0;
}

/*
 * _stop() is called when an interface goes down.  It is our
 * responsibility to validate that state by clearing the
 * IFF_RUNNING flag.
 *
 * We have to wake up all the sleeping processes to have the pending
 * read requests cancelled.
 */
static void
tap_stop(struct ifnet *ifp, int disable)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;

	mutex_enter(&sc->sc_lock);
	ifp->if_flags &= ~IFF_RUNNING;
	cv_broadcast(&sc->sc_cv);
	selnotify(&sc->sc_rsel, 0, 1);
	if (sc->sc_flags & TAP_ASYNCIO)
		softint_schedule(sc->sc_sih);
	mutex_exit(&sc->sc_lock);
}

/*
 * The 'create' command of ifconfig can be used to create
 * any numbered instance of a given device.  Thus we have to
 * make sure we have enough room in cd_devs to create the
 * user-specified instance.  config_attach_pseudo will do this
 * for us.
 */
static int
tap_clone_create(struct if_clone *ifc, int unit)
{

	if (tap_clone_creator(unit) == NULL) {
		aprint_error("%s%d: unable to attach an instance\n",
		    tap_cd.cd_name, unit);
		return ENXIO;
	}
	atomic_inc_uint(&tap_count);
	return 0;
}

/*
 * tap(4) can be cloned in two ways:
 *   using 'ifconfig tap0 create', which will use the network
 *     interface cloning API, and call tap_clone_create above.
 *   opening the cloning device node, whose minor number is TAP_CLONER.
 *     See below for an explanation of how that part works.
 */
static struct tap_softc *
tap_clone_creator(int unit)
{
	cfdata_t cf;

	cf = kmem_alloc(sizeof(*cf), KM_SLEEP);
	cf->cf_name = tap_cd.cd_name;
	cf->cf_atname = tap_ca.ca_name;
	if (unit == -1) {
		/* let autoconf find the first free one */
		cf->cf_unit = 0;
		cf->cf_fstate = FSTATE_STAR;
	} else {
		cf->cf_unit = unit;
		cf->cf_fstate = FSTATE_NOTFOUND;
	}

	return device_private(config_attach_pseudo(cf));
}

/*
 * The clean design of if_clone and autoconf(9) makes that part
 * really straightforward.  The second argument of config_detach
 * means neither QUIET nor FORCED.
 */
static int
tap_clone_destroy(struct ifnet *ifp)
{
	struct tap_softc *sc = ifp->if_softc;
	int error = tap_clone_destroyer(sc->sc_dev);

	if (error == 0)
		atomic_dec_uint(&tap_count);
	return error;
}

int
tap_clone_destroyer(device_t dev)
{
	cfdata_t cf = device_cfdata(dev);
	int error;

	if ((error = config_detach(dev, 0)) != 0)
		aprint_error_dev(dev, "unable to detach instance\n");
	kmem_free(cf, sizeof(*cf));

	return error;
}

/*
 * tap(4) is a bit of a hybrid device.  It can be used in two different
 * ways:
 * 1. ifconfig tapN create, then use /dev/tapN to read/write off it.
 * 2. open /dev/tap, get a new interface created and read/write off it.
 *    That interface is destroyed when the process that had it created exits.
 *
 * The first way is managed by the cdevsw structure, and you access
 * interfaces through a (major, minor) mapping: tap4 is obtained by the
 * minor number 4.  The entry points for the cdevsw interface are prefixed
 * by tap_cdev_.
 *
 * The second way is the so-called "cloning" device.  It's a special minor
 * number (chosen as the maximal number, to allow as many tap devices as
 * possible).  The user first opens the cloner (e.g., /dev/tap), and that
 * call ends up in tap_cdev_open.  The actual place where it is handled is
 * tap_dev_cloner.
 *
 * A tap device cannot be opened more than once at a time, so the cdevsw
 * part of open() does nothing but noting that the interface is being used
 * and hence ready to actually handle packets.
 */

static int
tap_cdev_open(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct tap_softc *sc;

	if (minor(dev) == TAP_CLONER)
		return tap_dev_cloner(l);

	sc = device_lookup_private(&tap_cd, minor(dev));
	if (sc == NULL)
		return ENXIO;

	/* The device can only be opened once */
	if (sc->sc_flags & TAP_INUSE)
		return EBUSY;
	sc->sc_flags |= TAP_INUSE;
	return 0;
}

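/*
 * Illustrative userland sketch (not part of the driver): the two ways of
 * getting hold of a tap instance described above could look roughly like
 * this.  Error handling is omitted and the usual /dev/tapN and /dev/tap
 * device nodes are assumed to exist.
 *
 *	#include <sys/socket.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/sockio.h>
 *	#include <net/if.h>
 *	#include <net/if_tap.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	// Way 1: create tap0 through the if_clone API, then open its node.
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "tap0", IFNAMSIZ);
 *	ioctl(s, SIOCIFCREATE, &ifr);	// same effect as "ifconfig tap0 create"
 *	int fd = open("/dev/tap0", O_RDWR);
 *
 *	// Way 2: open the cloner and ask which interface was created for us.
 *	int cfd = open("/dev/tap", O_RDWR);
 *	ioctl(cfd, TAPGIFNAME, &ifr);	// handled in tap_dev_ioctl() below
 *	// ifr.ifr_name now holds e.g. "tap3"; closing cfd destroys it.
 */
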
/*
 * There are several kinds of cloning devices, and the simplest is the one
 * tap(4) uses.  What it does is replace the file descriptor with a new one,
 * with its own fileops structure (which maps to the various read, write,
 * ioctl functions).  It starts allocating a new file descriptor with
 * fd_allocfile, then actually creates the new tap device.
 *
 * Once those two steps are successful, we can re-wire the existing file
 * descriptor to its new self.  This is done with fd_clone(): it fills the
 * fp structure as needed (notably f_devunit gets filled with the fifth
 * parameter passed, the unit of the tap device, which will later allow us
 * to identify the device), and returns EMOVEFD.
 *
 * That magic value is interpreted by sys_open() which then replaces the
 * current file descriptor by the new one (through a magic member of struct
 * lwp, l_dupfd).
 *
 * The tap device is flagged as being busy since it otherwise could be
 * externally accessed through the corresponding device node with the
 * cdevsw interface.
 */

static int
tap_dev_cloner(struct lwp *l)
{
	struct tap_softc *sc;
	file_t *fp;
	int error, fd;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;

	if ((sc = tap_clone_creator(-1)) == NULL) {
		fd_abort(curproc, fp, fd);
		return ENXIO;
	}

	sc->sc_flags |= TAP_INUSE;

	return fd_clone(fp, fd, FREAD | FWRITE, &tap_fileops,
	    (void *)(intptr_t)device_unit(sc->sc_dev));
}

/*
 * While all other operations (read, write, ioctl, poll and kqfilter) are
 * really the same whether we are in cdevsw or fileops mode, the close()
 * function is slightly different in the two cases.
 *
 * As for the others, the core of it is shared in tap_dev_close.  What
 * it does is sufficient for the cdevsw interface, but the cloning interface
 * needs another thing: the interface is destroyed when the process that
 * created it closes it.
 */
static int
tap_cdev_close(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct tap_softc *sc = device_lookup_private(&tap_cd, minor(dev));

	if (sc == NULL)
		return ENXIO;

	return tap_dev_close(sc);
}

/*
 * It might happen that the administrator used ifconfig to externally
 * destroy the interface.  In that case, tap_fops_close will be called while
 * tap_detach is already happening.  If we called it again from here, we
 * would deadlock.  TAP_GOING ensures that this situation doesn't happen.
 */
static int
tap_fops_close(file_t *fp)
{
	struct tap_softc *sc;
	int unit = fp->f_devunit;
	int error;

	sc = device_lookup_private(&tap_cd, unit);
	if (sc == NULL)
		return ENXIO;

	/* tap_dev_close currently always succeeds, but it might not
	 * always be the case. */
	KERNEL_LOCK(1, NULL);
	if ((error = tap_dev_close(sc)) != 0) {
		KERNEL_UNLOCK_ONE(NULL);
		return error;
	}

	/* Destroy the device now that it is no longer useful,
	 * unless it's already being destroyed. */
	if ((sc->sc_flags & TAP_GOING) != 0) {
		KERNEL_UNLOCK_ONE(NULL);
		return 0;
	}

	error = tap_clone_destroyer(sc->sc_dev);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}

static int
tap_dev_close(struct tap_softc *sc)
{
	struct ifnet *ifp;
	int s;

	s = splnet();
	/* Let tap_start handle packets again */
	ifp = &sc->sc_ec.ec_if;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Purge output queue */
	if (!(IFQ_IS_EMPTY(&ifp->if_snd))) {
		struct mbuf *m;

		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;

			ifp->if_opackets++;
			bpf_mtap(ifp, m, BPF_D_OUT);
			m_freem(m);
		}
	}
	splx(s);

	if (sc->sc_sih != NULL) {
		softint_disestablish(sc->sc_sih);
		sc->sc_sih = NULL;
	}
	sc->sc_flags &= ~(TAP_INUSE | TAP_ASYNCIO);

	return 0;
}

static int
tap_cdev_read(dev_t dev, struct uio *uio, int flags)
{

	return tap_dev_read(minor(dev), uio, flags);
}

static int
tap_fops_read(file_t *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	int error;

	KERNEL_LOCK(1, NULL);
	error = tap_dev_read(fp->f_devunit, uio, flags);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}

static int
tap_dev_read(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc = device_lookup_private(&tap_cd, unit);
	struct ifnet *ifp;
	struct mbuf *m, *n;
	int error = 0;

	if (sc == NULL)
		return ENXIO;

	getnanotime(&sc->sc_atime);

	ifp = &sc->sc_ec.ec_if;
	if ((ifp->if_flags & IFF_UP) == 0)
		return EHOSTDOWN;

	/* In the TAP_NBIO case, we have to make sure we won't be sleeping */
	if ((sc->sc_flags & TAP_NBIO) != 0) {
		if (!mutex_tryenter(&sc->sc_lock))
			return EWOULDBLOCK;
	} else
		mutex_enter(&sc->sc_lock);

	if (IFQ_IS_EMPTY(&ifp->if_snd)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		if (sc->sc_flags & TAP_NBIO)
			error = EWOULDBLOCK;
		else
			error = cv_wait_sig(&sc->sc_cv, &sc->sc_lock);

		if (error != 0) {
			mutex_exit(&sc->sc_lock);
			return error;
		}
		/* The device might have been downed */
		if ((ifp->if_flags & IFF_UP) == 0) {
			mutex_exit(&sc->sc_lock);
			return EHOSTDOWN;
		}
	}

	IFQ_DEQUEUE(&ifp->if_snd, m);
	mutex_exit(&sc->sc_lock);

	ifp->if_flags &= ~IFF_OACTIVE;
	if (m == NULL) {
		error = 0;
		goto out;
	}

	ifp->if_opackets++;
	bpf_mtap(ifp, m, BPF_D_OUT);

	/*
	 * One read is one packet.
	 */
	do {
		error = uiomove(mtod(m, void *),
		    uimin(m->m_len, uio->uio_resid), uio);
		m = n = m_free(m);
	} while (m != NULL && uio->uio_resid > 0 && error == 0);

	if (m != NULL)
		m_freem(m);

out:
	return error;
}

static int
tap_fops_stat(file_t *fp, struct stat *st)
{
	int error = 0;
	struct tap_softc *sc;
	int unit = fp->f_devunit;

	(void)memset(st, 0, sizeof(*st));

	KERNEL_LOCK(1, NULL);
	sc = device_lookup_private(&tap_cd, unit);
	if (sc == NULL) {
		error = ENXIO;
		goto out;
	}

	st->st_dev = makedev(cdevsw_lookup_major(&tap_cdevsw), unit);
	st->st_atimespec = sc->sc_atime;
	st->st_mtimespec = sc->sc_mtime;
	st->st_ctimespec = st->st_birthtimespec = sc->sc_btime;
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
out:
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}

static int
tap_cdev_write(dev_t dev, struct uio *uio, int flags)
{

	return tap_dev_write(minor(dev), uio, flags);
}

static int
tap_fops_write(file_t *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	int error;

	KERNEL_LOCK(1, NULL);
	error = tap_dev_write(fp->f_devunit, uio, flags);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}

static int
tap_dev_write(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);
	struct ifnet *ifp;
	struct mbuf *m, **mp;
	int error = 0;

	if (sc == NULL)
		return ENXIO;

	getnanotime(&sc->sc_mtime);
	ifp = &sc->sc_ec.ec_if;

	/* One write, one packet, that's the rule */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		ifp->if_ierrors++;
		return ENOBUFS;
	}
	m->m_pkthdr.len = uio->uio_resid;

	mp = &m;
	while (error == 0 && uio->uio_resid > 0) {
		if (*mp != m) {
			MGET(*mp, M_DONTWAIT, MT_DATA);
			if (*mp == NULL) {
				error = ENOBUFS;
				break;
			}
		}
		(*mp)->m_len = uimin(MHLEN, uio->uio_resid);
		error = uiomove(mtod(*mp, void *), (*mp)->m_len, uio);
		mp = &(*mp)->m_next;
	}
	if (error) {
		ifp->if_ierrors++;
		m_freem(m);
		return error;
	}

	m_set_rcvif(m, ifp);

	if_percpuq_enqueue(ifp->if_percpuq, m);

	return 0;
}

static int
tap_cdev_ioctl(dev_t dev, u_long cmd, void *data, int flags, struct lwp *l)
{

	return tap_dev_ioctl(minor(dev), cmd, data, l);
}

static int
tap_fops_ioctl(file_t *fp, u_long cmd, void *data)
{

	return tap_dev_ioctl(fp->f_devunit, cmd, data, curlwp);
}

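/*
 * Illustrative userland sketch (not part of the driver): tap_dev_read() and
 * tap_dev_write() above deliver and accept exactly one Ethernet frame per
 * call, so a user program can simply loop on read(2)/write(2) with a buffer
 * large enough for one frame.  The descriptor is assumed to come from one
 * of the open() paths shown earlier.
 *
 *	#include <net/if_ether.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	uint8_t frame[ETHER_MAX_LEN];
 *	ssize_t len;
 *
 *	// Read one frame queued for transmission by the network stack.
 *	len = read(fd, frame, sizeof(frame));
 *
 *	// Inject one frame, which the stack will see as received on tapN.
 *	if (len > 0)
 *		(void)write(fd, frame, (size_t)len);
 */
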
static int
tap_dev_ioctl(int unit, u_long cmd, void *data, struct lwp *l)
{
	struct tap_softc *sc = device_lookup_private(&tap_cd, unit);

	if (sc == NULL)
		return ENXIO;

	switch (cmd) {
	case FIONREAD:
	    {
		struct ifnet *ifp = &sc->sc_ec.ec_if;
		struct mbuf *m;
		int s;

		s = splnet();
		IFQ_POLL(&ifp->if_snd, m);

		if (m == NULL)
			*(int *)data = 0;
		else
			*(int *)data = m->m_pkthdr.len;
		splx(s);
		return 0;
	    }
	case TIOCSPGRP:
	case FIOSETOWN:
		return fsetown(&sc->sc_pgid, cmd, data);
	case TIOCGPGRP:
	case FIOGETOWN:
		return fgetown(sc->sc_pgid, cmd, data);
	case FIOASYNC:
		if (*(int *)data) {
			if (sc->sc_sih == NULL) {
				sc->sc_sih = softint_establish(SOFTINT_CLOCK,
				    tap_softintr, sc);
				if (sc->sc_sih == NULL)
					return EBUSY; /* XXX */
			}
			sc->sc_flags |= TAP_ASYNCIO;
		} else {
			sc->sc_flags &= ~TAP_ASYNCIO;
			if (sc->sc_sih != NULL) {
				softint_disestablish(sc->sc_sih);
				sc->sc_sih = NULL;
			}
		}
		return 0;
	case FIONBIO:
		if (*(int *)data)
			sc->sc_flags |= TAP_NBIO;
		else
			sc->sc_flags &= ~TAP_NBIO;
		return 0;
	case TAPGIFNAME:
	    {
		struct ifreq *ifr = (struct ifreq *)data;
		struct ifnet *ifp = &sc->sc_ec.ec_if;

		strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
		return 0;
	    }
	default:
		return ENOTTY;
	}
}

static int
tap_cdev_poll(dev_t dev, int events, struct lwp *l)
{

	return tap_dev_poll(minor(dev), events, l);
}

static int
tap_fops_poll(file_t *fp, int events)
{

	return tap_dev_poll(fp->f_devunit, events, curlwp);
}

static int
tap_dev_poll(int unit, int events, struct lwp *l)
{
	struct tap_softc *sc = device_lookup_private(&tap_cd, unit);
	int revents = 0;

	if (sc == NULL)
		return POLLERR;

	if (events & (POLLIN | POLLRDNORM)) {
		struct ifnet *ifp = &sc->sc_ec.ec_if;
		struct mbuf *m;
		int s;

		s = splnet();
		IFQ_POLL(&ifp->if_snd, m);

		if (m != NULL)
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			mutex_spin_enter(&sc->sc_lock);
			selrecord(l, &sc->sc_rsel);
			mutex_spin_exit(&sc->sc_lock);
		}
		splx(s);
	}
	revents |= events & (POLLOUT | POLLWRNORM);

	return revents;
}

static struct filterops tap_read_filterops = { 1, NULL, tap_kqdetach,
	tap_kqread };
static struct filterops tap_seltrue_filterops = { 1, NULL, tap_kqdetach,
	filt_seltrue };

static int
tap_cdev_kqfilter(dev_t dev, struct knote *kn)
{

	return tap_dev_kqfilter(minor(dev), kn);
}

static int
tap_fops_kqfilter(file_t *fp, struct knote *kn)
{

	return tap_dev_kqfilter(fp->f_devunit, kn);
}

static int
tap_dev_kqfilter(int unit, struct knote *kn)
{
	struct tap_softc *sc = device_lookup_private(&tap_cd, unit);

	if (sc == NULL)
		return ENXIO;

	KERNEL_LOCK(1, NULL);
	switch(kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tap_read_filterops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &tap_seltrue_filterops;
		break;
	default:
		KERNEL_UNLOCK_ONE(NULL);
		return EINVAL;
	}

	kn->kn_hook = sc;
	mutex_spin_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(&sc->sc_rsel.sel_klist, kn, kn_selnext);
	mutex_spin_exit(&sc->sc_lock);
	KERNEL_UNLOCK_ONE(NULL);
	return 0;
}

static void
tap_kqdetach(struct knote *kn)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;

	KERNEL_LOCK(1, NULL);
	mutex_spin_enter(&sc->sc_lock);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	mutex_spin_exit(&sc->sc_lock);
	KERNEL_UNLOCK_ONE(NULL);
}

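/*
 * Illustrative userland sketch (not part of the driver): the EVFILT_READ
 * filter registered in tap_dev_kqfilter() above, and evaluated by
 * tap_kqread() below, lets a program wait for a pending frame with
 * kevent(2).  Error handling is omitted.
 *
 *	#include <sys/event.h>
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	// register the tap descriptor
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	// wait for one frame
 *	// ev.data now holds the length of the pending frame (see kn_data).
 */
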
static int
tap_kqread(struct knote *kn, long hint)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int s, rv;

	KERNEL_LOCK(1, NULL);
	s = splnet();
	IFQ_POLL(&ifp->if_snd, m);

	if (m == NULL)
		kn->kn_data = 0;
	else
		kn->kn_data = m->m_pkthdr.len;
	splx(s);
	rv = (kn->kn_data != 0 ? 1 : 0);
	KERNEL_UNLOCK_ONE(NULL);
	return rv;
}

/*
 * sysctl management routines
 * You can set the address of an interface through:
 * net.link.tap.tap<number>
 *
 * Note the consistent use of the sysctl log (tap_sysctl_clog) in order
 * to use sysctl_teardown at unload time.
 *
 * In the kernel you will find a lot of SYSCTL_SETUP blocks.  Those
 * blocks register a function in a special section of the kernel
 * (called a link set) which is used at init_sysctl() time to cycle
 * through all those functions to create the kernel's sysctl tree.
 *
 * It is not possible to use link sets in a module, so the
 * easiest is to simply call our own setup routine at load time.
 *
 * In the SYSCTL_SETUP blocks you find in the kernel, nodes have the
 * CTLFLAG_PERMANENT flag, meaning they cannot be removed.  Once the
 * whole kernel sysctl tree is built, it is not possible to add any
 * permanent node.
 *
 * It should be noted that we're not saving the sysctlnode pointer
 * we are returned when creating the "tap" node.  That structure
 * cannot be trusted once out of the calling function, as it might
 * get reused.  So we just save the MIB number, and always give the
 * full path starting from the root for later calls to sysctl_createv
 * and sysctl_destroyv.
 */
static void
sysctl_tap_setup(struct sysctllog **clog)
{
	const struct sysctlnode *node;
	int error = 0;

	if ((error = sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "link", NULL,
	    NULL, 0, NULL, 0,
	    CTL_NET, AF_LINK, CTL_EOL)) != 0)
		return;

	/*
	 * The first four parameters of sysctl_createv are for management.
	 *
	 * The four that follow, here starting with a '0' for the flags,
	 * describe the node.
	 *
	 * The next series of four set its value, through various possible
	 * means.
	 *
	 * Last but not least, the path to the node is described.  That path
	 * is relative to the given root (third argument).  Here we're
	 * starting from the root.
	 */
	if ((error = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "tap", NULL,
	    NULL, 0, NULL, 0,
	    CTL_NET, AF_LINK, CTL_CREATE, CTL_EOL)) != 0)
		return;
	tap_node = node->sysctl_num;
}

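/*
 * Illustrative userland sketch (not part of the driver): the
 * net.link.tap.<iface> node created above, and handled by
 * tap_sysctl_handler() below, reads and writes the link-level address
 * as a string.  The interface name "tap0" is assumed.
 *
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	char addr[18];
 *	size_t len = sizeof(addr);
 *	sysctlbyname("net.link.tap.tap0", addr, &len, NULL, 0);
 *	printf("current address: %s\n", addr);
 *
 *	const char *new = "f2:0b:a4:00:00:01";
 *	sysctlbyname("net.link.tap.tap0", NULL, NULL, new, strlen(new) + 1);
 */
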
/*
 * The helper functions make Andrew Brown's interface really
 * shine.  They make it possible to create a value on the fly whether
 * the sysctl value is read or written.
 *
 * As shown as an example in the man page, the first step is to
 * create a copy of the node to have sysctl_lookup work on it.
 *
 * Here, we have more work to do than just a copy, since we have
 * to create the string.  The first step is to collect the actual
 * value of the node, which is a convenient pointer to the softc
 * of the interface.  From there we create the string and use it
 * as the value, but only for the *copy* of the node.
 *
 * Then we let sysctl_lookup do the magic, which consists in
 * setting oldp and newp as required by the operation.  When the
 * value is read, that means that the string will be copied to
 * the user, and when it is written, the new value will be copied
 * over in the addr array.
 *
 * If newp is NULL, the user was reading the value, so we don't
 * have anything else to do.  If a new value was written, we
 * have to check it.
 *
 * If it is incorrect, we can return an error and leave 'node' as
 * it is:  since it is a copy of the actual node, the change will
 * be forgotten.
 *
 * Upon a correct input, we commit the change to the ifnet
 * structure of our interface.
 */
static int
tap_sysctl_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct tap_softc *sc;
	struct ifnet *ifp;
	int error;
	size_t len;
	char addr[3 * ETHER_ADDR_LEN];
	uint8_t enaddr[ETHER_ADDR_LEN];

	node = *rnode;
	sc = node.sysctl_data;
	ifp = &sc->sc_ec.ec_if;
	(void)ether_snprintf(addr, sizeof(addr), CLLADDR(ifp->if_sadl));
	node.sysctl_data = addr;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	len = strlen(addr);
	if (len < 11 || len > 17)
		return EINVAL;

	/* Commit change */
	if (ether_aton_r(enaddr, sizeof(enaddr), addr) != 0)
		return EINVAL;
	if_set_sadl(ifp, enaddr, ETHER_ADDR_LEN, false);
	return error;
}

/*
 * Module infrastructure
 */
#include "if_module.h"

IF_MODULE(MODULE_CLASS_DRIVER, tap, NULL)