/*	$NetBSD: if_tun.c,v 1.177 2024/09/18 23:20:20 rin Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 */

/*
 * tun - tunnel software network interface.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.177 2024/09/18 23:20:20 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <net/if_tun.h>

#include "ioconf.h"

#define TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

extern int ifqmaxlen;

static LIST_HEAD(, tun_softc) tun_softc_list;
static LIST_HEAD(, tun_softc) tunz_softc_list;
static kmutex_t tun_softc_lock;

static int	tun_ioctl(struct ifnet *, u_long, void *);
static int	tun_output(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, const struct rtentry *rt);
static int	tun_clone_create(struct if_clone *, int);
static int	tun_clone_destroy(struct ifnet *);

static struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);

static void	tunattach0(struct tun_softc *);
static void	tun_enable(struct tun_softc *, const struct ifaddr *);
static void	tun_i_softintr(void *);
static void	tun_o_softintr(void *);
#ifdef ALTQ
static void	tunstart(struct ifnet *);
#endif
static struct tun_softc *tun_find_unit(dev_t);
static struct tun_softc *tun_find_zunit(int);

static dev_type_open(tunopen);
static dev_type_close(tunclose);
static dev_type_read(tunread);
static dev_type_write(tunwrite);
static dev_type_ioctl(tunioctl);
static dev_type_poll(tunpoll);
static dev_type_kqfilter(tunkqfilter);

const struct cdevsw tun_cdevsw = {
	.d_open = tunopen,
	.d_close = tunclose,
	.d_read = tunread,
	.d_write = tunwrite,
	.d_ioctl = tunioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = tunpoll,
	.d_mmap = nommap,
	.d_kqfilter = tunkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

#ifdef _MODULE
devmajor_t tun_bmajor = -1, tun_cmajor = -1;
#endif

void
tunattach(int unused)
{

	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in tuninit() below.
	 */
}
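/*
 * Illustrative only, not part of the driver: a minimal userland sketch
 * of the read/write mode of operation described at the top of this
 * file.  Error handling and interface configuration (ifconfig(8)) are
 * omitted, and the unit number is an assumption.
 *
 *	int fd = open("/dev/tun0", O_RDWR);	// tunopen() attaches tun0
 *	char pkt[TUNMTU];
 *	ssize_t n = read(fd, pkt, sizeof(pkt));	// one outbound IP packet
 *	write(fd, pkt, (size_t)n);		// inject one inbound packet
 */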

static void
tuninit(void)
{

	mutex_init(&tun_softc_lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&tun_softc_list);
	LIST_INIT(&tunz_softc_list);
	if_clone_attach(&tun_cloner);
#ifdef _MODULE
	devsw_attach("tun", NULL, &tun_bmajor, &tun_cdevsw, &tun_cmajor);
#endif
}

static int
tundetach(void)
{

	if_clone_detach(&tun_cloner);
#ifdef _MODULE
	devsw_detach(NULL, &tun_cdevsw);
#endif

	if (!LIST_EMPTY(&tun_softc_list) || !LIST_EMPTY(&tunz_softc_list)) {
#ifdef _MODULE
		devsw_attach("tun", NULL, &tun_bmajor, &tun_cdevsw, &tun_cmajor);
#endif
		if_clone_attach(&tun_cloner);
		return EBUSY;
	}

	mutex_destroy(&tun_softc_lock);

	return 0;
}

/*
 * Find driver instance from dev_t.
 * Returns with tp locked (if found).
 */
static struct tun_softc *
tun_find_unit(dev_t dev)
{
	struct tun_softc *tp;
	int unit = minor(dev);

	mutex_enter(&tun_softc_lock);
	LIST_FOREACH(tp, &tun_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		mutex_enter(&tp->tun_lock);
	mutex_exit(&tun_softc_lock);

	return tp;
}

/*
 * Find zombie driver instance by unit number.
 * Remove tp from list and return it unlocked (if found).
 */
static struct tun_softc *
tun_find_zunit(int unit)
{
	struct tun_softc *tp;

	mutex_enter(&tun_softc_lock);
	LIST_FOREACH(tp, &tunz_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		LIST_REMOVE(tp, tun_list);
	mutex_exit(&tun_softc_lock);
	KASSERTMSG(!tp || (tp->tun_flags & (TUN_INITED|TUN_OPEN)) == TUN_OPEN,
	    "tun%d: inconsistent flags: %x", unit, tp->tun_flags);

	return tp;
}

static void
tun_init(struct tun_softc *tp, int unit)
{

	tp->tun_unit = unit;
	mutex_init(&tp->tun_lock, MUTEX_DEFAULT, IPL_SOFTNET);
	cv_init(&tp->tun_cv, "tunread");
	selinit(&tp->tun_rsel);
	selinit(&tp->tun_wsel);

	tp->tun_osih = softint_establish(SOFTINT_CLOCK, tun_o_softintr, tp);
	tp->tun_isih = softint_establish(SOFTINT_CLOCK, tun_i_softintr, tp);
}

static void
tun_fini(struct tun_softc *tp)
{

	softint_disestablish(tp->tun_isih);
	softint_disestablish(tp->tun_osih);

	seldestroy(&tp->tun_wsel);
	seldestroy(&tp->tun_rsel);
	mutex_destroy(&tp->tun_lock);
	cv_destroy(&tp->tun_cv);
}

static struct tun_softc *
tun_alloc(int unit)
{
	struct tun_softc *tp;

	tp = kmem_zalloc(sizeof(*tp), KM_SLEEP);
	tun_init(tp, unit);

	return tp;
}

static void
tun_recycle(struct tun_softc *tp)
{

	memset(&tp->tun_if, 0, sizeof(struct ifnet));	/* XXX ??? */
}

static void
tun_free(struct tun_softc *tp)
{

	tun_fini(tp);
	kmem_free(tp, sizeof(*tp));
}
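/*
 * Illustrative only, not part of the driver: the create/destroy
 * handlers below back the "tun" cloner attached in tuninit(), so an
 * interface can also be created without opening /dev/tunN, e.g. with
 * "ifconfig tun0 create" or, roughly, from C (sketch, error handling
 * omitted, unit number assumed):
 *
 *	struct ifreq ifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "tun0", sizeof(ifr.ifr_name));
 *	ioctl(s, SIOCIFCREATE, &ifr);	// -> tun_clone_create()
 *	ioctl(s, SIOCIFDESTROY, &ifr);	// -> tun_clone_destroy()
 */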
static int
tun_clone_create(struct if_clone *ifc, int unit)
{
	struct tun_softc *tp;

	if ((tp = tun_find_zunit(unit)) == NULL) {
		tp = tun_alloc(unit);
	} else {
		tun_recycle(tp);
	}

	if_initname(&tp->tun_if, ifc->ifc_name, unit);
	tunattach0(tp);
	tp->tun_flags |= TUN_INITED;

	mutex_enter(&tun_softc_lock);
	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
	mutex_exit(&tun_softc_lock);

	return 0;
}

static void
tunattach0(struct tun_softc *tp)
{
	struct ifnet *ifp;

	ifp = &tp->tun_if;
	ifp->if_softc = tp;
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tun_ioctl;
	ifp->if_output = tun_output;
#ifdef ALTQ
	ifp->if_start = tunstart;
#endif
	ifp->if_flags = IFF_POINTOPOINT;
	ifp->if_type = IFT_TUNNEL;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_dlt = DLT_NULL;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	ifp->if_link_state = LINK_STATE_DOWN;
	if_alloc_sadl(ifp);
	bpf_attach(ifp, DLT_NULL, sizeof(uint32_t));
}

static int
tun_clone_destroy(struct ifnet *ifp)
{
	struct tun_softc *tp = (void *)ifp;
	bool zombie = false;

	IF_PURGE(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	mutex_enter(&tun_softc_lock);
	mutex_enter(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	if (tp->tun_flags & TUN_OPEN) {
		/* Hang on to storage until last close. */
		tp->tun_flags &= ~TUN_INITED;
		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
		zombie = true;
	}
	mutex_exit(&tun_softc_lock);

	cv_broadcast(&tp->tun_cv);
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);
	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);
	mutex_exit(&tp->tun_lock);

	bpf_detach(ifp);
	if_detach(ifp);

	if (!zombie) {
		tun_free(tp);
	}

	return 0;
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
static int
tunopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ifnet *ifp;
	struct tun_softc *tp;
	int error;

	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE_TUN,
	    KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD, NULL, NULL, NULL);
	if (error)
		return error;

	tp = tun_find_unit(dev);

	if (tp == NULL) {
		(void)tun_clone_create(&tun_cloner, minor(dev));
		tp = tun_find_unit(dev);
		if (tp == NULL) {
			return ENXIO;
		}
	}

	if (tp->tun_flags & TUN_OPEN) {
		mutex_exit(&tp->tun_lock);
		return EBUSY;
	}

	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);
	if_link_state_change(ifp, LINK_STATE_UP);

	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet *ifp;

	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
		/* interface was "destroyed" before the close */
		tun_free(tp);
		return 0;
	}

	if ((tp = tun_find_unit(dev)) == NULL)
		goto out_nolock;

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	tp->tun_pgid = 0;
	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	mutex_exit(&tp->tun_lock);

	/*
	 * junk all pending output
	 */
	IFQ_PURGE(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			IFADDR_READER_FOREACH(ifa, ifp) {
#if defined(INET) || defined(INET6)
				if (ifa->ifa_addr->sa_family == AF_INET ||
				    ifa->ifa_addr->sa_family == AF_INET6) {
					rtinit(ifa, (int)RTM_DELETE,
					    tp->tun_flags & TUN_DSTADDR
					    ? RTF_HOST
					    : 0);
				}
#endif
			}
		}
	}

	if_link_state_change(ifp, LINK_STATE_DOWN);

out_nolock:
	return 0;
}

static void
tun_enable(struct tun_softc *tp, const struct ifaddr *ifa)
{
	struct ifnet *ifp = &tp->tun_if;

	TUNDEBUG("%s: %s\n", __func__, ifp->if_xname);

	mutex_enter(&tp->tun_lock);
	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);

	switch (ifa->ifa_addr->sa_family) {
#ifdef INET
	case AF_INET: {
		struct sockaddr_in *sin;

		sin = satosin(ifa->ifa_addr);
		if (sin && sin->sin_addr.s_addr)
			tp->tun_flags |= TUN_IASET;

		if (ifp->if_flags & IFF_POINTOPOINT) {
			sin = satosin(ifa->ifa_dstaddr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_DSTADDR;
		}
		break;
	}
#endif
#ifdef INET6
	case AF_INET6: {
		struct sockaddr_in6 *sin;

		sin = satosin6(ifa->ifa_addr);
		if (!IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
			tp->tun_flags |= TUN_IASET;

		if (ifp->if_flags & IFF_POINTOPOINT) {
			sin = satosin6(ifa->ifa_dstaddr);
			if (sin && !IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
				tp->tun_flags |= TUN_DSTADDR;
		} else
			tp->tun_flags &= ~TUN_DSTADDR;
		break;
	}
#endif /* INET6 */
	default:
		break;
	}
	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	mutex_exit(&tp->tun_lock);
}

/*
 * Process an ioctl request.
 */
static int
tun_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		tun_enable(tp, ifa);
		ifa->ifa_rtrequest = p2p_rtrequest;
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == NULL) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	return error;
}

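/*
 * Illustrative only, not part of the driver: tun_ioctl() above accepts
 * MTU values in the range [576, TUNMTU].  A userland sketch (error
 * handling omitted; the interface name and MTU value are assumptions):
 *
 *	struct ifreq ifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "tun0", sizeof(ifr.ifr_name));
 *	ifr.ifr_mtu = 1400;		// anything in [576, TUNMTU]
 *	ioctl(s, SIOCSIFMTU, &ifr);	// -> tun_ioctl(SIOCSIFMTU)
 */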
552 */ 553 static int 554 tun_output(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst, 555 const struct rtentry *rt) 556 { 557 struct tun_softc *tp = ifp->if_softc; 558 int error; 559 #if defined(INET) || defined(INET6) 560 int mlen; 561 uint32_t *af; 562 #endif 563 564 mutex_enter(&tp->tun_lock); 565 TUNDEBUG ("%s: tun_output\n", ifp->if_xname); 566 567 if ((tp->tun_flags & TUN_READY) != TUN_READY) { 568 TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, 569 tp->tun_flags); 570 error = EHOSTDOWN; 571 mutex_exit(&tp->tun_lock); 572 goto out; 573 } 574 // XXXrmind 575 mutex_exit(&tp->tun_lock); 576 577 /* 578 * if the queueing discipline needs packet classification, 579 * do it before prepending link headers. 580 */ 581 IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family); 582 583 bpf_mtap_af(ifp, dst->sa_family, m0, BPF_D_OUT); 584 585 if ((error = pfil_run_hooks(ifp->if_pfil, &m0, ifp, PFIL_OUT)) != 0) 586 goto out; 587 if (m0 == NULL) 588 goto out; 589 590 switch(dst->sa_family) { 591 #ifdef INET6 592 case AF_INET6: 593 #endif 594 #ifdef INET 595 case AF_INET: 596 #endif 597 #if defined(INET) || defined(INET6) 598 if (tp->tun_flags & TUN_PREPADDR) { 599 /* Simple link-layer header */ 600 M_PREPEND(m0, dst->sa_len, M_DONTWAIT); 601 if (m0 == NULL) { 602 IF_DROP(&ifp->if_snd); 603 error = ENOBUFS; 604 goto out; 605 } 606 memcpy(mtod(m0, char *), dst, dst->sa_len); 607 } else if (tp->tun_flags & TUN_IFHEAD) { 608 /* Prepend the address family */ 609 M_PREPEND(m0, sizeof(*af), M_DONTWAIT); 610 if (m0 == NULL) { 611 IF_DROP(&ifp->if_snd); 612 error = ENOBUFS; 613 goto out; 614 } 615 af = mtod(m0,uint32_t *); 616 *af = htonl(dst->sa_family); 617 } else { 618 #ifdef INET 619 if (dst->sa_family != AF_INET) 620 #endif 621 { 622 error = EAFNOSUPPORT; 623 goto out; 624 } 625 } 626 /* FALLTHROUGH */ 627 case AF_UNSPEC: 628 mlen = m0->m_pkthdr.len; 629 IFQ_ENQUEUE(&ifp->if_snd, m0, error); 630 if (error) { 631 if_statinc(ifp, if_collisions); 632 error = EAFNOSUPPORT; 633 m0 = NULL; 634 goto out; 635 } 636 if_statadd2(ifp, if_opackets, 1, if_obytes, mlen); 637 break; 638 #endif 639 default: 640 error = EAFNOSUPPORT; 641 goto out; 642 } 643 644 mutex_enter(&tp->tun_lock); 645 cv_broadcast(&tp->tun_cv); 646 if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid) 647 softint_schedule(tp->tun_isih); 648 selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT); 649 mutex_exit(&tp->tun_lock); 650 out: 651 if (error && m0) 652 m_freem(m0); 653 654 return error; 655 } 656 657 static void 658 tun_i_softintr(void *cookie) 659 { 660 struct tun_softc *tp = cookie; 661 662 if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid) 663 fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM, 664 NULL); 665 } 666 667 static void 668 tun_o_softintr(void *cookie) 669 { 670 struct tun_softc *tp = cookie; 671 672 if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid) 673 fownsignal(tp->tun_pgid, SIGIO, POLL_OUT, POLLOUT|POLLWRNORM, 674 NULL); 675 } 676 677 /* 678 * the cdevsw interface is now pretty minimal. 
679 */ 680 int 681 tunioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l) 682 { 683 struct tun_softc *tp; 684 int error = 0; 685 686 tp = tun_find_unit(dev); 687 688 /* interface was "destroyed" already */ 689 if (tp == NULL) { 690 return ENXIO; 691 } 692 693 switch (cmd) { 694 case TUNSDEBUG: 695 tundebug = *(int *)data; 696 break; 697 698 case TUNGDEBUG: 699 *(int *)data = tundebug; 700 break; 701 702 case TUNSIFMODE: 703 switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) { 704 case IFF_POINTOPOINT: 705 case IFF_BROADCAST: 706 if (tp->tun_if.if_flags & IFF_UP) { 707 error = EBUSY; 708 goto out; 709 } 710 tp->tun_if.if_flags &= 711 ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST); 712 tp->tun_if.if_flags |= *(int *)data; 713 break; 714 default: 715 error = EINVAL; 716 goto out; 717 } 718 break; 719 720 case TUNSLMODE: 721 if (*(int *)data) { 722 tp->tun_flags |= TUN_PREPADDR; 723 tp->tun_flags &= ~TUN_IFHEAD; 724 } else 725 tp->tun_flags &= ~TUN_PREPADDR; 726 break; 727 728 case TUNSIFHEAD: 729 if (*(int *)data) { 730 tp->tun_flags |= TUN_IFHEAD; 731 tp->tun_flags &= ~TUN_PREPADDR; 732 } else 733 tp->tun_flags &= ~TUN_IFHEAD; 734 break; 735 736 case TUNGIFHEAD: 737 *(int *)data = (tp->tun_flags & TUN_IFHEAD); 738 break; 739 740 case FIONBIO: 741 if (*(int *)data) 742 tp->tun_flags |= TUN_NBIO; 743 else 744 tp->tun_flags &= ~TUN_NBIO; 745 break; 746 747 case FIOASYNC: 748 if (*(int *)data) 749 tp->tun_flags |= TUN_ASYNC; 750 else 751 tp->tun_flags &= ~TUN_ASYNC; 752 break; 753 754 case FIONREAD: 755 if (tp->tun_if.if_snd.ifq_head) 756 *(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len; 757 else 758 *(int *)data = 0; 759 break; 760 761 case TIOCSPGRP: 762 case FIOSETOWN: 763 error = fsetown(&tp->tun_pgid, cmd, data); 764 break; 765 766 case TIOCGPGRP: 767 case FIOGETOWN: 768 error = fgetown(tp->tun_pgid, cmd, data); 769 break; 770 771 default: 772 error = ENOTTY; 773 } 774 775 out: 776 mutex_exit(&tp->tun_lock); 777 778 return error; 779 } 780 781 /* 782 * The cdevsw read interface - reads a packet at a time, or at 783 * least as much of a packet as can be read. 784 */ 785 int 786 tunread(dev_t dev, struct uio *uio, int ioflag) 787 { 788 struct tun_softc *tp; 789 struct ifnet *ifp; 790 struct mbuf *m, *m0; 791 int error = 0, len; 792 793 tp = tun_find_unit(dev); 794 795 /* interface was "destroyed" already */ 796 if (tp == NULL) { 797 return ENXIO; 798 } 799 800 ifp = &tp->tun_if; 801 802 TUNDEBUG ("%s: read\n", ifp->if_xname); 803 if ((tp->tun_flags & TUN_READY) != TUN_READY) { 804 TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags); 805 error = EHOSTDOWN; 806 goto out; 807 } 808 809 do { 810 IFQ_DEQUEUE(&ifp->if_snd, m0); 811 if (m0 == 0) { 812 if (tp->tun_flags & TUN_NBIO) { 813 error = EWOULDBLOCK; 814 goto out; 815 } 816 if (cv_wait_sig(&tp->tun_cv, &tp->tun_lock)) { 817 error = EINTR; 818 goto out; 819 } 820 } 821 } while (m0 == 0); 822 823 mutex_exit(&tp->tun_lock); 824 825 /* Copy the mbuf chain */ 826 while (m0 && uio->uio_resid > 0 && error == 0) { 827 len = uimin(uio->uio_resid, m0->m_len); 828 if (len != 0) 829 error = uiomove(mtod(m0, void *), len, uio); 830 m0 = m = m_free(m0); 831 } 832 833 if (m0) { 834 TUNDEBUG("Dropping mbuf\n"); 835 m_freem(m0); 836 } 837 if (error) 838 if_statinc(ifp, if_ierrors); 839 840 return error; 841 842 out: 843 mutex_exit(&tp->tun_lock); 844 845 return error; 846 } 847 848 /* 849 * the cdevsw write interface - an atomic write is a packet - or else! 
850 */ 851 int 852 tunwrite(dev_t dev, struct uio *uio, int ioflag) 853 { 854 struct tun_softc *tp; 855 struct ifnet *ifp; 856 struct mbuf *top, **mp, *m; 857 pktqueue_t *pktq; 858 struct sockaddr dst; 859 int error = 0, tlen, mlen; 860 uint32_t family; 861 862 tp = tun_find_unit(dev); 863 if (tp == NULL) { 864 /* Interface was "destroyed" already. */ 865 return ENXIO; 866 } 867 868 /* Unlock until we've got the data */ 869 mutex_exit(&tp->tun_lock); 870 871 ifp = &tp->tun_if; 872 873 TUNDEBUG("%s: tunwrite\n", ifp->if_xname); 874 875 if (tp->tun_flags & TUN_PREPADDR) { 876 if (uio->uio_resid < sizeof(dst)) { 877 error = EIO; 878 goto out0; 879 } 880 error = uiomove((void *)&dst, sizeof(dst), uio); 881 if (error) 882 goto out0; 883 if (dst.sa_len > sizeof(dst)) { 884 /* Duh.. */ 885 int n = dst.sa_len - sizeof(dst); 886 while (n--) { 887 char discard; 888 error = uiomove(&discard, 1, uio); 889 if (error) { 890 goto out0; 891 } 892 } 893 } 894 } else if (tp->tun_flags & TUN_IFHEAD) { 895 if (uio->uio_resid < sizeof(family)){ 896 error = EIO; 897 goto out0; 898 } 899 error = uiomove((void *)&family, sizeof(family), uio); 900 if (error) 901 goto out0; 902 dst.sa_family = ntohl(family); 903 } else { 904 #ifdef INET 905 dst.sa_family = AF_INET; 906 #endif 907 } 908 909 if (uio->uio_resid == 0 || uio->uio_resid > TUNMTU) { 910 TUNDEBUG("%s: len=%lu!\n", ifp->if_xname, 911 (unsigned long)uio->uio_resid); 912 error = EIO; 913 goto out0; 914 } 915 916 switch (dst.sa_family) { 917 #ifdef INET 918 case AF_INET: 919 pktq = ip_pktq; 920 break; 921 #endif 922 #ifdef INET6 923 case AF_INET6: 924 pktq = ip6_pktq; 925 break; 926 #endif 927 default: 928 error = EAFNOSUPPORT; 929 goto out0; 930 } 931 932 tlen = uio->uio_resid; 933 934 /* get a header mbuf */ 935 MGETHDR(m, M_DONTWAIT, MT_DATA); 936 if (m == NULL) { 937 error = ENOBUFS; 938 goto out0; 939 } 940 mlen = MHLEN; 941 942 top = NULL; 943 mp = ⊤ 944 while (error == 0 && uio->uio_resid > 0) { 945 m->m_len = uimin(mlen, uio->uio_resid); 946 error = uiomove(mtod(m, void *), m->m_len, uio); 947 *mp = m; 948 mp = &m->m_next; 949 if (error == 0 && uio->uio_resid > 0) { 950 MGET(m, M_DONTWAIT, MT_DATA); 951 if (m == NULL) { 952 error = ENOBUFS; 953 break; 954 } 955 mlen = MLEN; 956 } 957 } 958 if (error) { 959 m_freem(top); 960 if_statinc(ifp, if_ierrors); 961 goto out0; 962 } 963 964 top->m_pkthdr.len = tlen; 965 m_set_rcvif(top, ifp); 966 967 bpf_mtap_af(ifp, dst.sa_family, top, BPF_D_IN); 968 969 if ((error = pfil_run_hooks(ifp->if_pfil, &top, ifp, PFIL_IN)) != 0) 970 goto out0; 971 if (top == NULL) 972 goto out0; 973 974 mutex_enter(&tp->tun_lock); 975 if ((tp->tun_flags & TUN_INITED) == 0) { 976 /* Interface was destroyed */ 977 error = ENXIO; 978 goto out; 979 } 980 kpreempt_disable(); 981 if (__predict_false(!pktq_enqueue(pktq, top, 0))) { 982 kpreempt_enable(); 983 if_statinc(ifp, if_collisions); 984 mutex_exit(&tp->tun_lock); 985 error = ENOBUFS; 986 m_freem(top); 987 goto out0; 988 } 989 kpreempt_enable(); 990 if_statadd2(ifp, if_ipackets, 1, if_ibytes, tlen); 991 out: 992 mutex_exit(&tp->tun_lock); 993 out0: 994 return error; 995 } 996 997 #ifdef ALTQ 998 /* 999 * Start packet transmission on the interface. 1000 * when the interface queue is rate-limited by ALTQ or TBR, 1001 * if_start is needed to drain packets from the queue in order 1002 * to notify readers when outgoing packets become ready. 
#ifdef ALTQ
/*
 * Start packet transmission on the interface.
 * When the interface queue is rate-limited by ALTQ or TBR,
 * if_start is needed to drain packets from the queue in order
 * to notify readers when outgoing packets become ready.
 */
static void
tunstart(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;

	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
		return;

	mutex_enter(&tp->tun_lock);
	if (!IF_IS_EMPTY(&ifp->if_snd)) {
		cv_broadcast(&tp->tun_cv);
		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
			softint_schedule(tp->tun_osih);

		selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);
	}
	mutex_exit(&tp->tun_lock);
}
#endif /* ALTQ */
/*
 * tunpoll - the poll interface, this is only useful on reads
 * really. The write detect always returns true, write never blocks
 * anyway, it either accepts the packet or drops it.
 */
int
tunpoll(dev_t dev, int events, struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int revents = 0;

	tp = tun_find_unit(dev);
	if (tp == NULL) {
		/* Interface was "destroyed" already. */
		return 0;
	}
	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);

	if (events & (POLLIN | POLLRDNORM)) {
		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
			    ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
			selrecord(l, &tp->tun_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	mutex_exit(&tp->tun_lock);

	return revents;
}

static void
filt_tunrdetach(struct knote *kn)
{
	struct tun_softc *tp = kn->kn_hook;

	mutex_enter(&tp->tun_lock);
	selremove_knote(&tp->tun_rsel, kn);
	mutex_exit(&tp->tun_lock);
}

static int
filt_tunread(struct knote *kn, long hint)
{
	struct tun_softc *tp = kn->kn_hook;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m;
	int ready;

	if (hint & NOTE_SUBMIT)
		KASSERT(mutex_owned(&tp->tun_lock));
	else
		mutex_enter(&tp->tun_lock);

	IF_POLL(&ifp->if_snd, m);
	ready = (m != NULL);
	for (kn->kn_data = 0; m != NULL; m = m->m_next)
		kn->kn_data += m->m_len;

	if (hint & NOTE_SUBMIT)
		KASSERT(mutex_owned(&tp->tun_lock));
	else
		mutex_exit(&tp->tun_lock);

	return ready;
}

static const struct filterops tunread_filtops = {
	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach = NULL,
	.f_detach = filt_tunrdetach,
	.f_event = filt_tunread,
};

int
tunkqfilter(dev_t dev, struct knote *kn)
{
	struct tun_softc *tp;
	int rv = 0;

	tp = tun_find_unit(dev);
	if (tp == NULL)
		goto out_nolock;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tunread_filtops;
		kn->kn_hook = tp;
		selrecord_knote(&tp->tun_rsel, kn);
		break;

	case EVFILT_WRITE:
		kn->kn_fop = &seltrue_filtops;
		break;

	default:
		rv = EINVAL;
		goto out;
	}

out:
	mutex_exit(&tp->tun_lock);
out_nolock:
	return rv;
}

/*
 * Module infrastructure
 */
#include "if_module.h"

IF_MODULE(MODULE_CLASS_DRIVER, tun, NULL)
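/*
 * Illustrative only, not part of the driver: tunkqfilter() above lets a
 * reader wait for packets with kqueue(2) instead of poll(2).  Userland
 * sketch (error handling omitted):
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// register interest
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	// wait; kev.data is the
 *						// size of the next packet
 */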