1 /* $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $ */ 2 3 /* 4 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk> 5 * Nottingham University 1987. 6 * 7 * This source may be freely distributed, however I would be interested 8 * in any changes that are made. 9 * 10 * This driver takes packets off the IP i/f and hands them up to a 11 * user process to have its wicked way with. This driver has it's 12 * roots in a similar driver written by Phil Cockcroft (formerly) at 13 * UCL. This driver is based much more on read/write/poll mode of 14 * operation though. 15 * 16 * $FreeBSD: src/sys/net/if_tun.c,v 1.74.2.8 2002/02/13 00:43:11 dillon Exp $ 17 * $DragonFly: src/sys/net/tun/if_tun.c,v 1.37 2008/06/05 18:06:32 swildner Exp $ 18 */ 19 20 #include "use_tun.h" 21 #include "opt_atalk.h" 22 #include "opt_inet.h" 23 #include "opt_inet6.h" 24 #include "opt_ipx.h" 25 26 #include <sys/param.h> 27 #include <sys/proc.h> 28 #include <sys/priv.h> 29 #include <sys/systm.h> 30 #include <sys/mbuf.h> 31 #include <sys/socket.h> 32 #include <sys/conf.h> 33 #include <sys/device.h> 34 #include <sys/filio.h> 35 #include <sys/sockio.h> 36 #include <sys/thread2.h> 37 #include <sys/ttycom.h> 38 #include <sys/poll.h> 39 #include <sys/signalvar.h> 40 #include <sys/filedesc.h> 41 #include <sys/kernel.h> 42 #include <sys/sysctl.h> 43 #include <sys/uio.h> 44 #include <sys/vnode.h> 45 #include <sys/malloc.h> 46 47 #include <sys/mplock2.h> 48 49 #include <net/if.h> 50 #include <net/if_types.h> 51 #include <net/ifq_var.h> 52 #include <net/netisr.h> 53 #include <net/route.h> 54 #include <sys/devfs.h> 55 56 #ifdef INET 57 #include <netinet/in.h> 58 #endif 59 60 #include <net/bpf.h> 61 62 #include "if_tunvar.h" 63 #include "if_tun.h" 64 65 static MALLOC_DEFINE(M_TUN, "tun", "Tunnel Interface"); 66 67 static void tunattach (void *); 68 PSEUDO_SET(tunattach, if_tun); 69 70 static void tuncreate (cdev_t dev); 71 72 #define TUNDEBUG if (tundebug) if_printf 73 static int tundebug = 0; 74 
SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, "");

static int	tunoutput (struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *rt);
static int	tunifioctl (struct ifnet *, u_long, caddr_t, struct ucred *);
static int	tuninit (struct ifnet *);
static void	tunstart(struct ifnet *);
static void	tun_filter_detach(struct knote *);
static int	tun_filter_read(struct knote *, long);

static	d_open_t	tunopen;
static	d_close_t	tunclose;
static	d_read_t	tunread;
static	d_write_t	tunwrite;
static	d_ioctl_t	tunioctl;
static	d_poll_t	tunpoll;
static	d_kqfilter_t	tunkqfilter;

static d_clone_t tunclone;
DEVFS_DECLARE_CLONE_BITMAP(tun);

/*
 * Number of /dev/tunN nodes created eagerly at attach time; further
 * units are created on demand by the devfs clone handler.
 */
#if NTUN <= 1
#define TUN_PREALLOCATED_UNITS	4
#else
#define TUN_PREALLOCATED_UNITS	NTUN
#endif

#define CDEV_MAJOR 52

/* Character-device entry points for /dev/tunN. */
static struct dev_ops tun_ops = {
	{ "tun", CDEV_MAJOR, D_KQFILTER },
	.d_open =	tunopen,
	.d_close =	tunclose,
	.d_read =	tunread,
	.d_write =	tunwrite,
	.d_ioctl =	tunioctl,
	.d_poll =	tunpoll,
	.d_kqfilter =	tunkqfilter
};

/*
 * Module attach: register the devfs autoclone handler and pre-create a
 * fixed number of tun device nodes.
 */
static void
tunattach(void *dummy)
{
	int i;
	make_autoclone_dev(&tun_ops, &DEVFS_CLONE_BITMAP(tun),
		tunclone, UID_UUCP, GID_DIALER, 0600, "tun");
	for (i = 0; i < TUN_PREALLOCATED_UNITS; i++) {
		make_dev(&tun_ops, i, UID_UUCP, GID_DIALER, 0600, "tun%d", i);
		/* Mark the unit allocated so the clone handler skips it. */
		devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(tun), i);
	}
	/* Doesn't need uninit because unloading is not possible, see PSEUDO_SET */
}

/*
 * devfs clone callback: allocate the lowest free unit number and create
 * the matching device node for it.
 */
static int
tunclone(struct dev_clone_args *ap)
{
	int unit;

	unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(tun), 0);
	ap->a_dev = make_only_dev(&tun_ops, unit, UID_UUCP, GID_DIALER, 0600,
	    "tun%d", unit);

	return 0;
}

/*
 * Allocate the per-unit softc and attach the network interface for the
 * given device node.  Called lazily from tunopen() on first open.
 */
static void
tuncreate(cdev_t dev)
{
	struct tun_softc *sc;
	struct ifnet *ifp;

#if 0
	dev = make_dev(&tun_ops, minor(dev),
	    UID_UUCP, GID_DIALER, 0600, "tun%d", lminor(dev));
#endif

	MALLOC(sc, struct tun_softc *, sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
	sc->tun_flags = TUN_INITED;

	ifp = &sc->tun_if;
	if_initname(ifp, "tun", lminor(dev));
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tunifioctl;
	ifp->if_output = tunoutput;
	ifp->if_start = tunstart;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PPP;
	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_softc = sc;
	if_attach(ifp, NULL);
	/* DLT_NULL: bpf taps see a 4-byte address-family header. */
	bpfattach(ifp, DLT_NULL, sizeof(u_int));
	/* Link the softc to the device so later entry points can find it. */
	dev->si_drv1 = sc;
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
static int
tunopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct ifnet	*ifp;
	struct tun_softc *tp;
	int	error;

	if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) != 0)
		return (error);

	tp = dev->si_drv1;
	if (!tp) {
		/* First open of this unit: create softc + ifnet. */
		tuncreate(dev);
		tp = dev->si_drv1;
	}
	/* Only one controlling process may hold the device open. */
	if (tp->tun_flags & TUN_OPEN)
		return EBUSY;
	tp->tun_pid = curproc->p_pid;
	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG(ifp, "open\n");
	return (0);
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
static	int
tunclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp;
	struct ifnet	*ifp;

	tp = dev->si_drv1;
	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;

	/* Junk all pending output. */
	ifq_purge(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP)
		if_down(ifp);
	ifp->if_flags &= ~IFF_RUNNING;
	/* Drop all non-link-level addresses configured on the interface. */
	if_purgeaddrs_nolink(ifp);

	/* Clear async-IO ownership and wake any selecting readers. */
	funsetown(tp->tun_sigio);
	selwakeup(&tp->tun_rsel);

	TUNDEBUG(ifp, "closed\n");
#if 0
	if (dev->si_uminor >= TUN_PREALLOCATED_UNITS) {
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(tun), dev->si_uminor);
	}
#endif
	return (0);
}

/*
 * Bring the interface up and record whether an IPv4 address has been
 * assigned (sets TUN_IASET, part of the TUN_READY condition).
 */
static int
tuninit(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;
	struct ifaddr_container *ifac;
	int error = 0;

	TUNDEBUG(ifp, "tuninit\n");

	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	getmicrotime(&ifp->if_lastchange);

	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (ifa->ifa_addr == NULL) {
			error = EFAULT;
			/* XXX: Should maybe return straight off? */
		} else {
#ifdef INET
			if (ifa->ifa_addr->sa_family == AF_INET) {
				struct sockaddr_in *si;

				si = (struct sockaddr_in *)ifa->ifa_addr;
				if (si->sin_addr.s_addr)
					tp->tun_flags |= TUN_IASET;
			}
#endif
		}
	}
	return (error);
}

/*
 * Process an ioctl request.
 *
 * MPSAFE
 */
int
tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct tun_softc *tp = ifp->if_softc;
	struct ifstat *ifs;
	int error = 0;

	switch(cmd) {
	case SIOCGIFSTATUS:
		/* Append the controlling PID to the status text, if any. */
		ifs = (struct ifstat *)data;
		if (tp->tun_pid)
			ksprintf(ifs->ascii + strlen(ifs->ascii),
			    "\tOpened by PID %d\n", tp->tun_pid);
		break;
	case SIOCSIFADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "address set, error=%d\n", error);
		break;
	case SIOCSIFDSTADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "destination address set, error=%d\n", error);
		break;
	case SIOCSIFMTU:
		/* NOTE(review): MTU is accepted unchecked here — confirm
		 * upper layers bound it before this handler runs. */
		ifp->if_mtu = ifr->ifr_mtu;
		TUNDEBUG(ifp, "mtu set\n");
		break;
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Nothing to do; accept silently. */
		break;
	default:
		error = EINVAL;
	}
	return (error);
}

/*
 * tunoutput - queue packets from higher level ready to put out.
 *
 * MPSAFE
 */
static int
tunoutput_serialized(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
		     struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int error;
	struct altq_pktattr pktattr;

	TUNDEBUG(ifp, "tunoutput\n");

	/* TUN_READY requires the device open AND an address set. */
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		m_freem (m0);
		return EHOSTDOWN;
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	ifq_classify(&ifp->if_snd, m0, dst->sa_family, &pktattr);

	/* BPF write needs to be handled specially */
	if (dst->sa_family == AF_UNSPEC) {
		/* Real AF is the leading int of the packet data. */
		dst->sa_family = *(mtod(m0, int *));
		m0->m_len -= sizeof(int);
		m0->m_pkthdr.len -= sizeof(int);
		m0->m_data += sizeof(int);
	}

	if (ifp->if_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.
		 */
		uint32_t af = dst->sa_family;

		bpf_ptap(ifp->if_bpf, m0, &af, sizeof(af));
	}

	/* prepend sockaddr? this may abort if the mbuf allocation fails */
	if (tp->tun_flags & TUN_LMODE) {
		/* allocate space for sockaddr */
		M_PREPEND(m0, dst->sa_len, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL){
			IF_DROP(&ifp->if_snd);
			ifp->if_oerrors++;
			return (ENOBUFS);
		} else {
			bcopy(dst, m0->m_data, dst->sa_len);
		}
	}

	if (tp->tun_flags & TUN_IFHEAD) {
		/* Prepend the address family */
		M_PREPEND(m0, 4, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL){
			IF_DROP(&ifp->if_snd);
			ifp->if_oerrors++;
			return ENOBUFS;
		} else
			*(u_int32_t *)m0->m_data = htonl(dst->sa_family);
	} else {
#ifdef INET
		if (dst->sa_family != AF_INET)
#endif
		{
			/* Without IFHEAD only IPv4 can be delivered. */
			m_freem(m0);
			return EAFNOSUPPORT;
		}
	}

	error = ifq_handoff(ifp, m0, &pktattr);
	if (error) {
		ifp->if_collisions++;
	} else {
		ifp->if_opackets++;
		/* Wake a reader blocked in tunread(). */
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		/* sigio/selwakeup are not MPSAFE here; take the MP lock. */
		get_mplock();
		if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
			pgsigio(tp->tun_sigio, SIGIO, 0);
		selwakeup(&tp->tun_rsel);
		rel_mplock();
	}
	return (error);
}

/*
 * if_output entry point: run tunoutput_serialized() under the
 * interface serializer.
 */
static int
tunoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
	  struct rtentry *rt)
{
	int error;

	ifnet_serialize_all(ifp);
	error = tunoutput_serialized(ifp, m0, dst, rt);
	ifnet_deserialize_all(ifp);

	return error;
}

/*
 * the ops interface is now pretty minimal.
 */
/*
 * Character-device ioctl interface: tun-specific configuration (link
 * mode, address-family header, debug) plus the standard async-I/O and
 * ownership controls (FIOASYNC, FIOSETOWN, ...).
 */
static	int
tunioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp = dev->si_drv1;
	struct tuninfo *tunp;

	switch (ap->a_cmd) {
	case TUNSIFINFO:
		/* Set interface mtu/type/baudrate from a struct tuninfo. */
		tunp = (struct tuninfo *)ap->a_data;
		if (tunp->mtu < IF_MINMTU)
			return (EINVAL);
		tp->tun_if.if_mtu = tunp->mtu;
		tp->tun_if.if_type = tunp->type;
		tp->tun_if.if_baudrate = tunp->baudrate;
		break;
	case TUNGIFINFO:
		tunp = (struct tuninfo *)ap->a_data;
		tunp->mtu = tp->tun_if.if_mtu;
		tunp->type = tp->tun_if.if_type;
		tunp->baudrate = tp->tun_if.if_baudrate;
		break;
	case TUNSDEBUG:
		tundebug = *(int *)ap->a_data;
		break;
	case TUNGDEBUG:
		*(int *)ap->a_data = tundebug;
		break;
	case TUNSLMODE:
		/* LMODE (prepend sockaddr) and IFHEAD are mutually exclusive. */
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_LMODE;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_LMODE;
		break;
	case TUNSIFHEAD:
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_LMODE;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;
	case TUNGIFHEAD:
		*(int *)ap->a_data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
		break;
	case TUNSIFMODE:
		/* deny this if UP */
		if (tp->tun_if.if_flags & IFF_UP)
			return(EBUSY);

		switch (*(int *)ap->a_data & ~IFF_MULTICAST) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			tp->tun_if.if_flags &= ~(IFF_BROADCAST|IFF_POINTOPOINT);
			tp->tun_if.if_flags |= *(int *)ap->a_data;
			break;
		default:
			return(EINVAL);
		}
		break;
	case TUNSIFPID:
		/* Claim ownership for the calling process. */
		tp->tun_pid = curproc->p_pid;
		break;
	case FIOASYNC:
		if (*(int *)ap->a_data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;
	case FIONREAD:
		/* Report the byte count of the packet at the queue head. */
		if (!ifq_is_empty(&tp->tun_if.if_snd)) {
			struct mbuf *mb;

			mb = ifq_poll(&tp->tun_if.if_snd);
			for( *(int *)ap->a_data = 0; mb != 0; mb = mb->m_next)
				*(int *)ap->a_data += mb->m_len;
		} else {
			*(int *)ap->a_data = 0;
		}
		break;
	case FIOSETOWN:
		return (fsetown(*(int *)ap->a_data, &tp->tun_sigio));

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(tp->tun_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)ap->a_data), &tp->tun_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(tp->tun_sigio);
		return (0);

	default:
		return (ENOTTY);
	}
	return (0);
}

/*
 * The ops read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
/*
 * Dequeue the next outbound packet and copy it to userland.  At most
 * one packet per read; if the buffer is shorter than the packet the
 * remainder is dropped.  Blocks unless IO_NDELAY was requested.
 */
static	int
tunread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	struct mbuf	*m0;
	int		error=0, len;

	TUNDEBUG(ifp, "read\n");
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		return EHOSTDOWN;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	ifnet_serialize_all(ifp);

	/* Sleep until a packet appears on the send queue. */
	while ((m0 = ifq_dequeue(&ifp->if_snd, NULL)) == NULL) {
		if (ap->a_ioflag & IO_NDELAY) {
			ifnet_deserialize_all(ifp);
			return EWOULDBLOCK;
		}
		/*
		 * NOTE(review): RWAIT is set before dropping the
		 * serializer; a writer's wakeup() between the
		 * deserialize and the tsleep appears to rely on the
		 * tsleep/wakeup protocol — confirm no lost-wakeup
		 * window exists here.
		 */
		tp->tun_flags |= TUN_RWAIT;
		ifnet_deserialize_all(ifp);
		if ((error = tsleep(tp, PCATCH, "tunread", 0)) != 0)
			return error;
		ifnet_serialize_all(ifp);
	}

	ifnet_deserialize_all(ifp);

	/* Copy out mbuf by mbuf, freeing each as it is consumed. */
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = (int)szmin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), (size_t)len, uio);
		m0 = m_free(m0);
	}

	if (m0) {
		/* Short read: the rest of the packet is discarded. */
		TUNDEBUG(ifp, "Dropping mbuf\n");
		m_freem(m0);
	}
	return error;
}

/*
 * the ops write interface - an atomic write is a packet - or else!
580 */ 581 static int 582 tunwrite(struct dev_write_args *ap) 583 { 584 cdev_t dev = ap->a_head.a_dev; 585 struct uio *uio = ap->a_uio; 586 struct tun_softc *tp = dev->si_drv1; 587 struct ifnet *ifp = &tp->tun_if; 588 struct mbuf *top, **mp, *m; 589 int error=0; 590 size_t tlen, mlen; 591 uint32_t family; 592 int isr; 593 594 TUNDEBUG(ifp, "tunwrite\n"); 595 596 if (uio->uio_resid == 0) 597 return 0; 598 599 if (uio->uio_resid > TUNMRU) { 600 TUNDEBUG(ifp, "len=%zd!\n", uio->uio_resid); 601 return EIO; 602 } 603 tlen = uio->uio_resid; 604 605 /* get a header mbuf */ 606 MGETHDR(m, MB_DONTWAIT, MT_DATA); 607 if (m == NULL) 608 return ENOBUFS; 609 mlen = MHLEN; 610 611 top = 0; 612 mp = ⊤ 613 while (error == 0 && uio->uio_resid > 0) { 614 m->m_len = (int)szmin(mlen, uio->uio_resid); 615 error = uiomove(mtod (m, caddr_t), (size_t)m->m_len, uio); 616 *mp = m; 617 mp = &m->m_next; 618 if (uio->uio_resid > 0) { 619 MGET (m, MB_DONTWAIT, MT_DATA); 620 if (m == 0) { 621 error = ENOBUFS; 622 break; 623 } 624 mlen = MLEN; 625 } 626 } 627 if (error) { 628 if (top) 629 m_freem (top); 630 ifp->if_ierrors++; 631 return error; 632 } 633 634 top->m_pkthdr.len = (int)tlen; 635 top->m_pkthdr.rcvif = ifp; 636 637 if (ifp->if_bpf) { 638 if (tp->tun_flags & TUN_IFHEAD) { 639 /* 640 * Conveniently, we already have a 4-byte address 641 * family prepended to our packet ! 642 * Inconveniently, it's in the wrong byte order ! 643 */ 644 if ((top = m_pullup(top, sizeof(family))) == NULL) 645 return ENOBUFS; 646 *mtod(top, u_int32_t *) = 647 ntohl(*mtod(top, u_int32_t *)); 648 bpf_mtap(ifp->if_bpf, top); 649 *mtod(top, u_int32_t *) = 650 htonl(*mtod(top, u_int32_t *)); 651 } else { 652 /* 653 * We need to prepend the address family as 654 * a four byte field. 
655 */ 656 static const uint32_t af = AF_INET; 657 658 bpf_ptap(ifp->if_bpf, top, &af, sizeof(af)); 659 } 660 } 661 662 if (tp->tun_flags & TUN_IFHEAD) { 663 if (top->m_len < sizeof(family) && 664 (top = m_pullup(top, sizeof(family))) == NULL) 665 return ENOBUFS; 666 family = ntohl(*mtod(top, u_int32_t *)); 667 m_adj(top, sizeof(family)); 668 } else 669 family = AF_INET; 670 671 ifp->if_ibytes += top->m_pkthdr.len; 672 ifp->if_ipackets++; 673 674 switch (family) { 675 #ifdef INET 676 case AF_INET: 677 isr = NETISR_IP; 678 break; 679 #endif 680 #ifdef INET6 681 case AF_INET6: 682 isr = NETISR_IPV6; 683 break; 684 #endif 685 #ifdef IPX 686 case AF_IPX: 687 isr = NETISR_IPX; 688 break; 689 #endif 690 #ifdef NETATALK 691 case AF_APPLETALK: 692 isr = NETISR_ATALK2; 693 break; 694 #endif 695 default: 696 m_freem(m); 697 return (EAFNOSUPPORT); 698 } 699 700 netisr_dispatch(isr, top); 701 return (0); 702 } 703 704 /* 705 * tunpoll - the poll interface, this is only useful on reads 706 * really. The write detect always returns true, write never blocks 707 * anyway, it either accepts the packet or drops it. 
708 */ 709 static int 710 tunpoll(struct dev_poll_args *ap) 711 { 712 cdev_t dev = ap->a_head.a_dev; 713 struct tun_softc *tp = dev->si_drv1; 714 struct ifnet *ifp = &tp->tun_if; 715 int revents = 0; 716 717 TUNDEBUG(ifp, "tunpoll\n"); 718 719 ifnet_serialize_all(ifp); 720 721 if (ap->a_events & (POLLIN | POLLRDNORM)) { 722 if (!ifq_is_empty(&ifp->if_snd)) { 723 TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len); 724 revents |= ap->a_events & (POLLIN | POLLRDNORM); 725 } else { 726 TUNDEBUG(ifp, "tunpoll waiting\n"); 727 selrecord(curthread, &tp->tun_rsel); 728 } 729 } 730 if (ap->a_events & (POLLOUT | POLLWRNORM)) 731 revents |= ap->a_events & (POLLOUT | POLLWRNORM); 732 733 ifnet_deserialize_all(ifp); 734 ap->a_events = revents; 735 return(0); 736 } 737 738 static struct filterops tun_read_filtops = 739 { 1, NULL, tun_filter_detach, tun_filter_read }; 740 741 static int 742 tunkqfilter(struct dev_kqfilter_args *ap) 743 { 744 cdev_t dev = ap->a_head.a_dev; 745 struct tun_softc *tp = dev->si_drv1; 746 struct knote *kn = ap->a_kn; 747 struct klist *klist; 748 749 ap->a_result = 0; 750 ifnet_serialize_all(&tp->tun_if); 751 752 switch (kn->kn_filter) { 753 case EVFILT_READ: 754 kn->kn_fop = &tun_read_filtops; 755 kn->kn_hook = (caddr_t)&tp; 756 break; 757 default: 758 ap->a_result = 1; 759 ifnet_deserialize_all(&tp->tun_if); 760 return (0); 761 } 762 763 klist = &tp->tun_rsel.si_note; 764 crit_enter(); 765 SLIST_INSERT_HEAD(klist, kn, kn_selnext); 766 crit_exit(); 767 768 ifnet_deserialize_all(&tp->tun_if); 769 770 return (0); 771 } 772 773 static void 774 tun_filter_detach(struct knote *kn) 775 { 776 struct tun_softc *tp = (struct tun_softc *)kn->kn_hook; 777 struct klist *klist; 778 779 klist = &tp->tun_rsel.si_note; 780 crit_enter(); 781 SLIST_REMOVE(klist, kn, knote, kn_selnext); 782 crit_exit(); 783 } 784 785 static int 786 tun_filter_read(struct knote *kn, long hint) 787 { 788 struct tun_softc *tp = (struct tun_softc *)kn->kn_hook; 789 int ready = 0; 790 791 
ifnet_serialize_all(&tp->tun_if); 792 if (!ifq_is_empty(&tp->tun_if.if_snd)) 793 ready = 1; 794 ifnet_deserialize_all(&tp->tun_if); 795 796 return (ready); 797 } 798 799 /* 800 * Start packet transmission on the interface. 801 * when the interface queue is rate-limited by ALTQ, 802 * if_start is needed to drain packets from the queue in order 803 * to notify readers when outgoing packets become ready. 804 */ 805 static void 806 tunstart(struct ifnet *ifp) 807 { 808 struct tun_softc *tp = ifp->if_softc; 809 struct mbuf *m; 810 811 if (!ifq_is_enabled(&ifp->if_snd)) 812 return; 813 814 m = ifq_poll(&ifp->if_snd); 815 if (m != NULL) { 816 if (tp->tun_flags & TUN_RWAIT) { 817 tp->tun_flags &= ~TUN_RWAIT; 818 wakeup((caddr_t)tp); 819 } 820 if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio) 821 pgsigio(tp->tun_sigio, SIGIO, 0); 822 selwakeup(&tp->tun_rsel); 823 } 824 } 825