xref: /netbsd-src/sys/net/if_tun.c (revision 946379e7b37692fc43f68eb0d1c10daa0a7f3b6c)
1 /*	$NetBSD: if_tun.c,v 1.123 2015/08/24 22:21:26 pooka Exp $	*/
2 
3 /*
4  * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
5  * Nottingham University 1987.
6  *
7  * This source may be freely distributed, however I would be interested
8  * in any changes that are made.
9  *
10  * This driver takes packets off the IP i/f and hands them up to a
11  * user process to have its wicked way with them. This driver has its
12  * roots in a similar driver written by Phil Cockcroft (formerly) at
13  * UCL. This driver is based much more on a read/write/poll mode of
14  * operation, though.
15  */
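/*
 * Illustrative userland usage (not part of this driver): a rough
 * sketch, assuming a device node such as /dev/tun0 and an interface
 * that has already been configured up.  Error handling is omitted.
 * The process opens the character device, optionally enables the
 * 4-byte address-family header with TUNSIFHEAD, and then moves exactly
 * one packet per read(2)/write(2); poll(2) or kqueue(2) can be used to
 * wait for packets.
 *
 *	#include <sys/ioctl.h>
 *	#include <net/if_tun.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	char buf[TUNMTU + sizeof(uint32_t)];
 *	int on = 1;
 *	int fd = open("/dev/tun0", O_RDWR);
 *
 *	ioctl(fd, TUNSIFHEAD, &on);		   enable AF header framing
 *	ssize_t n = read(fd, buf, sizeof(buf));	   one packet per read
 *	write(fd, buf, n);			   one packet per write
 */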
16 
17 #include <sys/cdefs.h>
18 __KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.123 2015/08/24 22:21:26 pooka Exp $");
19 
20 #ifdef _KERNEL_OPT
21 #include "opt_inet.h"
22 #endif
23 
24 #include <sys/param.h>
25 #include <sys/proc.h>
26 #include <sys/systm.h>
27 #include <sys/mbuf.h>
28 #include <sys/buf.h>
29 #include <sys/protosw.h>
30 #include <sys/socket.h>
31 #include <sys/ioctl.h>
32 #include <sys/errno.h>
33 #include <sys/syslog.h>
34 #include <sys/select.h>
35 #include <sys/poll.h>
36 #include <sys/file.h>
37 #include <sys/signalvar.h>
38 #include <sys/conf.h>
39 #include <sys/kauth.h>
40 #include <sys/mutex.h>
41 #include <sys/cpu.h>
42 
43 #include <net/if.h>
44 #include <net/if_types.h>
45 #include <net/netisr.h>
46 #include <net/route.h>
47 
48 
49 #ifdef INET
50 #include <netinet/in.h>
51 #include <netinet/in_systm.h>
52 #include <netinet/in_var.h>
53 #include <netinet/ip.h>
54 #include <netinet/if_inarp.h>
55 #endif
56 
57 
58 #include <sys/time.h>
59 #include <net/bpf.h>
60 
61 #include <net/if_tun.h>
62 
63 #include "ioconf.h"
64 
65 #define TUNDEBUG	if (tundebug) printf
66 int	tundebug = 0;
67 
68 extern int ifqmaxlen;
69 
70 static LIST_HEAD(, tun_softc) tun_softc_list;
71 static LIST_HEAD(, tun_softc) tunz_softc_list;
72 static kmutex_t tun_softc_lock;
73 
74 static int	tun_ioctl(struct ifnet *, u_long, void *);
75 static int	tun_output(struct ifnet *, struct mbuf *,
76 			const struct sockaddr *, struct rtentry *rt);
77 static int	tun_clone_create(struct if_clone *, int);
78 static int	tun_clone_destroy(struct ifnet *);
79 
80 static struct if_clone tun_cloner =
81     IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);
82 
83 static void tunattach0(struct tun_softc *);
84 static void tuninit(struct tun_softc *);
85 static void tun_i_softintr(void *);
86 static void tun_o_softintr(void *);
87 #ifdef ALTQ
88 static void tunstart(struct ifnet *);
89 #endif
90 static struct tun_softc *tun_find_unit(dev_t);
91 static struct tun_softc *tun_find_zunit(int);
92 
93 static dev_type_open(tunopen);
94 static dev_type_close(tunclose);
95 static dev_type_read(tunread);
96 static dev_type_write(tunwrite);
97 static dev_type_ioctl(tunioctl);
98 static dev_type_poll(tunpoll);
99 static dev_type_kqfilter(tunkqfilter);
100 
101 const struct cdevsw tun_cdevsw = {
102 	.d_open = tunopen,
103 	.d_close = tunclose,
104 	.d_read = tunread,
105 	.d_write = tunwrite,
106 	.d_ioctl = tunioctl,
107 	.d_stop = nostop,
108 	.d_tty = notty,
109 	.d_poll = tunpoll,
110 	.d_mmap = nommap,
111 	.d_kqfilter = tunkqfilter,
112 	.d_discard = nodiscard,
113 	.d_flag = D_OTHER
114 };
115 
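/*
 * Pseudo-device attach routine: initialise the softc list lock, the
 * active and zombie lists, and register the "tun" interface cloner.
 */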
116 void
117 tunattach(int unused)
118 {
119 
120 	mutex_init(&tun_softc_lock, MUTEX_DEFAULT, IPL_NET);
121 	LIST_INIT(&tun_softc_list);
122 	LIST_INIT(&tunz_softc_list);
123 	if_clone_attach(&tun_cloner);
124 }
125 
126 /*
127  * Find driver instance from dev_t.
128  * Returns with tp locked (if found).
129  */
130 static struct tun_softc *
131 tun_find_unit(dev_t dev)
132 {
133 	struct tun_softc *tp;
134 	int unit = minor(dev);
135 
136 	mutex_enter(&tun_softc_lock);
137 	LIST_FOREACH(tp, &tun_softc_list, tun_list)
138 		if (unit == tp->tun_unit)
139 			break;
140 	if (tp)
141 		mutex_enter(&tp->tun_lock);
142 	mutex_exit(&tun_softc_lock);
143 
144 	return (tp);
145 }
146 
147 /*
148  * Find zombie driver instance by unit number.
149  * Remove tp from list and return it unlocked (if found).
150  */
151 static struct tun_softc *
152 tun_find_zunit(int unit)
153 {
154 	struct tun_softc *tp;
155 
156 	mutex_enter(&tun_softc_lock);
157 	LIST_FOREACH(tp, &tunz_softc_list, tun_list)
158 		if (unit == tp->tun_unit)
159 			break;
160 	if (tp)
161 		LIST_REMOVE(tp, tun_list);
162 	mutex_exit(&tun_softc_lock);
163 #ifdef DIAGNOSTIC
164 	if (tp != NULL && (tp->tun_flags & (TUN_INITED|TUN_OPEN)) != TUN_OPEN)
165 		printf("tun%d: inconsistent flags: %x\n", unit, tp->tun_flags);
166 #endif
167 
168 	return (tp);
169 }
170 
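/*
 * Interface clone request: revive a zombie softc for this unit if one
 * exists (resetting its ifnet), otherwise allocate and initialise a
 * fresh one; then attach the ifnet, establish the soft interrupts and
 * insert the softc on the active list.
 */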
171 static int
172 tun_clone_create(struct if_clone *ifc, int unit)
173 {
174 	struct tun_softc *tp;
175 
176 	if ((tp = tun_find_zunit(unit)) == NULL) {
177 		/* Allocate a new instance */
178 		tp = malloc(sizeof(*tp), M_DEVBUF, M_WAITOK|M_ZERO);
179 
180 		tp->tun_unit = unit;
181 		mutex_init(&tp->tun_lock, MUTEX_DEFAULT, IPL_NET);
182 		selinit(&tp->tun_rsel);
183 		selinit(&tp->tun_wsel);
184 	} else {
185 		/* Revive tunnel instance; clear ifp part */
186 		(void)memset(&tp->tun_if, 0, sizeof(struct ifnet));
187 	}
188 
189 	if_initname(&tp->tun_if, ifc->ifc_name, unit);
190 	tunattach0(tp);
191 	tp->tun_flags |= TUN_INITED;
192 	tp->tun_osih = softint_establish(SOFTINT_CLOCK, tun_o_softintr, tp);
193 	tp->tun_isih = softint_establish(SOFTINT_CLOCK, tun_i_softintr, tp);
194 
195 	mutex_enter(&tun_softc_lock);
196 	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
197 	mutex_exit(&tun_softc_lock);
198 
199 	return (0);
200 }
201 
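/*
 * Common ifnet setup shared by create and revive: fill in the function
 * pointers, flags, queue limit and statistics, then attach the
 * interface and register it with bpf.
 */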
202 static void
203 tunattach0(struct tun_softc *tp)
204 {
205 	struct ifnet *ifp;
206 
207 	ifp = &tp->tun_if;
208 	ifp->if_softc = tp;
209 	ifp->if_mtu = TUNMTU;
210 	ifp->if_ioctl = tun_ioctl;
211 	ifp->if_output = tun_output;
212 #ifdef ALTQ
213 	ifp->if_start = tunstart;
214 #endif
215 	ifp->if_flags = IFF_POINTOPOINT;
216 	ifp->if_type = IFT_TUNNEL;
217 	ifp->if_snd.ifq_maxlen = ifqmaxlen;
218 	ifp->if_collisions = 0;
219 	ifp->if_ierrors = 0;
220 	ifp->if_oerrors = 0;
221 	ifp->if_ipackets = 0;
222 	ifp->if_opackets = 0;
223 	ifp->if_ibytes   = 0;
224 	ifp->if_obytes   = 0;
225 	ifp->if_dlt = DLT_NULL;
226 	IFQ_SET_READY(&ifp->if_snd);
227 	if_attach(ifp);
228 	if_alloc_sadl(ifp);
229 	bpf_attach(ifp, DLT_NULL, sizeof(uint32_t));
230 }
231 
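/*
 * Destroy a cloned interface.  If the character device is still open,
 * move the softc to the zombie list so the final close can free it;
 * otherwise release all resources here.
 */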
232 static int
233 tun_clone_destroy(struct ifnet *ifp)
234 {
235 	struct tun_softc *tp = (void *)ifp;
236 	int zombie = 0;
237 
238 	IF_PURGE(&ifp->if_snd);
239 	ifp->if_flags &= ~IFF_RUNNING;
240 
241 	mutex_enter(&tun_softc_lock);
242 	mutex_enter(&tp->tun_lock);
243 	LIST_REMOVE(tp, tun_list);
244 	if (tp->tun_flags & TUN_OPEN) {
245 		/* Hang on to storage until last close */
246 		zombie = 1;
247 		tp->tun_flags &= ~TUN_INITED;
248 		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
249 	}
250 	mutex_exit(&tun_softc_lock);
251 
252 	if (tp->tun_flags & TUN_RWAIT) {
253 		tp->tun_flags &= ~TUN_RWAIT;
254 		wakeup((void *)tp);
255 	}
256 	selnotify(&tp->tun_rsel, 0, 0);
257 
258 	mutex_exit(&tp->tun_lock);
259 
260 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
261 		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);
262 
263 	bpf_detach(ifp);
264 	if_detach(ifp);
265 
266 	if (!zombie) {
267 		seldestroy(&tp->tun_rsel);
268 		seldestroy(&tp->tun_wsel);
269 		softint_disestablish(tp->tun_osih);
270 		softint_disestablish(tp->tun_isih);
271 		mutex_destroy(&tp->tun_lock);
272 		free(tp, M_DEVBUF);
273 	}
274 
275 	return (0);
276 }
277 
278 /*
279  * tunnel open - the caller must be authorized via kauth(9) and the
280  * device must be configured into the kernel
281  */
282 static int
283 tunopen(dev_t dev, int flag, int mode, struct lwp *l)
284 {
285 	struct ifnet	*ifp;
286 	struct tun_softc *tp;
287 	int	error;
288 
289 	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE_TUN,
290 	    KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD, NULL, NULL, NULL);
291 	if (error)
292 		return (error);
293 
294 	tp = tun_find_unit(dev);
295 
296 	if (tp == NULL) {
297 		(void)tun_clone_create(&tun_cloner, minor(dev));
298 		tp = tun_find_unit(dev);
299 		if (tp == NULL) {
300 			error = ENXIO;
301 			goto out_nolock;
302 		}
303 	}
304 
305 	if (tp->tun_flags & TUN_OPEN) {
306 		error = EBUSY;
307 		goto out;
308 	}
309 
310 	ifp = &tp->tun_if;
311 	tp->tun_flags |= TUN_OPEN;
312 	TUNDEBUG("%s: open\n", ifp->if_xname);
313 out:
314 	mutex_exit(&tp->tun_lock);
315 out_nolock:
316 	return (error);
317 }
318 
319 /*
320  * tunclose - close the device - mark i/f down & delete
321  * routing info
322  */
323 int
324 tunclose(dev_t dev, int flag, int mode,
325     struct lwp *l)
326 {
327 	struct tun_softc *tp;
328 	struct ifnet	*ifp;
329 
330 	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
331 		/* interface was "destroyed" before the close */
332 		seldestroy(&tp->tun_rsel);
333 		seldestroy(&tp->tun_wsel);
334 		softint_disestablish(tp->tun_osih);
335 		softint_disestablish(tp->tun_isih);
336 		mutex_destroy(&tp->tun_lock);
337 		free(tp, M_DEVBUF);
338 		goto out_nolock;
339 	}
340 
341 	if ((tp = tun_find_unit(dev)) == NULL)
342 		goto out_nolock;
343 
344 	ifp = &tp->tun_if;
345 
346 	tp->tun_flags &= ~TUN_OPEN;
347 
348 	tp->tun_pgid = 0;
349 	selnotify(&tp->tun_rsel, 0, 0);
350 
351 	TUNDEBUG ("%s: closed\n", ifp->if_xname);
352 	mutex_exit(&tp->tun_lock);
353 
354 	/*
355 	 * junk all pending output
356 	 */
357 	IFQ_PURGE(&ifp->if_snd);
358 
359 	if (ifp->if_flags & IFF_UP) {
360 		if_down(ifp);
361 		if (ifp->if_flags & IFF_RUNNING) {
362 			/* find internet addresses and delete routes */
363 			struct ifaddr *ifa;
364 			IFADDR_FOREACH(ifa, ifp) {
365 #if defined(INET) || defined(INET6)
366 				if (ifa->ifa_addr->sa_family == AF_INET ||
367 				    ifa->ifa_addr->sa_family == AF_INET6) {
368 					rtinit(ifa, (int)RTM_DELETE,
369 					       tp->tun_flags & TUN_DSTADDR
370 							? RTF_HOST
371 							: 0);
372 				}
373 #endif
374 			}
375 		}
376 	}
377 out_nolock:
378 	return (0);
379 }
380 
381 /*
382  * Call at splnet().
383  */
384 static void
385 tuninit(struct tun_softc *tp)
386 {
387 	struct ifnet	*ifp = &tp->tun_if;
388 	struct ifaddr	*ifa;
389 
390 	TUNDEBUG("%s: tuninit\n", ifp->if_xname);
391 
392 	mutex_enter(&tp->tun_lock);
393 	ifp->if_flags |= IFF_UP | IFF_RUNNING;
394 
395 	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);
396 	IFADDR_FOREACH(ifa, ifp) {
397 #ifdef INET
398 		if (ifa->ifa_addr->sa_family == AF_INET) {
399 			struct sockaddr_in *sin;
400 
401 			sin = satosin(ifa->ifa_addr);
402 			if (sin && sin->sin_addr.s_addr)
403 				tp->tun_flags |= TUN_IASET;
404 
405 			if (ifp->if_flags & IFF_POINTOPOINT) {
406 				sin = satosin(ifa->ifa_dstaddr);
407 				if (sin && sin->sin_addr.s_addr)
408 					tp->tun_flags |= TUN_DSTADDR;
409 			}
410 		}
411 #endif
412 #ifdef INET6
413 		if (ifa->ifa_addr->sa_family == AF_INET6) {
414 			struct sockaddr_in6 *sin;
415 
416 			sin = (struct sockaddr_in6 *)ifa->ifa_addr;
417 			if (!IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
418 				tp->tun_flags |= TUN_IASET;
419 
420 			if (ifp->if_flags & IFF_POINTOPOINT) {
421 				sin = (struct sockaddr_in6 *)ifa->ifa_dstaddr;
422 				if (sin &&
423 				    !IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
424 					tp->tun_flags |= TUN_DSTADDR;
425 			} else
426 				tp->tun_flags &= ~TUN_DSTADDR;
427 		}
428 #endif /* INET6 */
429 	}
430 	mutex_exit(&tp->tun_lock);
431 }
432 
433 /*
434  * Process an ioctl request.
435  */
436 static int
437 tun_ioctl(struct ifnet *ifp, u_long cmd, void *data)
438 {
439 	int		error = 0, s;
440 	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);
441 	struct ifreq *ifr = (struct ifreq *)data;
442 	struct ifaddr *ifa = (struct ifaddr *)data;
443 
444 	s = splnet();
445 
446 	switch (cmd) {
447 	case SIOCINITIFADDR:
448 		tuninit(tp);
449 		ifa->ifa_rtrequest = p2p_rtrequest;
450 		TUNDEBUG("%s: address set\n", ifp->if_xname);
451 		break;
452 	case SIOCSIFBRDADDR:
453 		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
454 		break;
455 	case SIOCSIFMTU:
456 		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
457 			error = EINVAL;
458 			break;
459 		}
460 		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
461 		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
462 			error = 0;
463 		break;
464 	case SIOCADDMULTI:
465 	case SIOCDELMULTI:
466 		if (ifr == NULL) {
467 			error = EAFNOSUPPORT;		/* XXX */
468 			break;
469 		}
470 		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
471 #ifdef INET
472 		case AF_INET:
473 			break;
474 #endif
475 #ifdef INET6
476 		case AF_INET6:
477 			break;
478 #endif
479 		default:
480 			error = EAFNOSUPPORT;
481 			break;
482 		}
483 		break;
484 	default:
485 		error = ifioctl_common(ifp, cmd, data);
486 	}
487 
488 	splx(s);
489 	return (error);
490 }
491 
492 /*
493  * tun_output - queue packets from higher level ready to put out.
494  */
495 static int
496 tun_output(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst,
497     struct rtentry *rt)
498 {
499 	struct tun_softc *tp = ifp->if_softc;
500 	int		s;
501 	int		error;
502 #if defined(INET) || defined(INET6)
503 	int		mlen;
504 	uint32_t	*af;
505 #endif
506 	ALTQ_DECL(struct altq_pktattr pktattr;)
507 
508 	s = splnet();
509 	mutex_enter(&tp->tun_lock);
510 	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);
511 
512 	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
513 		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
514 			  tp->tun_flags);
515 		error = EHOSTDOWN;
516 		goto out;
517 	}
518 
519 	/*
520 	 * if the queueing discipline needs packet classification,
521 	 * do it before prepending link headers.
522 	 */
523 	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family, &pktattr);
524 
525 	bpf_mtap_af(ifp, dst->sa_family, m0);
526 
527 	switch(dst->sa_family) {
528 #ifdef INET6
529 	case AF_INET6:
530 #endif
531 #ifdef INET
532 	case AF_INET:
533 #endif
534 #if defined(INET) || defined(INET6)
535 		if (tp->tun_flags & TUN_PREPADDR) {
536 			/* Simple link-layer header */
537 			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
538 			if (m0 == NULL) {
539 				IF_DROP(&ifp->if_snd);
540 				error = ENOBUFS;
541 				goto out;
542 			}
543 			bcopy(dst, mtod(m0, char *), dst->sa_len);
544 		}
545 
546 		if (tp->tun_flags & TUN_IFHEAD) {
547 			/* Prepend the address family */
548 			M_PREPEND(m0, sizeof(*af), M_DONTWAIT);
549 			if (m0 == NULL) {
550 				IF_DROP(&ifp->if_snd);
551 				error = ENOBUFS;
552 				goto out;
553 			}
554 			af = mtod(m0, uint32_t *);
555 			*af = htonl(dst->sa_family);
556 		} else {
557 #ifdef INET
558 			if (dst->sa_family != AF_INET)
559 #endif
560 			{
561 				error = EAFNOSUPPORT;
562 				goto out;
563 			}
564 		}
565 		/* FALLTHROUGH */
566 	case AF_UNSPEC:
567 		IFQ_ENQUEUE(&ifp->if_snd, m0, &pktattr, error);
568 		if (error) {
569 			ifp->if_collisions++;
570 			error = EAFNOSUPPORT;
571 			m0 = NULL;
572 			goto out;
573 		}
574 		mlen = m0->m_pkthdr.len;
575 		ifp->if_opackets++;
576 		ifp->if_obytes += mlen;
577 		break;
578 #endif
579 	default:
580 		error = EAFNOSUPPORT;
581 		goto out;
582 	}
583 
584 	if (tp->tun_flags & TUN_RWAIT) {
585 		tp->tun_flags &= ~TUN_RWAIT;
586 		wakeup((void *)tp);
587 	}
588 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
589 		softint_schedule(tp->tun_isih);
590 
591 	selnotify(&tp->tun_rsel, 0, 0);
592 out:
593 	mutex_exit(&tp->tun_lock);
594 	splx(s);
595 
596 	if (error && m0) {
597 		m_freem(m0);
598 	}
599 	return 0;
600 }
601 
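/*
 * Soft interrupt handler: deliver SIGIO (POLL_IN) to the registered
 * async I/O owner when a packet has been queued for the reader.
 */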
602 static void
603 tun_i_softintr(void *cookie)
604 {
605 	struct tun_softc *tp = cookie;
606 
607 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
608 		fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM,
609 		    NULL);
610 }
611 
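/*
 * Soft interrupt handler: deliver SIGIO (POLL_OUT) to the registered
 * async I/O owner when the interface can accept more output.
 */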
612 static void
613 tun_o_softintr(void *cookie)
614 {
615 	struct tun_softc *tp = cookie;
616 
617 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
618 		fownsignal(tp->tun_pgid, SIGIO, POLL_OUT, POLLOUT|POLLWRNORM,
619 		    NULL);
620 }
621 
622 /*
623  * the cdevsw interface is now pretty minimal.
624  */
625 int
626 tunioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
627 {
628 	struct tun_softc *tp;
629 	int s, error = 0;
630 
631 	s = splnet();
632 	tp = tun_find_unit(dev);
633 
634 	/* interface was "destroyed" already */
635 	if (tp == NULL) {
636 		error = ENXIO;
637 		goto out_nolock;
638 	}
639 
640 	switch (cmd) {
641 	case TUNSDEBUG:
642 		tundebug = *(int *)data;
643 		break;
644 
645 	case TUNGDEBUG:
646 		*(int *)data = tundebug;
647 		break;
648 
649 	case TUNSIFMODE:
650 		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
651 		case IFF_POINTOPOINT:
652 		case IFF_BROADCAST:
653 			if (tp->tun_if.if_flags & IFF_UP) {
654 				error = EBUSY;
655 				goto out;
656 			}
657 			tp->tun_if.if_flags &=
658 				~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
659 			tp->tun_if.if_flags |= *(int *)data;
660 			break;
661 		default:
662 			error = EINVAL;
663 			goto out;
664 		}
665 		break;
666 
667 	case TUNSLMODE:
668 		if (*(int *)data) {
669 			tp->tun_flags |= TUN_PREPADDR;
670 			tp->tun_flags &= ~TUN_IFHEAD;
671 		} else
672 			tp->tun_flags &= ~TUN_PREPADDR;
673 		break;
674 
675 	case TUNSIFHEAD:
676 		if (*(int *)data) {
677 			tp->tun_flags |= TUN_IFHEAD;
678 			tp->tun_flags &= ~TUN_PREPADDR;
679 		} else
680 			tp->tun_flags &= ~TUN_IFHEAD;
681 		break;
682 
683 	case TUNGIFHEAD:
684 		*(int *)data = (tp->tun_flags & TUN_IFHEAD);
685 		break;
686 
687 	case FIONBIO:
688 		if (*(int *)data)
689 			tp->tun_flags |= TUN_NBIO;
690 		else
691 			tp->tun_flags &= ~TUN_NBIO;
692 		break;
693 
694 	case FIOASYNC:
695 		if (*(int *)data)
696 			tp->tun_flags |= TUN_ASYNC;
697 		else
698 			tp->tun_flags &= ~TUN_ASYNC;
699 		break;
700 
701 	case FIONREAD:
702 		if (tp->tun_if.if_snd.ifq_head)
703 			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
704 		else
705 			*(int *)data = 0;
706 		break;
707 
708 	case TIOCSPGRP:
709 	case FIOSETOWN:
710 		error = fsetown(&tp->tun_pgid, cmd, data);
711 		break;
712 
713 	case TIOCGPGRP:
714 	case FIOGETOWN:
715 		error = fgetown(tp->tun_pgid, cmd, data);
716 		break;
717 
718 	default:
719 		error = ENOTTY;
720 	}
721 
722 out:
723 	mutex_exit(&tp->tun_lock);
724 out_nolock:
725 	splx(s);
726 	return (error);
727 }
728 
729 /*
730  * The cdevsw read interface - reads a packet at a time, or at
731  * least as much of a packet as can be read.
732  */
733 int
734 tunread(dev_t dev, struct uio *uio, int ioflag)
735 {
736 	struct tun_softc *tp;
737 	struct ifnet	*ifp;
738 	struct mbuf	*m, *m0;
739 	int		error = 0, len, s, index;
740 
741 	s = splnet();
742 	tp = tun_find_unit(dev);
743 
744 	/* interface was "destroyed" already */
745 	if (tp == NULL) {
746 		error = ENXIO;
747 		goto out_nolock;
748 	}
749 
750 	index = tp->tun_if.if_index;
751 	ifp = &tp->tun_if;
752 
753 	TUNDEBUG ("%s: read\n", ifp->if_xname);
754 	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
755 		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
756 		error = EHOSTDOWN;
757 		goto out;
758 	}
759 
760 	tp->tun_flags &= ~TUN_RWAIT;
761 
762 	do {
763 		IFQ_DEQUEUE(&ifp->if_snd, m0);
764 		if (m0 == NULL) {
765 			if (tp->tun_flags & TUN_NBIO) {
766 				error = EWOULDBLOCK;
767 				goto out;
768 			}
769 			tp->tun_flags |= TUN_RWAIT;
770 			if (mtsleep((void *)tp, PZERO|PCATCH|PNORELOCK,
771 					"tunread", 0, &tp->tun_lock) != 0) {
772 				error = EINTR;
773 				goto out_nolock;
774 			} else {
775 				/*
776 				 * Maybe the interface was destroyed while
777 				 * we were sleeping, so let's ensure that
778 				 * we're looking at the same (valid) tun
779 				 * interface before looping.
780 				 */
781 				tp = tun_find_unit(dev);
782 				if (tp == NULL) {
783 					error = ENXIO;
784 					goto out_nolock;
785 				}
786 				if (tp->tun_if.if_index != index) {
787 					error = ENXIO;
788 					goto out;
789 				}
790 			}
791 		}
792 	} while (m0 == NULL);
793 
794 	mutex_exit(&tp->tun_lock);
795 	splx(s);
796 
797 	/* Copy the mbuf chain */
798 	while (m0 && uio->uio_resid > 0 && error == 0) {
799 		len = min(uio->uio_resid, m0->m_len);
800 		if (len != 0)
801 			error = uiomove(mtod(m0, void *), len, uio);
802 		MFREE(m0, m);
803 		m0 = m;
804 	}
805 
806 	if (m0) {
807 		TUNDEBUG("Dropping mbuf\n");
808 		m_freem(m0);
809 	}
810 	if (error)
811 		ifp->if_ierrors++;
812 
813 	return (error);
814 
815 out:
816 	mutex_exit(&tp->tun_lock);
817 out_nolock:
818 	splx(s);
819 	return (error);
820 }
821 
822 /*
823  * the cdevsw write interface - an atomic write is a packet - or else!
824  */
825 int
826 tunwrite(dev_t dev, struct uio *uio, int ioflag)
827 {
828 	struct tun_softc *tp;
829 	struct ifnet	*ifp;
830 	struct mbuf	*top, **mp, *m;
831 	pktqueue_t	*pktq;
832 	struct sockaddr	dst;
833 	int		error = 0, s, tlen, mlen;
834 	uint32_t	family;
835 
836 	s = splnet();
837 	tp = tun_find_unit(dev);
838 
839 	/* interface was "destroyed" already */
840 	if (tp == NULL) {
841 		error = ENXIO;
842 		goto out_nolock;
843 	}
844 
845 	/* Unlock until we've got the data */
846 	mutex_exit(&tp->tun_lock);
847 	splx(s);
848 
849 	ifp = &tp->tun_if;
850 
851 	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);
852 
853 	if (tp->tun_flags & TUN_PREPADDR) {
854 		if (uio->uio_resid < sizeof(dst)) {
855 			error = EIO;
856 			goto out0;
857 		}
858 		error = uiomove((void *)&dst, sizeof(dst), uio);
		if (error)
			goto out0;
859 		if (dst.sa_len > sizeof(dst)) {
860 			/* Duh.. */
861 			char discard;
862 			int n = dst.sa_len - sizeof(dst);
863 			while (n--)
864 				if ((error = uiomove(&discard, 1, uio)) != 0) {
865 					goto out0;
866 				}
867 		}
868 	} else if (tp->tun_flags & TUN_IFHEAD) {
869 		if (uio->uio_resid < sizeof(family)){
870 			error = EIO;
871 			goto out0;
872 		}
873 		error = uiomove((void *)&family, sizeof(family), uio);
		if (error)
			goto out0;
874 		dst.sa_family = ntohl(family);
875 	} else {
876 #ifdef INET
877 		dst.sa_family = AF_INET;
878 #endif
879 	}
880 
881 	if (uio->uio_resid > TUNMTU) {
882 		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
883 		    (unsigned long)uio->uio_resid);
884 		error = EIO;
885 		goto out0;
886 	}
887 
888 	switch (dst.sa_family) {
889 #ifdef INET
890 	case AF_INET:
891 		pktq = ip_pktq;
892 		break;
893 #endif
894 #ifdef INET6
895 	case AF_INET6:
896 		pktq = ip6_pktq;
897 		break;
898 #endif
899 	default:
900 		error = EAFNOSUPPORT;
901 		goto out0;
902 	}
903 
904 	tlen = uio->uio_resid;
905 
906 	/* get a header mbuf */
907 	MGETHDR(m, M_DONTWAIT, MT_DATA);
908 	if (m == NULL) {
909 		error = ENOBUFS;
910 		goto out0;
911 	}
912 	mlen = MHLEN;
913 
914 	top = NULL;
915 	mp = &top;
916 	while (error == 0 && uio->uio_resid > 0) {
917 		m->m_len = min(mlen, uio->uio_resid);
918 		error = uiomove(mtod(m, void *), m->m_len, uio);
919 		*mp = m;
920 		mp = &m->m_next;
921 		if (error == 0 && uio->uio_resid > 0) {
922 			MGET(m, M_DONTWAIT, MT_DATA);
923 			if (m == NULL) {
924 				error = ENOBUFS;
925 				break;
926 			}
927 			mlen = MLEN;
928 		}
929 	}
930 	if (error) {
931 		if (top != NULL)
932 			m_freem (top);
933 		ifp->if_ierrors++;
934 		goto out0;
935 	}
936 
937 	top->m_pkthdr.len = tlen;
938 	top->m_pkthdr.rcvif = ifp;
939 
940 	bpf_mtap_af(ifp, dst.sa_family, top);
941 
942 	s = splnet();
943 	mutex_enter(&tp->tun_lock);
944 	if ((tp->tun_flags & TUN_INITED) == 0) {
945 		/* Interface was destroyed */
946 		error = ENXIO;
947 		goto out;
948 	}
949 	if (__predict_false(!pktq_enqueue(pktq, top, 0))) {
950 		ifp->if_collisions++;
951 		mutex_exit(&tp->tun_lock);
952 		error = ENOBUFS;
953 		m_freem(top);
954 		goto out_nolock;
955 	}
956 	ifp->if_ipackets++;
957 	ifp->if_ibytes += tlen;
958 out:
959 	mutex_exit(&tp->tun_lock);
960 out_nolock:
961 	splx(s);
962 out0:
963 	return (error);
964 }
965 
966 #ifdef ALTQ
967 /*
968  * Start packet transmission on the interface.
969  * When the interface queue is rate-limited by ALTQ or TBR,
970  * if_start is needed to drain packets from the queue in order
971  * to notify readers when outgoing packets become ready.
972  *
973  * Should be called at splnet.
974  */
975 static void
976 tunstart(struct ifnet *ifp)
977 {
978 	struct tun_softc *tp = ifp->if_softc;
979 
980 	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
981 		return;
982 
983 	mutex_enter(&tp->tun_lock);
984 	if (!IF_IS_EMPTY(&ifp->if_snd)) {
985 		if (tp->tun_flags & TUN_RWAIT) {
986 			tp->tun_flags &= ~TUN_RWAIT;
987 			wakeup((void *)tp);
988 		}
989 		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
990 			softint_schedule(tp->tun_osih);
991 
992 		selnotify(&tp->tun_rsel, 0, 0);
993 	}
994 	mutex_exit(&tp->tun_lock);
995 }
996 #endif /* ALTQ */
997 /*
998  * tunpoll - the poll interface; this is really only useful for reads.
999  * The write side always reports ready: a write never blocks, it
1000  * either accepts the packet or drops it.
1001  */
1002 int
1003 tunpoll(dev_t dev, int events, struct lwp *l)
1004 {
1005 	struct tun_softc *tp;
1006 	struct ifnet	*ifp;
1007 	int		s, revents = 0;
1008 
1009 	s = splnet();
1010 	tp = tun_find_unit(dev);
1011 
1012 	/* interface was "destroyed" already */
1013 	if (tp == NULL)
1014 		goto out_nolock;
1015 
1016 	ifp = &tp->tun_if;
1017 
1018 	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);
1019 
1020 	if (events & (POLLIN | POLLRDNORM)) {
1021 		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
1022 			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
1023 			    ifp->if_snd.ifq_len);
1024 			revents |= events & (POLLIN | POLLRDNORM);
1025 		} else {
1026 			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
1027 			selrecord(l, &tp->tun_rsel);
1028 		}
1029 	}
1030 
1031 	if (events & (POLLOUT | POLLWRNORM))
1032 		revents |= events & (POLLOUT | POLLWRNORM);
1033 
1034 	mutex_exit(&tp->tun_lock);
1035 out_nolock:
1036 	splx(s);
1037 	return (revents);
1038 }
1039 
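/*
 * Detach a knote from the unit's read selinfo klist; shared by the
 * read filter and the always-true write filter.
 */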
1040 static void
1041 filt_tunrdetach(struct knote *kn)
1042 {
1043 	struct tun_softc *tp = kn->kn_hook;
1044 	int s;
1045 
1046 	s = splnet();
1047 	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
1048 	splx(s);
1049 }
1050 
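/*
 * kqueue EVFILT_READ filter: if a packet is waiting on the send queue,
 * report its length in kn_data.
 */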
1051 static int
1052 filt_tunread(struct knote *kn, long hint)
1053 {
1054 	struct tun_softc *tp = kn->kn_hook;
1055 	struct ifnet *ifp = &tp->tun_if;
1056 	struct mbuf *m;
1057 	int s;
1058 
1059 	s = splnet();
1060 	IF_POLL(&ifp->if_snd, m);
1061 	if (m == NULL) {
1062 		splx(s);
1063 		return (0);
1064 	}
1065 
1066 	for (kn->kn_data = 0; m != NULL; m = m->m_next)
1067 		kn->kn_data += m->m_len;
1068 
1069 	splx(s);
1070 	return (1);
1071 }
1072 
1073 static const struct filterops tunread_filtops =
1074 	{ 1, NULL, filt_tunrdetach, filt_tunread };
1075 
1076 static const struct filterops tun_seltrue_filtops =
1077 	{ 1, NULL, filt_tunrdetach, filt_seltrue };
1078 
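/*
 * kqueue attach routine: register read knotes (and always-ready write
 * knotes) on the unit's read selinfo.
 */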
1079 int
1080 tunkqfilter(dev_t dev, struct knote *kn)
1081 {
1082 	struct tun_softc *tp;
1083 	struct klist *klist;
1084 	int rv = 0, s;
1085 
1086 	s = splnet();
1087 	tp = tun_find_unit(dev);
1088 	if (tp == NULL)
1089 		goto out_nolock;
1090 
1091 	switch (kn->kn_filter) {
1092 	case EVFILT_READ:
1093 		klist = &tp->tun_rsel.sel_klist;
1094 		kn->kn_fop = &tunread_filtops;
1095 		break;
1096 
1097 	case EVFILT_WRITE:
1098 		klist = &tp->tun_rsel.sel_klist;
1099 		kn->kn_fop = &tun_seltrue_filtops;
1100 		break;
1101 
1102 	default:
1103 		rv = EINVAL;
1104 		goto out;
1105 	}
1106 
1107 	kn->kn_hook = tp;
1108 
1109 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
1110 
1111 out:
1112 	mutex_exit(&tp->tun_lock);
1113 out_nolock:
1114 	splx(s);
1115 	return (rv);
1116 }
1117