xref: /netbsd-src/sys/net/if_tun.c (revision f21b7d7f2cbdd5c14b3882c4e8a3d43580d460a6)
1 /*	$NetBSD: if_tun.c,v 1.133 2016/10/02 14:17:07 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
5  * Nottingham University 1987.
6  *
7  * This source may be freely distributed, however I would be interested
8  * in any changes that are made.
9  *
10  * This driver takes packets off the IP i/f and hands them up to a
11  * user process to have its wicked way with. This driver has its
12  * roots in a similar driver written by Phil Cockcroft (formerly) at
13  * UCL. This driver, though, is based much more on a read/write/poll
14  * mode of operation.
15  */
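
/*
 * Illustrative sketch only, not part of the driver: a minimal userland view
 * of the read/write model described above.  Opening /dev/tunN attaches tunN
 * on demand (see tunopen()/tun_clone_create() below); the interface still
 * has to be configured, e.g. with ifconfig, before packets flow.  The device
 * path and buffer size are examples, not requirements.
 */
#if 0	/* example only, never compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[2048];		/* large enough to hold one whole packet */
	ssize_t n;
	int fd;

	fd = open("/dev/tun0", O_RDWR);		/* needs sufficient privilege */
	if (fd == -1) {
		perror("open /dev/tun0");
		return 1;
	}
	/* Each read() returns at most one packet queued on tun0. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		printf("got a %zd byte packet\n", n);
	close(fd);
	return 0;
}
#endif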
16 
17 #include <sys/cdefs.h>
18 __KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.133 2016/10/02 14:17:07 christos Exp $");
19 
20 #ifdef _KERNEL_OPT
21 #include "opt_inet.h"
22 #endif
23 
24 #include <sys/param.h>
25 #include <sys/proc.h>
26 #include <sys/systm.h>
27 #include <sys/mbuf.h>
28 #include <sys/buf.h>
29 #include <sys/protosw.h>
30 #include <sys/socket.h>
31 #include <sys/ioctl.h>
32 #include <sys/errno.h>
33 #include <sys/syslog.h>
34 #include <sys/select.h>
35 #include <sys/poll.h>
36 #include <sys/file.h>
37 #include <sys/signalvar.h>
38 #include <sys/conf.h>
39 #include <sys/kauth.h>
40 #include <sys/mutex.h>
41 #include <sys/cpu.h>
42 #include <sys/device.h>
43 #include <sys/module.h>
44 
45 #include <net/if.h>
46 #include <net/if_types.h>
47 #include <net/netisr.h>
48 #include <net/route.h>
49 
50 
51 #ifdef INET
52 #include <netinet/in.h>
53 #include <netinet/in_systm.h>
54 #include <netinet/in_var.h>
55 #include <netinet/ip.h>
56 #include <netinet/if_inarp.h>
57 #endif
58 
59 
60 #include <sys/time.h>
61 #include <net/bpf.h>
62 
63 #include <net/if_tun.h>
64 
65 #include "ioconf.h"
66 
67 #define TUNDEBUG	if (tundebug) printf
68 int	tundebug = 0;
69 
70 extern int ifqmaxlen;
71 
72 static LIST_HEAD(, tun_softc) tun_softc_list;
73 static LIST_HEAD(, tun_softc) tunz_softc_list;
74 static kmutex_t tun_softc_lock;
75 
76 static int	tun_ioctl(struct ifnet *, u_long, void *);
77 static int	tun_output(struct ifnet *, struct mbuf *,
78 			const struct sockaddr *, const struct rtentry *rt);
79 static int	tun_clone_create(struct if_clone *, int);
80 static int	tun_clone_destroy(struct ifnet *);
81 
82 static struct if_clone tun_cloner =
83     IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);
84 
85 static void tunattach0(struct tun_softc *);
86 static void tun_enable(struct tun_softc *, const struct ifaddr *);
87 static void tun_i_softintr(void *);
88 static void tun_o_softintr(void *);
89 #ifdef ALTQ
90 static void tunstart(struct ifnet *);
91 #endif
92 static struct tun_softc *tun_find_unit(dev_t);
93 static struct tun_softc *tun_find_zunit(int);
94 
95 static dev_type_open(tunopen);
96 static dev_type_close(tunclose);
97 static dev_type_read(tunread);
98 static dev_type_write(tunwrite);
99 static dev_type_ioctl(tunioctl);
100 static dev_type_poll(tunpoll);
101 static dev_type_kqfilter(tunkqfilter);
102 
103 const struct cdevsw tun_cdevsw = {
104 	.d_open = tunopen,
105 	.d_close = tunclose,
106 	.d_read = tunread,
107 	.d_write = tunwrite,
108 	.d_ioctl = tunioctl,
109 	.d_stop = nostop,
110 	.d_tty = notty,
111 	.d_poll = tunpoll,
112 	.d_mmap = nommap,
113 	.d_kqfilter = tunkqfilter,
114 	.d_discard = nodiscard,
115 	.d_flag = D_OTHER
116 };
117 
118 #ifdef _MODULE
119 devmajor_t tun_bmajor = -1, tun_cmajor = -1;
120 #endif
121 
122 void
123 tunattach(int unused)
124 {
125 
126 	/*
127 	 * Nothing to do here; initialization is handled by the
128 	 * module initialization code in tuninit() below.
129 	 */
130 }
131 
132 static void
133 tuninit(void)
134 {
135 
136 	mutex_init(&tun_softc_lock, MUTEX_DEFAULT, IPL_NET);
137 	LIST_INIT(&tun_softc_list);
138 	LIST_INIT(&tunz_softc_list);
139 	if_clone_attach(&tun_cloner);
140 #ifdef _MODULE
141 	devsw_attach("tun", NULL, &tun_bmajor, &tun_cdevsw, &tun_cmajor);
142 #endif
143 }
144 
145 static int
146 tundetach(void)
147 {
148 	int error = 0;
149 
150 	if (!LIST_EMPTY(&tun_softc_list) || !LIST_EMPTY(&tunz_softc_list))
151 		error = EBUSY;
152 
153 #ifdef _MODULE
154 	if (error == 0)
155 		error = devsw_detach(NULL, &tun_cdevsw);
156 #endif
157 	if (error == 0) {
158 		if_clone_detach(&tun_cloner);
159 		mutex_destroy(&tun_softc_lock);
160 	}
161 
162 	return error;
163 }
164 
165 /*
166  * Find driver instance from dev_t.
167  * Returns with tp locked (if found).
168  */
169 static struct tun_softc *
170 tun_find_unit(dev_t dev)
171 {
172 	struct tun_softc *tp;
173 	int unit = minor(dev);
174 
175 	mutex_enter(&tun_softc_lock);
176 	LIST_FOREACH(tp, &tun_softc_list, tun_list)
177 		if (unit == tp->tun_unit)
178 			break;
179 	if (tp)
180 		mutex_enter(&tp->tun_lock);
181 	mutex_exit(&tun_softc_lock);
182 
183 	return (tp);
184 }
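
/*
 * Caller sketch, illustrative only: tun_find_unit() returns with
 * tp->tun_lock held, so the caller (as in tunioctl(), tunread() and
 * friends below) is the one that drops the lock.  "dev" stands for
 * whatever dev_t the caller was handed.
 */
#if 0	/* example only, never compiled */
	struct tun_softc *tp;

	tp = tun_find_unit(dev);
	if (tp == NULL)
		return ENXIO;		/* no such unit, or already destroyed */
	/* ... operate on tp with tun_lock held ... */
	mutex_exit(&tp->tun_lock);	/* caller releases, not tun_find_unit() */
#endif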
185 
186 /*
187  * Find zombie driver instance by unit number.
188  * Remove tp from list and return it unlocked (if found).
189  */
190 static struct tun_softc *
191 tun_find_zunit(int unit)
192 {
193 	struct tun_softc *tp;
194 
195 	mutex_enter(&tun_softc_lock);
196 	LIST_FOREACH(tp, &tunz_softc_list, tun_list)
197 		if (unit == tp->tun_unit)
198 			break;
199 	if (tp)
200 		LIST_REMOVE(tp, tun_list);
201 	mutex_exit(&tun_softc_lock);
202 #ifdef DIAGNOSTIC
203 	if (tp != NULL && (tp->tun_flags & (TUN_INITED|TUN_OPEN)) != TUN_OPEN)
204 		printf("tun%d: inconsistent flags: %x\n", unit, tp->tun_flags);
205 #endif
206 
207 	return (tp);
208 }
209 
210 static int
211 tun_clone_create(struct if_clone *ifc, int unit)
212 {
213 	struct tun_softc *tp;
214 
215 	if ((tp = tun_find_zunit(unit)) == NULL) {
216 		/* Allocate a new instance */
217 		tp = malloc(sizeof(*tp), M_DEVBUF, M_WAITOK|M_ZERO);
218 
219 		tp->tun_unit = unit;
220 		mutex_init(&tp->tun_lock, MUTEX_DEFAULT, IPL_NET);
221 		selinit(&tp->tun_rsel);
222 		selinit(&tp->tun_wsel);
223 	} else {
224 		/* Revive tunnel instance; clear ifp part */
225 		(void)memset(&tp->tun_if, 0, sizeof(struct ifnet));
226 	}
227 
228 	if_initname(&tp->tun_if, ifc->ifc_name, unit);
229 	tunattach0(tp);
230 	tp->tun_flags |= TUN_INITED;
231 	tp->tun_osih = softint_establish(SOFTINT_CLOCK, tun_o_softintr, tp);
232 	tp->tun_isih = softint_establish(SOFTINT_CLOCK, tun_i_softintr, tp);
233 
234 	mutex_enter(&tun_softc_lock);
235 	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
236 	mutex_exit(&tun_softc_lock);
237 
238 	return (0);
239 }
240 
241 static void
242 tunattach0(struct tun_softc *tp)
243 {
244 	struct ifnet *ifp;
245 
246 	ifp = &tp->tun_if;
247 	ifp->if_softc = tp;
248 	ifp->if_mtu = TUNMTU;
249 	ifp->if_ioctl = tun_ioctl;
250 	ifp->if_output = tun_output;
251 #ifdef ALTQ
252 	ifp->if_start = tunstart;
253 #endif
254 	ifp->if_flags = IFF_POINTOPOINT;
255 	ifp->if_type = IFT_TUNNEL;
256 	ifp->if_snd.ifq_maxlen = ifqmaxlen;
257 	ifp->if_collisions = 0;
258 	ifp->if_ierrors = 0;
259 	ifp->if_oerrors = 0;
260 	ifp->if_ipackets = 0;
261 	ifp->if_opackets = 0;
262 	ifp->if_ibytes   = 0;
263 	ifp->if_obytes   = 0;
264 	ifp->if_dlt = DLT_NULL;
265 	IFQ_SET_READY(&ifp->if_snd);
266 	if_attach(ifp);
267 	if_alloc_sadl(ifp);
268 	bpf_attach(ifp, DLT_NULL, sizeof(uint32_t));
269 }
270 
271 static int
272 tun_clone_destroy(struct ifnet *ifp)
273 {
274 	struct tun_softc *tp = (void *)ifp;
275 	int zombie = 0;
276 
277 	IF_PURGE(&ifp->if_snd);
278 	ifp->if_flags &= ~IFF_RUNNING;
279 
280 	mutex_enter(&tun_softc_lock);
281 	mutex_enter(&tp->tun_lock);
282 	LIST_REMOVE(tp, tun_list);
283 	if (tp->tun_flags & TUN_OPEN) {
284 		/* Hang on to storage until last close */
285 		zombie = 1;
286 		tp->tun_flags &= ~TUN_INITED;
287 		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
288 	}
289 	mutex_exit(&tun_softc_lock);
290 
291 	if (tp->tun_flags & TUN_RWAIT) {
292 		tp->tun_flags &= ~TUN_RWAIT;
293 		wakeup((void *)tp);
294 	}
295 	selnotify(&tp->tun_rsel, 0, 0);
296 
297 	mutex_exit(&tp->tun_lock);
298 
299 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
300 		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);
301 
302 	bpf_detach(ifp);
303 	if_detach(ifp);
304 
305 	if (!zombie) {
306 		seldestroy(&tp->tun_rsel);
307 		seldestroy(&tp->tun_wsel);
308 		softint_disestablish(tp->tun_osih);
309 		softint_disestablish(tp->tun_isih);
310 		mutex_destroy(&tp->tun_lock);
311 		free(tp, M_DEVBUF);
312 	}
313 
314 	return (0);
315 }
316 
317 /*
318  * tunnel open - must be superuser & the device must be
319  * configured in
320  */
321 static int
322 tunopen(dev_t dev, int flag, int mode, struct lwp *l)
323 {
324 	struct ifnet	*ifp;
325 	struct tun_softc *tp;
326 	int	error;
327 
328 	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE_TUN,
329 	    KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD, NULL, NULL, NULL);
330 	if (error)
331 		return (error);
332 
333 	tp = tun_find_unit(dev);
334 
335 	if (tp == NULL) {
336 		(void)tun_clone_create(&tun_cloner, minor(dev));
337 		tp = tun_find_unit(dev);
338 		if (tp == NULL) {
339 			error = ENXIO;
340 			goto out_nolock;
341 		}
342 	}
343 
344 	if (tp->tun_flags & TUN_OPEN) {
345 		error = EBUSY;
346 		goto out;
347 	}
348 
349 	ifp = &tp->tun_if;
350 	tp->tun_flags |= TUN_OPEN;
351 	TUNDEBUG("%s: open\n", ifp->if_xname);
352 out:
353 	mutex_exit(&tp->tun_lock);
354 out_nolock:
355 	return (error);
356 }
357 
358 /*
359  * tunclose - close the device - mark i/f down & delete
360  * routing info
361  */
362 int
363 tunclose(dev_t dev, int flag, int mode,
364     struct lwp *l)
365 {
366 	struct tun_softc *tp;
367 	struct ifnet	*ifp;
368 
369 	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
370 		/* interface was "destroyed" before the close */
371 		seldestroy(&tp->tun_rsel);
372 		seldestroy(&tp->tun_wsel);
373 		softint_disestablish(tp->tun_osih);
374 		softint_disestablish(tp->tun_isih);
375 		mutex_destroy(&tp->tun_lock);
376 		free(tp, M_DEVBUF);
377 		goto out_nolock;
378 	}
379 
380 	if ((tp = tun_find_unit(dev)) == NULL)
381 		goto out_nolock;
382 
383 	ifp = &tp->tun_if;
384 
385 	tp->tun_flags &= ~TUN_OPEN;
386 
387 	tp->tun_pgid = 0;
388 	selnotify(&tp->tun_rsel, 0, 0);
389 
390 	TUNDEBUG ("%s: closed\n", ifp->if_xname);
391 	mutex_exit(&tp->tun_lock);
392 
393 	/*
394 	 * junk all pending output
395 	 */
396 	IFQ_PURGE(&ifp->if_snd);
397 
398 	if (ifp->if_flags & IFF_UP) {
399 		if_down(ifp);
400 		if (ifp->if_flags & IFF_RUNNING) {
401 			/* find internet addresses and delete routes */
402 			struct ifaddr *ifa;
403 			IFADDR_READER_FOREACH(ifa, ifp) {
404 #if defined(INET) || defined(INET6)
405 				if (ifa->ifa_addr->sa_family == AF_INET ||
406 				    ifa->ifa_addr->sa_family == AF_INET6) {
407 					rtinit(ifa, (int)RTM_DELETE,
408 					       tp->tun_flags & TUN_DSTADDR
409 							? RTF_HOST
410 							: 0);
411 				}
412 #endif
413 			}
414 		}
415 	}
416 out_nolock:
417 	return (0);
418 }
419 
420 /*
421  * Call at splnet().
422  */
423 static void
424 tun_enable(struct tun_softc *tp, const struct ifaddr *ifa)
425 {
426 	struct ifnet	*ifp = &tp->tun_if;
427 
428 	TUNDEBUG("%s: %s\n", __func__, ifp->if_xname);
429 
430 	mutex_enter(&tp->tun_lock);
431 	ifp->if_flags |= IFF_UP | IFF_RUNNING;
432 
433 	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);
434 
435 	switch (ifa->ifa_addr->sa_family) {
436 #ifdef INET
437 	case AF_INET: {
438 		struct sockaddr_in *sin;
439 
440 		sin = satosin(ifa->ifa_addr);
441 		if (sin && sin->sin_addr.s_addr)
442 			tp->tun_flags |= TUN_IASET;
443 
444 		if (ifp->if_flags & IFF_POINTOPOINT) {
445 			sin = satosin(ifa->ifa_dstaddr);
446 			if (sin && sin->sin_addr.s_addr)
447 				tp->tun_flags |= TUN_DSTADDR;
448 		}
449 		break;
450 	    }
451 #endif
452 #ifdef INET6
453 	case AF_INET6: {
454 		struct sockaddr_in6 *sin;
455 
456 		sin = satosin6(ifa->ifa_addr);
457 		if (!IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
458 			tp->tun_flags |= TUN_IASET;
459 
460 		if (ifp->if_flags & IFF_POINTOPOINT) {
461 			sin = satosin6(ifa->ifa_dstaddr);
462 			if (sin && !IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
463 				tp->tun_flags |= TUN_DSTADDR;
464 		} else
465 			tp->tun_flags &= ~TUN_DSTADDR;
466 		break;
467 	    }
468 #endif /* INET6 */
469 	default:
470 		break;
471 	}
472 	mutex_exit(&tp->tun_lock);
473 }
474 
475 /*
476  * Process an ioctl request.
477  */
478 static int
479 tun_ioctl(struct ifnet *ifp, u_long cmd, void *data)
480 {
481 	int		error = 0, s;
482 	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);
483 	struct ifreq *ifr = (struct ifreq *)data;
484 	struct ifaddr *ifa = (struct ifaddr *)data;
485 
486 	s = splnet();
487 
488 	switch (cmd) {
489 	case SIOCINITIFADDR:
490 		tun_enable(tp, ifa);
491 		ifa->ifa_rtrequest = p2p_rtrequest;
492 		TUNDEBUG("%s: address set\n", ifp->if_xname);
493 		break;
494 	case SIOCSIFBRDADDR:
495 		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
496 		break;
497 	case SIOCSIFMTU:
498 		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
499 			error = EINVAL;
500 			break;
501 		}
502 		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
503 		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
504 			error = 0;
505 		break;
506 	case SIOCADDMULTI:
507 	case SIOCDELMULTI:
508 		if (ifr == NULL) {
509 			error = EAFNOSUPPORT;	/* XXX */
510 			break;
511 		}
512 		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
513 #ifdef INET
514 		case AF_INET:
515 			break;
516 #endif
517 #ifdef INET6
518 		case AF_INET6:
519 			break;
520 #endif
521 		default:
522 			error = EAFNOSUPPORT;
523 			break;
524 		}
525 		break;
526 	default:
527 		error = ifioctl_common(ifp, cmd, data);
528 	}
529 
530 	splx(s);
531 	return (error);
532 }
533 
534 /*
535  * tun_output - queue packets from higher level ready to put out.
536  */
537 static int
538 tun_output(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst,
539     const struct rtentry *rt)
540 {
541 	struct tun_softc *tp = ifp->if_softc;
542 	int		s;
543 	int		error;
544 #if defined(INET) || defined(INET6)
545 	int		mlen;
546 	uint32_t	*af;
547 #endif
548 
549 	s = splnet();
550 	mutex_enter(&tp->tun_lock);
551 	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);
552 
553 	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
554 		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
555 			  tp->tun_flags);
556 		error = EHOSTDOWN;
557 		goto out;
558 	}
559 
560 	/*
561 	 * if the queueing discipline needs packet classification,
562 	 * do it before prepending link headers.
563 	 */
564 	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family);
565 
566 	bpf_mtap_af(ifp, dst->sa_family, m0);
567 
568 	switch(dst->sa_family) {
569 #ifdef INET6
570 	case AF_INET6:
571 #endif
572 #ifdef INET
573 	case AF_INET:
574 #endif
575 #if defined(INET) || defined(INET6)
576 		if (tp->tun_flags & TUN_PREPADDR) {
577 			/* Simple link-layer header */
578 			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
579 			if (m0 == NULL) {
580 				IF_DROP(&ifp->if_snd);
581 				error = ENOBUFS;
582 				goto out;
583 			}
584 			bcopy(dst, mtod(m0, char *), dst->sa_len);
585 		}
586 
587 		if (tp->tun_flags & TUN_IFHEAD) {
588 			/* Prepend the address family */
589 			M_PREPEND(m0, sizeof(*af), M_DONTWAIT);
590 			if (m0 == NULL) {
591 				IF_DROP(&ifp->if_snd);
592 				error = ENOBUFS;
593 				goto out;
594 			}
595 			af = mtod(m0, uint32_t *);
596 			*af = htonl(dst->sa_family);
597 		} else {
598 #ifdef INET
599 			if (dst->sa_family != AF_INET)
600 #endif
601 			{
602 				error = EAFNOSUPPORT;
603 				goto out;
604 			}
605 		}
606 		/* FALLTHROUGH */
607 	case AF_UNSPEC:
608 		IFQ_ENQUEUE(&ifp->if_snd, m0, error);
609 		if (error) {
610 			ifp->if_collisions++;
611 			error = EAFNOSUPPORT;
612 			m0 = NULL;
613 			goto out;
614 		}
615 		mlen = m0->m_pkthdr.len;
616 		ifp->if_opackets++;
617 		ifp->if_obytes += mlen;
618 		break;
619 #endif
620 	default:
621 		error = EAFNOSUPPORT;
622 		goto out;
623 	}
624 
625 	if (tp->tun_flags & TUN_RWAIT) {
626 		tp->tun_flags &= ~TUN_RWAIT;
627 		wakeup((void *)tp);
628 	}
629 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
630 		softint_schedule(tp->tun_isih);
631 
632 	selnotify(&tp->tun_rsel, 0, 0);
633 out:
634 	mutex_exit(&tp->tun_lock);
635 	splx(s);
636 
637 	if (error && m0) {
638 		m_freem(m0);
639 	}
640 	return error;
641 }
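
/*
 * Illustrative sketch only: how a userland reader might strip the per-packet
 * prefix that tun_output() prepends before the packet reaches read().  Which
 * branch applies depends on whether the process selected TUNSLMODE
 * (TUN_PREPADDR) or TUNSIFHEAD (TUN_IFHEAD) beforehand; the helper name and
 * its arguments are made up for the example.
 */
#if 0	/* example only, never compiled into the kernel */
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static const char *
example_tun_payload(const char *buf, int lmode, int ifhead, uint32_t *afp)
{
	if (lmode) {
		/* TUNSLMODE: the destination sockaddr comes first. */
		const struct sockaddr *sa = (const struct sockaddr *)(const void *)buf;
		*afp = sa->sa_family;
		return buf + sa->sa_len;
	}
	if (ifhead) {
		/* TUNSIFHEAD: a 4-byte address family in network byte order. */
		uint32_t af;
		memcpy(&af, buf, sizeof(af));
		*afp = ntohl(af);
		return buf + sizeof(af);
	}
	/* Default framing: a bare packet, which tun_output() only allows for IPv4. */
	*afp = AF_INET;
	return buf;
}
#endif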
642 
643 static void
644 tun_i_softintr(void *cookie)
645 {
646 	struct tun_softc *tp = cookie;
647 
648 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
649 		fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM,
650 		    NULL);
651 }
652 
653 static void
654 tun_o_softintr(void *cookie)
655 {
656 	struct tun_softc *tp = cookie;
657 
658 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
659 		fownsignal(tp->tun_pgid, SIGIO, POLL_OUT, POLLOUT|POLLWRNORM,
660 		    NULL);
661 }
662 
663 /*
664  * the cdevsw interface is now pretty minimal.
665  */
666 int
667 tunioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
668 {
669 	struct tun_softc *tp;
670 	int s, error = 0;
671 
672 	s = splnet();
673 	tp = tun_find_unit(dev);
674 
675 	/* interface was "destroyed" already */
676 	if (tp == NULL) {
677 		error = ENXIO;
678 		goto out_nolock;
679 	}
680 
681 	switch (cmd) {
682 	case TUNSDEBUG:
683 		tundebug = *(int *)data;
684 		break;
685 
686 	case TUNGDEBUG:
687 		*(int *)data = tundebug;
688 		break;
689 
690 	case TUNSIFMODE:
691 		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
692 		case IFF_POINTOPOINT:
693 		case IFF_BROADCAST:
694 			if (tp->tun_if.if_flags & IFF_UP) {
695 				error = EBUSY;
696 				goto out;
697 			}
698 			tp->tun_if.if_flags &=
699 				~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
700 			tp->tun_if.if_flags |= *(int *)data;
701 			break;
702 		default:
703 			error = EINVAL;
704 			goto out;
705 		}
706 		break;
707 
708 	case TUNSLMODE:
709 		if (*(int *)data) {
710 			tp->tun_flags |= TUN_PREPADDR;
711 			tp->tun_flags &= ~TUN_IFHEAD;
712 		} else
713 			tp->tun_flags &= ~TUN_PREPADDR;
714 		break;
715 
716 	case TUNSIFHEAD:
717 		if (*(int *)data) {
718 			tp->tun_flags |= TUN_IFHEAD;
719 			tp->tun_flags &= ~TUN_PREPADDR;
720 		} else
721 			tp->tun_flags &= ~TUN_IFHEAD;
722 		break;
723 
724 	case TUNGIFHEAD:
725 		*(int *)data = (tp->tun_flags & TUN_IFHEAD);
726 		break;
727 
728 	case FIONBIO:
729 		if (*(int *)data)
730 			tp->tun_flags |= TUN_NBIO;
731 		else
732 			tp->tun_flags &= ~TUN_NBIO;
733 		break;
734 
735 	case FIOASYNC:
736 		if (*(int *)data)
737 			tp->tun_flags |= TUN_ASYNC;
738 		else
739 			tp->tun_flags &= ~TUN_ASYNC;
740 		break;
741 
742 	case FIONREAD:
743 		if (tp->tun_if.if_snd.ifq_head)
744 			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
745 		else
746 			*(int *)data = 0;
747 		break;
748 
749 	case TIOCSPGRP:
750 	case FIOSETOWN:
751 		error = fsetown(&tp->tun_pgid, cmd, data);
752 		break;
753 
754 	case TIOCGPGRP:
755 	case FIOGETOWN:
756 		error = fgetown(tp->tun_pgid, cmd, data);
757 		break;
758 
759 	default:
760 		error = ENOTTY;
761 	}
762 
763 out:
764 	mutex_exit(&tp->tun_lock);
765 out_nolock:
766 	splx(s);
767 	return (error);
768 }
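
/*
 * Userland sketch, illustrative only, of the character-device ioctls handled
 * above.  "fd" is assumed to be an open /dev/tunN descriptor and the function
 * name is made up for the example.
 */
#if 0	/* example only, never compiled into the kernel */
#include <sys/ioctl.h>
#include <net/if_tun.h>
#include <err.h>

static void
example_tun_setup(int fd)
{
	int on = 1, nread = 0;

	/* Prefix every packet with a 4-byte address family (TUN_IFHEAD). */
	if (ioctl(fd, TUNSIFHEAD, &on) == -1)
		err(1, "TUNSIFHEAD");
	/* Make read()/write() non-blocking (TUN_NBIO). */
	if (ioctl(fd, FIONBIO, &on) == -1)
		err(1, "FIONBIO");
	/* Length of the next queued packet, or 0 if the queue is empty. */
	if (ioctl(fd, FIONREAD, &nread) == -1)
		err(1, "FIONREAD");
}
#endif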
769 
770 /*
771  * The cdevsw read interface - reads a packet at a time, or at
772  * least as much of a packet as can be read.
773  */
774 int
775 tunread(dev_t dev, struct uio *uio, int ioflag)
776 {
777 	struct tun_softc *tp;
778 	struct ifnet	*ifp;
779 	struct mbuf	*m, *m0;
780 	int		error = 0, len, s, index;
781 
782 	s = splnet();
783 	tp = tun_find_unit(dev);
784 
785 	/* interface was "destroyed" already */
786 	if (tp == NULL) {
787 		error = ENXIO;
788 		goto out_nolock;
789 	}
790 
791 	index = tp->tun_if.if_index;
792 	ifp = &tp->tun_if;
793 
794 	TUNDEBUG ("%s: read\n", ifp->if_xname);
795 	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
796 		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
797 		error = EHOSTDOWN;
798 		goto out;
799 	}
800 
801 	tp->tun_flags &= ~TUN_RWAIT;
802 
803 	do {
804 		IFQ_DEQUEUE(&ifp->if_snd, m0);
805 		if (m0 == 0) {
806 			if (tp->tun_flags & TUN_NBIO) {
807 				error = EWOULDBLOCK;
808 				goto out;
809 			}
810 			tp->tun_flags |= TUN_RWAIT;
811 			if (mtsleep((void *)tp, PZERO|PCATCH|PNORELOCK,
812 					"tunread", 0, &tp->tun_lock) != 0) {
813 				error = EINTR;
814 				goto out_nolock;
815 			} else {
816 				/*
817 				 * Maybe the interface was destroyed while
818 				 * we were sleeping, so let's ensure that
819 				 * we're looking at the same (valid) tun
820 				 * interface before looping.
821 				 */
822 				tp = tun_find_unit(dev);
823 				if (tp == NULL) {
824 					error = ENXIO;
825 					goto out_nolock;
826 				}
827 				if (tp->tun_if.if_index != index) {
828 					error = ENXIO;
829 					goto out;
830 				}
831 			}
832 		}
833 	} while (m0 == 0);
834 
835 	mutex_exit(&tp->tun_lock);
836 	splx(s);
837 
838 	/* Copy the mbuf chain */
839 	while (m0 && uio->uio_resid > 0 && error == 0) {
840 		len = min(uio->uio_resid, m0->m_len);
841 		if (len != 0)
842 			error = uiomove(mtod(m0, void *), len, uio);
843 		m0 = m = m_free(m0);
844 	}
845 
846 	if (m0) {
847 		TUNDEBUG("Dropping mbuf\n");
848 		m_freem(m0);
849 	}
850 	if (error)
851 		ifp->if_ierrors++;
852 
853 	return (error);
854 
855 out:
856 	mutex_exit(&tp->tun_lock);
857 out_nolock:
858 	splx(s);
859 	return (error);
860 }
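
/*
 * Illustrative sketch only of the read semantics above: each read() returns
 * at most one packet, any part of the packet that does not fit in the buffer
 * is silently dropped, and with FIONBIO set an empty queue yields
 * EWOULDBLOCK.  "fd" is an assumed open /dev/tunN descriptor and the
 * function name is made up.
 */
#if 0	/* example only, never compiled into the kernel */
#include <errno.h>
#include <unistd.h>

static ssize_t
example_tun_read_one(int fd, char *buf, size_t buflen)
{
	/* buflen should cover the interface MTU or the packet tail is lost. */
	ssize_t n = read(fd, buf, buflen);

	if (n == -1 && errno == EWOULDBLOCK)
		return 0;	/* non-blocking and nothing queued */
	return n;
}
#endif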
861 
862 /*
863  * the cdevsw write interface - an atomic write is a packet - or else!
864  */
865 int
866 tunwrite(dev_t dev, struct uio *uio, int ioflag)
867 {
868 	struct tun_softc *tp;
869 	struct ifnet	*ifp;
870 	struct mbuf	*top, **mp, *m;
871 	pktqueue_t	*pktq;
872 	struct sockaddr	dst;
873 	int		error = 0, s, tlen, mlen;
874 	uint32_t	family;
875 
876 	s = splnet();
877 	tp = tun_find_unit(dev);
878 
879 	/* interface was "destroyed" already */
880 	if (tp == NULL) {
881 		error = ENXIO;
882 		goto out_nolock;
883 	}
884 
885 	/* Unlock until we've got the data */
886 	mutex_exit(&tp->tun_lock);
887 	splx(s);
888 
889 	ifp = &tp->tun_if;
890 
891 	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);
892 
893 	if (tp->tun_flags & TUN_PREPADDR) {
894 		if (uio->uio_resid < sizeof(dst)) {
895 			error = EIO;
896 			goto out0;
897 		}
898 		error = uiomove((void *)&dst, sizeof(dst), uio);
		if (error)
			goto out0;
899 		if (dst.sa_len > sizeof(dst)) {
900 			/* Duh.. */
901 			char discard;
902 			int n = dst.sa_len - sizeof(dst);
903 			while (n--)
904 				if ((error = uiomove(&discard, 1, uio)) != 0) {
905 					goto out0;
906 				}
907 		}
908 	} else if (tp->tun_flags & TUN_IFHEAD) {
909 		if (uio->uio_resid < sizeof(family)){
910 			error = EIO;
911 			goto out0;
912 		}
913 		error = uiomove((void *)&family, sizeof(family), uio);
		if (error)
			goto out0;
914 		dst.sa_family = ntohl(family);
915 	} else {
916 #ifdef INET
917 		dst.sa_family = AF_INET;
918 #endif
919 	}
920 
921 	if (uio->uio_resid > TUNMTU) {
922 		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
923 		    (unsigned long)uio->uio_resid);
924 		error = EIO;
925 		goto out0;
926 	}
927 
928 	switch (dst.sa_family) {
929 #ifdef INET
930 	case AF_INET:
931 		pktq = ip_pktq;
932 		break;
933 #endif
934 #ifdef INET6
935 	case AF_INET6:
936 		pktq = ip6_pktq;
937 		break;
938 #endif
939 	default:
940 		error = EAFNOSUPPORT;
941 		goto out0;
942 	}
943 
944 	tlen = uio->uio_resid;
945 
946 	/* get a header mbuf */
947 	MGETHDR(m, M_DONTWAIT, MT_DATA);
948 	if (m == NULL) {
949 		error = ENOBUFS;
950 		goto out0;
951 	}
952 	mlen = MHLEN;
953 
954 	top = NULL;
955 	mp = &top;
956 	while (error == 0 && uio->uio_resid > 0) {
957 		m->m_len = min(mlen, uio->uio_resid);
958 		error = uiomove(mtod(m, void *), m->m_len, uio);
959 		*mp = m;
960 		mp = &m->m_next;
961 		if (error == 0 && uio->uio_resid > 0) {
962 			MGET(m, M_DONTWAIT, MT_DATA);
963 			if (m == NULL) {
964 				error = ENOBUFS;
965 				break;
966 			}
967 			mlen = MLEN;
968 		}
969 	}
970 	if (error) {
971 		if (top != NULL)
972 			m_freem (top);
973 		ifp->if_ierrors++;
974 		goto out0;
975 	}
976 
977 	top->m_pkthdr.len = tlen;
978 	m_set_rcvif(top, ifp);
979 
980 	bpf_mtap_af(ifp, dst.sa_family, top);
981 
982 	s = splnet();
983 	mutex_enter(&tp->tun_lock);
984 	if ((tp->tun_flags & TUN_INITED) == 0) {
985 		/* Interface was destroyed */
986 		error = ENXIO;
987 		goto out;
988 	}
989 	if (__predict_false(!pktq_enqueue(pktq, top, 0))) {
990 		ifp->if_collisions++;
991 		mutex_exit(&tp->tun_lock);
992 		error = ENOBUFS;
993 		m_freem(top);
994 		goto out_nolock;
995 	}
996 	ifp->if_ipackets++;
997 	ifp->if_ibytes += tlen;
998 out:
999 	mutex_exit(&tp->tun_lock);
1000 out_nolock:
1001 	splx(s);
1002 out0:
1003 	return (error);
1004 }
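
/*
 * Userland sketch, illustrative only, of injecting one packet through the
 * write path above with TUNSIFHEAD enabled: the 4-byte address family (in
 * network byte order) and the packet must arrive in a single write, so
 * writev() is convenient.  "fd", "pkt" and "pktlen" are assumed inputs and
 * the function name is made up.
 */
#if 0	/* example only, never compiled into the kernel */
#include <sys/uio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdint.h>

static ssize_t
example_tun_write_v4(int fd, void *pkt, size_t pktlen)
{
	uint32_t af = htonl(AF_INET);
	struct iovec iov[2] = {
		{ .iov_base = &af, .iov_len = sizeof(af) },
		{ .iov_base = pkt, .iov_len = pktlen },
	};

	/* One writev() is one packet; tunwrite() strips the family word. */
	return writev(fd, iov, 2);
}
#endif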
1005 
1006 #ifdef ALTQ
1007 /*
1008  * Start packet transmission on the interface.
1009  * When the interface queue is rate-limited by ALTQ or TBR,
1010  * if_start is needed to drain packets from the queue in order
1011  * to notify readers when outgoing packets become ready.
1012  *
1013  * Should be called at splnet.
1014  */
1015 static void
1016 tunstart(struct ifnet *ifp)
1017 {
1018 	struct tun_softc *tp = ifp->if_softc;
1019 
1020 	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
1021 		return;
1022 
1023 	mutex_enter(&tp->tun_lock);
1024 	if (!IF_IS_EMPTY(&ifp->if_snd)) {
1025 		if (tp->tun_flags & TUN_RWAIT) {
1026 			tp->tun_flags &= ~TUN_RWAIT;
1027 			wakeup((void *)tp);
1028 		}
1029 		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
1030 			softint_schedule(tp->tun_osih);
1031 
1032 		selnotify(&tp->tun_rsel, 0, 0);
1033 	}
1034 	mutex_exit(&tp->tun_lock);
1035 }
1036 #endif /* ALTQ */
1037 /*
1038  * tunpoll - the poll interface; this is really only useful for reads.
1039  * The write detect always returns true: write never blocks anyway,
1040  * it either accepts the packet or drops it.
1041  */
1042 int
1043 tunpoll(dev_t dev, int events, struct lwp *l)
1044 {
1045 	struct tun_softc *tp;
1046 	struct ifnet	*ifp;
1047 	int		s, revents = 0;
1048 
1049 	s = splnet();
1050 	tp = tun_find_unit(dev);
1051 
1052 	/* interface was "destroyed" already */
1053 	if (tp == NULL)
1054 		goto out_nolock;
1055 
1056 	ifp = &tp->tun_if;
1057 
1058 	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);
1059 
1060 	if (events & (POLLIN | POLLRDNORM)) {
1061 		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
1062 			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
1063 			    ifp->if_snd.ifq_len);
1064 			revents |= events & (POLLIN | POLLRDNORM);
1065 		} else {
1066 			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
1067 			selrecord(l, &tp->tun_rsel);
1068 		}
1069 	}
1070 
1071 	if (events & (POLLOUT | POLLWRNORM))
1072 		revents |= events & (POLLOUT | POLLWRNORM);
1073 
1074 	mutex_exit(&tp->tun_lock);
1075 out_nolock:
1076 	splx(s);
1077 	return (revents);
1078 }
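
/*
 * Userland sketch, illustrative only, of polling the descriptor as described
 * above: POLLOUT is always reported ready, so only POLLIN is worth waiting
 * for.  "fd" is an assumed open /dev/tunN descriptor and the function name
 * is made up.
 */
#if 0	/* example only, never compiled into the kernel */
#include <poll.h>

static int
example_tun_wait_readable(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* Returns 1 once a packet is queued and read() will not block. */
	return poll(&pfd, 1, -1);	/* -1: wait indefinitely */
}
#endif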
1079 
1080 static void
1081 filt_tunrdetach(struct knote *kn)
1082 {
1083 	struct tun_softc *tp = kn->kn_hook;
1084 	int s;
1085 
1086 	s = splnet();
1087 	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
1088 	splx(s);
1089 }
1090 
1091 static int
1092 filt_tunread(struct knote *kn, long hint)
1093 {
1094 	struct tun_softc *tp = kn->kn_hook;
1095 	struct ifnet *ifp = &tp->tun_if;
1096 	struct mbuf *m;
1097 	int s;
1098 
1099 	s = splnet();
1100 	IF_POLL(&ifp->if_snd, m);
1101 	if (m == NULL) {
1102 		splx(s);
1103 		return (0);
1104 	}
1105 
1106 	for (kn->kn_data = 0; m != NULL; m = m->m_next)
1107 		kn->kn_data += m->m_len;
1108 
1109 	splx(s);
1110 	return (1);
1111 }
1112 
1113 static const struct filterops tunread_filtops =
1114 	{ 1, NULL, filt_tunrdetach, filt_tunread };
1115 
1116 static const struct filterops tun_seltrue_filtops =
1117 	{ 1, NULL, filt_tunrdetach, filt_seltrue };
1118 
1119 int
1120 tunkqfilter(dev_t dev, struct knote *kn)
1121 {
1122 	struct tun_softc *tp;
1123 	struct klist *klist;
1124 	int rv = 0, s;
1125 
1126 	s = splnet();
1127 	tp = tun_find_unit(dev);
1128 	if (tp == NULL)
1129 		goto out_nolock;
1130 
1131 	switch (kn->kn_filter) {
1132 	case EVFILT_READ:
1133 		klist = &tp->tun_rsel.sel_klist;
1134 		kn->kn_fop = &tunread_filtops;
1135 		break;
1136 
1137 	case EVFILT_WRITE:
1138 		klist = &tp->tun_rsel.sel_klist;
1139 		kn->kn_fop = &tun_seltrue_filtops;
1140 		break;
1141 
1142 	default:
1143 		rv = EINVAL;
1144 		goto out;
1145 	}
1146 
1147 	kn->kn_hook = tp;
1148 
1149 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
1150 
1151 out:
1152 	mutex_exit(&tp->tun_lock);
1153 out_nolock:
1154 	splx(s);
1155 	return (rv);
1156 }
1157 
1158 /*
1159  * Module infrastructure
1160  */
1161 #include "if_module.h"
1162 
1163 IF_MODULE(MODULE_CLASS_DRIVER, tun, "")
1164