1 /*	$NetBSD: if_tun.c,v 1.160 2020/08/29 07:14:50 maxv Exp $	*/
2 
3 /*
4  * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
5  * Nottingham University 1987.
6  *
7  * This source may be freely distributed, however I would be interested
8  * in any changes that are made.
9  *
10  * This driver takes packets off the IP i/f and hands them up to a
11  * user process to have its wicked way with. This driver has its
12  * roots in a similar driver written by Phil Cockcroft (formerly) at
13  * UCL. This driver is based much more on read/write/poll mode of
14  * operation though.
15  */
16 
17 /*
18  * tun - tunnel software network interface.
19  */
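
/*
 * Userspace view (illustrative sketch only, not part of this file):
 * a control process opens the character device /dev/tunN, which is
 * paired with the tunN network interface.  Once the interface is up
 * and has an address, each read(2) returns one outbound packet and
 * each write(2) injects one inbound packet.  Assuming tun0 already
 * exists and is configured, and <fcntl.h> / <net/if_tun.h> for
 * O_RDWR and TUNMTU:
 *
 *	int fd = open("/dev/tun0", O_RDWR);
 *	char pkt[TUNMTU];
 *	ssize_t n;
 *
 *	n = read(fd, pkt, sizeof(pkt));		- one packet off if_snd
 *	...
 *	(void)write(fd, pkt, n);		- one packet to the stack
 */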
20 
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.160 2020/08/29 07:14:50 maxv Exp $");
23 
24 #ifdef _KERNEL_OPT
25 #include "opt_inet.h"
26 #endif
27 
28 #include <sys/param.h>
29 
30 #include <sys/buf.h>
31 #include <sys/conf.h>
32 #include <sys/cpu.h>
33 #include <sys/device.h>
34 #include <sys/file.h>
35 #include <sys/ioctl.h>
36 #include <sys/kauth.h>
37 #include <sys/kmem.h>
38 #include <sys/lwp.h>
39 #include <sys/mbuf.h>
40 #include <sys/module.h>
41 #include <sys/mutex.h>
42 #include <sys/poll.h>
43 #include <sys/select.h>
44 #include <sys/signalvar.h>
45 #include <sys/socket.h>
46 
47 #include <net/bpf.h>
48 #include <net/if.h>
49 #include <net/if_types.h>
50 #include <net/route.h>
51 
52 #ifdef INET
53 #include <netinet/in.h>
54 #include <netinet/in_systm.h>
55 #include <netinet/in_var.h>
56 #include <netinet/ip.h>
57 #include <netinet/if_inarp.h>
58 #endif
59 
60 #include <net/if_tun.h>
61 
62 #include "ioconf.h"
63 
64 #define TUNDEBUG	if (tundebug) printf
65 int	tundebug = 0;
66 
67 extern int ifqmaxlen;
68 
69 static LIST_HEAD(, tun_softc) tun_softc_list;
70 static LIST_HEAD(, tun_softc) tunz_softc_list;
71 static kmutex_t tun_softc_lock;
72 
73 static int	tun_ioctl(struct ifnet *, u_long, void *);
74 static int	tun_output(struct ifnet *, struct mbuf *,
75 			const struct sockaddr *, const struct rtentry *rt);
76 static int	tun_clone_create(struct if_clone *, int);
77 static int	tun_clone_destroy(struct ifnet *);
78 
79 static struct if_clone tun_cloner =
80     IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);
81 
82 static void tunattach0(struct tun_softc *);
83 static void tun_enable(struct tun_softc *, const struct ifaddr *);
84 static void tun_i_softintr(void *);
85 static void tun_o_softintr(void *);
86 #ifdef ALTQ
87 static void tunstart(struct ifnet *);
88 #endif
89 static struct tun_softc *tun_find_unit(dev_t);
90 static struct tun_softc *tun_find_zunit(int);
91 
92 static dev_type_open(tunopen);
93 static dev_type_close(tunclose);
94 static dev_type_read(tunread);
95 static dev_type_write(tunwrite);
96 static dev_type_ioctl(tunioctl);
97 static dev_type_poll(tunpoll);
98 static dev_type_kqfilter(tunkqfilter);
99 
100 const struct cdevsw tun_cdevsw = {
101 	.d_open = tunopen,
102 	.d_close = tunclose,
103 	.d_read = tunread,
104 	.d_write = tunwrite,
105 	.d_ioctl = tunioctl,
106 	.d_stop = nostop,
107 	.d_tty = notty,
108 	.d_poll = tunpoll,
109 	.d_mmap = nommap,
110 	.d_kqfilter = tunkqfilter,
111 	.d_discard = nodiscard,
112 	.d_flag = D_OTHER | D_MPSAFE
113 };
114 
115 #ifdef _MODULE
116 devmajor_t tun_bmajor = -1, tun_cmajor = -1;
117 #endif
118 
119 void
120 tunattach(int unused)
121 {
122 
123 	/*
124 	 * Nothing to do here, initialization is handled by the
125 	 * module initialization code in tuninit() below.
126 	 */
127 }
128 
129 static void
130 tuninit(void)
131 {
132 
133 	mutex_init(&tun_softc_lock, MUTEX_DEFAULT, IPL_NET);
134 	LIST_INIT(&tun_softc_list);
135 	LIST_INIT(&tunz_softc_list);
136 	if_clone_attach(&tun_cloner);
137 #ifdef _MODULE
138 	devsw_attach("tun", NULL, &tun_bmajor, &tun_cdevsw, &tun_cmajor);
139 #endif
140 }
141 
142 static int
143 tundetach(void)
144 {
145 #ifdef _MODULE
146 	int error;
147 #endif
148 
149 	if_clone_detach(&tun_cloner);
150 #ifdef _MODULE
151 	error = devsw_detach(NULL, &tun_cdevsw);
152 	if (error != 0) {
153 		if_clone_attach(&tun_cloner);
154 		return error;
155 	}
156 #endif
157 
158 	if (!LIST_EMPTY(&tun_softc_list) || !LIST_EMPTY(&tunz_softc_list)) {
159 #ifdef _MODULE
160 		devsw_attach("tun", NULL, &tun_bmajor, &tun_cdevsw, &tun_cmajor);
161 #endif
162 		if_clone_attach(&tun_cloner);
163 		return EBUSY;
164 	}
165 
166 	mutex_destroy(&tun_softc_lock);
167 
168 	return 0;
169 }
170 
171 /*
172  * Find driver instance from dev_t.
173  * Returns with tp locked (if found).
174  */
175 static struct tun_softc *
176 tun_find_unit(dev_t dev)
177 {
178 	struct tun_softc *tp;
179 	int unit = minor(dev);
180 
181 	mutex_enter(&tun_softc_lock);
182 	LIST_FOREACH(tp, &tun_softc_list, tun_list)
183 		if (unit == tp->tun_unit)
184 			break;
185 	if (tp)
186 		mutex_enter(&tp->tun_lock);
187 	mutex_exit(&tun_softc_lock);
188 
189 	return tp;
190 }
191 
192 /*
193  * Find zombie driver instance by unit number.
194  * Remove tp from list and return it unlocked (if found).
195  */
196 static struct tun_softc *
197 tun_find_zunit(int unit)
198 {
199 	struct tun_softc *tp;
200 
201 	mutex_enter(&tun_softc_lock);
202 	LIST_FOREACH(tp, &tunz_softc_list, tun_list)
203 		if (unit == tp->tun_unit)
204 			break;
205 	if (tp)
206 		LIST_REMOVE(tp, tun_list);
207 	mutex_exit(&tun_softc_lock);
208 	KASSERTMSG(!tp || (tp->tun_flags & (TUN_INITED|TUN_OPEN)) == TUN_OPEN,
209 	    "tun%d: inconsistent flags: %x", unit, tp->tun_flags);
210 
211 	return tp;
212 }
213 
214 static int
215 tun_clone_create(struct if_clone *ifc, int unit)
216 {
217 	struct tun_softc *tp;
218 
219 	if ((tp = tun_find_zunit(unit)) == NULL) {
220 		tp = kmem_zalloc(sizeof(*tp), KM_SLEEP);
221 
222 		tp->tun_unit = unit;
223 		mutex_init(&tp->tun_lock, MUTEX_DEFAULT, IPL_NET);
224 		cv_init(&tp->tun_cv, "tunread");
225 		selinit(&tp->tun_rsel);
226 		selinit(&tp->tun_wsel);
227 	} else {
228 		/* Revive tunnel instance; clear ifp part */
229 		(void)memset(&tp->tun_if, 0, sizeof(struct ifnet));
230 	}
231 
232 	if_initname(&tp->tun_if, ifc->ifc_name, unit);
233 	tunattach0(tp);
234 	tp->tun_flags |= TUN_INITED;
235 	tp->tun_osih = softint_establish(SOFTINT_CLOCK, tun_o_softintr, tp);
236 	tp->tun_isih = softint_establish(SOFTINT_CLOCK, tun_i_softintr, tp);
237 
238 	mutex_enter(&tun_softc_lock);
239 	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
240 	mutex_exit(&tun_softc_lock);
241 
242 	return 0;
243 }
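
/*
 * Illustrative sketch (not part of the driver): units are normally
 * created with "ifconfig tunN create", which is a SIOCIFCREATE ioctl
 * on an ordinary socket and lands in tun_clone_create() above;
 * tunopen() below also creates a missing unit on demand.  Roughly:
 *
 *	struct ifreq ifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "tun0", sizeof(ifr.ifr_name));
 *	ioctl(s, SIOCIFCREATE, &ifr);		- tun_clone_create()
 *	...
 *	ioctl(s, SIOCIFDESTROY, &ifr);		- tun_clone_destroy()
 */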
244 
245 static void
246 tunattach0(struct tun_softc *tp)
247 {
248 	struct ifnet *ifp;
249 
250 	ifp = &tp->tun_if;
251 	ifp->if_softc = tp;
252 	ifp->if_mtu = TUNMTU;
253 	ifp->if_ioctl = tun_ioctl;
254 	ifp->if_output = tun_output;
255 #ifdef ALTQ
256 	ifp->if_start = tunstart;
257 #endif
258 	ifp->if_flags = IFF_POINTOPOINT;
259 	ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
260 	ifp->if_type = IFT_TUNNEL;
261 	ifp->if_snd.ifq_maxlen = ifqmaxlen;
262 	ifp->if_dlt = DLT_NULL;
263 	IFQ_SET_READY(&ifp->if_snd);
264 	if_attach(ifp);
265 	if_alloc_sadl(ifp);
266 	bpf_attach(ifp, DLT_NULL, sizeof(uint32_t));
267 }
268 
269 static int
270 tun_clone_destroy(struct ifnet *ifp)
271 {
272 	struct tun_softc *tp = (void *)ifp;
273 	bool zombie = false;
274 
275 	IF_PURGE(&ifp->if_snd);
276 	ifp->if_flags &= ~IFF_RUNNING;
277 
278 	mutex_enter(&tun_softc_lock);
279 	mutex_enter(&tp->tun_lock);
280 	LIST_REMOVE(tp, tun_list);
281 	if (tp->tun_flags & TUN_OPEN) {
282 		/* Hang on to storage until last close. */
283 		tp->tun_flags &= ~TUN_INITED;
284 		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
285 		zombie = true;
286 	}
287 	mutex_exit(&tun_softc_lock);
288 
289 	if (tp->tun_flags & TUN_RWAIT) {
290 		tp->tun_flags &= ~TUN_RWAIT;
291 		cv_broadcast(&tp->tun_cv);
292 	}
293 	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);
294 
295 	mutex_exit(&tp->tun_lock);
296 
297 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
298 		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);
299 
300 	bpf_detach(ifp);
301 	if_detach(ifp);
302 
303 	if (!zombie) {
304 		seldestroy(&tp->tun_rsel);
305 		seldestroy(&tp->tun_wsel);
306 		softint_disestablish(tp->tun_osih);
307 		softint_disestablish(tp->tun_isih);
308 		mutex_destroy(&tp->tun_lock);
309 		cv_destroy(&tp->tun_cv);
310 		kmem_free(tp, sizeof(*tp));
311 	}
312 
313 	return 0;
314 }
315 
316 /*
317  * tunnel open - the caller must be suitably privileged and the
318  * device must be configured in.
319  */
320 static int
321 tunopen(dev_t dev, int flag, int mode, struct lwp *l)
322 {
323 	struct ifnet	*ifp;
324 	struct tun_softc *tp;
325 	int	error;
326 
327 	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE_TUN,
328 	    KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD, NULL, NULL, NULL);
329 	if (error)
330 		return error;
331 
332 	tp = tun_find_unit(dev);
333 
334 	if (tp == NULL) {
335 		(void)tun_clone_create(&tun_cloner, minor(dev));
336 		tp = tun_find_unit(dev);
337 		if (tp == NULL) {
338 			return ENXIO;
339 		}
340 	}
341 
342 	if (tp->tun_flags & TUN_OPEN) {
343 		mutex_exit(&tp->tun_lock);
344 		return EBUSY;
345 	}
346 
347 	ifp = &tp->tun_if;
348 	tp->tun_flags |= TUN_OPEN;
349 	TUNDEBUG("%s: open\n", ifp->if_xname);
350 
351 	mutex_exit(&tp->tun_lock);
352 
353 	return error;
354 }
355 
356 /*
357  * tunclose - close the device - mark i/f down & delete
358  * routing info
359  */
360 int
361 tunclose(dev_t dev, int flag, int mode,
362     struct lwp *l)
363 {
364 	struct tun_softc *tp;
365 	struct ifnet	*ifp;
366 
367 	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
368 		/* interface was "destroyed" before the close */
369 		seldestroy(&tp->tun_rsel);
370 		seldestroy(&tp->tun_wsel);
371 		softint_disestablish(tp->tun_osih);
372 		softint_disestablish(tp->tun_isih);
373 		mutex_destroy(&tp->tun_lock);
		cv_destroy(&tp->tun_cv);
374 		kmem_free(tp, sizeof(*tp));
375 		return 0;
376 	}
377 
378 	if ((tp = tun_find_unit(dev)) == NULL)
379 		goto out_nolock;
380 
381 	ifp = &tp->tun_if;
382 
383 	tp->tun_flags &= ~TUN_OPEN;
384 
385 	tp->tun_pgid = 0;
386 	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);
387 
388 	TUNDEBUG ("%s: closed\n", ifp->if_xname);
389 	mutex_exit(&tp->tun_lock);
390 
391 	/*
392 	 * junk all pending output
393 	 */
394 	IFQ_PURGE(&ifp->if_snd);
395 
396 	if (ifp->if_flags & IFF_UP) {
397 		if_down(ifp);
398 		if (ifp->if_flags & IFF_RUNNING) {
399 			/* find internet addresses and delete routes */
400 			struct ifaddr *ifa;
401 			IFADDR_READER_FOREACH(ifa, ifp) {
402 #if defined(INET) || defined(INET6)
403 				if (ifa->ifa_addr->sa_family == AF_INET ||
404 				    ifa->ifa_addr->sa_family == AF_INET6) {
405 					rtinit(ifa, (int)RTM_DELETE,
406 					       tp->tun_flags & TUN_DSTADDR
407 							? RTF_HOST
408 							: 0);
409 				}
410 #endif
411 			}
412 		}
413 	}
414 out_nolock:
415 	return 0;
416 }
417 
418 static void
419 tun_enable(struct tun_softc *tp, const struct ifaddr *ifa)
420 {
421 	struct ifnet	*ifp = &tp->tun_if;
422 
423 	TUNDEBUG("%s: %s\n", __func__, ifp->if_xname);
424 
425 	mutex_enter(&tp->tun_lock);
426 	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);
427 
428 	switch (ifa->ifa_addr->sa_family) {
429 #ifdef INET
430 	case AF_INET: {
431 		struct sockaddr_in *sin;
432 
433 		sin = satosin(ifa->ifa_addr);
434 		if (sin && sin->sin_addr.s_addr)
435 			tp->tun_flags |= TUN_IASET;
436 
437 		if (ifp->if_flags & IFF_POINTOPOINT) {
438 			sin = satosin(ifa->ifa_dstaddr);
439 			if (sin && sin->sin_addr.s_addr)
440 				tp->tun_flags |= TUN_DSTADDR;
441 		}
442 		break;
443 	    }
444 #endif
445 #ifdef INET6
446 	case AF_INET6: {
447 		struct sockaddr_in6 *sin;
448 
449 		sin = satosin6(ifa->ifa_addr);
450 		if (!IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
451 			tp->tun_flags |= TUN_IASET;
452 
453 		if (ifp->if_flags & IFF_POINTOPOINT) {
454 			sin = satosin6(ifa->ifa_dstaddr);
455 			if (sin && !IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
456 				tp->tun_flags |= TUN_DSTADDR;
457 		} else
458 			tp->tun_flags &= ~TUN_DSTADDR;
459 		break;
460 	    }
461 #endif /* INET6 */
462 	default:
463 		break;
464 	}
465 	ifp->if_flags |= IFF_UP | IFF_RUNNING;
466 	mutex_exit(&tp->tun_lock);
467 }
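
/*
 * Illustrative sketch (not part of the driver): tun_enable() is what
 * makes a unit "ready" - tunread() and tun_output() return EHOSTDOWN
 * until the device is open and an address has been assigned.  From
 * userspace that is usually "ifconfig tun0 10.0.0.1 10.0.0.2", i.e. a
 * SIOCAIFADDR ioctl that the inet code eventually turns into the
 * SIOCINITIFADDR case handled below.  Roughly (addresses made up):
 *
 *	struct ifaliasreq ifra;
 *	struct sockaddr_in *sin;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifra, 0, sizeof(ifra));
 *	strlcpy(ifra.ifra_name, "tun0", sizeof(ifra.ifra_name));
 *	sin = (struct sockaddr_in *)&ifra.ifra_addr;
 *	sin->sin_len = sizeof(*sin);
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("10.0.0.1");
 *	sin = (struct sockaddr_in *)&ifra.ifra_dstaddr;
 *	sin->sin_len = sizeof(*sin);
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("10.0.0.2");
 *	ioctl(s, SIOCAIFADDR, &ifra);		- sets TUN_IASET/TUN_DSTADDR
 */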
468 
469 /*
470  * Process an ioctl request.
471  */
472 static int
473 tun_ioctl(struct ifnet *ifp, u_long cmd, void *data)
474 {
475 	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);
476 	struct ifreq *ifr = (struct ifreq *)data;
477 	struct ifaddr *ifa = (struct ifaddr *)data;
478 	int error = 0;
479 
480 	switch (cmd) {
481 	case SIOCINITIFADDR:
482 		tun_enable(tp, ifa);
483 		ifa->ifa_rtrequest = p2p_rtrequest;
484 		TUNDEBUG("%s: address set\n", ifp->if_xname);
485 		break;
486 	case SIOCSIFBRDADDR:
487 		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
488 		break;
489 	case SIOCSIFMTU:
490 		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
491 			error = EINVAL;
492 			break;
493 		}
494 		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
495 		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
496 			error = 0;
497 		break;
498 	case SIOCADDMULTI:
499 	case SIOCDELMULTI:
500 		if (ifr == NULL) {
501 			error = EAFNOSUPPORT;	/* XXX */
502 			break;
503 		}
504 		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
505 #ifdef INET
506 		case AF_INET:
507 			break;
508 #endif
509 #ifdef INET6
510 		case AF_INET6:
511 			break;
512 #endif
513 		default:
514 			error = EAFNOSUPPORT;
515 			break;
516 		}
517 		break;
518 	default:
519 		error = ifioctl_common(ifp, cmd, data);
520 	}
521 
522 	return error;
523 }
524 
525 /*
526  * tun_output - queue packets from higher level ready to put out.
527  */
528 static int
529 tun_output(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst,
530     const struct rtentry *rt)
531 {
532 	struct tun_softc *tp = ifp->if_softc;
533 	int		error;
534 #if defined(INET) || defined(INET6)
535 	int		mlen;
536 	uint32_t	*af;
537 #endif
538 
539 	mutex_enter(&tp->tun_lock);
540 	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);
541 
542 	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
543 		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
544 			  tp->tun_flags);
545 		error = EHOSTDOWN;
546 		mutex_exit(&tp->tun_lock);
547 		goto out;
548 	}
549 	// XXXrmind
550 	mutex_exit(&tp->tun_lock);
551 
552 	/*
553 	 * if the queueing discipline needs packet classification,
554 	 * do it before prepending link headers.
555 	 */
556 	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family);
557 
558 	bpf_mtap_af(ifp, dst->sa_family, m0, BPF_D_OUT);
559 
560 	if ((error = pfil_run_hooks(ifp->if_pfil, &m0, ifp, PFIL_OUT)) != 0)
561 		goto out;
562 	if (m0 == NULL)
563 		goto out;
564 
565 	switch (dst->sa_family) {
566 #ifdef INET6
567 	case AF_INET6:
568 #endif
569 #ifdef INET
570 	case AF_INET:
571 #endif
572 #if defined(INET) || defined(INET6)
573 		if (tp->tun_flags & TUN_PREPADDR) {
574 			/* Simple link-layer header */
575 			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
576 			if (m0 == NULL) {
577 				IF_DROP(&ifp->if_snd);
578 				error = ENOBUFS;
579 				goto out;
580 			}
581 			memcpy(mtod(m0, char *), dst, dst->sa_len);
582 		}
583 
584 		if (tp->tun_flags & TUN_IFHEAD) {
585 			/* Prepend the address family */
586 			M_PREPEND(m0, sizeof(*af), M_DONTWAIT);
587 			if (m0 == NULL) {
588 				IF_DROP(&ifp->if_snd);
589 				error = ENOBUFS;
590 				goto out;
591 			}
592 			af = mtod(m0, uint32_t *);
593 			*af = htonl(dst->sa_family);
594 		} else {
595 #ifdef INET
596 			if (dst->sa_family != AF_INET)
597 #endif
598 			{
599 				error = EAFNOSUPPORT;
600 				goto out;
601 			}
602 		}
603 		/* FALLTHROUGH */
604 	case AF_UNSPEC:
605 		mlen = m0->m_pkthdr.len;
606 		IFQ_ENQUEUE(&ifp->if_snd, m0, error);
607 		if (error) {
608 			if_statinc(ifp, if_collisions);
609 			error = EAFNOSUPPORT;
610 			m0 = NULL;
611 			goto out;
612 		}
613 		if_statadd2(ifp, if_opackets, 1, if_obytes, mlen);
614 		break;
615 #endif
616 	default:
617 		error = EAFNOSUPPORT;
618 		goto out;
619 	}
620 
621 	mutex_enter(&tp->tun_lock);
622 	if (tp->tun_flags & TUN_RWAIT) {
623 		tp->tun_flags &= ~TUN_RWAIT;
624 		cv_broadcast(&tp->tun_cv);
625 	}
626 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
627 		softint_schedule(tp->tun_isih);
628 
629 	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);
630 
631 	mutex_exit(&tp->tun_lock);
632 out:
633 	if (error && m0)
634 		m_freem(m0);
635 
636 	return error;
637 }
638 
639 static void
640 tun_i_softintr(void *cookie)
641 {
642 	struct tun_softc *tp = cookie;
643 
644 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
645 		fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM,
646 		    NULL);
647 }
648 
649 static void
650 tun_o_softintr(void *cookie)
651 {
652 	struct tun_softc *tp = cookie;
653 
654 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
655 		fownsignal(tp->tun_pgid, SIGIO, POLL_OUT, POLLOUT|POLLWRNORM,
656 		    NULL);
657 }
658 
659 /*
660  * the cdevsw interface is now pretty minimal.
661  */
662 int
663 tunioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
664 {
665 	struct tun_softc *tp;
666 	int error = 0;
667 
668 	tp = tun_find_unit(dev);
669 
670 	/* interface was "destroyed" already */
671 	if (tp == NULL) {
672 		return ENXIO;
673 	}
674 
675 	switch (cmd) {
676 	case TUNSDEBUG:
677 		tundebug = *(int *)data;
678 		break;
679 
680 	case TUNGDEBUG:
681 		*(int *)data = tundebug;
682 		break;
683 
684 	case TUNSIFMODE:
685 		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
686 		case IFF_POINTOPOINT:
687 		case IFF_BROADCAST:
688 			if (tp->tun_if.if_flags & IFF_UP) {
689 				error = EBUSY;
690 				goto out;
691 			}
692 			tp->tun_if.if_flags &=
693 				~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
694 			tp->tun_if.if_flags |= *(int *)data;
695 			break;
696 		default:
697 			error = EINVAL;
698 			goto out;
699 		}
700 		break;
701 
702 	case TUNSLMODE:
703 		if (*(int *)data) {
704 			tp->tun_flags |= TUN_PREPADDR;
705 			tp->tun_flags &= ~TUN_IFHEAD;
706 		} else
707 			tp->tun_flags &= ~TUN_PREPADDR;
708 		break;
709 
710 	case TUNSIFHEAD:
711 		if (*(int *)data) {
712 			tp->tun_flags |= TUN_IFHEAD;
713 			tp->tun_flags &= ~TUN_PREPADDR;
714 		} else
715 			tp->tun_flags &= ~TUN_IFHEAD;
716 		break;
717 
718 	case TUNGIFHEAD:
719 		*(int *)data = (tp->tun_flags & TUN_IFHEAD);
720 		break;
721 
722 	case FIONBIO:
723 		if (*(int *)data)
724 			tp->tun_flags |= TUN_NBIO;
725 		else
726 			tp->tun_flags &= ~TUN_NBIO;
727 		break;
728 
729 	case FIOASYNC:
730 		if (*(int *)data)
731 			tp->tun_flags |= TUN_ASYNC;
732 		else
733 			tp->tun_flags &= ~TUN_ASYNC;
734 		break;
735 
736 	case FIONREAD:
737 		if (tp->tun_if.if_snd.ifq_head)
738 			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
739 		else
740 			*(int *)data = 0;
741 		break;
742 
743 	case TIOCSPGRP:
744 	case FIOSETOWN:
745 		error = fsetown(&tp->tun_pgid, cmd, data);
746 		break;
747 
748 	case TIOCGPGRP:
749 	case FIOGETOWN:
750 		error = fgetown(tp->tun_pgid, cmd, data);
751 		break;
752 
753 	default:
754 		error = ENOTTY;
755 	}
756 
757 out:
758 	mutex_exit(&tp->tun_lock);
759 
760 	return error;
761 }
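
/*
 * Illustrative sketch (not part of the driver): the ioctls above are
 * issued on the /dev/tunN descriptor itself, e.g. to select packet
 * framing and I/O mode:
 *
 *	int on = 1;
 *	int owner = getpid();
 *
 *	ioctl(fd, TUNSIFHEAD, &on);	- prepend a 4-byte address family
 *	ioctl(fd, FIONBIO, &on);	- non-blocking reads (EWOULDBLOCK)
 *	ioctl(fd, FIOASYNC, &on);	- SIGIO when a packet arrives ...
 *	ioctl(fd, FIOSETOWN, &owner);	- ... delivered to this process
 */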
762 
763 /*
764  * The cdevsw read interface - reads a packet at a time, or at
765  * least as much of a packet as can be read.
766  */
767 int
768 tunread(dev_t dev, struct uio *uio, int ioflag)
769 {
770 	struct tun_softc *tp;
771 	struct ifnet	*ifp;
772 	struct mbuf	*m, *m0;
773 	int		error = 0, len;
774 
775 	tp = tun_find_unit(dev);
776 
777 	/* interface was "destroyed" already */
778 	if (tp == NULL) {
779 		return ENXIO;
780 	}
781 
782 	ifp = &tp->tun_if;
783 
784 	TUNDEBUG ("%s: read\n", ifp->if_xname);
785 	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
786 		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
787 		error = EHOSTDOWN;
788 		goto out;
789 	}
790 
791 	tp->tun_flags &= ~TUN_RWAIT;
792 
793 	do {
794 		IFQ_DEQUEUE(&ifp->if_snd, m0);
795 		if (m0 == NULL) {
796 			if (tp->tun_flags & TUN_NBIO) {
797 				error = EWOULDBLOCK;
798 				goto out;
799 			}
800 			tp->tun_flags |= TUN_RWAIT;
801 			if (cv_wait_sig(&tp->tun_cv, &tp->tun_lock)) {
802 				error = EINTR;
803 				goto out;
804 			}
805 		}
806 	} while (m0 == NULL);
807 
808 	mutex_exit(&tp->tun_lock);
809 
810 	/* Copy the mbuf chain */
811 	while (m0 && uio->uio_resid > 0 && error == 0) {
812 		len = uimin(uio->uio_resid, m0->m_len);
813 		if (len != 0)
814 			error = uiomove(mtod(m0, void *), len, uio);
815 		m0 = m = m_free(m0);
816 	}
817 
818 	if (m0) {
819 		TUNDEBUG("Dropping mbuf\n");
820 		m_freem(m0);
821 	}
822 	if (error)
823 		if_statinc(ifp, if_ierrors);
824 
825 	return error;
826 
827 out:
828 	mutex_exit(&tp->tun_lock);
829 
830 	return error;
831 }
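
/*
 * Illustrative sketch (not part of the driver): each successful read
 * returns at most one packet from the interface send queue (anything
 * that does not fit in the buffer is dropped).  With TUNSIFHEAD set
 * the packet is preceded by a 32-bit address family in network byte
 * order, with TUNSLMODE by the destination sockaddr, otherwise it is
 * a bare IP packet.  Parsing the TUNSIFHEAD form:
 *
 *	char buf[sizeof(uint32_t) + TUNMTU];
 *	uint32_t af;
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 *	if (n >= (ssize_t)sizeof(af)) {
 *		memcpy(&af, buf, sizeof(af));
 *		af = ntohl(af);		- AF_INET or AF_INET6
 *		...			- payload starts at buf + sizeof(af)
 *	}
 */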
832 
833 /*
834  * the cdevsw write interface - an atomic write is a packet - or else!
835  */
836 int
837 tunwrite(dev_t dev, struct uio *uio, int ioflag)
838 {
839 	struct tun_softc *tp;
840 	struct ifnet	*ifp;
841 	struct mbuf	*top, **mp, *m;
842 	pktqueue_t	*pktq;
843 	struct sockaddr	dst;
844 	int		error = 0, tlen, mlen;
845 	uint32_t	family;
846 
847 	tp = tun_find_unit(dev);
848 	if (tp == NULL) {
849 		/* Interface was "destroyed" already. */
850 		return ENXIO;
851 	}
852 
853 	/* Unlock until we've got the data */
854 	mutex_exit(&tp->tun_lock);
855 
856 	ifp = &tp->tun_if;
857 
858 	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);
859 
860 	if (tp->tun_flags & TUN_PREPADDR) {
861 		if (uio->uio_resid < sizeof(dst)) {
862 			error = EIO;
863 			goto out0;
864 		}
865 		error = uiomove((void *)&dst, sizeof(dst), uio);
		if (error)
			goto out0;
866 		if (dst.sa_len > sizeof(dst)) {
867 			/* Duh.. */
868 			int n = dst.sa_len - sizeof(dst);
869 			while (n--) {
870 				char discard;
871 				error = uiomove(&discard, 1, uio);
872 				if (error) {
873 					goto out0;
874 				}
875 			}
876 		}
877 	} else if (tp->tun_flags & TUN_IFHEAD) {
878 		if (uio->uio_resid < sizeof(family)) {
879 			error = EIO;
880 			goto out0;
881 		}
882 		error = uiomove((void *)&family, sizeof(family), uio);
		if (error)
			goto out0;
883 		dst.sa_family = ntohl(family);
884 	} else {
885 #ifdef INET
886 		dst.sa_family = AF_INET;
887 #endif
888 	}
889 
890 	if (uio->uio_resid == 0 || uio->uio_resid > TUNMTU) {
891 		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
892 		    (unsigned long)uio->uio_resid);
893 		error = EIO;
894 		goto out0;
895 	}
896 
897 	switch (dst.sa_family) {
898 #ifdef INET
899 	case AF_INET:
900 		pktq = ip_pktq;
901 		break;
902 #endif
903 #ifdef INET6
904 	case AF_INET6:
905 		pktq = ip6_pktq;
906 		break;
907 #endif
908 	default:
909 		error = EAFNOSUPPORT;
910 		goto out0;
911 	}
912 
913 	tlen = uio->uio_resid;
914 
915 	/* get a header mbuf */
916 	MGETHDR(m, M_DONTWAIT, MT_DATA);
917 	if (m == NULL) {
918 		return ENOBUFS;
919 	}
920 	mlen = MHLEN;
921 
922 	top = NULL;
923 	mp = &top;
924 	while (error == 0 && uio->uio_resid > 0) {
925 		m->m_len = uimin(mlen, uio->uio_resid);
926 		error = uiomove(mtod(m, void *), m->m_len, uio);
927 		*mp = m;
928 		mp = &m->m_next;
929 		if (error == 0 && uio->uio_resid > 0) {
930 			MGET(m, M_DONTWAIT, MT_DATA);
931 			if (m == NULL) {
932 				error = ENOBUFS;
933 				break;
934 			}
935 			mlen = MLEN;
936 		}
937 	}
938 	if (error) {
939 		if (top != NULL)
940 			m_freem(top);
941 		if_statinc(ifp, if_ierrors);
942 		goto out0;
943 	}
944 
945 	top->m_pkthdr.len = tlen;
946 	m_set_rcvif(top, ifp);
947 
948 	bpf_mtap_af(ifp, dst.sa_family, top, BPF_D_IN);
949 
950 	if ((error = pfil_run_hooks(ifp->if_pfil, &top, ifp, PFIL_IN)) != 0)
951 		goto out0;
952 	if (top == NULL)
953 		goto out0;
954 
955 	mutex_enter(&tp->tun_lock);
956 	if ((tp->tun_flags & TUN_INITED) == 0) {
957 		/* Interface was destroyed */
958 		error = ENXIO;
959 		goto out;
960 	}
961 	if (__predict_false(!pktq_enqueue(pktq, top, 0))) {
962 		if_statinc(ifp, if_collisions);
963 		mutex_exit(&tp->tun_lock);
964 		error = ENOBUFS;
965 		m_freem(top);
966 		goto out0;
967 	}
968 	if_statadd2(ifp, if_ipackets, 1, if_ibytes, tlen);
969 out:
970 	mutex_exit(&tp->tun_lock);
971 out0:
972 	return error;
973 }
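
/*
 * Illustrative sketch (not part of the driver): each write must carry
 * exactly one packet, framed to match the mode selected with the
 * ioctls above (TUNSIFHEAD/TUNSLMODE).  In the default mode the data
 * is taken to be a raw IPv4 packet; with TUNSIFHEAD it must start
 * with the address family.  Assuming pkt/pktlen describe an IPv6
 * packet:
 *
 *	uint32_t af = htonl(AF_INET6);
 *	struct iovec iov[2] = {
 *		{ .iov_base = &af,  .iov_len = sizeof(af) },
 *		{ .iov_base = pkt,  .iov_len = pktlen },
 *	};
 *
 *	(void)writev(fd, iov, 2);	- injects one IPv6 packet
 */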
974 
975 #ifdef ALTQ
976 /*
977  * Start packet transmission on the interface.
978  * When the interface queue is rate-limited by ALTQ or TBR,
979  * if_start is needed to drain packets from the queue in order
980  * to notify readers when outgoing packets become ready.
981  */
982 static void
983 tunstart(struct ifnet *ifp)
984 {
985 	struct tun_softc *tp = ifp->if_softc;
986 
987 	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
988 		return;
989 
990 	mutex_enter(&tp->tun_lock);
991 	if (!IF_IS_EMPTY(&ifp->if_snd)) {
992 		if (tp->tun_flags & TUN_RWAIT) {
993 			tp->tun_flags &= ~TUN_RWAIT;
994 			cv_broadcast(&tp->tun_cv);
995 		}
996 		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
997 			softint_schedule(tp->tun_osih);
998 
999 		selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);
1000 	}
1001 	mutex_exit(&tp->tun_lock);
1002 }
1003 #endif /* ALTQ */
1004 /*
1005  * tunpoll - the poll interface.  This is really only useful for reads;
1006  * the write check always returns true, since a write never blocks:
1007  * it either accepts the packet or drops it.
1008  */
1009 int
1010 tunpoll(dev_t dev, int events, struct lwp *l)
1011 {
1012 	struct tun_softc *tp;
1013 	struct ifnet	*ifp;
1014 	int revents = 0;
1015 
1016 	tp = tun_find_unit(dev);
1017 	if (tp == NULL) {
1018 		/* Interface was "destroyed" already. */
1019 		return 0;
1020 	}
1021 	ifp = &tp->tun_if;
1022 
1023 	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);
1024 
1025 	if (events & (POLLIN | POLLRDNORM)) {
1026 		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
1027 			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
1028 			    ifp->if_snd.ifq_len);
1029 			revents |= events & (POLLIN | POLLRDNORM);
1030 		} else {
1031 			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
1032 			selrecord(l, &tp->tun_rsel);
1033 		}
1034 	}
1035 
1036 	if (events & (POLLOUT | POLLWRNORM))
1037 		revents |= events & (POLLOUT | POLLWRNORM);
1038 
1039 	mutex_exit(&tp->tun_lock);
1040 
1041 	return revents;
1042 }
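
/*
 * Illustrative sketch (not part of the driver): waiting for packets
 * with poll(2).  POLLOUT is always reported, so only POLLIN is worth
 * waiting for; when it is returned, a read(2) will find a packet
 * queued:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		... read one packet as in the tunread() sketch above ...
 *	}
 */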
1043 
1044 static void
1045 filt_tunrdetach(struct knote *kn)
1046 {
1047 	struct tun_softc *tp = kn->kn_hook;
1048 
1049 	mutex_enter(&tp->tun_lock);
1050 	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
1051 	mutex_exit(&tp->tun_lock);
1052 }
1053 
1054 static int
1055 filt_tunread(struct knote *kn, long hint)
1056 {
1057 	struct tun_softc *tp = kn->kn_hook;
1058 	struct ifnet *ifp = &tp->tun_if;
1059 	struct mbuf *m;
1060 	int ready;
1061 
1062 	if (hint & NOTE_SUBMIT)
1063 		KASSERT(mutex_owned(&tp->tun_lock));
1064 	else
1065 		mutex_enter(&tp->tun_lock);
1066 
1067 	IF_POLL(&ifp->if_snd, m);
1068 	ready = (m != NULL);
1069 	for (kn->kn_data = 0; m != NULL; m = m->m_next)
1070 		kn->kn_data += m->m_len;
1071 
1072 	if (hint & NOTE_SUBMIT)
1073 		KASSERT(mutex_owned(&tp->tun_lock));
1074 	else
1075 		mutex_exit(&tp->tun_lock);
1076 
1077 	return ready;
1078 }
1079 
1080 static const struct filterops tunread_filtops = {
1081 	.f_isfd = 1,
1082 	.f_attach = NULL,
1083 	.f_detach = filt_tunrdetach,
1084 	.f_event = filt_tunread,
1085 };
1086 
1087 static const struct filterops tun_seltrue_filtops = {
1088 	.f_isfd = 1,
1089 	.f_attach = NULL,
1090 	.f_detach = filt_tunrdetach,
1091 	.f_event = filt_seltrue,
1092 };
1093 
1094 int
1095 tunkqfilter(dev_t dev, struct knote *kn)
1096 {
1097 	struct tun_softc *tp;
1098 	struct klist *klist;
1099 	int rv = 0;
1100 
1101 	tp = tun_find_unit(dev);
1102 	if (tp == NULL)
1103 		goto out_nolock;
1104 
1105 	switch (kn->kn_filter) {
1106 	case EVFILT_READ:
1107 		klist = &tp->tun_rsel.sel_klist;
1108 		kn->kn_fop = &tunread_filtops;
1109 		break;
1110 
1111 	case EVFILT_WRITE:
1112 		klist = &tp->tun_rsel.sel_klist;
1113 		kn->kn_fop = &tun_seltrue_filtops;
1114 		break;
1115 
1116 	default:
1117 		rv = EINVAL;
1118 		goto out;
1119 	}
1120 
1121 	kn->kn_hook = tp;
1122 
1123 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
1124 
1125 out:
1126 	mutex_exit(&tp->tun_lock);
1127 out_nolock:
1128 	return rv;
1129 }
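
/*
 * Illustrative sketch (not part of the driver): the kqueue(2)
 * equivalent registers EVFILT_READ on the descriptor; filt_tunread()
 * reports the length of the first queued packet in the event's data
 * field (EVFILT_WRITE is always true, as with poll):
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) > 0)
 *		... ev.data bytes in the first queued packet ...
 */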
1130 
1131 /*
1132  * Module infrastructure
1133  */
1134 #include "if_module.h"
1135 
1136 IF_MODULE(MODULE_CLASS_DRIVER, tun, NULL)
1137