/*	$NetBSD: if_tun.c,v 1.156 2019/04/26 08:38:25 pgoyette Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 */

/*
 * tun - tunnel software network interface.
 */
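
/*
 * Illustrative userland usage (a sketch, not part of the driver): a
 * process opens /dev/tunN, the matching tunN interface is configured
 * and brought up, and each read(2)/write(2) on the descriptor then
 * carries exactly one packet, e.g.:
 *
 *	int fd = open("/dev/tun0", O_RDWR);		// privileged
 *	char pkt[TUNMTU];
 *	ssize_t n = read(fd, pkt, sizeof(pkt));		// one packet
 *	if (n > 0)
 *		(void)write(fd, pkt, (size_t)n);	// one packet
 */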

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.156 2019/04/26 08:38:25 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <net/if_tun.h>

#include "ioconf.h"

#define TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

extern int ifqmaxlen;

static LIST_HEAD(, tun_softc) tun_softc_list;
static LIST_HEAD(, tun_softc) tunz_softc_list;
static kmutex_t tun_softc_lock;

static int	tun_ioctl(struct ifnet *, u_long, void *);
static int	tun_output(struct ifnet *, struct mbuf *,
			const struct sockaddr *, const struct rtentry *rt);
static int	tun_clone_create(struct if_clone *, int);
static int	tun_clone_destroy(struct ifnet *);

static struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);

static void tunattach0(struct tun_softc *);
static void tun_enable(struct tun_softc *, const struct ifaddr *);
static void tun_i_softintr(void *);
static void tun_o_softintr(void *);
#ifdef ALTQ
static void tunstart(struct ifnet *);
#endif
static struct tun_softc *tun_find_unit(dev_t);
static struct tun_softc *tun_find_zunit(int);

static dev_type_open(tunopen);
static dev_type_close(tunclose);
static dev_type_read(tunread);
static dev_type_write(tunwrite);
static dev_type_ioctl(tunioctl);
static dev_type_poll(tunpoll);
static dev_type_kqfilter(tunkqfilter);

const struct cdevsw tun_cdevsw = {
	.d_open = tunopen,
	.d_close = tunclose,
	.d_read = tunread,
	.d_write = tunwrite,
	.d_ioctl = tunioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = tunpoll,
	.d_mmap = nommap,
	.d_kqfilter = tunkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

#ifdef _MODULE
devmajor_t tun_bmajor = -1, tun_cmajor = -1;
#endif

void
tunattach(int unused)
{

	/*
	 * Nothing to do here; initialization is handled by the
	 * module initialization code in tuninit() below.
	 */
}

static void
tuninit(void)
{

	mutex_init(&tun_softc_lock, MUTEX_DEFAULT, IPL_NET);
	LIST_INIT(&tun_softc_list);
	LIST_INIT(&tunz_softc_list);
	if_clone_attach(&tun_cloner);
#ifdef _MODULE
	devsw_attach("tun", NULL, &tun_bmajor, &tun_cdevsw, &tun_cmajor);
#endif
}

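/*
 * Module finalization: detach the cloner and (for modular kernels) the
 * device switch entry, backing both out again if any tun instances,
 * live or zombie, still exist.
 */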
static int
tundetach(void)
{
#ifdef _MODULE
	int error;
#endif

	if_clone_detach(&tun_cloner);
#ifdef _MODULE
	error = devsw_detach(NULL, &tun_cdevsw);
	if (error != 0) {
		if_clone_attach(&tun_cloner);
		return error;
	}
#endif

	if (!LIST_EMPTY(&tun_softc_list) || !LIST_EMPTY(&tunz_softc_list)) {
#ifdef _MODULE
		devsw_attach("tun", NULL, &tun_bmajor, &tun_cdevsw, &tun_cmajor);
#endif
		if_clone_attach(&tun_cloner);
		return EBUSY;
	}

	mutex_destroy(&tun_softc_lock);

	return 0;
}

/*
 * Find driver instance from dev_t.
 * Returns with tp locked (if found).
 */
static struct tun_softc *
tun_find_unit(dev_t dev)
{
	struct tun_softc *tp;
	int unit = minor(dev);

	mutex_enter(&tun_softc_lock);
	LIST_FOREACH(tp, &tun_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		mutex_enter(&tp->tun_lock);
	mutex_exit(&tun_softc_lock);

	return tp;
}

/*
 * Find zombie driver instance by unit number.
 * Remove tp from list and return it unlocked (if found).
 */
static struct tun_softc *
tun_find_zunit(int unit)
{
	struct tun_softc *tp;

	mutex_enter(&tun_softc_lock);
	LIST_FOREACH(tp, &tunz_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		LIST_REMOVE(tp, tun_list);
	mutex_exit(&tun_softc_lock);
	KASSERTMSG(!tp || (tp->tun_flags & (TUN_INITED|TUN_OPEN)) == TUN_OPEN,
	    "tun%d: inconsistent flags: %x", unit, tp->tun_flags);

	return tp;
}

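/*
 * Create the tun interface for the given unit: allocate and initialize
 * a fresh softc, or revive a zombie instance that is still held open,
 * then attach the network interface and its soft interrupt handlers.
 */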
static int
tun_clone_create(struct if_clone *ifc, int unit)
{
	struct tun_softc *tp;

	if ((tp = tun_find_zunit(unit)) == NULL) {
		tp = kmem_zalloc(sizeof(*tp), KM_SLEEP);

		tp->tun_unit = unit;
		mutex_init(&tp->tun_lock, MUTEX_DEFAULT, IPL_NET);
		cv_init(&tp->tun_cv, "tunread");
		selinit(&tp->tun_rsel);
		selinit(&tp->tun_wsel);
	} else {
		/* Revive tunnel instance; clear ifp part */
		(void)memset(&tp->tun_if, 0, sizeof(struct ifnet));
	}

	if_initname(&tp->tun_if, ifc->ifc_name, unit);
	tunattach0(tp);
	tp->tun_flags |= TUN_INITED;
	tp->tun_osih = softint_establish(SOFTINT_CLOCK, tun_o_softintr, tp);
	tp->tun_isih = softint_establish(SOFTINT_CLOCK, tun_i_softintr, tp);

	mutex_enter(&tun_softc_lock);
	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
	mutex_exit(&tun_softc_lock);

	return 0;
}

static void
tunattach0(struct tun_softc *tp)
{
	struct ifnet *ifp;

	ifp = &tp->tun_if;
	ifp->if_softc = tp;
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tun_ioctl;
	ifp->if_output = tun_output;
#ifdef ALTQ
	ifp->if_start = tunstart;
#endif
	ifp->if_flags = IFF_POINTOPOINT;
	ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
	ifp->if_type = IFT_TUNNEL;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_collisions = 0;
	ifp->if_ierrors = 0;
	ifp->if_oerrors = 0;
	ifp->if_ipackets = 0;
	ifp->if_opackets = 0;
	ifp->if_ibytes   = 0;
	ifp->if_obytes   = 0;
	ifp->if_dlt = DLT_NULL;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);
	bpf_attach(ifp, DLT_NULL, sizeof(uint32_t));
}

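/*
 * Destroy the interface.  If the character device is still open, the
 * softc is parked on the zombie list and only freed on the last close;
 * otherwise all resources are released here.
 */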
static int
tun_clone_destroy(struct ifnet *ifp)
{
	struct tun_softc *tp = (void *)ifp;
	bool zombie = false;

	IF_PURGE(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	mutex_enter(&tun_softc_lock);
	mutex_enter(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	if (tp->tun_flags & TUN_OPEN) {
		/* Hang on to storage until last close. */
		tp->tun_flags &= ~TUN_INITED;
		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
		zombie = true;
	}
	mutex_exit(&tun_softc_lock);

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		cv_broadcast(&tp->tun_cv);
	}
	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);

	mutex_exit(&tp->tun_lock);

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);

	bpf_detach(ifp);
	if_detach(ifp);

	if (!zombie) {
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		cv_destroy(&tp->tun_cv);
		kmem_free(tp, sizeof(*tp));
	}

	return 0;
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
static int
tunopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ifnet	*ifp;
	struct tun_softc *tp;
	int	error;

	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE_TUN,
	    KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD, NULL, NULL, NULL);
	if (error)
		return error;

	tp = tun_find_unit(dev);

	if (tp == NULL) {
		(void)tun_clone_create(&tun_cloner, minor(dev));
		tp = tun_find_unit(dev);
		if (tp == NULL) {
			return ENXIO;
		}
	}

	if (tp->tun_flags & TUN_OPEN) {
		mutex_exit(&tp->tun_lock);
		return EBUSY;
	}

	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);

	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet	*ifp;

	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
		/* interface was "destroyed" before the close */
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		kmem_free(tp, sizeof(*tp));
		return 0;
	}

	if ((tp = tun_find_unit(dev)) == NULL)
		goto out_nolock;

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	tp->tun_pgid = 0;
	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	mutex_exit(&tp->tun_lock);

	/*
	 * junk all pending output
	 */
	IFQ_PURGE(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			IFADDR_READER_FOREACH(ifa, ifp) {
#if defined(INET) || defined(INET6)
				if (ifa->ifa_addr->sa_family == AF_INET ||
				    ifa->ifa_addr->sa_family == AF_INET6) {
					rtinit(ifa, (int)RTM_DELETE,
					       tp->tun_flags & TUN_DSTADDR
							? RTF_HOST
							: 0);
				}
#endif
			}
		}
	}
out_nolock:
	return 0;
}

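/*
 * Record whether a local (and, in point-to-point mode, a destination)
 * address has been configured, and mark the interface up and running.
 * Called from tun_ioctl() when an address is assigned.
 */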
static void
tun_enable(struct tun_softc *tp, const struct ifaddr *ifa)
{
	struct ifnet	*ifp = &tp->tun_if;

	TUNDEBUG("%s: %s\n", __func__, ifp->if_xname);

	mutex_enter(&tp->tun_lock);
	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);

	switch (ifa->ifa_addr->sa_family) {
#ifdef INET
	case AF_INET: {
		struct sockaddr_in *sin;

		sin = satosin(ifa->ifa_addr);
		if (sin && sin->sin_addr.s_addr)
			tp->tun_flags |= TUN_IASET;

		if (ifp->if_flags & IFF_POINTOPOINT) {
			sin = satosin(ifa->ifa_dstaddr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_DSTADDR;
		}
		break;
	    }
#endif
#ifdef INET6
	case AF_INET6: {
		struct sockaddr_in6 *sin;

		sin = satosin6(ifa->ifa_addr);
		if (!IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
			tp->tun_flags |= TUN_IASET;

		if (ifp->if_flags & IFF_POINTOPOINT) {
			sin = satosin6(ifa->ifa_dstaddr);
			if (sin && !IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
				tp->tun_flags |= TUN_DSTADDR;
		} else
			tp->tun_flags &= ~TUN_DSTADDR;
		break;
	    }
#endif /* INET6 */
	default:
		break;
	}
	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	mutex_exit(&tp->tun_lock);
}

/*
 * Process an ioctl request.
 */
static int
tun_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		tun_enable(tp, ifa);
		ifa->ifa_rtrequest = p2p_rtrequest;
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == NULL) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	return error;
}

/*
 * tun_output - queue packets from higher level ready to put out.
 */
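
/*
 * The framing seen by the reader depends on the mode selected with
 * TUNSLMODE/TUNSIFHEAD: each packet is prefixed with the destination
 * sockaddr (TUN_PREPADDR), prefixed with a 32-bit address family in
 * network byte order (TUN_IFHEAD), or passed as a bare IPv4 packet.
 * For example, an IPv4 packet in TUN_IFHEAD mode starts with:
 *
 *	uint32_t af = htonl(AF_INET);	// then the IP header follows
 */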
static int
tun_output(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int		error;
#if defined(INET) || defined(INET6)
	int		mlen;
	uint32_t	*af;
#endif

	mutex_enter(&tp->tun_lock);
	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
			  tp->tun_flags);
		error = EHOSTDOWN;
		mutex_exit(&tp->tun_lock);
		goto out;
	}
	// XXXrmind
	mutex_exit(&tp->tun_lock);

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family);

	bpf_mtap_af(ifp, dst->sa_family, m0, BPF_D_OUT);

	if ((error = pfil_run_hooks(ifp->if_pfil, &m0, ifp, PFIL_OUT)) != 0)
		goto out;
	if (m0 == NULL)
		goto out;

	switch (dst->sa_family) {
#ifdef INET6
	case AF_INET6:
#endif
#ifdef INET
	case AF_INET:
#endif
#if defined(INET) || defined(INET6)
		if (tp->tun_flags & TUN_PREPADDR) {
			/* Simple link-layer header */
			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				error = ENOBUFS;
				goto out;
			}
			memcpy(mtod(m0, char *), dst, dst->sa_len);
		}

		if (tp->tun_flags & TUN_IFHEAD) {
			/* Prepend the address family */
			M_PREPEND(m0, sizeof(*af), M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				error = ENOBUFS;
				goto out;
			}
			af = mtod(m0, uint32_t *);
			*af = htonl(dst->sa_family);
		} else {
#ifdef INET
			if (dst->sa_family != AF_INET)
#endif
			{
				error = EAFNOSUPPORT;
				goto out;
			}
		}
		/* FALLTHROUGH */
	case AF_UNSPEC:
		IFQ_ENQUEUE(&ifp->if_snd, m0, error);
		if (error) {
			ifp->if_collisions++;
			error = EAFNOSUPPORT;
			m0 = NULL;
			goto out;
		}
		mlen = m0->m_pkthdr.len;
		ifp->if_opackets++;
		ifp->if_obytes += mlen;
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto out;
	}

	mutex_enter(&tp->tun_lock);
	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		cv_broadcast(&tp->tun_cv);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		softint_schedule(tp->tun_isih);

	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);

	mutex_exit(&tp->tun_lock);
out:
	if (error && m0)
		m_freem(m0);

	return error;
}

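/*
 * Soft interrupt handlers, used to post SIGIO outside of the IPL_NET
 * tun_lock: tun_i_softintr signals that a packet is ready to be read,
 * tun_o_softintr signals that the interface can accept more output.
 */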
static void
tun_i_softintr(void *cookie)
{
	struct tun_softc *tp = cookie;

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM,
		    NULL);
}

static void
tun_o_softintr(void *cookie)
{
	struct tun_softc *tp = cookie;

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_OUT, POLLOUT|POLLWRNORM,
		    NULL);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
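
/*
 * Illustrative only: from userland, behaviour is selected with ioctl(2)
 * on the open /dev/tunN descriptor, e.g.
 *
 *	int on = 1;
 *	ioctl(fd, TUNSIFHEAD, &on);	// prepend a 4-byte address family
 *	ioctl(fd, FIOASYNC, &on);	// deliver SIGIO on activity
 *	ioctl(fd, FIONBIO, &on);	// non-blocking reads
 */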
int
tunioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct tun_softc *tp;
	int error = 0;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		return ENXIO;
	}

	switch (cmd) {
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;

	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;

	case TUNSIFMODE:
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			if (tp->tun_if.if_flags & IFF_UP) {
				error = EBUSY;
				goto out;
			}
			tp->tun_if.if_flags &=
				~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	case TUNSLMODE:
		if (*(int *)data) {
			tp->tun_flags |= TUN_PREPADDR;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_PREPADDR;
		break;

	case TUNSIFHEAD:
		if (*(int *)data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_PREPADDR;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;

	case TUNGIFHEAD:
		*(int *)data = (tp->tun_flags & TUN_IFHEAD);
		break;

	case FIONBIO:
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;

	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;

	case FIONREAD:
		if (tp->tun_if.if_snd.ifq_head)
			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
		else
			*(int *)data = 0;
		break;

	case TIOCSPGRP:
	case FIOSETOWN:
		error = fsetown(&tp->tun_pgid, cmd, data);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(tp->tun_pgid, cmd, data);
		break;

	default:
		error = ENOTTY;
	}

out:
	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
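
/*
 * A minimal sketch of the matching userland read loop, assuming the
 * default framing (no prepended header):
 *
 *	for (;;) {
 *		char pkt[TUNMTU];
 *		ssize_t n = read(fd, pkt, sizeof(pkt));
 *		if (n <= 0)
 *			break;
 *		// pkt[0 .. n-1] holds one IP packet
 *	}
 */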
int
tunread(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet	*ifp;
	struct mbuf	*m, *m0;
	int		error = 0, len;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		return ENXIO;
	}

	ifp = &tp->tun_if;

	TUNDEBUG ("%s: read\n", ifp->if_xname);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
		error = EHOSTDOWN;
		goto out;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	do {
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL) {
			if (tp->tun_flags & TUN_NBIO) {
				error = EWOULDBLOCK;
				goto out;
			}
			tp->tun_flags |= TUN_RWAIT;
			if (cv_wait_sig(&tp->tun_cv, &tp->tun_lock)) {
				error = EINTR;
				goto out;
			}
		}
	} while (m0 == NULL);

	mutex_exit(&tp->tun_lock);

	/* Copy the mbuf chain */
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = uimin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, void *), len, uio);
		m0 = m = m_free(m0);
	}

	if (m0) {
		TUNDEBUG("Dropping mbuf\n");
		m_freem(m0);
	}
	if (error)
		ifp->if_ierrors++;

	return error;

out:
	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
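
/*
 * A minimal sketch of injecting a packet from userland with TUNSIFHEAD
 * enabled; the 32-bit address family must precede the IP packet:
 *
 *	struct iovec iov[2];
 *	uint32_t af = htonl(AF_INET);
 *	iov[0].iov_base = &af;	iov[0].iov_len = sizeof(af);
 *	iov[1].iov_base = pkt;	iov[1].iov_len = pktlen;
 *	(void)writev(fd, iov, 2);	// one packet per write
 */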
int
tunwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet	*ifp;
	struct mbuf	*top, **mp, *m;
	pktqueue_t	*pktq;
	struct sockaddr	dst;
	int		error = 0, tlen, mlen;
	uint32_t	family;

	tp = tun_find_unit(dev);
	if (tp == NULL) {
		/* Interface was "destroyed" already. */
		return ENXIO;
	}

	/* Unlock until we've got the data */
	mutex_exit(&tp->tun_lock);

	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);

	if (tp->tun_flags & TUN_PREPADDR) {
		if (uio->uio_resid < sizeof(dst)) {
			error = EIO;
			goto out0;
		}
		error = uiomove((void *)&dst, sizeof(dst), uio);
		if (dst.sa_len > sizeof(dst)) {
			/* Duh.. */
			int n = dst.sa_len - sizeof(dst);
			while (n--) {
				char discard;
				error = uiomove(&discard, 1, uio);
				if (error) {
					goto out0;
				}
			}
		}
	} else if (tp->tun_flags & TUN_IFHEAD) {
		if (uio->uio_resid < sizeof(family)) {
			error = EIO;
			goto out0;
		}
		error = uiomove((void *)&family, sizeof(family), uio);
		dst.sa_family = ntohl(family);
	} else {
#ifdef INET
		dst.sa_family = AF_INET;
#endif
	}

	if (uio->uio_resid > TUNMTU) {
		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
		    (unsigned long)uio->uio_resid);
		error = EIO;
		goto out0;
	}

	switch (dst.sa_family) {
#ifdef INET
	case AF_INET:
		pktq = ip_pktq;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		pktq = ip6_pktq;
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto out0;
	}

	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		return ENOBUFS;
	}
	mlen = MHLEN;

	top = NULL;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = uimin(mlen, uio->uio_resid);
		error = uiomove(mtod(m, void *), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (error == 0 && uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top != NULL)
			m_freem(top);
		ifp->if_ierrors++;
		goto out0;
	}

	top->m_pkthdr.len = tlen;
	m_set_rcvif(top, ifp);

	bpf_mtap_af(ifp, dst.sa_family, top, BPF_D_IN);

	if ((error = pfil_run_hooks(ifp->if_pfil, &top, ifp, PFIL_IN)) != 0)
		goto out0;
	if (top == NULL)
		goto out0;

	mutex_enter(&tp->tun_lock);
	if ((tp->tun_flags & TUN_INITED) == 0) {
		/* Interface was destroyed */
		error = ENXIO;
		goto out;
	}
	if (__predict_false(!pktq_enqueue(pktq, top, 0))) {
		ifp->if_collisions++;
		mutex_exit(&tp->tun_lock);
		error = ENOBUFS;
		m_freem(top);
		goto out0;
	}
	ifp->if_ipackets++;
	ifp->if_ibytes += tlen;
out:
	mutex_exit(&tp->tun_lock);
out0:
	return error;
}

#ifdef ALTQ
/*
 * Start packet transmission on the interface.
 * When the interface queue is rate-limited by ALTQ or TBR,
 * if_start is needed to drain packets from the queue in order
 * to notify readers when outgoing packets become ready.
 */
static void
tunstart(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;

	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
		return;

	mutex_enter(&tp->tun_lock);
	if (!IF_IS_EMPTY(&ifp->if_snd)) {
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			cv_broadcast(&tp->tun_cv);
		}
		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
			softint_schedule(tp->tun_osih);

		selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);
	}
	mutex_exit(&tp->tun_lock);
}
#endif /* ALTQ */
/*
 * tunpoll - the poll interface.  This is really only useful for
 * reads; the write side always reports ready, since a write never
 * blocks: it either accepts the packet or drops it.
 */
int
tunpoll(dev_t dev, int events, struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet	*ifp;
	int revents = 0;

	tp = tun_find_unit(dev);
	if (tp == NULL) {
		/* Interface was "destroyed" already. */
		return 0;
	}
	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);

	if (events & (POLLIN | POLLRDNORM)) {
		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
			    ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
			selrecord(l, &tp->tun_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	mutex_exit(&tp->tun_lock);

	return revents;
}

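/*
 * kqueue(2) support: filt_tunrdetach removes a knote from the read
 * selinfo; filt_tunread reports, in kn_data, the number of bytes in
 * the packet at the head of the send queue.
 */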
static void
filt_tunrdetach(struct knote *kn)
{
	struct tun_softc *tp = kn->kn_hook;

	mutex_enter(&tp->tun_lock);
	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&tp->tun_lock);
}

static int
filt_tunread(struct knote *kn, long hint)
{
	struct tun_softc *tp = kn->kn_hook;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m;
	int ready;

	if (hint & NOTE_SUBMIT)
		KASSERT(mutex_owned(&tp->tun_lock));
	else
		mutex_enter(&tp->tun_lock);

	IF_POLL(&ifp->if_snd, m);
	ready = (m != NULL);
	for (kn->kn_data = 0; m != NULL; m = m->m_next)
		kn->kn_data += m->m_len;

	if (hint & NOTE_SUBMIT)
		KASSERT(mutex_owned(&tp->tun_lock));
	else
		mutex_exit(&tp->tun_lock);

	return ready;
}

static const struct filterops tunread_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_tunrdetach,
	.f_event = filt_tunread,
};

static const struct filterops tun_seltrue_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_tunrdetach,
	.f_event = filt_seltrue,
};

int
tunkqfilter(dev_t dev, struct knote *kn)
{
	struct tun_softc *tp;
	struct klist *klist;
	int rv = 0;

	tp = tun_find_unit(dev);
	if (tp == NULL)
		goto out_nolock;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tunread_filtops;
		break;

	case EVFILT_WRITE:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tun_seltrue_filtops;
		break;

	default:
		rv = EINVAL;
		goto out;
	}

	kn->kn_hook = tp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

out:
	mutex_exit(&tp->tun_lock);
out_nolock:
	return rv;
}

/*
 * Module infrastructure
 */
#include "if_module.h"

IF_MODULE(MODULE_CLASS_DRIVER, tun, NULL)
1146