xref: /netbsd-src/sys/net/if_tun.c (revision 6a493d6bc668897c91594964a732d38505b70cbb)
1 /*	$NetBSD: if_tun.c,v 1.115 2012/01/28 01:02:27 rmind Exp $	*/
2 
3 /*
4  * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
5  * Nottingham University 1987.
6  *
7  * This source may be freely distributed, however I would be interested
8  * in any changes that are made.
9  *
10  * This driver takes packets off the IP i/f and hands them up to a
11  * user process to have its wicked way with. This driver has its
12  * roots in a similar driver written by Phil Cockcroft (formerly) at
13  * UCL. This driver is based much more on read/write/poll mode of
14  * operation though.
15  */
16 
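/*
 * Userland view (an illustrative sketch only, not part of the driver):
 * opening /dev/tunN creates the matching tunN interface on demand; once
 * the interface has been configured and marked up, each read(2) returns
 * one queued outbound IP packet and each write(2) injects one packet
 * into the IP input path.  The device name, the ifconfig invocation and
 * the plain IPv4 framing (the default, with no TUNSLMODE/TUNSIFHEAD
 * prefix) are assumptions made for this example.
 *
 *	#include <sys/ioctl.h>
 *	#include <net/if_tun.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	tun_pump(void)
 *	{
 *		char buf[TUNMTU];	// large enough for one packet
 *		ssize_t n;
 *		int fd = open("/dev/tun0", O_RDWR);
 *
 *		if (fd == -1)
 *			return -1;
 *		// "ifconfig tun0 10.0.0.1 10.0.0.2 up" is assumed to have
 *		// been run; each read now yields one outbound packet,
 *		// which is written straight back in purely to exercise
 *		// both system calls.
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			(void)write(fd, buf, (size_t)n);
 *		return close(fd);
 *	}
 */
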
17 #include <sys/cdefs.h>
18 __KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.115 2012/01/28 01:02:27 rmind Exp $");
19 
20 #include "opt_inet.h"
21 
22 #include <sys/param.h>
23 #include <sys/proc.h>
24 #include <sys/systm.h>
25 #include <sys/mbuf.h>
26 #include <sys/buf.h>
27 #include <sys/protosw.h>
28 #include <sys/socket.h>
29 #include <sys/ioctl.h>
30 #include <sys/errno.h>
31 #include <sys/syslog.h>
32 #include <sys/select.h>
33 #include <sys/poll.h>
34 #include <sys/file.h>
35 #include <sys/signalvar.h>
36 #include <sys/conf.h>
37 #include <sys/kauth.h>
38 #include <sys/simplelock.h>
39 #include <sys/mutex.h>
40 #include <sys/cpu.h>
41 
42 #include <net/if.h>
43 #include <net/if_types.h>
44 #include <net/netisr.h>
45 #include <net/route.h>
46 
47 
48 #ifdef INET
49 #include <netinet/in.h>
50 #include <netinet/in_systm.h>
51 #include <netinet/in_var.h>
52 #include <netinet/ip.h>
53 #include <netinet/if_inarp.h>
54 #endif
55 
56 
57 #include <sys/time.h>
58 #include <net/bpf.h>
59 
60 #include <net/if_tun.h>
61 
62 #define TUNDEBUG	if (tundebug) printf
63 int	tundebug = 0;
64 
65 extern int ifqmaxlen;
66 void	tunattach(int);
67 
68 static LIST_HEAD(, tun_softc) tun_softc_list;
69 static LIST_HEAD(, tun_softc) tunz_softc_list;
70 static struct simplelock tun_softc_lock;
71 
72 static int	tun_ioctl(struct ifnet *, u_long, void *);
73 static int	tun_output(struct ifnet *, struct mbuf *,
74 			const struct sockaddr *, struct rtentry *rt);
75 static int	tun_clone_create(struct if_clone *, int);
76 static int	tun_clone_destroy(struct ifnet *);
77 
78 static struct if_clone tun_cloner =
79     IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);
80 
81 static void tunattach0(struct tun_softc *);
82 static void tuninit(struct tun_softc *);
83 static void tun_i_softintr(void *);
84 static void tun_o_softintr(void *);
85 #ifdef ALTQ
86 static void tunstart(struct ifnet *);
87 #endif
88 static struct tun_softc *tun_find_unit(dev_t);
89 static struct tun_softc *tun_find_zunit(int);
90 
91 static dev_type_open(tunopen);
92 static dev_type_close(tunclose);
93 static dev_type_read(tunread);
94 static dev_type_write(tunwrite);
95 static dev_type_ioctl(tunioctl);
96 static dev_type_poll(tunpoll);
97 static dev_type_kqfilter(tunkqfilter);
98 
99 const struct cdevsw tun_cdevsw = {
100 	tunopen, tunclose, tunread, tunwrite, tunioctl,
101 	nostop, notty, tunpoll, nommap, tunkqfilter, D_OTHER,
102 };
103 
104 void
105 tunattach(int unused)
106 {
107 
108 	simple_lock_init(&tun_softc_lock);
109 	LIST_INIT(&tun_softc_list);
110 	LIST_INIT(&tunz_softc_list);
111 	if_clone_attach(&tun_cloner);
112 }
113 
114 /*
115  * Find driver instance from dev_t.
116  * Call at splnet().
117  * Returns with tp locked (if found).
118  */
119 static struct tun_softc *
120 tun_find_unit(dev_t dev)
121 {
122 	struct tun_softc *tp;
123 	int unit = minor(dev);
124 
125 	simple_lock(&tun_softc_lock);
126 	LIST_FOREACH(tp, &tun_softc_list, tun_list)
127 		if (unit == tp->tun_unit)
128 			break;
129 	if (tp)
130 		mutex_enter(&tp->tun_lock);
131 	simple_unlock(&tun_softc_lock);
132 
133 	return (tp);
134 }
135 
136 /*
137  * Find zombie driver instance by unit number.
138  * Call at splnet().
139  * Remove tp from list and return it unlocked (if found).
140  */
141 static struct tun_softc *
142 tun_find_zunit(int unit)
143 {
144 	struct tun_softc *tp;
145 
146 	simple_lock(&tun_softc_lock);
147 	LIST_FOREACH(tp, &tunz_softc_list, tun_list)
148 		if (unit == tp->tun_unit)
149 			break;
150 	if (tp)
151 		LIST_REMOVE(tp, tun_list);
152 	simple_unlock(&tun_softc_lock);
153 #ifdef DIAGNOSTIC
154 	if (tp != NULL && (tp->tun_flags & (TUN_INITED|TUN_OPEN)) != TUN_OPEN)
155 		printf("tun%d: inconsistent flags: %x\n", unit, tp->tun_flags);
156 #endif
157 
158 	return (tp);
159 }
160 
161 static int
162 tun_clone_create(struct if_clone *ifc, int unit)
163 {
164 	struct tun_softc *tp;
165 
166 	if ((tp = tun_find_zunit(unit)) == NULL) {
167 		/* Allocate a new instance */
168 		tp = malloc(sizeof(*tp), M_DEVBUF, M_WAITOK|M_ZERO);
169 
170 		tp->tun_unit = unit;
171 		mutex_init(&tp->tun_lock, MUTEX_DEFAULT, IPL_NET);
172 		selinit(&tp->tun_rsel);
173 		selinit(&tp->tun_wsel);
174 	} else {
175 		/* Revive tunnel instance; clear ifp part */
176 		(void)memset(&tp->tun_if, 0, sizeof(struct ifnet));
177 	}
178 
179 	if_initname(&tp->tun_if, ifc->ifc_name, unit);
180 	tunattach0(tp);
181 	tp->tun_flags |= TUN_INITED;
182 	tp->tun_osih = softint_establish(SOFTINT_CLOCK, tun_o_softintr, tp);
183 	tp->tun_isih = softint_establish(SOFTINT_CLOCK, tun_i_softintr, tp);
184 
185 	simple_lock(&tun_softc_lock);
186 	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
187 	simple_unlock(&tun_softc_lock);
188 
189 	return (0);
190 }
191 
192 static void
193 tunattach0(struct tun_softc *tp)
194 {
195 	struct ifnet *ifp;
196 
197 	ifp = &tp->tun_if;
198 	ifp->if_softc = tp;
199 	ifp->if_mtu = TUNMTU;
200 	ifp->if_ioctl = tun_ioctl;
201 	ifp->if_output = tun_output;
202 #ifdef ALTQ
203 	ifp->if_start = tunstart;
204 #endif
205 	ifp->if_flags = IFF_POINTOPOINT;
206 	ifp->if_type = IFT_TUNNEL;
207 	ifp->if_snd.ifq_maxlen = ifqmaxlen;
208 	ifp->if_collisions = 0;
209 	ifp->if_ierrors = 0;
210 	ifp->if_oerrors = 0;
211 	ifp->if_ipackets = 0;
212 	ifp->if_opackets = 0;
213 	ifp->if_ibytes   = 0;
214 	ifp->if_obytes   = 0;
215 	ifp->if_dlt = DLT_NULL;
216 	IFQ_SET_READY(&ifp->if_snd);
217 	if_attach(ifp);
218 	if_alloc_sadl(ifp);
219 	bpf_attach(ifp, DLT_NULL, sizeof(uint32_t));
220 }
221 
222 static int
223 tun_clone_destroy(struct ifnet *ifp)
224 {
225 	struct tun_softc *tp = (void *)ifp;
226 	int s, zombie = 0;
227 
228 	IF_PURGE(&ifp->if_snd);
229 	ifp->if_flags &= ~IFF_RUNNING;
230 
231 	s = splnet();
232 	simple_lock(&tun_softc_lock);
233 	mutex_enter(&tp->tun_lock);
234 	LIST_REMOVE(tp, tun_list);
235 	if (tp->tun_flags & TUN_OPEN) {
236 		/* Hang on to storage until last close */
237 		zombie = 1;
238 		tp->tun_flags &= ~TUN_INITED;
239 		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
240 	}
241 	simple_unlock(&tun_softc_lock);
242 
243 	if (tp->tun_flags & TUN_RWAIT) {
244 		tp->tun_flags &= ~TUN_RWAIT;
245 		wakeup((void *)tp);
246 	}
247 	selnotify(&tp->tun_rsel, 0, 0);
248 
249 	mutex_exit(&tp->tun_lock);
250 	splx(s);
251 
252 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
253 		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);
254 
255 	bpf_detach(ifp);
256 	if_detach(ifp);
257 
258 	if (!zombie) {
259 		seldestroy(&tp->tun_rsel);
260 		seldestroy(&tp->tun_wsel);
261 		softint_disestablish(tp->tun_osih);
262 		softint_disestablish(tp->tun_isih);
263 		mutex_destroy(&tp->tun_lock);
264 		free(tp, M_DEVBUF);
265 	}
266 
267 	return (0);
268 }
269 
270 /*
271  * tunnel open - must be superuser; if the interface does not
272  * already exist, it is created (cloned) on first open
273  */
274 static int
275 tunopen(dev_t dev, int flag, int mode, struct lwp *l)
276 {
277 	struct ifnet	*ifp;
278 	struct tun_softc *tp;
279 	int	s, error;
280 
281 	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE_TUN,
282 	    KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD, NULL, NULL, NULL);
283 	if (error)
284 		return (error);
285 
286 	s = splnet();
287 	tp = tun_find_unit(dev);
288 
289 	if (tp == NULL) {
290 		(void)tun_clone_create(&tun_cloner, minor(dev));
291 		tp = tun_find_unit(dev);
292 		if (tp == NULL) {
293 			error = ENXIO;
294 			goto out_nolock;
295 		}
296 	}
297 
298 	if (tp->tun_flags & TUN_OPEN) {
299 		error = EBUSY;
300 		goto out;
301 	}
302 
303 	ifp = &tp->tun_if;
304 	tp->tun_flags |= TUN_OPEN;
305 	TUNDEBUG("%s: open\n", ifp->if_xname);
306 out:
307 	mutex_exit(&tp->tun_lock);
308 out_nolock:
309 	splx(s);
310 	return (error);
311 }
312 
313 /*
314  * tunclose - close the device - mark i/f down & delete
315  * routing info
316  */
317 int
318 tunclose(dev_t dev, int flag, int mode,
319     struct lwp *l)
320 {
321 	int	s;
322 	struct tun_softc *tp;
323 	struct ifnet	*ifp;
324 
325 	s = splnet();
326 	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
327 		/* interface was "destroyed" before the close */
328 		seldestroy(&tp->tun_rsel);
329 		seldestroy(&tp->tun_wsel);
330 		softint_disestablish(tp->tun_osih);
331 		softint_disestablish(tp->tun_isih);
332 		mutex_destroy(&tp->tun_lock);
333 		free(tp, M_DEVBUF);
334 		goto out_nolock;
335 	}
336 
337 	if ((tp = tun_find_unit(dev)) == NULL)
338 		goto out_nolock;
339 
340 	ifp = &tp->tun_if;
341 
342 	tp->tun_flags &= ~TUN_OPEN;
343 
344 	tp->tun_pgid = 0;
345 	selnotify(&tp->tun_rsel, 0, 0);
346 
347 	TUNDEBUG ("%s: closed\n", ifp->if_xname);
348 	mutex_exit(&tp->tun_lock);
349 
350 	/*
351 	 * junk all pending output
352 	 */
353 	IFQ_PURGE(&ifp->if_snd);
354 
355 	if (ifp->if_flags & IFF_UP) {
356 		if_down(ifp);
357 		if (ifp->if_flags & IFF_RUNNING) {
358 			/* find internet addresses and delete routes */
359 			struct ifaddr *ifa;
360 			IFADDR_FOREACH(ifa, ifp) {
361 #if defined(INET) || defined(INET6)
362 				if (ifa->ifa_addr->sa_family == AF_INET ||
363 				    ifa->ifa_addr->sa_family == AF_INET6) {
364 					rtinit(ifa, (int)RTM_DELETE,
365 					       tp->tun_flags & TUN_DSTADDR
366 							? RTF_HOST
367 							: 0);
368 				}
369 #endif
370 			}
371 		}
372 	}
373 out_nolock:
374 	splx(s);
375 	return (0);
376 }
377 
378 /*
379  * Call at splnet().
380  */
381 static void
382 tuninit(struct tun_softc *tp)
383 {
384 	struct ifnet	*ifp = &tp->tun_if;
385 	struct ifaddr	*ifa;
386 
387 	TUNDEBUG("%s: tuninit\n", ifp->if_xname);
388 
389 	mutex_enter(&tp->tun_lock);
390 	ifp->if_flags |= IFF_UP | IFF_RUNNING;
391 
392 	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);
393 	IFADDR_FOREACH(ifa, ifp) {
394 #ifdef INET
395 		if (ifa->ifa_addr->sa_family == AF_INET) {
396 			struct sockaddr_in *sin;
397 
398 			sin = satosin(ifa->ifa_addr);
399 			if (sin && sin->sin_addr.s_addr)
400 				tp->tun_flags |= TUN_IASET;
401 
402 			if (ifp->if_flags & IFF_POINTOPOINT) {
403 				sin = satosin(ifa->ifa_dstaddr);
404 				if (sin && sin->sin_addr.s_addr)
405 					tp->tun_flags |= TUN_DSTADDR;
406 			}
407 		}
408 #endif
409 #ifdef INET6
410 		if (ifa->ifa_addr->sa_family == AF_INET6) {
411 			struct sockaddr_in6 *sin;
412 
413 			sin = (struct sockaddr_in6 *)ifa->ifa_addr;
414 			if (!IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
415 				tp->tun_flags |= TUN_IASET;
416 
417 			if (ifp->if_flags & IFF_POINTOPOINT) {
418 				sin = (struct sockaddr_in6 *)ifa->ifa_dstaddr;
419 				if (sin &&
420 				    !IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
421 					tp->tun_flags |= TUN_DSTADDR;
422 			} else
423 				tp->tun_flags &= ~TUN_DSTADDR;
424 		}
425 #endif /* INET6 */
426 	}
427 	mutex_exit(&tp->tun_lock);
428 }
429 
430 /*
431  * Process an ioctl request.
432  */
433 static int
434 tun_ioctl(struct ifnet *ifp, u_long cmd, void *data)
435 {
436 	int		error = 0, s;
437 	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);
438 	struct ifreq *ifr = data;
439 
440 	s = splnet();
441 
442 	switch (cmd) {
443 	case SIOCINITIFADDR:
444 		tuninit(tp);
445 		TUNDEBUG("%s: address set\n", ifp->if_xname);
446 		break;
447 	case SIOCSIFBRDADDR:
448 		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
449 		break;
450 	case SIOCSIFMTU:
451 		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
452 			error = EINVAL;
453 			break;
454 		}
455 		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
456 		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
457 			error = 0;
458 		break;
459 	case SIOCADDMULTI:
460 	case SIOCDELMULTI:
461 		if (ifr == NULL) {
462 			error = EAFNOSUPPORT;	/* XXX */
463 			break;
464 		}
465 		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
466 #ifdef INET
467 		case AF_INET:
468 			break;
469 #endif
470 #ifdef INET6
471 		case AF_INET6:
472 			break;
473 #endif
474 		default:
475 			error = EAFNOSUPPORT;
476 			break;
477 		}
478 		break;
479 	default:
480 		error = ifioctl_common(ifp, cmd, data);
481 	}
482 
483 	splx(s);
484 	return (error);
485 }
486 
487 /*
488  * tun_output - queue packets from higher level ready to put out.
489  */
490 static int
491 tun_output(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst,
492     struct rtentry *rt)
493 {
494 	struct tun_softc *tp = ifp->if_softc;
495 	int		s;
496 	int		error;
497 #if defined(INET) || defined(INET6)
498 	int		mlen;
499 	uint32_t	*af;
500 #endif
501 	ALTQ_DECL(struct altq_pktattr pktattr;)
502 
503 	s = splnet();
504 	mutex_enter(&tp->tun_lock);
505 	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);
506 
507 	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
508 		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
509 			  tp->tun_flags);
510 		error = EHOSTDOWN;
511 		goto out;
512 	}
513 
514 	/*
515 	 * if the queueing discipline needs packet classification,
516 	 * do it before prepending link headers.
517 	 */
518 	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family, &pktattr);
519 
520 	bpf_mtap_af(ifp, dst->sa_family, m0);
521 
522 	switch(dst->sa_family) {
523 #ifdef INET6
524 	case AF_INET6:
525 #endif
526 #ifdef INET
527 	case AF_INET:
528 #endif
529 #if defined(INET) || defined(INET6)
530 		if (tp->tun_flags & TUN_PREPADDR) {
531 			/* Simple link-layer header */
532 			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
533 			if (m0 == NULL) {
534 				IF_DROP(&ifp->if_snd);
535 				error = ENOBUFS;
536 				goto out;
537 			}
538 			bcopy(dst, mtod(m0, char *), dst->sa_len);
539 		}
540 
541 		if (tp->tun_flags & TUN_IFHEAD) {
542 			/* Prepend the address family */
543 			M_PREPEND(m0, sizeof(*af), M_DONTWAIT);
544 			if (m0 == NULL) {
545 				IF_DROP(&ifp->if_snd);
546 				error = ENOBUFS;
547 				goto out;
548 			}
549 			af = mtod(m0,uint32_t *);
550 			*af = htonl(dst->sa_family);
551 		} else {
552 #ifdef INET
553 			if (dst->sa_family != AF_INET)
554 #endif
555 			{
556 				error = EAFNOSUPPORT;
557 				goto out;
558 			}
559 		}
560 		/* FALLTHROUGH */
561 	case AF_UNSPEC:
562 		IFQ_ENQUEUE(&ifp->if_snd, m0, &pktattr, error);
563 		if (error) {
564 			ifp->if_collisions++;
565 			error = EAFNOSUPPORT;
566 			m0 = NULL;
567 			goto out;
568 		}
569 		mlen = m0->m_pkthdr.len;
570 		ifp->if_opackets++;
571 		ifp->if_obytes += mlen;
572 		break;
573 #endif
574 	default:
575 		error = EAFNOSUPPORT;
576 		goto out;
577 	}
578 
579 	if (tp->tun_flags & TUN_RWAIT) {
580 		tp->tun_flags &= ~TUN_RWAIT;
581 		wakeup((void *)tp);
582 	}
583 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
584 		softint_schedule(tp->tun_isih);
585 
586 	selnotify(&tp->tun_rsel, 0, 0);
587 out:
588 	mutex_exit(&tp->tun_lock);
589 	splx(s);
590 
591 	if (error && m0) {
592 		m_freem(m0);
593 	}
594 	return 0;
595 }
596 
597 static void
598 tun_i_softintr(void *cookie)
599 {
600 	struct tun_softc *tp = cookie;
601 
602 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
603 		fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM,
604 		    NULL);
605 }
606 
607 static void
608 tun_o_softintr(void *cookie)
609 {
610 	struct tun_softc *tp = cookie;
611 
612 	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
613 		fownsignal(tp->tun_pgid, SIGIO, POLL_OUT, POLLOUT|POLLWRNORM,
614 		    NULL);
615 }
616 
617 /*
618  * the cdevsw interface is now pretty minimal.
619  */
620 int
621 tunioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
622 {
623 	struct tun_softc *tp;
624 	int s, error = 0;
625 
626 	s = splnet();
627 	tp = tun_find_unit(dev);
628 
629 	/* interface was "destroyed" already */
630 	if (tp == NULL) {
631 		error = ENXIO;
632 		goto out_nolock;
633 	}
634 
635 	switch (cmd) {
636 	case TUNSDEBUG:
637 		tundebug = *(int *)data;
638 		break;
639 
640 	case TUNGDEBUG:
641 		*(int *)data = tundebug;
642 		break;
643 
644 	case TUNSIFMODE:
645 		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
646 		case IFF_POINTOPOINT:
647 		case IFF_BROADCAST:
648 			if (tp->tun_if.if_flags & IFF_UP) {
649 				error = EBUSY;
650 				goto out;
651 			}
652 			tp->tun_if.if_flags &=
653 				~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
654 			tp->tun_if.if_flags |= *(int *)data;
655 			break;
656 		default:
657 			error = EINVAL;
658 			goto out;
659 		}
660 		break;
661 
662 	case TUNSLMODE:
663 		if (*(int *)data) {
664 			tp->tun_flags |= TUN_PREPADDR;
665 			tp->tun_flags &= ~TUN_IFHEAD;
666 		} else
667 			tp->tun_flags &= ~TUN_PREPADDR;
668 		break;
669 
670 	case TUNSIFHEAD:
671 		if (*(int *)data) {
672 			tp->tun_flags |= TUN_IFHEAD;
673 			tp->tun_flags &= ~TUN_PREPADDR;
674 		} else
675 			tp->tun_flags &= ~TUN_IFHEAD;
676 		break;
677 
678 	case TUNGIFHEAD:
679 		*(int *)data = (tp->tun_flags & TUN_IFHEAD);
680 		break;
681 
682 	case FIONBIO:
683 		if (*(int *)data)
684 			tp->tun_flags |= TUN_NBIO;
685 		else
686 			tp->tun_flags &= ~TUN_NBIO;
687 		break;
688 
689 	case FIOASYNC:
690 		if (*(int *)data)
691 			tp->tun_flags |= TUN_ASYNC;
692 		else
693 			tp->tun_flags &= ~TUN_ASYNC;
694 		break;
695 
696 	case FIONREAD:
697 		if (tp->tun_if.if_snd.ifq_head)
698 			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
699 		else
700 			*(int *)data = 0;
701 		break;
702 
703 	case TIOCSPGRP:
704 	case FIOSETOWN:
705 		error = fsetown(&tp->tun_pgid, cmd, data);
706 		break;
707 
708 	case TIOCGPGRP:
709 	case FIOGETOWN:
710 		error = fgetown(tp->tun_pgid, cmd, data);
711 		break;
712 
713 	default:
714 		error = ENOTTY;
715 	}
716 
717 out:
718 	mutex_exit(&tp->tun_lock);
719 out_nolock:
720 	splx(s);
721 	return (error);
722 }
723 
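/*
 * Control-path example (an illustrative sketch only): put the character
 * device into "if head" mode, so every packet read from or written to it
 * carries a 4-byte address family prefix in network byte order, and make
 * reads non-blocking so an empty queue returns EWOULDBLOCK instead of
 * sleeping.  The helper name and the already-open descriptor fd are
 * assumptions made for this example.
 *
 *	#include <sys/ioctl.h>
 *	#include <net/if_tun.h>
 *
 *	static int
 *	tun_setup(int fd)
 *	{
 *		int on = 1;
 *
 *		if (ioctl(fd, TUNSIFHEAD, &on) == -1)	// sets TUN_IFHEAD
 *			return -1;
 *		return ioctl(fd, FIONBIO, &on);		// sets TUN_NBIO
 *	}
 */
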
724 /*
725  * The cdevsw read interface - reads a packet at a time, or at
726  * least as much of a packet as can be read.
727  */
728 int
729 tunread(dev_t dev, struct uio *uio, int ioflag)
730 {
731 	struct tun_softc *tp;
732 	struct ifnet	*ifp;
733 	struct mbuf	*m, *m0;
734 	int		error = 0, len, s, index;
735 
736 	s = splnet();
737 	tp = tun_find_unit(dev);
738 
739 	/* interface was "destroyed" already */
740 	if (tp == NULL) {
741 		error = ENXIO;
742 		goto out_nolock;
743 	}
744 
745 	index = tp->tun_if.if_index;
746 	ifp = &tp->tun_if;
747 
748 	TUNDEBUG ("%s: read\n", ifp->if_xname);
749 	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
750 		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
751 		error = EHOSTDOWN;
752 		goto out;
753 	}
754 
755 	tp->tun_flags &= ~TUN_RWAIT;
756 
757 	do {
758 		IFQ_DEQUEUE(&ifp->if_snd, m0);
759 		if (m0 == 0) {
760 			if (tp->tun_flags & TUN_NBIO) {
761 				error = EWOULDBLOCK;
762 				goto out;
763 			}
764 			tp->tun_flags |= TUN_RWAIT;
765 			if (mtsleep((void *)tp, PZERO|PCATCH|PNORELOCK,
766 					"tunread", 0, &tp->tun_lock) != 0) {
767 				error = EINTR;
768 				goto out_nolock;
769 			} else {
770 				/*
771 				 * Maybe the interface was destroyed while
772 				 * we were sleeping, so let's ensure that
773 				 * we're looking at the same (valid) tun
774 				 * interface before looping.
775 				 */
776 				tp = tun_find_unit(dev);
777 				if (tp == NULL) {
778 					error = ENXIO;
779 					goto out_nolock;
780 				}
781 				if (tp->tun_if.if_index != index) {
782 					error = ENXIO;
783 					goto out;
784 				}
785 			}
786 		}
787 	} while (m0 == 0);
788 
789 	mutex_exit(&tp->tun_lock);
790 	splx(s);
791 
792 	/* Copy the mbuf chain */
793 	while (m0 && uio->uio_resid > 0 && error == 0) {
794 		len = min(uio->uio_resid, m0->m_len);
795 		if (len != 0)
796 			error = uiomove(mtod(m0, void *), len, uio);
797 		MFREE(m0, m);
798 		m0 = m;
799 	}
800 
801 	if (m0) {
802 		TUNDEBUG("Dropping mbuf\n");
803 		m_freem(m0);
804 	}
805 	if (error)
806 		ifp->if_ierrors++;
807 
808 	return (error);
809 
810 out:
811 	mutex_exit(&tp->tun_lock);
812 out_nolock:
813 	splx(s);
814 	return (error);
815 }
816 
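/*
 * Read-path example (an illustrative sketch only): with TUNSIFHEAD set,
 * every packet read from the device is preceded by its address family as
 * a 4-byte value in network byte order (prepended by tun_output() above),
 * and one read(2)/readv(2) consumes exactly one packet.  The helper name
 * and the use of readv(2) to split off the prefix are assumptions made
 * for this example.
 *
 *	#include <sys/uio.h>
 *	#include <arpa/inet.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	static ssize_t
 *	tun_read_af(int fd, void *buf, size_t len, uint32_t *afp)
 *	{
 *		struct iovec iov[2];
 *		ssize_t n;
 *
 *		iov[0].iov_base = afp;		// 4-byte family header
 *		iov[0].iov_len = sizeof(*afp);
 *		iov[1].iov_base = buf;		// packet payload
 *		iov[1].iov_len = len;
 *		n = readv(fd, iov, 2);
 *		if (n < (ssize_t)sizeof(*afp))
 *			return -1;
 *		*afp = ntohl(*afp);		// e.g. AF_INET, AF_INET6
 *		return n - (ssize_t)sizeof(*afp);
 *	}
 */
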
817 /*
818  * the cdevsw write interface - an atomic write is a packet - or else!
819  */
820 int
821 tunwrite(dev_t dev, struct uio *uio, int ioflag)
822 {
823 	struct tun_softc *tp;
824 	struct ifnet	*ifp;
825 	struct mbuf	*top, **mp, *m;
826 	struct ifqueue	*ifq;
827 	struct sockaddr	dst;
828 	int		isr, error = 0, s, tlen, mlen;
829 	uint32_t	family;
830 
831 	s = splnet();
832 	tp = tun_find_unit(dev);
833 
834 	/* interface was "destroyed" already */
835 	if (tp == NULL) {
836 		error = ENXIO;
837 		goto out_nolock;
838 	}
839 
840 	/* Unlock until we've got the data */
841 	mutex_exit(&tp->tun_lock);
842 	splx(s);
843 
844 	ifp = &tp->tun_if;
845 
846 	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);
847 
848 	if (tp->tun_flags & TUN_PREPADDR) {
849 		if (uio->uio_resid < sizeof(dst)) {
850 			error = EIO;
851 			goto out0;
852 		}
853 		error = uiomove((void *)&dst, sizeof(dst), uio);
854 		if (dst.sa_len > sizeof(dst)) {
855 			/* Duh.. */
856 			char discard;
857 			int n = dst.sa_len - sizeof(dst);
858 			while (n--)
859 				if ((error = uiomove(&discard, 1, uio)) != 0) {
860 					goto out0;
861 				}
862 		}
863 	} else if (tp->tun_flags & TUN_IFHEAD) {
864 		if (uio->uio_resid < sizeof(family)){
865 			error = EIO;
866 			goto out0;
867 		}
868 		error = uiomove((void *)&family, sizeof(family), uio);
869 		dst.sa_family = ntohl(family);
870 	} else {
871 #ifdef INET
872 		dst.sa_family = AF_INET;
873 #endif
874 	}
875 
876 	if (uio->uio_resid > TUNMTU) {
877 		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
878 		    (unsigned long)uio->uio_resid);
879 		error = EIO;
880 		goto out0;
881 	}
882 
883 	switch (dst.sa_family) {
884 #ifdef INET
885 	case AF_INET:
886 		ifq = &ipintrq;
887 		isr = NETISR_IP;
888 		break;
889 #endif
890 #ifdef INET6
891 	case AF_INET6:
892 		ifq = &ip6intrq;
893 		isr = NETISR_IPV6;
894 		break;
895 #endif
896 	default:
897 		error = EAFNOSUPPORT;
898 		goto out0;
899 	}
900 
901 	tlen = uio->uio_resid;
902 
903 	/* get a header mbuf */
904 	MGETHDR(m, M_DONTWAIT, MT_DATA);
905 	if (m == NULL) {
906 		error = ENOBUFS;
907 		goto out0;
908 	}
909 	mlen = MHLEN;
910 
911 	top = NULL;
912 	mp = &top;
913 	while (error == 0 && uio->uio_resid > 0) {
914 		m->m_len = min(mlen, uio->uio_resid);
915 		error = uiomove(mtod(m, void *), m->m_len, uio);
916 		*mp = m;
917 		mp = &m->m_next;
918 		if (error == 0 && uio->uio_resid > 0) {
919 			MGET(m, M_DONTWAIT, MT_DATA);
920 			if (m == NULL) {
921 				error = ENOBUFS;
922 				break;
923 			}
924 			mlen = MLEN;
925 		}
926 	}
927 	if (error) {
928 		if (top != NULL)
929 			m_freem (top);
930 		ifp->if_ierrors++;
931 		goto out0;
932 	}
933 
934 	top->m_pkthdr.len = tlen;
935 	top->m_pkthdr.rcvif = ifp;
936 
937 	bpf_mtap_af(ifp, dst.sa_family, top);
938 
939 	s = splnet();
940 	mutex_enter(&tp->tun_lock);
941 	if ((tp->tun_flags & TUN_INITED) == 0) {
942 		/* Interface was destroyed */
943 		error = ENXIO;
944 		goto out;
945 	}
946 	if (IF_QFULL(ifq)) {
947 		IF_DROP(ifq);
948 		ifp->if_collisions++;
949 		mutex_exit(&tp->tun_lock);
950 		m_freem(top);
951 		error = ENOBUFS;
952 		goto out_nolock;
953 	}
954 
955 	IF_ENQUEUE(ifq, top);
956 	ifp->if_ipackets++;
957 	ifp->if_ibytes += tlen;
958 	schednetisr(isr);
959 out:
960 	mutex_exit(&tp->tun_lock);
961 out_nolock:
962 	splx(s);
963 out0:
964 	return (error);
965 }
966 
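/*
 * Write-path example (an illustrative sketch only): with TUNSIFHEAD set,
 * the writer prepends the destination address family as a 4-byte value in
 * network byte order, and each write(2)/writev(2) must carry exactly one
 * complete packet of at most TUNMTU payload bytes.  The helper name and
 * the use of writev(2) to prepend the prefix are assumptions made for
 * this example.
 *
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <arpa/inet.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	static ssize_t
 *	tun_write_v6(int fd, void *pkt, size_t pktlen)
 *	{
 *		uint32_t af = htonl(AF_INET6);
 *		struct iovec iov[2];
 *
 *		iov[0].iov_base = &af;		// 4-byte family header
 *		iov[0].iov_len = sizeof(af);
 *		iov[1].iov_base = pkt;		// one complete IPv6 packet
 *		iov[1].iov_len = pktlen;
 *		return writev(fd, iov, 2);
 *	}
 */
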
967 #ifdef ALTQ
968 /*
969  * Start packet transmission on the interface.
970  * When the interface queue is rate-limited by ALTQ or TBR,
971  * if_start is needed to drain packets from the queue in order
972  * to notify readers when outgoing packets become ready.
973  *
974  * Should be called at splnet.
975  */
976 static void
977 tunstart(struct ifnet *ifp)
978 {
979 	struct tun_softc *tp = ifp->if_softc;
980 
981 	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
982 		return;
983 
984 	mutex_enter(&tp->tun_lock);
985 	if (!IF_IS_EMPTY(&ifp->if_snd)) {
986 		if (tp->tun_flags & TUN_RWAIT) {
987 			tp->tun_flags &= ~TUN_RWAIT;
988 			wakeup((void *)tp);
989 		}
990 		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
991 			softint_schedule(tp->tun_osih);
992 
993 		selnotify(&tp->tun_rsel, 0, 0);
994 	}
995 	mutex_exit(&tp->tun_lock);
996 }
997 #endif /* ALTQ */
998 /*
999  * tunpoll - the poll interface; this is really only useful for
1000  * reads.  The write side always reports ready: write never blocks
1001  * anyway, it either accepts the packet or drops it.
1002  */
1003 int
1004 tunpoll(dev_t dev, int events, struct lwp *l)
1005 {
1006 	struct tun_softc *tp;
1007 	struct ifnet	*ifp;
1008 	int		s, revents = 0;
1009 
1010 	s = splnet();
1011 	tp = tun_find_unit(dev);
1012 
1013 	/* interface was "destroyed" already */
1014 	if (tp == NULL)
1015 		goto out_nolock;
1016 
1017 	ifp = &tp->tun_if;
1018 
1019 	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);
1020 
1021 	if (events & (POLLIN | POLLRDNORM)) {
1022 		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
1023 			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
1024 			    ifp->if_snd.ifq_len);
1025 			revents |= events & (POLLIN | POLLRDNORM);
1026 		} else {
1027 			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
1028 			selrecord(l, &tp->tun_rsel);
1029 		}
1030 	}
1031 
1032 	if (events & (POLLOUT | POLLWRNORM))
1033 		revents |= events & (POLLOUT | POLLWRNORM);
1034 
1035 	mutex_exit(&tp->tun_lock);
1036 out_nolock:
1037 	splx(s);
1038 	return (revents);
1039 }
1040 
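/*
 * Poll example (an illustrative sketch only): the write side is always
 * reported ready, so a reader typically polls for input only and then
 * drains one packet per read(2).  The helper name, the INFTIM timeout
 * and the already-open descriptor fd are assumptions made for this
 * example.
 *
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	static ssize_t
 *	tun_read_wait(int fd, void *buf, size_t len)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *		if (poll(&pfd, 1, INFTIM) == -1)	// wait for a packet
 *			return -1;
 *		return read(fd, buf, len);		// one packet
 *	}
 */
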
1041 static void
1042 filt_tunrdetach(struct knote *kn)
1043 {
1044 	struct tun_softc *tp = kn->kn_hook;
1045 	int s;
1046 
1047 	s = splnet();
1048 	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
1049 	splx(s);
1050 }
1051 
1052 static int
1053 filt_tunread(struct knote *kn, long hint)
1054 {
1055 	struct tun_softc *tp = kn->kn_hook;
1056 	struct ifnet *ifp = &tp->tun_if;
1057 	struct mbuf *m;
1058 	int s;
1059 
1060 	s = splnet();
1061 	IF_POLL(&ifp->if_snd, m);
1062 	if (m == NULL) {
1063 		splx(s);
1064 		return (0);
1065 	}
1066 
1067 	for (kn->kn_data = 0; m != NULL; m = m->m_next)
1068 		kn->kn_data += m->m_len;
1069 
1070 	splx(s);
1071 	return (1);
1072 }
1073 
1074 static const struct filterops tunread_filtops =
1075 	{ 1, NULL, filt_tunrdetach, filt_tunread };
1076 
1077 static const struct filterops tun_seltrue_filtops =
1078 	{ 1, NULL, filt_tunrdetach, filt_seltrue };
1079 
1080 int
1081 tunkqfilter(dev_t dev, struct knote *kn)
1082 {
1083 	struct tun_softc *tp;
1084 	struct klist *klist;
1085 	int rv = 0, s;
1086 
1087 	s = splnet();
1088 	tp = tun_find_unit(dev);
1089 	if (tp == NULL)
1090 		goto out_nolock;
1091 
1092 	switch (kn->kn_filter) {
1093 	case EVFILT_READ:
1094 		klist = &tp->tun_rsel.sel_klist;
1095 		kn->kn_fop = &tunread_filtops;
1096 		break;
1097 
1098 	case EVFILT_WRITE:
1099 		klist = &tp->tun_rsel.sel_klist;
1100 		kn->kn_fop = &tun_seltrue_filtops;
1101 		break;
1102 
1103 	default:
1104 		rv = EINVAL;
1105 		goto out;
1106 	}
1107 
1108 	kn->kn_hook = tp;
1109 
1110 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
1111 
1112 out:
1113 	mutex_exit(&tp->tun_lock);
1114 out_nolock:
1115 	splx(s);
1116 	return (rv);
1117 }
1118
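/*
 * kqueue example (an illustrative sketch only): EVFILT_READ fires once a
 * packet is queued and reports the length of the packet at the head of
 * the queue in the event's data field, while EVFILT_WRITE is always
 * ready.  The helper name and the caller-supplied kqueue descriptor kq
 * are assumptions made for this example.
 *
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *
 *	static int
 *	tun_kq_attach(int kq, int fd)
 *	{
 *		struct kevent kev;
 *
 *		EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *		return kevent(kq, &kev, 1, NULL, 0, NULL);	// register only
 *	}
 */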