/*	$OpenBSD: if_pppx.c,v 1.105 2020/09/20 12:27:40 mvs Exp $ */

/*
 * Copyright (c) 2010 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2010 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (c) 2009 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/vnode.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include <net/if.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>

#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/nd6.h>
#endif /* INET6 */

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include "pf.h"
#if NPF > 0
#include <net/pfvar.h>
#endif

#include <net/ppp_defs.h>
#include <net/ppp-comp.h>
#include <crypto/arc4.h>

#ifdef PIPEX
#include <net/radix.h>
#include <net/pipex.h>
#include <net/pipex_local.h>
#else
#error PIPEX option not enabled
#endif

#ifdef PPPX_DEBUG
#define PPPX_D_INIT	(1<<0)

int pppxdebug = 0;

#define DPRINTF(_m, _p...)	do { \
					if (ISSET(pppxdebug, (_m))) \
						printf(_p); \
				} while (0)
#else
#define DPRINTF(_m, _p...)	/* _m, _p */
#endif

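/*
 * Each open of /dev/pppx allocates a pppx_dev.  PIPEXASESSION ioctls on
 * the device then create pppx(4) interfaces, one per pipex(4) session.
 * Traffic that pipex does not handle in the kernel is exchanged with the
 * userland PPP daemon (typically npppd(8)) via read(2)/write(2) on the
 * same device.
 */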
struct pppx_if;

/*
 * Locks used to protect struct members and global data
 *       I       immutable after creation
 *       K       kernel lock
 *       N       net lock
 */

struct pppx_dev {
	LIST_ENTRY(pppx_dev)	pxd_entry;	/* [K] */
	int			pxd_unit;	/* [I] */

	/* kq shizz */
	struct selinfo		pxd_rsel;
	struct mutex		pxd_rsel_mtx;
	struct selinfo		pxd_wsel;
	struct mutex		pxd_wsel_mtx;

	/* queue of packets for userland to service - protected by splnet */
	struct mbuf_queue	pxd_svcq;
	int			pxd_waiting;	/* [N] */
	LIST_HEAD(,pppx_if)	pxd_pxis;	/* [N] */
};

LIST_HEAD(, pppx_dev)		pppx_devs =
				    LIST_HEAD_INITIALIZER(pppx_devs); /* [K] */
struct pool			pppx_if_pl;

struct pppx_dev			*pppx_dev_lookup(dev_t);
struct pppx_dev			*pppx_dev2pxd(dev_t);

struct pppx_if_key {
	int			pxik_session_id;	/* [I] */
	int			pxik_protocol;		/* [I] */
};

struct pppx_if {
	struct pppx_if_key	pxi_key;		/* [I] must be first
							    in the struct */

	RBT_ENTRY(pppx_if)	pxi_entry;		/* [N] */
	LIST_ENTRY(pppx_if)	pxi_list;		/* [N] */

	int			pxi_ready;		/* [N] */

	int			pxi_unit;		/* [I] */
	struct ifnet		pxi_if;
	struct pppx_dev		*pxi_dev;		/* [I] */
	struct pipex_session	*pxi_session;		/* [I] */
};

static inline int
pppx_if_cmp(const struct pppx_if *a, const struct pppx_if *b)
{
	return memcmp(&a->pxi_key, &b->pxi_key, sizeof(a->pxi_key));
}

RBT_HEAD(pppx_ifs, pppx_if) pppx_ifs = RBT_INITIALIZER(&pppx_ifs); /* [N] */
RBT_PROTOTYPE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);

int		pppx_if_next_unit(void);
struct pppx_if *pppx_if_find(struct pppx_dev *, int, int);
int		pppx_add_session(struct pppx_dev *,
		    struct pipex_session_req *);
int		pppx_del_session(struct pppx_dev *,
		    struct pipex_session_close_req *);
int		pppx_set_session_descr(struct pppx_dev *,
		    struct pipex_session_descr_req *);

void		pppx_if_destroy(struct pppx_dev *, struct pppx_if *);
void		pppx_if_qstart(struct ifqueue *);
int		pppx_if_output(struct ifnet *, struct mbuf *,
		    struct sockaddr *, struct rtentry *);
int		pppx_if_ioctl(struct ifnet *, u_long, caddr_t);


void		pppxattach(int);

void		filt_pppx_rdetach(struct knote *);
int		filt_pppx_read(struct knote *, long);

const struct filterops pppx_rd_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppx_rdetach,
	.f_event	= filt_pppx_read,
};

void		filt_pppx_wdetach(struct knote *);
int		filt_pppx_write(struct knote *, long);

const struct filterops pppx_wr_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppx_wdetach,
	.f_event	= filt_pppx_write,
};

struct pppx_dev *
pppx_dev_lookup(dev_t dev)
{
	struct pppx_dev *pxd;
	int unit = minor(dev);

	LIST_FOREACH(pxd, &pppx_devs, pxd_entry) {
		if (pxd->pxd_unit == unit)
			return (pxd);
	}

	return (NULL);
}

struct pppx_dev *
pppx_dev2pxd(dev_t dev)
{
	struct pppx_dev *pxd;

	pxd = pppx_dev_lookup(dev);

	return (pxd);
}

void
pppxattach(int n)
{
	pool_init(&pppx_if_pl, sizeof(struct pppx_if), 0, IPL_NONE,
	    PR_WAITOK, "pppxif", NULL);
	pipex_init();
}

int
pppxopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppx_dev *pxd;

	pxd = malloc(sizeof(*pxd), M_DEVBUF, M_WAITOK | M_ZERO);
	if (pppx_dev_lookup(dev) != NULL) {
		free(pxd, M_DEVBUF, sizeof(*pxd));
		return (EBUSY);
	}

	pxd->pxd_unit = minor(dev);
	mtx_init(&pxd->pxd_rsel_mtx, IPL_NET);
	mtx_init(&pxd->pxd_wsel_mtx, IPL_NET);
	LIST_INIT(&pxd->pxd_pxis);

	mq_init(&pxd->pxd_svcq, 128, IPL_NET);
	LIST_INSERT_HEAD(&pppx_devs, pxd, pxd_entry);

	return 0;
}

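/*
 * pppxread() hands one queued packet to userland per call.  Packets are
 * queued on pxd_svcq by pppx_if_output() when pipex is disabled; if the
 * queue is empty the caller sleeps (unless IO_NDELAY is set) until
 * pppx_if_output() wakes it.
 */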
int
pppxread(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct mbuf *m, *m0;
	int error = 0;
	size_t len;

	if (!pxd)
		return (ENXIO);

	while ((m0 = mq_dequeue(&pxd->pxd_svcq)) == NULL) {
		if (ISSET(ioflag, IO_NDELAY))
			return (EWOULDBLOCK);

		NET_LOCK();
		pxd->pxd_waiting = 1;
		error = rwsleep_nsec(pxd, &netlock,
		    (PZERO + 1)|PCATCH, "pppxread", INFSLP);
		NET_UNLOCK();
		if (error != 0) {
			return (error);
		}
	}

	while (m0 != NULL && uio->uio_resid > 0 && error == 0) {
		len = ulmin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		m = m_free(m0);
		m0 = m;
	}

	m_freem(m0);

	return (error);
}

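/*
 * pppxwrite() injects a packet from userland.  The write starts with a
 * struct pppx_hdr identifying the session (and thus the pppx interface),
 * followed by a 4-byte address family in network byte order; both are
 * stripped before the packet is handed to the IP input path.
 */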
int
pppxwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct pppx_hdr *th;
	struct pppx_if	*pxi;
	uint32_t proto;
	struct mbuf *top, **mp, *m;
	int tlen;
	int error = 0;
	size_t mlen;

	if (uio->uio_resid < sizeof(*th) + sizeof(uint32_t) ||
	    uio->uio_resid > MCLBYTES)
		return (EMSGSIZE);

	tlen = uio->uio_resid;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	mlen = MHLEN;
	if (uio->uio_resid > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_free(m);
			return (ENOBUFS);
		}
		mlen = MCLBYTES;
	}

	top = NULL;
	mp = &top;

	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = ulmin(mlen, uio->uio_resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (error == 0 && uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
			if (uio->uio_resid >= MINCLSIZE) {
				MCLGET(m, M_DONTWAIT);
				if (!(m->m_flags & M_EXT)) {
					error = ENOBUFS;
					m_free(m);
					break;
				}
				mlen = MCLBYTES;
			}
		}
	}

	if (error) {
		m_freem(top);
		return (error);
	}

	top->m_pkthdr.len = tlen;

	/* Find the interface */
	th = mtod(top, struct pppx_hdr *);
	m_adj(top, sizeof(struct pppx_hdr));
	pxi = pppx_if_find(pxd, th->pppx_id, th->pppx_proto);
	if (pxi == NULL) {
		m_freem(top);
		return (EINVAL);
	}
	top->m_pkthdr.ph_ifidx = pxi->pxi_if.if_index;

#if NBPFILTER > 0
	if (pxi->pxi_if.if_bpf)
		bpf_mtap(pxi->pxi_if.if_bpf, top, BPF_DIRECTION_IN);
#endif
	/* strip the tunnel header */
	proto = ntohl(*(uint32_t *)(th + 1));
	m_adj(top, sizeof(uint32_t));

	NET_LOCK();

	switch (proto) {
	case AF_INET:
		ipv4_input(&pxi->pxi_if, top);
		break;
#ifdef INET6
	case AF_INET6:
		ipv6_input(&pxi->pxi_if, top);
		break;
#endif
	default:
		m_freem(top);
		error = EAFNOSUPPORT;
		break;
	}

	NET_UNLOCK();

	return (error);
}

int
pppxioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	int error = 0;

	NET_LOCK();
	switch (cmd) {
	case PIPEXASESSION:
		error = pppx_add_session(pxd,
		    (struct pipex_session_req *)addr);
		break;

	case PIPEXDSESSION:
		error = pppx_del_session(pxd,
		    (struct pipex_session_close_req *)addr);
		break;

	case PIPEXSIFDESCR:
		error = pppx_set_session_descr(pxd,
		    (struct pipex_session_descr_req *)addr);
		break;

	case FIONBIO:
		break;
	case FIONREAD:
		*(int *)addr = mq_hdatalen(&pxd->pxd_svcq);
		break;

	default:
		error = pipex_ioctl(pxd, cmd, addr);
		break;
	}
	NET_UNLOCK();

	return (error);
}

int
pppxpoll(dev_t dev, int events, struct proc *p)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	int revents = 0;

	if (events & (POLLIN | POLLRDNORM)) {
		if (!mq_empty(&pxd->pxd_svcq))
			revents |= events & (POLLIN | POLLRDNORM);
	}
	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &pxd->pxd_rsel);
	}

	return (revents);
}

int
pppxkqfilter(dev_t dev, struct knote *kn)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct mutex *mtx;
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		mtx = &pxd->pxd_rsel_mtx;
		klist = &pxd->pxd_rsel.si_note;
		kn->kn_fop = &pppx_rd_filtops;
		break;
	case EVFILT_WRITE:
		mtx = &pxd->pxd_wsel_mtx;
		klist = &pxd->pxd_wsel.si_note;
		kn->kn_fop = &pppx_wr_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)pxd;

	mtx_enter(mtx);
	klist_insert(klist, kn);
	mtx_leave(mtx);

	return (0);
}

void
filt_pppx_rdetach(struct knote *kn)
{
	struct pppx_dev *pxd = (struct pppx_dev *)kn->kn_hook;
	struct klist *klist = &pxd->pxd_rsel.si_note;

	mtx_enter(&pxd->pxd_rsel_mtx);
	klist_remove(klist, kn);
	mtx_leave(&pxd->pxd_rsel_mtx);
}

int
filt_pppx_read(struct knote *kn, long hint)
{
	struct pppx_dev *pxd = (struct pppx_dev *)kn->kn_hook;

	kn->kn_data = mq_hdatalen(&pxd->pxd_svcq);

	return (kn->kn_data > 0);
}

void
filt_pppx_wdetach(struct knote *kn)
{
	struct pppx_dev *pxd = (struct pppx_dev *)kn->kn_hook;
	struct klist *klist = &pxd->pxd_wsel.si_note;

	mtx_enter(&pxd->pxd_wsel_mtx);
	klist_remove(klist, kn);
	mtx_leave(&pxd->pxd_wsel_mtx);
}

int
filt_pppx_write(struct knote *kn, long hint)
{
	/* We're always ready to accept a write. */
	return (1);
}

int
pppxclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppx_dev *pxd;
	struct pppx_if	*pxi;

	pxd = pppx_dev_lookup(dev);

	/* XXX */
	NET_LOCK();
	while ((pxi = LIST_FIRST(&pxd->pxd_pxis)))
		pppx_if_destroy(pxd, pxi);
	NET_UNLOCK();

	LIST_REMOVE(pxd, pxd_entry);

	mq_purge(&pxd->pxd_svcq);

	free(pxd, M_DEVBUF, sizeof(*pxd));

	return (0);
}

int
pppx_if_next_unit(void)
{
	struct pppx_if *pxi;
	int unit = 0;

	/* this is safe without splnet since we're not modifying it */
	do {
		int found = 0;
		RBT_FOREACH(pxi, pppx_ifs, &pppx_ifs) {
			if (pxi->pxi_unit == unit) {
				found = 1;
				break;
			}
		}

		if (found == 0)
			break;
		unit++;
	} while (unit > 0);

	return (unit);
}

struct pppx_if *
pppx_if_find(struct pppx_dev *pxd, int session_id, int protocol)
{
	struct pppx_if_key key;
	struct pppx_if *pxi;

	memset(&key, 0, sizeof(key));
	key.pxik_session_id = session_id;
	key.pxik_protocol = protocol;

	pxi = RBT_FIND(pppx_ifs, &pppx_ifs, (struct pppx_if *)&key);
	if (pxi && pxi->pxi_ready == 0)
		pxi = NULL;

	return pxi;
}

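/*
 * pppx_add_session() backs the PIPEXASESSION ioctl: it creates a pipex
 * session, allocates a pppx_if, picks a unit number, attaches the pppxN
 * interface, configures the IPv4 addresses from the request and finally
 * links the session to the interface.  On failure the partially built
 * state is torn down again.
 */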
int
pppx_add_session(struct pppx_dev *pxd, struct pipex_session_req *req)
{
	struct pppx_if *pxi;
	struct pipex_session *session;
	struct ifnet *ifp;
	int unit, error = 0;
	struct in_ifaddr *ia;
	struct sockaddr_in ifaddr;

	/*
	 * XXX: As long as `session' is allocated as part of a `pxi'
	 *	it isn't possible to free it separately.  So disallow
	 *	the timeout feature until this is fixed.
	 */
	if (req->pr_timeout_sec != 0)
		return (EINVAL);

	error = pipex_init_session(&session, req);
	if (error)
		return (error);

	pxi = pool_get(&pppx_if_pl, PR_WAITOK | PR_ZERO);
	ifp = &pxi->pxi_if;

	pxi->pxi_session = session;

	/* try to set the interface up */
	unit = pppx_if_next_unit();
	if (unit < 0) {
		error = ENOMEM;
		goto out;
	}

	pxi->pxi_unit = unit;
	pxi->pxi_key.pxik_session_id = req->pr_session_id;
	pxi->pxi_key.pxik_protocol = req->pr_protocol;
	pxi->pxi_dev = pxd;

	if (RBT_INSERT(pppx_ifs, &pppx_ifs, pxi) != NULL) {
		error = EADDRINUSE;
		goto out;
	}
	LIST_INSERT_HEAD(&pxd->pxd_pxis, pxi, pxi_list);

	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d", "pppx", unit);
	ifp->if_mtu = req->pr_peer_mru;	/* XXX */
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST | IFF_UP;
	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
	ifp->if_qstart = pppx_if_qstart;
	ifp->if_output = pppx_if_output;
	ifp->if_ioctl = pppx_if_ioctl;
	ifp->if_rtrequest = p2p_rtrequest;
	ifp->if_type = IFT_PPP;
	ifp->if_softc = pxi;
	/* ifp->if_rdomain = req->pr_rdomain; */
	if_counters_alloc(ifp);
	/* XXXSMP: be sure pppx_if_qstart() called with NET_LOCK held */
	ifq_set_maxlen(&ifp->if_snd, 1);

	/* XXXSMP breaks atomicity */
	NET_UNLOCK();
	if_attach(ifp);
	NET_LOCK();

	if_addgroup(ifp, "pppx");
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(u_int32_t));
#endif

	/* XXX ipv6 support?  how does the caller indicate it wants ipv6
	 * instead of ipv4?
	 */
	memset(&ifaddr, 0, sizeof(ifaddr));
	ifaddr.sin_family = AF_INET;
	ifaddr.sin_len = sizeof(ifaddr);
	ifaddr.sin_addr = req->pr_ip_srcaddr;

	ia = malloc(sizeof (*ia), M_IFADDR, M_WAITOK | M_ZERO);

	ia->ia_addr.sin_family = AF_INET;
	ia->ia_addr.sin_len = sizeof(struct sockaddr_in);
	ia->ia_addr.sin_addr = req->pr_ip_srcaddr;

	ia->ia_dstaddr.sin_family = AF_INET;
	ia->ia_dstaddr.sin_len = sizeof(struct sockaddr_in);
	ia->ia_dstaddr.sin_addr = req->pr_ip_address;

	ia->ia_sockmask.sin_family = AF_INET;
	ia->ia_sockmask.sin_len = sizeof(struct sockaddr_in);
	ia->ia_sockmask.sin_addr = req->pr_ip_netmask;

	ia->ia_ifa.ifa_addr = sintosa(&ia->ia_addr);
	ia->ia_ifa.ifa_dstaddr = sintosa(&ia->ia_dstaddr);
	ia->ia_ifa.ifa_netmask = sintosa(&ia->ia_sockmask);
	ia->ia_ifa.ifa_ifp = ifp;

	ia->ia_netmask = ia->ia_sockmask.sin_addr.s_addr;

	error = in_ifinit(ifp, ia, &ifaddr, 1);
	if (error) {
		printf("pppx: unable to set addresses for %s, error=%d\n",
		    ifp->if_xname, error);
	} else {
		if_addrhooks_run(ifp);
	}

	error = pipex_link_session(session, ifp, pxd);
	if (error)
		goto detach;

	SET(ifp->if_flags, IFF_RUNNING);
	pxi->pxi_ready = 1;

	return (error);

detach:
	/* XXXSMP breaks atomicity */
	NET_UNLOCK();
	if_detach(ifp);
	NET_LOCK();

	if (RBT_REMOVE(pppx_ifs, &pppx_ifs, pxi) == NULL)
		panic("%s: inconsistent RB tree", __func__);
	LIST_REMOVE(pxi, pxi_list);
out:
	pool_put(&pppx_if_pl, pxi);
	pipex_rele_session(session);

	return (error);
}

int
pppx_del_session(struct pppx_dev *pxd, struct pipex_session_close_req *req)
{
	struct pppx_if *pxi;

	pxi = pppx_if_find(pxd, req->pcr_session_id, req->pcr_protocol);
	if (pxi == NULL)
		return (EINVAL);

	req->pcr_stat = pxi->pxi_session->stat;

	pppx_if_destroy(pxd, pxi);
	return (0);
}

int
pppx_set_session_descr(struct pppx_dev *pxd,
    struct pipex_session_descr_req *req)
{
	struct pppx_if *pxi;

	pxi = pppx_if_find(pxd, req->pdr_session_id, req->pdr_protocol);
	if (pxi == NULL)
		return (EINVAL);

	(void)memset(pxi->pxi_if.if_description, 0, IFDESCRSIZE);
	strlcpy(pxi->pxi_if.if_description, req->pdr_descr, IFDESCRSIZE);

	return (0);
}

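/*
 * pppx_if_destroy() undoes pppx_add_session(): the interface is marked
 * down, the pipex session is unlinked, the ifnet is detached (temporarily
 * dropping the net lock), the session is released and the pppx_if is
 * removed from the RB tree and the per-device list before being returned
 * to the pool.
 */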
void
pppx_if_destroy(struct pppx_dev *pxd, struct pppx_if *pxi)
{
	struct ifnet *ifp;
	struct pipex_session *session;

	NET_ASSERT_LOCKED();
	session = pxi->pxi_session;
	ifp = &pxi->pxi_if;
	pxi->pxi_ready = 0;
	CLR(ifp->if_flags, IFF_RUNNING);

	pipex_unlink_session(session);

	/* XXXSMP breaks atomicity */
	NET_UNLOCK();
	if_detach(ifp);
	NET_LOCK();

	pipex_rele_session(session);
	if (RBT_REMOVE(pppx_ifs, &pppx_ifs, pxi) == NULL)
		panic("%s: inconsistent RB tree", __func__);
	LIST_REMOVE(pxi, pxi_list);

	pool_put(&pppx_if_pl, pxi);
}

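/*
 * pppx_if_qstart() drains the send queue.  Each mbuf starts with an int
 * holding the PPP protocol number prepended by pppx_if_output(); it is
 * stripped here and the packet is passed to pipex for PPP encapsulation
 * and transmission.
 */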
void
pppx_if_qstart(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct mbuf *m;
	int proto;

	NET_ASSERT_LOCKED();
	while ((m = ifq_dequeue(ifq)) != NULL) {
		proto = *mtod(m, int *);
		m_adj(m, sizeof(proto));

		pipex_ppp_output(m, pxi->pxi_session, proto);
	}
}

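/*
 * pppx_if_output() prepends a protocol word to every packet.  With pipex
 * enabled the word is a PPP protocol number and the packet is enqueued
 * for pppx_if_qstart().  With pipex disabled the word is the address
 * family and a pppx_hdr is prepended as well; the packet then goes to the
 * character device queue for userland, waking any sleeping reader.
 */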
int
pppx_if_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct pppx_hdr *th;
	int error = 0;
	int proto;

	NET_ASSERT_LOCKED();

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap_af(ifp->if_bpf, dst->sa_family, m, BPF_DIRECTION_OUT);
#endif
	if (pipex_enable) {
		switch (dst->sa_family) {
#ifdef INET6
		case AF_INET6:
			proto = PPP_IPV6;
			break;
#endif
		case AF_INET:
			proto = PPP_IP;
			break;
		default:
			m_freem(m);
			error = EPFNOSUPPORT;
			goto out;
		}
	} else
		proto = htonl(dst->sa_family);

	M_PREPEND(m, sizeof(int), M_DONTWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto out;
	}
	*mtod(m, int *) = proto;

	if (pipex_enable)
		error = if_enqueue(ifp, m);
	else {
		M_PREPEND(m, sizeof(struct pppx_hdr), M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}
		th = mtod(m, struct pppx_hdr *);
		th->pppx_proto = 0;	/* not used */
		th->pppx_id = pxi->pxi_session->ppp_id;
		error = mq_enqueue(&pxi->pxi_dev->pxd_svcq, m);
		if (error == 0) {
			if (pxi->pxi_dev->pxd_waiting) {
				wakeup((caddr_t)pxi->pxi_dev);
				pxi->pxi_dev->pxd_waiting = 0;
			}
			selwakeup(&pxi->pxi_dev->pxd_rsel);
		}
	}

out:
	if (error)
		counters_inc(ifp->if_counters, ifc_oerrors);
	return (error);
}

int
pppx_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		break;

	case SIOCSIFFLAGS:
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < 512 ||
		    ifr->ifr_mtu > pxi->pxi_session->peer_mru)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

RBT_GENERATE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);

/*
 * pppac(4) - PPP Access Concentrator interface
 */

#include <net/if_tun.h>

/*
 * Locks used to protect struct members and global data
 *       I       immutable after creation
 *       K       kernel lock
 *       N       net lock
 */

struct pppac_softc {
	struct ifnet	sc_if;
	unsigned int	sc_dead;	/* [N] */
	dev_t		sc_dev;		/* [I] */
	LIST_ENTRY(pppac_softc)
			sc_entry;	/* [K] */

	struct mutex	sc_rsel_mtx;
	struct selinfo	sc_rsel;
	struct mutex	sc_wsel_mtx;
	struct selinfo	sc_wsel;

	struct pipex_session
			*sc_multicast_session;

	struct mbuf_queue
			sc_mq;
};

LIST_HEAD(pppac_list, pppac_softc);	/* [K] */

static void	filt_pppac_rdetach(struct knote *);
static int	filt_pppac_read(struct knote *, long);

static const struct filterops pppac_rd_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppac_rdetach,
	.f_event	= filt_pppac_read
};

static void	filt_pppac_wdetach(struct knote *);
static int	filt_pppac_write(struct knote *, long);

static const struct filterops pppac_wr_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppac_wdetach,
	.f_event	= filt_pppac_write
};

static struct pppac_list pppac_devs = LIST_HEAD_INITIALIZER(pppac_devs);

static int	pppac_ioctl(struct ifnet *, u_long, caddr_t);

static int	pppac_add_session(struct pppac_softc *,
		    struct pipex_session_req *);
static int	pppac_del_session(struct pppac_softc *,
		    struct pipex_session_close_req *);
static int	pppac_output(struct ifnet *, struct mbuf *, struct sockaddr *,
		    struct rtentry *);
static void	pppac_qstart(struct ifqueue *);

static inline struct pppac_softc *
pppac_lookup(dev_t dev)
{
	struct pppac_softc *sc;

	LIST_FOREACH(sc, &pppac_devs, sc_entry) {
		if (sc->sc_dev == dev)
			return (sc);
	}

	return (NULL);
}

void
pppacattach(int n)
{
	pipex_init(); /* to be sure, to be sure */
}

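/*
 * Opening a pppac(4) device node creates the matching pppacN interface
 * together with a "virtual" pipex session (is_multicast) that
 * pppac_qstart() uses for multicast destinations.
 */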
int
pppacopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppac_softc *sc;
	struct ifnet *ifp;
	struct pipex_session *session;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	if (pppac_lookup(dev) != NULL) {
		free(sc, M_DEVBUF, sizeof(*sc));
		return (EBUSY);
	}

	/* virtual pipex_session entry for multicast */
	session = pool_get(&pipex_session_pool, PR_WAITOK | PR_ZERO);
	session->is_multicast = 1;
	session->ownersc = sc;
	sc->sc_multicast_session = session;

	sc->sc_dev = dev;

	mtx_init(&sc->sc_rsel_mtx, IPL_SOFTNET);
	mtx_init(&sc->sc_wsel_mtx, IPL_SOFTNET);
	mq_init(&sc->sc_mq, IFQ_MAXLEN, IPL_SOFTNET);

	LIST_INSERT_HEAD(&pppac_devs, sc, sc_entry);

	ifp = &sc->sc_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "pppac%u", minor(dev));

	ifp->if_softc = sc;
	ifp->if_type = IFT_L3IPVLAN;
	ifp->if_hdrlen = sizeof(uint32_t); /* for BPF */
	ifp->if_mtu = MAXMCLBYTES - sizeof(uint32_t);
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
	ifp->if_rtrequest = p2p_rtrequest; /* XXX */
	ifp->if_output = pppac_output;
	ifp->if_qstart = pppac_qstart;
	ifp->if_ioctl = pppac_ioctl;
	/* XXXSMP: be sure pppac_qstart() called with NET_LOCK held */
	ifq_set_maxlen(&ifp->if_snd, 1);

	if_counters_alloc(ifp);
	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(uint32_t));
#endif

	return (0);
}

int
pppacread(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m0, *m;
	int error = 0;
	size_t len;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (EHOSTDOWN);

	m0 = mq_dequeue(&sc->sc_mq);
	if (m0 == NULL) {
		if (ISSET(ioflag, IO_NDELAY))
			return (EWOULDBLOCK);

		do {
			error = tsleep_nsec(sc, (PZERO + 1)|PCATCH,
			    "pppacrd", INFSLP);
			if (error != 0)
				return (error);

			m0 = mq_dequeue(&sc->sc_mq);
		} while (m0 == NULL);
	}

	m = m0;
	while (uio->uio_resid > 0) {
		len = ulmin(uio->uio_resid, m->m_len);
		if (len != 0) {
			error = uiomove(mtod(m, caddr_t), len, uio);
			if (error != 0)
				break;
		}

		m = m->m_next;
		if (m == NULL)
			break;
	}
	m_freem(m0);

	return (error);
}

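/*
 * pppacwrite() injects a packet from userland: a 4-byte address family in
 * network byte order followed by the IP packet.  The header is stripped
 * and the packet is fed to the IP input path.
 */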
int
pppacwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	uint32_t proto;
	int error;
	struct mbuf *m;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (EHOSTDOWN);

	if (uio->uio_resid < ifp->if_hdrlen || uio->uio_resid > MAXMCLBYTES)
		return (EMSGSIZE);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOMEM);

	if (uio->uio_resid > MHLEN) {
		m_clget(m, M_WAITOK, uio->uio_resid);
		if (!ISSET(m->m_flags, M_EXT)) {
			m_free(m);
			return (ENOMEM);
		}
	}

	m->m_pkthdr.len = m->m_len = uio->uio_resid;

	error = uiomove(mtod(m, void *), m->m_len, uio);
	if (error != 0) {
		m_freem(m);
		return (error);
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

	/* strip the tunnel header */
	proto = ntohl(*mtod(m, uint32_t *));
	m_adj(m, sizeof(uint32_t));

	m->m_flags &= ~(M_MCAST|M_BCAST);
	m->m_pkthdr.ph_ifidx = ifp->if_index;
	m->m_pkthdr.ph_rtableid = ifp->if_rdomain;

#if NPF > 0
	pf_pkt_addr_changed(m);
#endif

	counters_pkt(ifp->if_counters,
	    ifc_ipackets, ifc_ibytes, m->m_pkthdr.len);

	NET_LOCK();

	switch (proto) {
	case AF_INET:
		ipv4_input(ifp, m);
		break;
#ifdef INET6
	case AF_INET6:
		ipv6_input(ifp, m);
		break;
#endif
	default:
		m_freem(m);
		error = EAFNOSUPPORT;
		break;
	}

	NET_UNLOCK();

	return (error);
}

int
pppacioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	int error = 0;

	NET_LOCK();
	switch (cmd) {
	case TUNSIFMODE: /* make npppd happy */
		break;

	case FIONBIO:
		break;
	case FIONREAD:
		*(int *)data = mq_hdatalen(&sc->sc_mq);
		break;

	case PIPEXASESSION:
		error = pppac_add_session(sc, (struct pipex_session_req *)data);
		break;
	case PIPEXDSESSION:
		error = pppac_del_session(sc,
		    (struct pipex_session_close_req *)data);
		break;
	default:
		error = pipex_ioctl(sc, cmd, data);
		break;
	}
	NET_UNLOCK();

	return (error);
}

int
pppacpoll(dev_t dev, int events, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	int revents = 0;

	if (events & (POLLIN | POLLRDNORM)) {
		if (!mq_empty(&sc->sc_mq))
			revents |= events & (POLLIN | POLLRDNORM);
	}
	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &sc->sc_rsel);
	}

	return (revents);
}

int
pppackqfilter(dev_t dev, struct knote *kn)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct mutex *mtx;
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		mtx = &sc->sc_rsel_mtx;
		klist = &sc->sc_rsel.si_note;
		kn->kn_fop = &pppac_rd_filtops;
		break;
	case EVFILT_WRITE:
		mtx = &sc->sc_wsel_mtx;
		klist = &sc->sc_wsel.si_note;
		kn->kn_fop = &pppac_wr_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	mtx_enter(mtx);
	klist_insert(klist, kn);
	mtx_leave(mtx);

	return (0);
}

static void
filt_pppac_rdetach(struct knote *kn)
{
	struct pppac_softc *sc = kn->kn_hook;
	struct klist *klist = &sc->sc_rsel.si_note;

	mtx_enter(&sc->sc_rsel_mtx);
	klist_remove(klist, kn);
	mtx_leave(&sc->sc_rsel_mtx);
}

static int
filt_pppac_read(struct knote *kn, long hint)
{
	struct pppac_softc *sc = kn->kn_hook;

	kn->kn_data = mq_hdatalen(&sc->sc_mq);

	return (kn->kn_data > 0);
}

static void
filt_pppac_wdetach(struct knote *kn)
{
	struct pppac_softc *sc = kn->kn_hook;
	struct klist *klist = &sc->sc_wsel.si_note;

	mtx_enter(&sc->sc_wsel_mtx);
	klist_remove(klist, kn);
	mtx_leave(&sc->sc_wsel_mtx);
}

static int
filt_pppac_write(struct knote *kn, long hint)
{
	/* We're always ready to accept a write. */
	return (1);
}

int
pppacclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	int s;

	NET_LOCK();
	sc->sc_dead = 1;
	CLR(ifp->if_flags, IFF_RUNNING);
	NET_UNLOCK();

	s = splhigh();
	klist_invalidate(&sc->sc_rsel.si_note);
	klist_invalidate(&sc->sc_wsel.si_note);
	splx(s);

	if_detach(ifp);

	pool_put(&pipex_session_pool, sc->sc_multicast_session);
	NET_LOCK();
	pipex_destroy_all_sessions(sc);
	NET_UNLOCK();

	LIST_REMOVE(sc, sc_entry);
	free(sc, M_DEVBUF, sizeof(*sc));

	return (0);
}

static int
pppac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct pppac_softc *sc = ifp->if_softc;
	/* struct ifreq *ifr = (struct ifreq *)data; */
	int error = 0;

	if (sc->sc_dead)
		return (ENXIO);

	switch (cmd) {
	case SIOCSIFADDR:
		SET(ifp->if_flags, IFF_UP); /* XXX cry cry */
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP))
			SET(ifp->if_flags, IFF_RUNNING);
		else
			CLR(ifp->if_flags, IFF_RUNNING);
		break;
	case SIOCSIFMTU:
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX */
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

static int
pppac_add_session(struct pppac_softc *sc, struct pipex_session_req *req)
{
	int error;
	struct pipex_session *session;

	error = pipex_init_session(&session, req);
	if (error != 0)
		return (error);
	error = pipex_link_session(session, &sc->sc_if, sc);
	if (error != 0)
		pipex_rele_session(session);

	return (error);
}

static int
pppac_del_session(struct pppac_softc *sc, struct pipex_session_close_req *req)
{
	struct pipex_session *session;

	session = pipex_lookup_by_session_id(req->pcr_protocol,
	    req->pcr_session_id);
	if (session == NULL || session->ownersc != sc)
		return (EINVAL);
	pipex_unlink_session(session);
	pipex_rele_session(session);

	return (0);
}

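/*
 * pppac_output() only validates the packet and records the address family
 * in the mbuf header; the real work is done by pppac_qstart() once the
 * packet has been enqueued on the interface send queue.
 */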
static int
pppac_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	int error;

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		error = EHOSTDOWN;
		goto drop;
	}

	switch (dst->sa_family) {
	case AF_INET:
#ifdef INET6
	case AF_INET6:
#endif
		break;
	default:
		error = EAFNOSUPPORT;
		goto drop;
	}

	m->m_pkthdr.ph_family = dst->sa_family;

	return (if_enqueue(ifp, m));

drop:
	m_freem(m);
	return (error);
}

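/*
 * pppac_qstart() routes outbound packets: IPv4 multicast is copied to the
 * multicast pipex session, unicast destinations with a matching pipex
 * session go through pipex directly, and everything else is prefixed with
 * its address family and queued for userland, waking readers and pollers.
 */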
static void
pppac_qstart(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct pppac_softc *sc = ifp->if_softc;
	struct mbuf *m, *m0;
	struct pipex_session *session;
	struct ip ip;
	int rv;

	NET_ASSERT_LOCKED();
	while ((m = ifq_dequeue(ifq)) != NULL) {
#if NBPFILTER > 0
		if (ifp->if_bpf) {
			bpf_mtap_af(ifp->if_bpf, m->m_pkthdr.ph_family, m,
			    BPF_DIRECTION_OUT);
		}
#endif

		switch (m->m_pkthdr.ph_family) {
		case AF_INET:
			if (m->m_pkthdr.len < sizeof(struct ip))
				goto bad;
			m_copydata(m, 0, sizeof(struct ip), (caddr_t)&ip);
			if (IN_MULTICAST(ip.ip_dst.s_addr)) {
				/* pass a copy to pipex */
				m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
				if (m0 != NULL)
					pipex_ip_output(m0,
					    sc->sc_multicast_session);
				else
					goto bad;
			} else {
				session = pipex_lookup_by_ip_address(ip.ip_dst);
				if (session != NULL) {
					pipex_ip_output(m, session);
					m = NULL;
				}
			}
			break;
		}
		if (m == NULL)	/* handled by pipex */
			continue;

		m = m_prepend(m, sizeof(uint32_t), M_DONTWAIT);
		if (m == NULL)
			goto bad;
		*mtod(m, uint32_t *) = htonl(m->m_pkthdr.ph_family);

		rv = mq_enqueue(&sc->sc_mq, m);
		if (rv == 1)
			counters_inc(ifp->if_counters, ifc_collisions);
		continue;
bad:
		counters_inc(ifp->if_counters, ifc_oerrors);
		if (m != NULL)
			m_freem(m);
		continue;
	}

	if (!mq_empty(&sc->sc_mq)) {
		wakeup(sc);
		selwakeup(&sc->sc_rsel);
	}
}
1499