/*	$OpenBSD: if_pppx.c,v 1.114 2022/02/22 01:15:02 guenther Exp $ */

/*
 * Copyright (c) 2010 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2010 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (c) 2009 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/vnode.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include <net/if.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>

#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/nd6.h>
#endif /* INET6 */

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include "pf.h"
#if NPF > 0
#include <net/pfvar.h>
#endif

#include <net/ppp_defs.h>
#include <net/ppp-comp.h>
#include <crypto/arc4.h>

#ifdef PIPEX
#include <net/radix.h>
#include <net/pipex.h>
#include <net/pipex_local.h>
#else
#error PIPEX option not enabled
#endif

#ifdef PPPX_DEBUG
#define PPPX_D_INIT	(1<<0)

int pppxdebug = 0;

#define DPRINTF(_m, _p...)	do {		\
	if (ISSET(pppxdebug, (_m)))		\
		printf(_p);			\
	} while (0)
#else
#define DPRINTF(_m, _p...)	/* _m, _p */
#endif


struct pppx_if;

/*
 * Locks used to protect struct members and global data
 *	I	immutable after creation
 *	K	kernel lock
 *	N	net lock
 */

struct pppx_dev {
	LIST_ENTRY(pppx_dev)	pxd_entry;	/* [K] */
	int			pxd_unit;	/* [I] */

	/* kq shizz */
	struct selinfo		pxd_rsel;
	struct mutex		pxd_rsel_mtx;
	struct selinfo		pxd_wsel;
	struct mutex		pxd_wsel_mtx;

	/* queue of packets for userland to service - protected by splnet */
	struct mbuf_queue	pxd_svcq;
	int			pxd_waiting;	/* [N] */
	LIST_HEAD(,pppx_if)	pxd_pxis;	/* [N] */
};

LIST_HEAD(, pppx_dev)		pppx_devs =
    LIST_HEAD_INITIALIZER(pppx_devs);		/* [K] */
struct pool			pppx_if_pl;

struct pppx_dev			*pppx_dev_lookup(dev_t);
struct pppx_dev			*pppx_dev2pxd(dev_t);

struct pppx_if_key {
	int			pxik_session_id;	/* [I] */
	int			pxik_protocol;		/* [I] */
};

struct pppx_if {
	struct pppx_if_key	pxi_key;	/* [I] must be first
						    in the struct */

	RBT_ENTRY(pppx_if)	pxi_entry;	/* [N] */
	LIST_ENTRY(pppx_if)	pxi_list;	/* [N] */

	int			pxi_ready;	/* [N] */

	int			pxi_unit;	/* [I] */
	struct ifnet		pxi_if;
	struct pppx_dev		*pxi_dev;	/* [I] */
	struct pipex_session	*pxi_session;	/* [I] */
};

static inline int
pppx_if_cmp(const struct pppx_if *a, const struct pppx_if *b)
{
	return memcmp(&a->pxi_key, &b->pxi_key, sizeof(a->pxi_key));
}
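/*
 * pppx(4) interfaces live in a red-black tree keyed by
 * (session id, protocol).  pppx_if_cmp() compares the raw bytes of
 * pxi_key, and pppx_if_find() builds a bare key on the stack and casts
 * it to a struct pppx_if pointer for RBT_FIND(), which is why pxi_key
 * must stay the first member of struct pppx_if.
 */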
RBT_HEAD(pppx_ifs, pppx_if) pppx_ifs = RBT_INITIALIZER(&pppx_ifs); /* [N] */
RBT_PROTOTYPE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);

int		pppx_if_next_unit(void);
struct pppx_if	*pppx_if_find(struct pppx_dev *, int, int);
int		pppx_add_session(struct pppx_dev *,
		    struct pipex_session_req *);
int		pppx_del_session(struct pppx_dev *,
		    struct pipex_session_close_req *);
int		pppx_set_session_descr(struct pppx_dev *,
		    struct pipex_session_descr_req *);

void		pppx_if_destroy(struct pppx_dev *, struct pppx_if *);
void		pppx_if_qstart(struct ifqueue *);
int		pppx_if_output(struct ifnet *, struct mbuf *,
		    struct sockaddr *, struct rtentry *);
int		pppx_if_ioctl(struct ifnet *, u_long, caddr_t);

void		pppxattach(int);

void		filt_pppx_rdetach(struct knote *);
int		filt_pppx_read(struct knote *, long);

const struct filterops pppx_rd_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppx_rdetach,
	.f_event	= filt_pppx_read,
};

void		filt_pppx_wdetach(struct knote *);
int		filt_pppx_write(struct knote *, long);

const struct filterops pppx_wr_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppx_wdetach,
	.f_event	= filt_pppx_write,
};

struct pppx_dev *
pppx_dev_lookup(dev_t dev)
{
	struct pppx_dev *pxd;
	int unit = minor(dev);

	LIST_FOREACH(pxd, &pppx_devs, pxd_entry) {
		if (pxd->pxd_unit == unit)
			return (pxd);
	}

	return (NULL);
}

struct pppx_dev *
pppx_dev2pxd(dev_t dev)
{
	struct pppx_dev *pxd;

	pxd = pppx_dev_lookup(dev);

	return (pxd);
}

void
pppxattach(int n)
{
	pool_init(&pppx_if_pl, sizeof(struct pppx_if), 0, IPL_NONE,
	    PR_WAITOK, "pppxif", NULL);
	pipex_init();
}

int
pppxopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppx_dev *pxd;

	pxd = malloc(sizeof(*pxd), M_DEVBUF, M_WAITOK | M_ZERO);
	if (pppx_dev_lookup(dev) != NULL) {
		free(pxd, M_DEVBUF, sizeof(*pxd));
		return (EBUSY);
	}

	pxd->pxd_unit = minor(dev);
	mtx_init(&pxd->pxd_rsel_mtx, IPL_NET);
	mtx_init(&pxd->pxd_wsel_mtx, IPL_NET);
	LIST_INIT(&pxd->pxd_pxis);

	mq_init(&pxd->pxd_svcq, 128, IPL_NET);
	LIST_INSERT_HEAD(&pppx_devs, pxd, pxd_entry);

	return 0;
}

int
pppxread(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct mbuf *m, *m0;
	int error = 0;
	size_t len;

	if (!pxd)
		return (ENXIO);

	while ((m0 = mq_dequeue(&pxd->pxd_svcq)) == NULL) {
		if (ISSET(ioflag, IO_NDELAY))
			return (EWOULDBLOCK);

		NET_LOCK();
		pxd->pxd_waiting = 1;
		error = rwsleep_nsec(pxd, &netlock,
		    (PZERO + 1)|PCATCH, "pppxread", INFSLP);
		NET_UNLOCK();
		if (error != 0) {
			return (error);
		}
	}

	while (m0 != NULL && uio->uio_resid > 0 && error == 0) {
		len = ulmin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		m = m_free(m0);
		m0 = m;
	}

	m_freem(m0);

	return (error);
}

int
pppxwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct pppx_hdr *th;
	struct pppx_if *pxi;
	uint32_t proto;
	struct mbuf *top, **mp, *m;
	int tlen;
	int error = 0;
	size_t mlen;

	if (uio->uio_resid < sizeof(*th) + sizeof(uint32_t) ||
	    uio->uio_resid > MCLBYTES)
		return (EMSGSIZE);

	tlen = uio->uio_resid;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	mlen = MHLEN;
	if (uio->uio_resid > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_free(m);
			return (ENOBUFS);
		}
		mlen = MCLBYTES;
	}

	top = NULL;
	mp = &top;

	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = ulmin(mlen, uio->uio_resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (error == 0 && uio->uio_resid > 0) {
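			/*
			 * The previous mbuf filled up but user data is
			 * still left; chain another mbuf, upgrading it
			 * to a cluster when at least MINCLSIZE bytes
			 * remain to be copied.
			 */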
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
			if (uio->uio_resid >= MINCLSIZE) {
				MCLGET(m, M_DONTWAIT);
				if (!(m->m_flags & M_EXT)) {
					error = ENOBUFS;
					m_free(m);
					break;
				}
				mlen = MCLBYTES;
			}
		}
	}

	if (error) {
		m_freem(top);
		return (error);
	}

	top->m_pkthdr.len = tlen;

	/* Find the interface */
	th = mtod(top, struct pppx_hdr *);
	m_adj(top, sizeof(struct pppx_hdr));

	NET_LOCK();

	pxi = pppx_if_find(pxd, th->pppx_id, th->pppx_proto);
	if (pxi == NULL) {
		NET_UNLOCK();
		m_freem(top);
		return (EINVAL);
	}
	top->m_pkthdr.ph_ifidx = pxi->pxi_if.if_index;

#if NBPFILTER > 0
	if (pxi->pxi_if.if_bpf)
		bpf_mtap(pxi->pxi_if.if_bpf, top, BPF_DIRECTION_IN);
#endif
	/* strip the tunnel header */
	proto = ntohl(*(uint32_t *)(th + 1));
	m_adj(top, sizeof(uint32_t));

	switch (proto) {
	case AF_INET:
		ipv4_input(&pxi->pxi_if, top);
		break;
#ifdef INET6
	case AF_INET6:
		ipv6_input(&pxi->pxi_if, top);
		break;
#endif
	default:
		m_freem(top);
		error = EAFNOSUPPORT;
		break;
	}

	NET_UNLOCK();

	return (error);
}

int
pppxioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	int error = 0;

	NET_LOCK();
	switch (cmd) {
	case PIPEXASESSION:
		error = pppx_add_session(pxd,
		    (struct pipex_session_req *)addr);
		break;

	case PIPEXDSESSION:
		error = pppx_del_session(pxd,
		    (struct pipex_session_close_req *)addr);
		break;

	case PIPEXSIFDESCR:
		error = pppx_set_session_descr(pxd,
		    (struct pipex_session_descr_req *)addr);
		break;

	case FIONBIO:
		break;
	case FIONREAD:
		*(int *)addr = mq_hdatalen(&pxd->pxd_svcq);
		break;

	default:
		error = pipex_ioctl(pxd, cmd, addr);
		break;
	}
	NET_UNLOCK();

	return (error);
}

int
pppxpoll(dev_t dev, int events, struct proc *p)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	int revents = 0;

	if (events & (POLLIN | POLLRDNORM)) {
		if (!mq_empty(&pxd->pxd_svcq))
			revents |= events & (POLLIN | POLLRDNORM);
	}
	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &pxd->pxd_rsel);
	}

	return (revents);
}

int
pppxkqfilter(dev_t dev, struct knote *kn)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct mutex *mtx;
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		mtx = &pxd->pxd_rsel_mtx;
		klist = &pxd->pxd_rsel.si_note;
		kn->kn_fop = &pppx_rd_filtops;
		break;
	case EVFILT_WRITE:
		mtx = &pxd->pxd_wsel_mtx;
		klist = &pxd->pxd_wsel.si_note;
		kn->kn_fop = &pppx_wr_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)pxd;

	mtx_enter(mtx);
	klist_insert_locked(klist, kn);
	mtx_leave(mtx);

	return (0);
}

void
filt_pppx_rdetach(struct knote *kn)
{
	struct pppx_dev *pxd = (struct pppx_dev *)kn->kn_hook;
	struct klist *klist = &pxd->pxd_rsel.si_note;

	mtx_enter(&pxd->pxd_rsel_mtx);
	klist_remove_locked(klist, kn);
	mtx_leave(&pxd->pxd_rsel_mtx);
}
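/*
 * The read filter is level-triggered on the service queue: kn_data is
 * set to the number of bytes waiting for userland so a kevent(2)
 * consumer can size its read, and the knote fires whenever that count
 * is non-zero.
 */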
int
filt_pppx_read(struct knote *kn, long hint)
{
	struct pppx_dev *pxd = (struct pppx_dev *)kn->kn_hook;

	kn->kn_data = mq_hdatalen(&pxd->pxd_svcq);

	return (kn->kn_data > 0);
}

void
filt_pppx_wdetach(struct knote *kn)
{
	struct pppx_dev *pxd = (struct pppx_dev *)kn->kn_hook;
	struct klist *klist = &pxd->pxd_wsel.si_note;

	mtx_enter(&pxd->pxd_wsel_mtx);
	klist_remove_locked(klist, kn);
	mtx_leave(&pxd->pxd_wsel_mtx);
}

int
filt_pppx_write(struct knote *kn, long hint)
{
	/* We're always ready to accept a write. */
	return (1);
}

int
pppxclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppx_dev *pxd;
	struct pppx_if *pxi;

	pxd = pppx_dev_lookup(dev);

	/* XXX */
	NET_LOCK();
	while ((pxi = LIST_FIRST(&pxd->pxd_pxis)))
		pppx_if_destroy(pxd, pxi);
	NET_UNLOCK();

	LIST_REMOVE(pxd, pxd_entry);

	mq_purge(&pxd->pxd_svcq);

	free(pxd, M_DEVBUF, sizeof(*pxd));

	return (0);
}

int
pppx_if_next_unit(void)
{
	struct pppx_if *pxi;
	int unit = 0;

	/* this is safe without splnet since we're not modifying it */
	do {
		int found = 0;
		RBT_FOREACH(pxi, pppx_ifs, &pppx_ifs) {
			if (pxi->pxi_unit == unit) {
				found = 1;
				break;
			}
		}

		if (found == 0)
			break;
		unit++;
	} while (unit > 0);

	return (unit);
}

struct pppx_if *
pppx_if_find(struct pppx_dev *pxd, int session_id, int protocol)
{
	struct pppx_if_key key;
	struct pppx_if *pxi;

	memset(&key, 0, sizeof(key));
	key.pxik_session_id = session_id;
	key.pxik_protocol = protocol;

	pxi = RBT_FIND(pppx_ifs, &pppx_ifs, (struct pppx_if *)&key);
	if (pxi && pxi->pxi_ready == 0)
		pxi = NULL;

	return pxi;
}
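/*
 * A new pppx(4) interface is created by a PPP daemon issuing
 * PIPEXASESSION on its pppx device; the matching PIPEXDSESSION tears it
 * down again.  Illustrative userland sketch only (device path and error
 * handling are assumptions, not part of this file):
 *
 *	int fd = open("/dev/pppx0", O_RDWR);
 *	struct pipex_session_req req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.pr_session_id = ...;
 *	req.pr_protocol = ...;
 *	req.pr_peer_mru = ...;
 *	req.pr_ip_srcaddr = ...;	(plus address and netmask)
 *	if (ioctl(fd, PIPEXASESSION, &req) == -1)
 *		err(1, "PIPEXASESSION");
 */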
int
pppx_add_session(struct pppx_dev *pxd, struct pipex_session_req *req)
{
	struct pppx_if *pxi;
	struct pipex_session *session;
	struct ifnet *ifp;
	int unit, error = 0;
	struct in_ifaddr *ia;
	struct sockaddr_in ifaddr;

	/*
	 * XXX: As long as `session' is allocated as part of a `pxi'
	 *	it isn't possible to free it separately.  So disallow
	 *	the timeout feature until this is fixed.
	 */
	if (req->pr_timeout_sec != 0)
		return (EINVAL);

	error = pipex_init_session(&session, req);
	if (error)
		return (error);

	pxi = pool_get(&pppx_if_pl, PR_WAITOK | PR_ZERO);
	ifp = &pxi->pxi_if;

	pxi->pxi_session = session;

	/* try to set the interface up */
	unit = pppx_if_next_unit();
	if (unit < 0) {
		error = ENOMEM;
		goto out;
	}

	pxi->pxi_unit = unit;
	pxi->pxi_key.pxik_session_id = req->pr_session_id;
	pxi->pxi_key.pxik_protocol = req->pr_protocol;
	pxi->pxi_dev = pxd;

	if (RBT_INSERT(pppx_ifs, &pppx_ifs, pxi) != NULL) {
		error = EADDRINUSE;
		goto out;
	}
	LIST_INSERT_HEAD(&pxd->pxd_pxis, pxi, pxi_list);

	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d", "pppx", unit);
	ifp->if_mtu = req->pr_peer_mru;	/* XXX */
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST | IFF_UP;
	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
	ifp->if_qstart = pppx_if_qstart;
	ifp->if_output = pppx_if_output;
	ifp->if_ioctl = pppx_if_ioctl;
	ifp->if_rtrequest = p2p_rtrequest;
	ifp->if_type = IFT_PPP;
	ifp->if_softc = pxi;
	/* ifp->if_rdomain = req->pr_rdomain; */
	if_counters_alloc(ifp);
	/* XXXSMP: be sure pppx_if_qstart() called with NET_LOCK held */
	ifq_set_maxlen(&ifp->if_snd, 1);

	/* XXXSMP breaks atomicity */
	NET_UNLOCK();
	if_attach(ifp);
	NET_LOCK();

	if_addgroup(ifp, "pppx");
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(u_int32_t));
#endif

	/* XXX ipv6 support?  how does the caller indicate it wants ipv6
	 * instead of ipv4?
	 */
	memset(&ifaddr, 0, sizeof(ifaddr));
	ifaddr.sin_family = AF_INET;
	ifaddr.sin_len = sizeof(ifaddr);
	ifaddr.sin_addr = req->pr_ip_srcaddr;

	ia = malloc(sizeof (*ia), M_IFADDR, M_WAITOK | M_ZERO);

	ia->ia_addr.sin_family = AF_INET;
	ia->ia_addr.sin_len = sizeof(struct sockaddr_in);
	ia->ia_addr.sin_addr = req->pr_ip_srcaddr;

	ia->ia_dstaddr.sin_family = AF_INET;
	ia->ia_dstaddr.sin_len = sizeof(struct sockaddr_in);
	ia->ia_dstaddr.sin_addr = req->pr_ip_address;

	ia->ia_sockmask.sin_family = AF_INET;
	ia->ia_sockmask.sin_len = sizeof(struct sockaddr_in);
	ia->ia_sockmask.sin_addr = req->pr_ip_netmask;

	ia->ia_ifa.ifa_addr = sintosa(&ia->ia_addr);
	ia->ia_ifa.ifa_dstaddr = sintosa(&ia->ia_dstaddr);
	ia->ia_ifa.ifa_netmask = sintosa(&ia->ia_sockmask);
	ia->ia_ifa.ifa_ifp = ifp;

	ia->ia_netmask = ia->ia_sockmask.sin_addr.s_addr;

	error = in_ifinit(ifp, ia, &ifaddr, 1);
	if (error) {
		printf("pppx: unable to set addresses for %s, error=%d\n",
		    ifp->if_xname, error);
	} else {
		if_addrhooks_run(ifp);
	}

	error = pipex_link_session(session, ifp, pxd);
	if (error)
		goto detach;

	SET(ifp->if_flags, IFF_RUNNING);
	pxi->pxi_ready = 1;

	return (error);

detach:
	/* XXXSMP breaks atomicity */
	NET_UNLOCK();
	if_detach(ifp);
	NET_LOCK();

	if (RBT_REMOVE(pppx_ifs, &pppx_ifs, pxi) == NULL)
		panic("%s: inconsistent RB tree", __func__);
	LIST_REMOVE(pxi, pxi_list);
out:
	pool_put(&pppx_if_pl, pxi);
	pipex_rele_session(session);

	return (error);
}
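/*
 * Closing a session copies the final pipex counters back into the
 * caller's pipex_session_close_req before the interface is destroyed,
 * so the daemon still gets accounting data for the session it is
 * tearing down.
 */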
int
pppx_del_session(struct pppx_dev *pxd, struct pipex_session_close_req *req)
{
	struct pppx_if *pxi;

	pxi = pppx_if_find(pxd, req->pcr_session_id, req->pcr_protocol);
	if (pxi == NULL)
		return (EINVAL);

	pipex_export_session_stats(pxi->pxi_session, &req->pcr_stat);
	pppx_if_destroy(pxd, pxi);
	return (0);
}

int
pppx_set_session_descr(struct pppx_dev *pxd,
    struct pipex_session_descr_req *req)
{
	struct pppx_if *pxi;

	pxi = pppx_if_find(pxd, req->pdr_session_id, req->pdr_protocol);
	if (pxi == NULL)
		return (EINVAL);

	(void)memset(pxi->pxi_if.if_description, 0, IFDESCRSIZE);
	strlcpy(pxi->pxi_if.if_description, req->pdr_descr, IFDESCRSIZE);

	return (0);
}

void
pppx_if_destroy(struct pppx_dev *pxd, struct pppx_if *pxi)
{
	struct ifnet *ifp;
	struct pipex_session *session;

	NET_ASSERT_LOCKED();
	session = pxi->pxi_session;
	ifp = &pxi->pxi_if;
	pxi->pxi_ready = 0;
	CLR(ifp->if_flags, IFF_RUNNING);

	pipex_unlink_session(session);

	/* XXXSMP breaks atomicity */
	NET_UNLOCK();
	if_detach(ifp);
	NET_LOCK();

	pipex_rele_session(session);
	if (RBT_REMOVE(pppx_ifs, &pppx_ifs, pxi) == NULL)
		panic("%s: inconsistent RB tree", __func__);
	LIST_REMOVE(pxi, pxi_list);

	pool_put(&pppx_if_pl, pxi);
}

void
pppx_if_qstart(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct mbuf *m;
	int proto;

	NET_ASSERT_LOCKED();
	while ((m = ifq_dequeue(ifq)) != NULL) {
		proto = *mtod(m, int *);
		m_adj(m, sizeof(proto));

		pipex_ppp_output(m, pxi->pxi_session, proto);
	}
}

int
pppx_if_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct pppx_hdr *th;
	int error = 0;
	int proto;

	NET_ASSERT_LOCKED();

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap_af(ifp->if_bpf, dst->sa_family, m, BPF_DIRECTION_OUT);
#endif
	if (pipex_enable) {
		switch (dst->sa_family) {
#ifdef INET6
		case AF_INET6:
			proto = PPP_IPV6;
			break;
#endif
		case AF_INET:
			proto = PPP_IP;
			break;
		default:
			m_freem(m);
			error = EPFNOSUPPORT;
			goto out;
		}
	} else
		proto = htonl(dst->sa_family);

	M_PREPEND(m, sizeof(int), M_DONTWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto out;
	}
	*mtod(m, int *) = proto;

	if (pipex_enable)
		error = if_enqueue(ifp, m);
	else {
		M_PREPEND(m, sizeof(struct pppx_hdr), M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}
		th = mtod(m, struct pppx_hdr *);
		th->pppx_proto = 0;	/* not used */
		th->pppx_id = pxi->pxi_session->ppp_id;
		error = mq_enqueue(&pxi->pxi_dev->pxd_svcq, m);
		if (error == 0) {
			if (pxi->pxi_dev->pxd_waiting) {
				wakeup((caddr_t)pxi->pxi_dev);
				pxi->pxi_dev->pxd_waiting = 0;
			}
			selwakeup(&pxi->pxi_dev->pxd_rsel);
		}
	}

out:
	if (error)
		counters_inc(ifp->if_counters, ifc_oerrors);
	return (error);
}

int
pppx_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		break;

	case SIOCSIFFLAGS:
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCSIFMTU:
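		/*
		 * Keep the MTU between a 512 byte floor and the peer's
		 * MRU recorded on the pipex session; anything outside
		 * that range is rejected.
		 */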
		if (ifr->ifr_mtu < 512 ||
		    ifr->ifr_mtu > pxi->pxi_session->peer_mru)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

RBT_GENERATE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);

/*
 * Locks used to protect struct members and global data
 *	I	immutable after creation
 *	K	kernel lock
 *	N	net lock
 */

struct pppac_softc {
	struct ifnet	sc_if;
	dev_t		sc_dev;		/* [I] */
	int		sc_ready;	/* [K] */
	LIST_ENTRY(pppac_softc)
			sc_entry;	/* [K] */

	struct mutex	sc_rsel_mtx;
	struct selinfo	sc_rsel;
	struct mutex	sc_wsel_mtx;
	struct selinfo	sc_wsel;

	struct pipex_session
			*sc_multicast_session;

	struct mbuf_queue
			sc_mq;
};

LIST_HEAD(pppac_list, pppac_softc);	/* [K] */

static void	filt_pppac_rdetach(struct knote *);
static int	filt_pppac_read(struct knote *, long);

static const struct filterops pppac_rd_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppac_rdetach,
	.f_event	= filt_pppac_read
};

static void	filt_pppac_wdetach(struct knote *);
static int	filt_pppac_write(struct knote *, long);

static const struct filterops pppac_wr_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppac_wdetach,
	.f_event	= filt_pppac_write
};

static struct pppac_list pppac_devs = LIST_HEAD_INITIALIZER(pppac_devs);

static int	pppac_ioctl(struct ifnet *, u_long, caddr_t);

static int	pppac_add_session(struct pppac_softc *,
		    struct pipex_session_req *);
static int	pppac_del_session(struct pppac_softc *,
		    struct pipex_session_close_req *);
static int	pppac_output(struct ifnet *, struct mbuf *, struct sockaddr *,
		    struct rtentry *);
static void	pppac_qstart(struct ifqueue *);

static inline struct pppac_softc *
pppac_lookup(dev_t dev)
{
	struct pppac_softc *sc;

	LIST_FOREACH(sc, &pppac_devs, sc_entry) {
		if (sc->sc_dev == dev) {
			if (sc->sc_ready == 0)
				break;

			return (sc);
		}
	}

	return (NULL);
}

void
pppacattach(int n)
{
	pipex_init(); /* to be sure, to be sure */
}

int
pppacopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppac_softc *sc, *tmp;
	struct ifnet *ifp;
	struct pipex_session *session;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	sc->sc_dev = dev;
	LIST_FOREACH(tmp, &pppac_devs, sc_entry) {
		if (tmp->sc_dev == dev) {
			free(sc, M_DEVBUF, sizeof(*sc));
			return (EBUSY);
		}
	}
	LIST_INSERT_HEAD(&pppac_devs, sc, sc_entry);

	/* virtual pipex_session entry for multicast */
	session = pool_get(&pipex_session_pool, PR_WAITOK | PR_ZERO);
	session->is_multicast = 1;
	session->ownersc = sc;
	sc->sc_multicast_session = session;

	mtx_init(&sc->sc_rsel_mtx, IPL_SOFTNET);
	mtx_init(&sc->sc_wsel_mtx, IPL_SOFTNET);
	mq_init(&sc->sc_mq, IFQ_MAXLEN, IPL_SOFTNET);

	ifp = &sc->sc_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "pppac%u", minor(dev));

	ifp->if_softc = sc;
	ifp->if_type = IFT_L3IPVLAN;
	ifp->if_hdrlen = sizeof(uint32_t); /* for BPF */;
	ifp->if_mtu = MAXMCLBYTES - sizeof(uint32_t);
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
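	/*
	 * Output path: pppac_output() only tags the address family and
	 * enqueues; pppac_qstart() then either hands the packet to pipex
	 * or queues it on sc_mq for the userland daemon to read.
	 */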
	ifp->if_rtrequest = p2p_rtrequest;	/* XXX */
	ifp->if_output = pppac_output;
	ifp->if_qstart = pppac_qstart;
	ifp->if_ioctl = pppac_ioctl;
	/* XXXSMP: be sure pppac_qstart() called with NET_LOCK held */
	ifq_set_maxlen(&ifp->if_snd, 1);

	if_counters_alloc(ifp);
	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(uint32_t));
#endif

	sc->sc_ready = 1;

	return (0);
}

int
pppacread(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m0, *m;
	int error = 0;
	size_t len;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (EHOSTDOWN);

	m0 = mq_dequeue(&sc->sc_mq);
	if (m0 == NULL) {
		if (ISSET(ioflag, IO_NDELAY))
			return (EWOULDBLOCK);

		do {
			error = tsleep_nsec(sc, (PZERO + 1)|PCATCH,
			    "pppacrd", INFSLP);
			if (error != 0)
				return (error);

			m0 = mq_dequeue(&sc->sc_mq);
		} while (m0 == NULL);
	}

	m = m0;
	while (uio->uio_resid > 0) {
		len = ulmin(uio->uio_resid, m->m_len);
		if (len != 0) {
			error = uiomove(mtod(m, caddr_t), len, uio);
			if (error != 0)
				break;
		}

		m = m->m_next;
		if (m == NULL)
			break;
	}
	m_freem(m0);

	return (error);
}

int
pppacwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	uint32_t proto;
	int error;
	struct mbuf *m;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (EHOSTDOWN);

	if (uio->uio_resid < ifp->if_hdrlen || uio->uio_resid > MAXMCLBYTES)
		return (EMSGSIZE);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOMEM);

	if (uio->uio_resid > MHLEN) {
		m_clget(m, M_WAITOK, uio->uio_resid);
		if (!ISSET(m->m_flags, M_EXT)) {
			m_free(m);
			return (ENOMEM);
		}
	}

	m->m_pkthdr.len = m->m_len = uio->uio_resid;

	error = uiomove(mtod(m, void *), m->m_len, uio);
	if (error != 0) {
		m_freem(m);
		return (error);
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

	/* strip the tunnel header */
	proto = ntohl(*mtod(m, uint32_t *));
	m_adj(m, sizeof(uint32_t));

	m->m_flags &= ~(M_MCAST|M_BCAST);
	m->m_pkthdr.ph_ifidx = ifp->if_index;
	m->m_pkthdr.ph_rtableid = ifp->if_rdomain;

#if NPF > 0
	pf_pkt_addr_changed(m);
#endif

	counters_pkt(ifp->if_counters,
	    ifc_ipackets, ifc_ibytes, m->m_pkthdr.len);

	NET_LOCK();

	switch (proto) {
	case AF_INET:
		ipv4_input(ifp, m);
		break;
#ifdef INET6
	case AF_INET6:
		ipv6_input(ifp, m);
		break;
#endif
	default:
		m_freem(m);
		error = EAFNOSUPPORT;
		break;
	}

	NET_UNLOCK();

	return (error);
}

int
pppacioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	int error = 0;

	NET_LOCK();
	switch (cmd) {
	case FIONBIO:
		break;
	case FIONREAD:
		*(int *)data = mq_hdatalen(&sc->sc_mq);
		break;

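	/*
	 * Session management: PIPEXASESSION links a new pipex session to
	 * this pppac interface and PIPEXDSESSION unlinks it; everything
	 * else is handed to pipex_ioctl().
	 */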
	case PIPEXASESSION:
		error = pppac_add_session(sc, (struct pipex_session_req *)data);
		break;
	case PIPEXDSESSION:
		error = pppac_del_session(sc,
		    (struct pipex_session_close_req *)data);
		break;
	default:
		error = pipex_ioctl(sc, cmd, data);
		break;
	}
	NET_UNLOCK();

	return (error);
}

int
pppacpoll(dev_t dev, int events, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	int revents = 0;

	if (events & (POLLIN | POLLRDNORM)) {
		if (!mq_empty(&sc->sc_mq))
			revents |= events & (POLLIN | POLLRDNORM);
	}
	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &sc->sc_rsel);
	}

	return (revents);
}

int
pppackqfilter(dev_t dev, struct knote *kn)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct mutex *mtx;
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		mtx = &sc->sc_rsel_mtx;
		klist = &sc->sc_rsel.si_note;
		kn->kn_fop = &pppac_rd_filtops;
		break;
	case EVFILT_WRITE:
		mtx = &sc->sc_wsel_mtx;
		klist = &sc->sc_wsel.si_note;
		kn->kn_fop = &pppac_wr_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	mtx_enter(mtx);
	klist_insert_locked(klist, kn);
	mtx_leave(mtx);

	return (0);
}

static void
filt_pppac_rdetach(struct knote *kn)
{
	struct pppac_softc *sc = kn->kn_hook;
	struct klist *klist = &sc->sc_rsel.si_note;

	mtx_enter(&sc->sc_rsel_mtx);
	klist_remove_locked(klist, kn);
	mtx_leave(&sc->sc_rsel_mtx);
}

static int
filt_pppac_read(struct knote *kn, long hint)
{
	struct pppac_softc *sc = kn->kn_hook;

	kn->kn_data = mq_hdatalen(&sc->sc_mq);

	return (kn->kn_data > 0);
}

static void
filt_pppac_wdetach(struct knote *kn)
{
	struct pppac_softc *sc = kn->kn_hook;
	struct klist *klist = &sc->sc_wsel.si_note;

	mtx_enter(&sc->sc_wsel_mtx);
	klist_remove_locked(klist, kn);
	mtx_leave(&sc->sc_wsel_mtx);
}

static int
filt_pppac_write(struct knote *kn, long hint)
{
	/* We're always ready to accept a write. */
	return (1);
}
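/*
 * Close tears things down in this order: mark the device not ready and
 * clear IFF_RUNNING, detach the interface, invalidate any remaining
 * knotes, then release the multicast pseudo-session and every pipex
 * session still owned by this softc before freeing it.
 */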
int
pppacclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	int s;

	sc->sc_ready = 0;

	NET_LOCK();
	CLR(ifp->if_flags, IFF_RUNNING);
	NET_UNLOCK();

	if_detach(ifp);

	s = splhigh();
	klist_invalidate(&sc->sc_rsel.si_note);
	klist_invalidate(&sc->sc_wsel.si_note);
	splx(s);

	pool_put(&pipex_session_pool, sc->sc_multicast_session);
	NET_LOCK();
	pipex_destroy_all_sessions(sc);
	NET_UNLOCK();

	LIST_REMOVE(sc, sc_entry);
	free(sc, M_DEVBUF, sizeof(*sc));

	return (0);
}

static int
pppac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	/* struct ifreq *ifr = (struct ifreq *)data; */
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		SET(ifp->if_flags, IFF_UP);	/* XXX cry cry */
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP))
			SET(ifp->if_flags, IFF_RUNNING);
		else
			CLR(ifp->if_flags, IFF_RUNNING);
		break;
	case SIOCSIFMTU:
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX */
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

static int
pppac_add_session(struct pppac_softc *sc, struct pipex_session_req *req)
{
	int error;
	struct pipex_session *session;

	error = pipex_init_session(&session, req);
	if (error != 0)
		return (error);
	error = pipex_link_session(session, &sc->sc_if, sc);
	if (error != 0)
		pipex_rele_session(session);

	return (error);
}

static int
pppac_del_session(struct pppac_softc *sc, struct pipex_session_close_req *req)
{
	struct pipex_session *session;

	session = pipex_lookup_by_session_id(req->pcr_protocol,
	    req->pcr_session_id);
	if (session == NULL || session->ownersc != sc)
		return (EINVAL);
	pipex_unlink_session(session);
	pipex_rele_session(session);

	return (0);
}

static int
pppac_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	int error;

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		error = EHOSTDOWN;
		goto drop;
	}

	switch (dst->sa_family) {
	case AF_INET:
#ifdef INET6
	case AF_INET6:
#endif
		break;
	default:
		error = EAFNOSUPPORT;
		goto drop;
	}

	m->m_pkthdr.ph_family = dst->sa_family;

	return (if_enqueue(ifp, m));

drop:
	m_freem(m);
	return (error);
}

static void
pppac_qstart(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct pppac_softc *sc = ifp->if_softc;
	struct mbuf *m, *m0;
	struct pipex_session *session;
	struct ip ip;
	int rv;

	NET_ASSERT_LOCKED();
	while ((m = ifq_dequeue(ifq)) != NULL) {
#if NBPFILTER > 0
		if (ifp->if_bpf) {
			bpf_mtap_af(ifp->if_bpf, m->m_pkthdr.ph_family, m,
			    BPF_DIRECTION_OUT);
		}
#endif

		switch (m->m_pkthdr.ph_family) {
		case AF_INET:
			if (m->m_pkthdr.len < sizeof(struct ip))
				goto bad;
			m_copydata(m, 0, sizeof(struct ip), &ip);
			if (IN_MULTICAST(ip.ip_dst.s_addr)) {
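				/*
				 * Multicast: give pipex a copy via the
				 * multicast pseudo-session so every
				 * session sees it, while the original
				 * is still queued for userland below.
				 */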
				/* pass a copy to pipex */
				m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
				if (m0 != NULL)
					pipex_ip_output(m0,
					    sc->sc_multicast_session);
				else
					goto bad;
			} else {
				session = pipex_lookup_by_ip_address(ip.ip_dst);
				if (session != NULL) {
					pipex_ip_output(m, session);
					m = NULL;
				}
			}
			break;
		}
		if (m == NULL)	/* handled by pipex */
			continue;

		m = m_prepend(m, sizeof(uint32_t), M_DONTWAIT);
		if (m == NULL)
			goto bad;
		*mtod(m, uint32_t *) = htonl(m->m_pkthdr.ph_family);

		rv = mq_enqueue(&sc->sc_mq, m);
		if (rv == 1)
			counters_inc(ifp->if_counters, ifc_collisions);
		continue;
bad:
		counters_inc(ifp->if_counters, ifc_oerrors);
		if (m != NULL)
			m_freem(m);
		continue;
	}

	if (!mq_empty(&sc->sc_mq)) {
		wakeup(sc);
		selwakeup(&sc->sc_rsel);
	}
}