/*	$OpenBSD: if_pppx.c,v 1.109 2021/02/10 13:38:46 mvs Exp $ */

/*
 * Copyright (c) 2010 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2010 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (c) 2009 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/vnode.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include <net/if.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>

#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/nd6.h>
#endif /* INET6 */

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include "pf.h"
#if NPF > 0
#include <net/pfvar.h>
#endif

#include <net/ppp_defs.h>
#include <net/ppp-comp.h>
#include <crypto/arc4.h>

#ifdef PIPEX
#include <net/radix.h>
#include <net/pipex.h>
#include <net/pipex_local.h>
#else
#error PIPEX option not enabled
#endif

#ifdef PPPX_DEBUG
#define PPPX_D_INIT	(1<<0)

int pppxdebug = 0;

#define DPRINTF(_m, _p...)	do { \
	if (ISSET(pppxdebug, (_m))) \
		printf(_p); \
} while (0)
#else
#define DPRINTF(_m, _p...)	/* _m, _p */
#endif


struct pppx_if;

/*
 * Locks used to protect struct members and global data
 *	I	immutable after creation
 *	K	kernel lock
 *	N	net lock
 */

struct pppx_dev {
	LIST_ENTRY(pppx_dev)	pxd_entry;	/* [K] */
	int			pxd_unit;	/* [I] */

	/* kq shizz */
	struct selinfo		pxd_rsel;
	struct mutex		pxd_rsel_mtx;
	struct selinfo		pxd_wsel;
	struct mutex		pxd_wsel_mtx;

	/* queue of packets for userland to service - protected by splnet */
	struct mbuf_queue	pxd_svcq;
	int			pxd_waiting;	/* [N] */
	LIST_HEAD(,pppx_if)	pxd_pxis;	/* [N] */
};

LIST_HEAD(, pppx_dev)		pppx_devs =
    LIST_HEAD_INITIALIZER(pppx_devs);		/* [K] */
struct pool			pppx_if_pl;

struct pppx_dev		*pppx_dev_lookup(dev_t);
struct pppx_dev		*pppx_dev2pxd(dev_t);

struct pppx_if_key {
	int			pxik_session_id;	/* [I] */
	int			pxik_protocol;		/* [I] */
};

struct pppx_if {
	struct pppx_if_key	pxi_key;		/* [I] must be first
							    in the struct */

	RBT_ENTRY(pppx_if)	pxi_entry;		/* [N] */
	LIST_ENTRY(pppx_if)	pxi_list;		/* [N] */

	int			pxi_ready;		/* [N] */

	int			pxi_unit;		/* [I] */
	struct ifnet		pxi_if;
	struct pppx_dev		*pxi_dev;		/* [I] */
	struct pipex_session	*pxi_session;		/* [I] */
};

static inline int
pppx_if_cmp(const struct pppx_if *a, const struct pppx_if *b)
{
	return memcmp(&a->pxi_key, &b->pxi_key, sizeof(a->pxi_key));
}

RBT_HEAD(pppx_ifs, pppx_if) pppx_ifs = RBT_INITIALIZER(&pppx_ifs); /* [N] */
RBT_PROTOTYPE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);

int		pppx_if_next_unit(void);
struct pppx_if	*pppx_if_find(struct pppx_dev *, int, int);
int		pppx_add_session(struct pppx_dev *,
		    struct pipex_session_req *);
int		pppx_del_session(struct pppx_dev *,
		    struct pipex_session_close_req *);
int		pppx_set_session_descr(struct pppx_dev *,
		    struct pipex_session_descr_req *);

void		pppx_if_destroy(struct pppx_dev *, struct pppx_if *);
void		pppx_if_qstart(struct ifqueue *);
int		pppx_if_output(struct ifnet *, struct mbuf *,
		    struct sockaddr *, struct rtentry *);
int		pppx_if_ioctl(struct ifnet *, u_long, caddr_t);


void		pppxattach(int);

void		filt_pppx_rdetach(struct knote *);
int		filt_pppx_read(struct knote *, long);

const struct filterops pppx_rd_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppx_rdetach,
	.f_event	= filt_pppx_read,
};

void		filt_pppx_wdetach(struct knote *);
int		filt_pppx_write(struct knote *, long);

const struct filterops pppx_wr_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppx_wdetach,
	.f_event	= filt_pppx_write,
};

struct pppx_dev *
pppx_dev_lookup(dev_t dev)
{
	struct pppx_dev *pxd;
	int unit = minor(dev);

	LIST_FOREACH(pxd, &pppx_devs, pxd_entry) {
		if (pxd->pxd_unit == unit)
			return (pxd);
	}

	return (NULL);
}

struct pppx_dev *
pppx_dev2pxd(dev_t dev)
{
	struct pppx_dev *pxd;

	pxd = pppx_dev_lookup(dev);

	return (pxd);
}

void
pppxattach(int n)
{
	pool_init(&pppx_if_pl, sizeof(struct pppx_if), 0, IPL_NONE,
	    PR_WAITOK, "pppxif", NULL);
	pipex_init();
}

int
pppxopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppx_dev *pxd;

	pxd = malloc(sizeof(*pxd), M_DEVBUF, M_WAITOK | M_ZERO);
	if (pppx_dev_lookup(dev) != NULL) {
		free(pxd, M_DEVBUF, sizeof(*pxd));
		return (EBUSY);
	}

	pxd->pxd_unit = minor(dev);
	mtx_init(&pxd->pxd_rsel_mtx, IPL_NET);
	mtx_init(&pxd->pxd_wsel_mtx, IPL_NET);
	LIST_INIT(&pxd->pxd_pxis);

	mq_init(&pxd->pxd_svcq, 128, IPL_NET);
	LIST_INSERT_HEAD(&pppx_devs, pxd, pxd_entry);

	return 0;
}

int
pppxread(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct mbuf *m, *m0;
	int error = 0;
	size_t len;

	if (!pxd)
		return (ENXIO);

	while ((m0 = mq_dequeue(&pxd->pxd_svcq)) == NULL) {
		if (ISSET(ioflag, IO_NDELAY))
			return (EWOULDBLOCK);

		NET_LOCK();
		pxd->pxd_waiting = 1;
		error = rwsleep_nsec(pxd, &netlock,
		    (PZERO + 1)|PCATCH, "pppxread", INFSLP);
		NET_UNLOCK();
		if (error != 0) {
			return (error);
		}
	}

	while (m0 != NULL && uio->uio_resid > 0 && error == 0) {
		len = ulmin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		m = m_free(m0);
		m0 = m;
	}

	m_freem(m0);

	return (error);
}

int
pppxwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct pppx_hdr *th;
	struct pppx_if *pxi;
	uint32_t proto;
	struct mbuf *top, **mp, *m;
	int tlen;
	int error = 0;
	size_t mlen;

	if (uio->uio_resid < sizeof(*th) + sizeof(uint32_t) ||
	    uio->uio_resid > MCLBYTES)
		return (EMSGSIZE);

	tlen = uio->uio_resid;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	mlen = MHLEN;
	if (uio->uio_resid > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_free(m);
			return (ENOBUFS);
		}
		mlen = MCLBYTES;
	}

	top = NULL;
	mp = &top;

	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = ulmin(mlen, uio->uio_resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
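		/* more data left to copy in: chain on another mbuf */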
		if (error == 0 && uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
			if (uio->uio_resid >= MINCLSIZE) {
				MCLGET(m, M_DONTWAIT);
				if (!(m->m_flags & M_EXT)) {
					error = ENOBUFS;
					m_free(m);
					break;
				}
				mlen = MCLBYTES;
			}
		}
	}

	if (error) {
		m_freem(top);
		return (error);
	}

	top->m_pkthdr.len = tlen;

	/* Find the interface */
	th = mtod(top, struct pppx_hdr *);
	m_adj(top, sizeof(struct pppx_hdr));

	NET_LOCK();

	pxi = pppx_if_find(pxd, th->pppx_id, th->pppx_proto);
	if (pxi == NULL) {
		NET_UNLOCK();
		m_freem(top);
		return (EINVAL);
	}
	top->m_pkthdr.ph_ifidx = pxi->pxi_if.if_index;

#if NBPFILTER > 0
	if (pxi->pxi_if.if_bpf)
		bpf_mtap(pxi->pxi_if.if_bpf, top, BPF_DIRECTION_IN);
#endif
	/* strip the tunnel header */
	proto = ntohl(*(uint32_t *)(th + 1));
	m_adj(top, sizeof(uint32_t));

	switch (proto) {
	case AF_INET:
		ipv4_input(&pxi->pxi_if, top);
		break;
#ifdef INET6
	case AF_INET6:
		ipv6_input(&pxi->pxi_if, top);
		break;
#endif
	default:
		m_freem(top);
		error = EAFNOSUPPORT;
		break;
	}

	NET_UNLOCK();

	return (error);
}

int
pppxioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	int error = 0;

	NET_LOCK();
	switch (cmd) {
	case PIPEXASESSION:
		error = pppx_add_session(pxd,
		    (struct pipex_session_req *)addr);
		break;

	case PIPEXDSESSION:
		error = pppx_del_session(pxd,
		    (struct pipex_session_close_req *)addr);
		break;

	case PIPEXSIFDESCR:
		error = pppx_set_session_descr(pxd,
		    (struct pipex_session_descr_req *)addr);
		break;

	case FIONBIO:
		break;
	case FIONREAD:
		*(int *)addr = mq_hdatalen(&pxd->pxd_svcq);
		break;

	default:
		error = pipex_ioctl(pxd, cmd, addr);
		break;
	}
	NET_UNLOCK();

	return (error);
}

int
pppxpoll(dev_t dev, int events, struct proc *p)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	int revents = 0;

	if (events & (POLLIN | POLLRDNORM)) {
		if (!mq_empty(&pxd->pxd_svcq))
			revents |= events & (POLLIN | POLLRDNORM);
	}
	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &pxd->pxd_rsel);
	}

	return (revents);
}

int
pppxkqfilter(dev_t dev, struct knote *kn)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct mutex *mtx;
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		mtx = &pxd->pxd_rsel_mtx;
		klist = &pxd->pxd_rsel.si_note;
		kn->kn_fop = &pppx_rd_filtops;
		break;
	case EVFILT_WRITE:
		mtx = &pxd->pxd_wsel_mtx;
		klist = &pxd->pxd_wsel.si_note;
		kn->kn_fop = &pppx_wr_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)pxd;

	mtx_enter(mtx);
	klist_insert_locked(klist, kn);
	mtx_leave(mtx);

	return (0);
}

void
filt_pppx_rdetach(struct knote *kn)
{
	struct pppx_dev *pxd = (struct pppx_dev *)kn->kn_hook;
	struct klist *klist = &pxd->pxd_rsel.si_note;

	mtx_enter(&pxd->pxd_rsel_mtx);
	klist_remove_locked(klist, kn);
	mtx_leave(&pxd->pxd_rsel_mtx);
}

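/* kq read filter: report how many queued bytes are ready for userland */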
int
filt_pppx_read(struct knote *kn, long hint)
{
	struct pppx_dev *pxd = (struct pppx_dev *)kn->kn_hook;

	kn->kn_data = mq_hdatalen(&pxd->pxd_svcq);

	return (kn->kn_data > 0);
}

void
filt_pppx_wdetach(struct knote *kn)
{
	struct pppx_dev *pxd = (struct pppx_dev *)kn->kn_hook;
	struct klist *klist = &pxd->pxd_wsel.si_note;

	mtx_enter(&pxd->pxd_wsel_mtx);
	klist_remove_locked(klist, kn);
	mtx_leave(&pxd->pxd_wsel_mtx);
}

int
filt_pppx_write(struct knote *kn, long hint)
{
	/* We're always ready to accept a write. */
	return (1);
}

int
pppxclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppx_dev *pxd;
	struct pppx_if *pxi;

	pxd = pppx_dev_lookup(dev);

	/* XXX */
	NET_LOCK();
	while ((pxi = LIST_FIRST(&pxd->pxd_pxis)))
		pppx_if_destroy(pxd, pxi);
	NET_UNLOCK();

	LIST_REMOVE(pxd, pxd_entry);

	mq_purge(&pxd->pxd_svcq);

	free(pxd, M_DEVBUF, sizeof(*pxd));

	return (0);
}

int
pppx_if_next_unit(void)
{
	struct pppx_if *pxi;
	int unit = 0;

	/* this is safe without splnet since we're not modifying it */
	do {
		int found = 0;
		RBT_FOREACH(pxi, pppx_ifs, &pppx_ifs) {
			if (pxi->pxi_unit == unit) {
				found = 1;
				break;
			}
		}

		if (found == 0)
			break;
		unit++;
	} while (unit > 0);

	return (unit);
}

struct pppx_if *
pppx_if_find(struct pppx_dev *pxd, int session_id, int protocol)
{
	struct pppx_if_key key;
	struct pppx_if *pxi;

	memset(&key, 0, sizeof(key));
	key.pxik_session_id = session_id;
	key.pxik_protocol = protocol;

	pxi = RBT_FIND(pppx_ifs, &pppx_ifs, (struct pppx_if *)&key);
	if (pxi && pxi->pxi_ready == 0)
		pxi = NULL;

	return pxi;
}

int
pppx_add_session(struct pppx_dev *pxd, struct pipex_session_req *req)
{
	struct pppx_if *pxi;
	struct pipex_session *session;
	struct ifnet *ifp;
	int unit, error = 0;
	struct in_ifaddr *ia;
	struct sockaddr_in ifaddr;

	/*
	 * XXX: As long as `session' is allocated as part of a `pxi'
	 *	it isn't possible to free it separately.  So disallow
	 *	the timeout feature until this is fixed.
	 */
	if (req->pr_timeout_sec != 0)
		return (EINVAL);

	error = pipex_init_session(&session, req);
	if (error)
		return (error);

	pxi = pool_get(&pppx_if_pl, PR_WAITOK | PR_ZERO);
	ifp = &pxi->pxi_if;

	pxi->pxi_session = session;

	/* try to set the interface up */
	unit = pppx_if_next_unit();
	if (unit < 0) {
		error = ENOMEM;
		goto out;
	}

	pxi->pxi_unit = unit;
	pxi->pxi_key.pxik_session_id = req->pr_session_id;
	pxi->pxi_key.pxik_protocol = req->pr_protocol;
	pxi->pxi_dev = pxd;

	if (RBT_INSERT(pppx_ifs, &pppx_ifs, pxi) != NULL) {
		error = EADDRINUSE;
		goto out;
	}
	LIST_INSERT_HEAD(&pxd->pxd_pxis, pxi, pxi_list);

	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d", "pppx", unit);
	ifp->if_mtu = req->pr_peer_mru;	/* XXX */
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST | IFF_UP;
	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
	ifp->if_qstart = pppx_if_qstart;
	ifp->if_output = pppx_if_output;
	ifp->if_ioctl = pppx_if_ioctl;
	ifp->if_rtrequest = p2p_rtrequest;
	ifp->if_type = IFT_PPP;
	ifp->if_softc = pxi;
	/* ifp->if_rdomain = req->pr_rdomain; */
	if_counters_alloc(ifp);
	/* XXXSMP: be sure pppx_if_qstart() called with NET_LOCK held */
	ifq_set_maxlen(&ifp->if_snd, 1);

	/* XXXSMP breaks atomicity */
	NET_UNLOCK();
	if_attach(ifp);
	NET_LOCK();

	if_addgroup(ifp, "pppx");
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(u_int32_t));
#endif

	/* XXX ipv6 support?  how does the caller indicate it wants ipv6
	 * instead of ipv4?
	 */
	memset(&ifaddr, 0, sizeof(ifaddr));
	ifaddr.sin_family = AF_INET;
	ifaddr.sin_len = sizeof(ifaddr);
	ifaddr.sin_addr = req->pr_ip_srcaddr;

	ia = malloc(sizeof(*ia), M_IFADDR, M_WAITOK | M_ZERO);

	ia->ia_addr.sin_family = AF_INET;
	ia->ia_addr.sin_len = sizeof(struct sockaddr_in);
	ia->ia_addr.sin_addr = req->pr_ip_srcaddr;

	ia->ia_dstaddr.sin_family = AF_INET;
	ia->ia_dstaddr.sin_len = sizeof(struct sockaddr_in);
	ia->ia_dstaddr.sin_addr = req->pr_ip_address;

	ia->ia_sockmask.sin_family = AF_INET;
	ia->ia_sockmask.sin_len = sizeof(struct sockaddr_in);
	ia->ia_sockmask.sin_addr = req->pr_ip_netmask;

	ia->ia_ifa.ifa_addr = sintosa(&ia->ia_addr);
	ia->ia_ifa.ifa_dstaddr = sintosa(&ia->ia_dstaddr);
	ia->ia_ifa.ifa_netmask = sintosa(&ia->ia_sockmask);
	ia->ia_ifa.ifa_ifp = ifp;

	ia->ia_netmask = ia->ia_sockmask.sin_addr.s_addr;

	error = in_ifinit(ifp, ia, &ifaddr, 1);
	if (error) {
		printf("pppx: unable to set addresses for %s, error=%d\n",
		    ifp->if_xname, error);
	} else {
		if_addrhooks_run(ifp);
	}

	error = pipex_link_session(session, ifp, pxd);
	if (error)
		goto detach;

	SET(ifp->if_flags, IFF_RUNNING);
	pxi->pxi_ready = 1;

	return (error);

detach:
	/* XXXSMP breaks atomicity */
	NET_UNLOCK();
	if_detach(ifp);
	NET_LOCK();

	if (RBT_REMOVE(pppx_ifs, &pppx_ifs, pxi) == NULL)
		panic("%s: inconsistent RB tree", __func__);
	LIST_REMOVE(pxi, pxi_list);
out:
	pool_put(&pppx_if_pl, pxi);
	pipex_rele_session(session);

	return (error);
}

int
pppx_del_session(struct pppx_dev *pxd, struct pipex_session_close_req *req)
{
	struct pppx_if *pxi;

	pxi = pppx_if_find(pxd, req->pcr_session_id, req->pcr_protocol);
	if (pxi == NULL)
		return (EINVAL);

	req->pcr_stat = pxi->pxi_session->stat;

	pppx_if_destroy(pxd, pxi);
	return (0);
}

int
pppx_set_session_descr(struct pppx_dev *pxd,
    struct pipex_session_descr_req *req)
{
	struct pppx_if *pxi;

	pxi = pppx_if_find(pxd, req->pdr_session_id, req->pdr_protocol);
	if (pxi == NULL)
		return (EINVAL);

	(void)memset(pxi->pxi_if.if_description, 0, IFDESCRSIZE);
	strlcpy(pxi->pxi_if.if_description, req->pdr_descr, IFDESCRSIZE);

	return (0);
}

void
pppx_if_destroy(struct pppx_dev *pxd, struct pppx_if *pxi)
{
	struct ifnet *ifp;
	struct pipex_session *session;

	NET_ASSERT_LOCKED();
	session = pxi->pxi_session;
	ifp = &pxi->pxi_if;
	pxi->pxi_ready = 0;
	CLR(ifp->if_flags, IFF_RUNNING);

	pipex_unlink_session(session);

	/* XXXSMP breaks atomicity */
	NET_UNLOCK();
	if_detach(ifp);
	NET_LOCK();

	pipex_rele_session(session);
	if (RBT_REMOVE(pppx_ifs, &pppx_ifs, pxi) == NULL)
		panic("%s: inconsistent RB tree", __func__);
	LIST_REMOVE(pxi, pxi_list);

	pool_put(&pppx_if_pl, pxi);
}

void
pppx_if_qstart(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct mbuf *m;
	int proto;

	NET_ASSERT_LOCKED();
	while ((m = ifq_dequeue(ifq)) != NULL) {
		proto = *mtod(m, int *);
		m_adj(m, sizeof(proto));

		pipex_ppp_output(m, pxi->pxi_session, proto);
	}
}

int
pppx_if_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct pppx_hdr *th;
	int error = 0;
	int proto;

	NET_ASSERT_LOCKED();

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap_af(ifp->if_bpf, dst->sa_family, m, BPF_DIRECTION_OUT);
#endif
	if (pipex_enable) {
		switch (dst->sa_family) {
#ifdef INET6
		case AF_INET6:
			proto = PPP_IPV6;
			break;
#endif
		case AF_INET:
			proto = PPP_IP;
			break;
		default:
			m_freem(m);
			error = EPFNOSUPPORT;
			goto out;
		}
	} else
		proto = htonl(dst->sa_family);

	M_PREPEND(m, sizeof(int), M_DONTWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto out;
	}
	*mtod(m, int *) = proto;

	if (pipex_enable)
		error = if_enqueue(ifp, m);
	else {
		M_PREPEND(m, sizeof(struct pppx_hdr), M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}
		th = mtod(m, struct pppx_hdr *);
		th->pppx_proto = 0;	/* not used */
		th->pppx_id = pxi->pxi_session->ppp_id;
		error = mq_enqueue(&pxi->pxi_dev->pxd_svcq, m);
		if (error == 0) {
			if (pxi->pxi_dev->pxd_waiting) {
				wakeup((caddr_t)pxi->pxi_dev);
				pxi->pxi_dev->pxd_waiting = 0;
			}
			selwakeup(&pxi->pxi_dev->pxd_rsel);
		}
	}

out:
	if (error)
		counters_inc(ifp->if_counters, ifc_oerrors);
	return (error);
}

int
pppx_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		break;

	case SIOCSIFFLAGS:
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < 512 ||
		    ifr->ifr_mtu > pxi->pxi_session->peer_mru)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

RBT_GENERATE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);

/*
 * Locks used to protect struct members and global data
 *	I	immutable after creation
 *	K	kernel lock
 *	N	net lock
 */

struct pppac_softc {
	struct ifnet	sc_if;
	dev_t		sc_dev;		/* [I] */
	LIST_ENTRY(pppac_softc)
			sc_entry;	/* [K] */

	struct mutex	sc_rsel_mtx;
	struct selinfo	sc_rsel;
	struct mutex	sc_wsel_mtx;
	struct selinfo	sc_wsel;

	struct pipex_session
			*sc_multicast_session;

	struct mbuf_queue
			sc_mq;
};

LIST_HEAD(pppac_list, pppac_softc);	/* [K] */

static void	filt_pppac_rdetach(struct knote *);
static int	filt_pppac_read(struct knote *, long);

static const struct filterops pppac_rd_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppac_rdetach,
	.f_event	= filt_pppac_read
};

static void	filt_pppac_wdetach(struct knote *);
static int	filt_pppac_write(struct knote *, long);

static const struct filterops pppac_wr_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppac_wdetach,
	.f_event	= filt_pppac_write
};

static struct pppac_list pppac_devs = LIST_HEAD_INITIALIZER(pppac_devs);

static int	pppac_ioctl(struct ifnet *, u_long, caddr_t);

static int	pppac_add_session(struct pppac_softc *,
		    struct pipex_session_req *);
static int	pppac_del_session(struct pppac_softc *,
		    struct pipex_session_close_req *);
static int	pppac_output(struct ifnet *, struct mbuf *, struct sockaddr *,
		    struct rtentry *);
static void	pppac_qstart(struct ifqueue *);

static inline struct pppac_softc *
pppac_lookup(dev_t dev)
{
	struct pppac_softc *sc;

	LIST_FOREACH(sc, &pppac_devs, sc_entry) {
		if (sc->sc_dev == dev)
			return (sc);
	}

	return (NULL);
}

void
pppacattach(int n)
{
	pipex_init(); /* to be sure, to be sure */
}

int
pppacopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppac_softc *sc;
	struct ifnet *ifp;
	struct pipex_session *session;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	if (pppac_lookup(dev) != NULL) {
		free(sc, M_DEVBUF, sizeof(*sc));
		return (EBUSY);
	}

	/* virtual pipex_session entry for multicast */
	session = pool_get(&pipex_session_pool, PR_WAITOK | PR_ZERO);
	session->is_multicast = 1;
	session->ownersc = sc;
	sc->sc_multicast_session = session;

	sc->sc_dev = dev;

	mtx_init(&sc->sc_rsel_mtx, IPL_SOFTNET);
	mtx_init(&sc->sc_wsel_mtx, IPL_SOFTNET);
	mq_init(&sc->sc_mq, IFQ_MAXLEN, IPL_SOFTNET);

	LIST_INSERT_HEAD(&pppac_devs, sc, sc_entry);

	ifp = &sc->sc_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "pppac%u", minor(dev));

	ifp->if_softc = sc;
	ifp->if_type = IFT_L3IPVLAN;
	ifp->if_hdrlen = sizeof(uint32_t); /* for BPF */;
	ifp->if_mtu = MAXMCLBYTES - sizeof(uint32_t);
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
	ifp->if_rtrequest = p2p_rtrequest; /* XXX */
	ifp->if_output = pppac_output;
	ifp->if_qstart = pppac_qstart;
	ifp->if_ioctl = pppac_ioctl;
	/* XXXSMP: be sure pppac_qstart() called with NET_LOCK held */
	ifq_set_maxlen(&ifp->if_snd, 1);

	if_counters_alloc(ifp);
	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(uint32_t));
#endif

	return (0);
}

int
pppacread(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m0, *m;
	int error = 0;
	size_t len;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (EHOSTDOWN);

	m0 = mq_dequeue(&sc->sc_mq);
	if (m0 == NULL) {
		if (ISSET(ioflag, IO_NDELAY))
			return (EWOULDBLOCK);

		do {
			error = tsleep_nsec(sc, (PZERO + 1)|PCATCH,
			    "pppacrd", INFSLP);
			if (error != 0)
				return (error);

			m0 = mq_dequeue(&sc->sc_mq);
		} while (m0 == NULL);
	}

	m = m0;
	while (uio->uio_resid > 0) {
		len = ulmin(uio->uio_resid, m->m_len);
		if (len != 0) {
			error = uiomove(mtod(m, caddr_t), len, uio);
			if (error != 0)
				break;
		}

		m = m->m_next;
		if (m == NULL)
			break;
	}
	m_freem(m0);

	return (error);
}

int
pppacwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	uint32_t proto;
	int error;
	struct mbuf *m;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (EHOSTDOWN);

	if (uio->uio_resid < ifp->if_hdrlen || uio->uio_resid > MAXMCLBYTES)
		return (EMSGSIZE);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOMEM);

	if (uio->uio_resid > MHLEN) {
		m_clget(m, M_WAITOK, uio->uio_resid);
		if (!ISSET(m->m_flags, M_EXT)) {
			m_free(m);
			return (ENOMEM);
		}
	}

	m->m_pkthdr.len = m->m_len = uio->uio_resid;

	error = uiomove(mtod(m, void *), m->m_len, uio);
	if (error != 0) {
		m_freem(m);
		return (error);
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

	/* strip the tunnel header */
	proto = ntohl(*mtod(m, uint32_t *));
	m_adj(m, sizeof(uint32_t));

	m->m_flags &= ~(M_MCAST|M_BCAST);
	m->m_pkthdr.ph_ifidx = ifp->if_index;
	m->m_pkthdr.ph_rtableid = ifp->if_rdomain;

#if NPF > 0
	pf_pkt_addr_changed(m);
#endif

	counters_pkt(ifp->if_counters,
	    ifc_ipackets, ifc_ibytes, m->m_pkthdr.len);

	NET_LOCK();

	switch (proto) {
	case AF_INET:
		ipv4_input(ifp, m);
		break;
#ifdef INET6
	case AF_INET6:
		ipv6_input(ifp, m);
		break;
#endif
	default:
		m_freem(m);
		error = EAFNOSUPPORT;
		break;
	}

	NET_UNLOCK();

	return (error);
}

int
pppacioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	int error = 0;

	NET_LOCK();
	switch (cmd) {
	case FIONBIO:
		break;
	case FIONREAD:
		*(int *)data = mq_hdatalen(&sc->sc_mq);
		break;

	case PIPEXASESSION:
		error = pppac_add_session(sc, (struct pipex_session_req *)data);
		break;
	case PIPEXDSESSION:
		error = pppac_del_session(sc,
		    (struct pipex_session_close_req *)data);
		break;
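	/* anything else is passed straight through to the pipex layer */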
	default:
		error = pipex_ioctl(sc, cmd, data);
		break;
	}
	NET_UNLOCK();

	return (error);
}

int
pppacpoll(dev_t dev, int events, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	int revents = 0;

	if (events & (POLLIN | POLLRDNORM)) {
		if (!mq_empty(&sc->sc_mq))
			revents |= events & (POLLIN | POLLRDNORM);
	}
	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &sc->sc_rsel);
	}

	return (revents);
}

int
pppackqfilter(dev_t dev, struct knote *kn)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct mutex *mtx;
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		mtx = &sc->sc_rsel_mtx;
		klist = &sc->sc_rsel.si_note;
		kn->kn_fop = &pppac_rd_filtops;
		break;
	case EVFILT_WRITE:
		mtx = &sc->sc_wsel_mtx;
		klist = &sc->sc_wsel.si_note;
		kn->kn_fop = &pppac_wr_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	mtx_enter(mtx);
	klist_insert_locked(klist, kn);
	mtx_leave(mtx);

	return (0);
}

static void
filt_pppac_rdetach(struct knote *kn)
{
	struct pppac_softc *sc = kn->kn_hook;
	struct klist *klist = &sc->sc_rsel.si_note;

	mtx_enter(&sc->sc_rsel_mtx);
	klist_remove_locked(klist, kn);
	mtx_leave(&sc->sc_rsel_mtx);
}

static int
filt_pppac_read(struct knote *kn, long hint)
{
	struct pppac_softc *sc = kn->kn_hook;

	kn->kn_data = mq_hdatalen(&sc->sc_mq);

	return (kn->kn_data > 0);
}

static void
filt_pppac_wdetach(struct knote *kn)
{
	struct pppac_softc *sc = kn->kn_hook;
	struct klist *klist = &sc->sc_wsel.si_note;

	mtx_enter(&sc->sc_wsel_mtx);
	klist_remove_locked(klist, kn);
	mtx_leave(&sc->sc_wsel_mtx);
}

static int
filt_pppac_write(struct knote *kn, long hint)
{
	/* We're always ready to accept a write. */
	return (1);
}

int
pppacclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	int s;

	NET_LOCK();
	CLR(ifp->if_flags, IFF_RUNNING);
	NET_UNLOCK();

	if_detach(ifp);

	s = splhigh();
	klist_invalidate(&sc->sc_rsel.si_note);
	klist_invalidate(&sc->sc_wsel.si_note);
	splx(s);

	pool_put(&pipex_session_pool, sc->sc_multicast_session);
	NET_LOCK();
	pipex_destroy_all_sessions(sc);
	NET_UNLOCK();

	LIST_REMOVE(sc, sc_entry);
	free(sc, M_DEVBUF, sizeof(*sc));

	return (0);
}

static int
pppac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	/* struct ifreq *ifr = (struct ifreq *)data; */
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		SET(ifp->if_flags, IFF_UP); /* XXX cry cry */
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP))
			SET(ifp->if_flags, IFF_RUNNING);
		else
			CLR(ifp->if_flags, IFF_RUNNING);
		break;
	case SIOCSIFMTU:
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX */
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

static int
pppac_add_session(struct pppac_softc *sc, struct pipex_session_req *req)
{
	int error;
	struct pipex_session *session;

	error = pipex_init_session(&session, req);
	if (error != 0)
		return (error);
	error = pipex_link_session(session, &sc->sc_if, sc);
	if (error != 0)
		pipex_rele_session(session);

	return (error);
}

static int
pppac_del_session(struct pppac_softc *sc, struct pipex_session_close_req *req)
{
	struct pipex_session *session;

	session = pipex_lookup_by_session_id(req->pcr_protocol,
	    req->pcr_session_id);
	if (session == NULL || session->ownersc != sc)
		return (EINVAL);
	pipex_unlink_session(session);
	pipex_rele_session(session);

	return (0);
}

static int
pppac_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	int error;

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		error = EHOSTDOWN;
		goto drop;
	}

	switch (dst->sa_family) {
	case AF_INET:
#ifdef INET6
	case AF_INET6:
#endif
		break;
	default:
		error = EAFNOSUPPORT;
		goto drop;
	}

	m->m_pkthdr.ph_family = dst->sa_family;

	return (if_enqueue(ifp, m));

drop:
	m_freem(m);
	return (error);
}

static void
pppac_qstart(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct pppac_softc *sc = ifp->if_softc;
	struct mbuf *m, *m0;
	struct pipex_session *session;
	struct ip ip;
	int rv;

	NET_ASSERT_LOCKED();
	while ((m = ifq_dequeue(ifq)) != NULL) {
#if NBPFILTER > 0
		if (ifp->if_bpf) {
			bpf_mtap_af(ifp->if_bpf, m->m_pkthdr.ph_family, m,
			    BPF_DIRECTION_OUT);
		}
#endif

		switch (m->m_pkthdr.ph_family) {
		case AF_INET:
			if (m->m_pkthdr.len < sizeof(struct ip))
				goto bad;
			m_copydata(m, 0, sizeof(struct ip), (caddr_t)&ip);
			if (IN_MULTICAST(ip.ip_dst.s_addr)) {
				/* pass a copy to pipex */
				m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
				if (m0 != NULL)
					pipex_ip_output(m0,
					    sc->sc_multicast_session);
				else
					goto bad;
			} else {
				session = pipex_lookup_by_ip_address(ip.ip_dst);
				if (session != NULL) {
					pipex_ip_output(m, session);
					m = NULL;
				}
			}
			break;
		}
		if (m == NULL)	/* handled by pipex */
			continue;

		m = m_prepend(m, sizeof(uint32_t), M_DONTWAIT);
		if (m == NULL)
			goto bad;
		*mtod(m, uint32_t *) = htonl(m->m_pkthdr.ph_family);

		rv = mq_enqueue(&sc->sc_mq, m);
		if (rv == 1)
			counters_inc(ifp->if_counters, ifc_collisions);
		continue;
bad:
		counters_inc(ifp->if_counters, ifc_oerrors);
		if (m != NULL)
			m_freem(m);
		continue;
	}

	if (!mq_empty(&sc->sc_mq)) {
		wakeup(sc);
		selwakeup(&sc->sc_rsel);
	}
}