1 /* $NetBSD: ip_fil_netbsd.c,v 1.19 2016/12/08 05:16:33 ozaki-r Exp $ */ 2 3 /* 4 * Copyright (C) 2012 by Darren Reed. 5 * 6 * See the IPFILTER.LICENCE file for details on licencing. 7 */ 8 #if !defined(lint) 9 #if defined(__NetBSD__) 10 #include <sys/cdefs.h> 11 __KERNEL_RCSID(0, "$NetBSD: ip_fil_netbsd.c,v 1.19 2016/12/08 05:16:33 ozaki-r Exp $"); 12 #else 13 static const char sccsid[] = "@(#)ip_fil.c 2.41 6/5/96 (C) 1993-2000 Darren Reed"; 14 static const char rcsid[] = "@(#)Id: ip_fil_netbsd.c,v 1.1.1.2 2012/07/22 13:45:17 darrenr Exp"; 15 #endif 16 #endif 17 18 #if defined(KERNEL) || defined(_KERNEL) 19 # undef KERNEL 20 # undef _KERNEL 21 # define KERNEL 1 22 # define _KERNEL 1 23 #endif 24 #include <sys/param.h> 25 #if (NetBSD >= 199905) && !defined(IPFILTER_LKM) 26 # if (__NetBSD_Version__ >= 799003000) 27 # ifdef _KERNEL_OPT 28 # include "opt_ipsec.h" 29 # endif 30 # else 31 # include "opt_ipsec.h" 32 # endif 33 #endif 34 #include <sys/errno.h> 35 #include <sys/types.h> 36 #include <sys/file.h> 37 #include <sys/ioctl.h> 38 #include <sys/time.h> 39 #include <sys/systm.h> 40 #include <sys/select.h> 41 #if (NetBSD > 199609) 42 # include <sys/dirent.h> 43 #else 44 # include <sys/dir.h> 45 #endif 46 #if (__NetBSD_Version__ >= 599005900) 47 # include <sys/cprng.h> 48 #endif 49 #include <sys/mbuf.h> 50 #include <sys/protosw.h> 51 #include <sys/socket.h> 52 #include <sys/poll.h> 53 #if (__NetBSD_Version__ >= 399002000) 54 # include <sys/kauth.h> 55 #endif 56 #if (__NetBSD_Version__ >= 799003000) 57 #include <sys/module.h> 58 #include <sys/mutex.h> 59 #endif 60 61 #include <net/if.h> 62 #include <net/route.h> 63 #include <netinet/in.h> 64 #include <netinet/in_var.h> 65 #include <netinet/in_systm.h> 66 #include <netinet/ip.h> 67 #include <netinet/ip_var.h> 68 #include <netinet/tcp.h> 69 #if __NetBSD_Version__ >= 105190000 /* 1.5T */ 70 # include <netinet/tcp_timer.h> 71 # include <netinet/tcp_var.h> 72 #endif 73 #include <netinet/udp.h> 74 #include 
<netinet/tcpip.h> 75 #include <netinet/ip_icmp.h> 76 #include "netinet/ip_compat.h" 77 #ifdef USE_INET6 78 # include <netinet/icmp6.h> 79 # if (__NetBSD_Version__ >= 106000000) 80 # include <netinet6/nd6.h> 81 # endif 82 #endif 83 #include "netinet/ip_fil.h" 84 #include "netinet/ip_nat.h" 85 #include "netinet/ip_frag.h" 86 #include "netinet/ip_state.h" 87 #include "netinet/ip_proxy.h" 88 #include "netinet/ip_auth.h" 89 #include "netinet/ip_sync.h" 90 #include "netinet/ip_lookup.h" 91 #include "netinet/ip_dstlist.h" 92 #ifdef IPFILTER_SCAN 93 #include "netinet/ip_scan.h" 94 #endif 95 #include <sys/md5.h> 96 #include <sys/kernel.h> 97 #include <sys/conf.h> 98 #ifdef INET 99 extern int ip_optcopy (struct ip *, struct ip *); 100 #endif 101 102 #ifdef IPFILTER_M_IPFILTER 103 MALLOC_DEFINE(M_IPFILTER, "IP Filter", "IP Filter packet filter data structures"); 104 #endif 105 106 #if __NetBSD_Version__ >= 105009999 107 # define csuminfo csum_flags 108 #endif 109 110 #if __NetBSD_Version__ < 200000000 111 extern struct protosw inetsw[]; 112 #endif 113 114 #if (__NetBSD_Version__ >= 599002000) 115 static kauth_listener_t ipf_listener; 116 #endif 117 118 #if (__NetBSD_Version__ < 399001400) 119 extern int ip6_getpmtu (struct route_in6 *, struct route_in6 *, 120 struct ifnet *, struct in6_addr *, u_long *, 121 int *); 122 #endif 123 #if (NetBSD >= 199511) 124 static int ipfopen(dev_t dev, int flags, int devtype, PROC_T *p); 125 static int ipfclose(dev_t dev, int flags, int devtype, PROC_T *p); 126 #else 127 # if (__NetBSD_Version__ >= 399001400) 128 static int ipfopen(dev_t dev, int flags, struct lwp *); 129 static int ipfclose(dev_t dev, int flags, struct lwp *); 130 # else 131 static int ipfopen(dev_t dev, int flags); 132 static int ipfclose(dev_t dev, int flags); 133 # endif /* __NetBSD_Version__ >= 399001400 */ 134 #endif 135 static int ipfread(dev_t, struct uio *, int ioflag); 136 static int ipfwrite(dev_t, struct uio *, int ioflag); 137 static int ipfpoll(dev_t, int 
events, PROC_T *);
static void ipf_timer_func(void *ptr);

/* Character device entry points for /dev/ipl and friends. */
const struct cdevsw ipl_cdevsw = {
	.d_open = ipfopen,
	.d_close = ipfclose,
	.d_read = ipfread,
	.d_write = ipfwrite,
	.d_ioctl = ipfioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ipfpoll,
	.d_mmap = nommap,
#if (__NetBSD_Version__ >= 200000000)
	.d_kqfilter = nokqfilter,
#endif
	.d_discard = nodiscard,
#ifdef D_OTHER
	.d_flag = D_OTHER
#else
	.d_flag = 0
#endif
};
#if (__NetBSD_Version__ >= 799003000)
/* Serializes module attach/detach against active use (modular builds). */
kmutex_t ipf_ref_mutex;
int ipf_active;
#endif

/* Single global instance of the ipfilter soft context. */
ipf_main_softc_t ipfmain;

static	u_short	ipid = 0;	/* next IPv4 ident value handed out */
/* Saved previous value of ipf_checkp, restored on detach. */
static	int	(*ipf_savep)(void *, ip_t *, int, void *, int, struct mbuf **);
static	int	ipf_send_ip(fr_info_t *, mb_t *);
#ifdef USE_INET6
static int ipf_fastroute6(struct mbuf *, struct mbuf **,
			       fr_info_t *, frdest_t *);
#endif

#if defined(NETBSD_PF)
# include <net/pfil.h>
/*
 * We provide the ipf_checkp name just to minimize changes later.
 */
int (*ipf_checkp)(void *, ip_t *ip, int hlen, void *ifp, int out, mb_t **mp);
#endif /* NETBSD_PF */

#if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
# include <net/pfil.h>

static int ipf_check_wrapper(void *, struct mbuf **, struct ifnet *, int );

/*
 * pfil(9) hook for IPv4: adapt the pfil calling convention to ipf_check().
 * Returns non-zero to have the stack drop the packet.
 */
static int
ipf_check_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	struct ip *ip;
	int rv, hlen;

#if __NetBSD_Version__ >= 200080000
	/*
	 * ensure that mbufs are writable beforehand
	 * as it's assumed by ipf code.
	 * XXX inefficient
	 */
	int error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);

	if (error) {
		m_freem(*mp);
		*mp = NULL;
		return error;
	}
#endif
	ip = mtod(*mp, struct ip *);
	hlen = ip->ip_hl << 2;

#ifdef INET
#if defined(M_CSUM_TCPv4)
	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here. For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			in_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
		}
	}
#endif /* M_CSUM_TCPv4 */
#endif /* INET */

	/*
	 * Note, we don't need to update the checksum, because
	 * it has already been verified.
	 */
	rv = ipf_check(&ipfmain, ip, hlen, ifp, (dir == PFIL_OUT), mp);

	return (rv);
}

# ifdef USE_INET6
#  include <netinet/ip6.h>

static int ipf_check_wrapper6(void *, struct mbuf **, struct ifnet *, int );

/*
 * pfil(9) hook for IPv6: same adaptation as ipf_check_wrapper() but with
 * a fixed sizeof(struct ip6_hdr) header length.
 */
static int
ipf_check_wrapper6(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
#if defined(INET6)
# if defined(M_CSUM_TCPv6) && (__NetBSD_Version__ > 200000000)
	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here. For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
#  if (__NetBSD_Version__ > 399000600)
			in6_delayed_cksum(*mp);
#  endif
			(*mp)->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6|
			    M_CSUM_UDPv6);
		}
	}
# endif
#endif /* INET6 */

	return (ipf_check(&ipfmain, mtod(*mp, struct ip *),
	    sizeof(struct ip6_hdr), ifp, (dir == PFIL_OUT), mp));
}
# endif


# if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
static int ipf_pfilsync(void *, struct mbuf **, struct ifnet *, int);

/*
 * pfil(9) interface-event hook: resync ipfilter's interface name cache
 * whenever an interface comes or goes.
 */
static int
ipf_pfilsync(void *hdr, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	/*
	 * The interface pointer is useless for create (we have nothing to
	 * compare it to) and at detach, the interface name is still in the
	 * list of active NICs (albeit, down, but that's not any real
	 * indicator) and doing ifunit() on the name will still return the
	 * pointer, so it's not much use then, either.
	 */
	ipf_sync(&ipfmain, NULL);
	return 0;
}
# endif

#endif /* __NetBSD_Version__ >= 105110000 */


#if defined(IPFILTER_LKM)
/*
 * LKM identify hook: claim the "ipl" device name.
 */
int
ipf_identify(s)
	char *s;
{
	if (strcmp(s, "ipl") == 0)
		return 1;
	return 0;
}
#endif /* IPFILTER_LKM */

#if (__NetBSD_Version__ >= 599002000)
/*
 * kauth(9) listener: allow firewall/NAT configuration requests on the
 * network scope; defer everything else to other listeners.
 */
static int
ipf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	int result;
	enum kauth_network_req req;

	result = KAUTH_RESULT_DEFER;
	req = (enum kauth_network_req)arg0;

	if (action != KAUTH_NETWORK_FIREWALL)
		return result;

	/* These must have come from device context. */
	if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
	    (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
		result = KAUTH_RESULT_ALLOW;

	return result;
}
#endif

/*
 * Try to detect the case when compiling for NetBSD with pseudo-device.
 * On >= 7.99.3 the module framework does the work instead, so this is a
 * no-op there.
 */
void
ipfilterattach(int count)
{

#if (__NetBSD_Version__ >= 799003000)
	return;
#else
#if (__NetBSD_Version__ >= 599002000)
	ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    ipf_listener_cb, NULL);
#endif

	if (ipf_load_all() == 0)
		(void) ipf_create_all(&ipfmain);
#endif
}


/*
 * Attach the filter: initialize all subsystems, install the pfil(9)
 * hooks and start the slow timer.  Returns 0 on success or an errno.
 */
int
ipfattach(ipf_main_softc_t *softc)
{
	SPL_INT(s);
#if (__NetBSD_Version__ >= 499005500)
	int i;
#endif
#if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
	int error = 0;
# if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
	pfil_head_t *ph_inet;
#  ifdef USE_INET6
	pfil_head_t *ph_inet6;
#  endif
#  if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
	pfil_head_t *ph_ifsync;
#  endif
# endif
#endif

	SPL_NET(s);
	if ((softc->ipf_running > 0) || (ipf_checkp == ipf_check)) {
		printf("IP Filter: already initialized\n");
		SPL_X(s);
		IPFERROR(130017);
		return EBUSY;
	}

	if (ipf_init_all(softc) < 0) {
		SPL_X(s);
		IPFERROR(130015);
		return EIO;
	}

#ifdef NETBSD_PF
# if (__NetBSD_Version__ >= 104200000)
#  if __NetBSD_Version__ >= 105110000
	ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
#   ifdef USE_INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
#   endif
#   if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
	ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
#   endif

	/* Nothing to hook onto at all -> no way to filter. */
	if (ph_inet == NULL
#   ifdef USE_INET6
	    && ph_inet6 == NULL
#   endif
#   if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
	    && ph_ifsync == NULL
#   endif
	   ) {
		SPL_X(s);
IPFERROR(130016); 401 return ENODEV; 402 } 403 404 if (ph_inet != NULL) 405 error = pfil_add_hook((void *)ipf_check_wrapper, NULL, 406 PFIL_IN|PFIL_OUT, ph_inet); 407 else 408 error = 0; 409 # else 410 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT, 411 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh); 412 # endif 413 if (error) { 414 IPFERROR(130013); 415 goto pfil_error; 416 } 417 # else 418 pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT); 419 # endif 420 421 # ifdef USE_INET6 422 # if __NetBSD_Version__ >= 105110000 423 if (ph_inet6 != NULL) 424 error = pfil_add_hook((void *)ipf_check_wrapper6, NULL, 425 PFIL_IN|PFIL_OUT, ph_inet6); 426 else 427 error = 0; 428 if (error) { 429 pfil_remove_hook((void *)ipf_check_wrapper6, NULL, 430 PFIL_IN|PFIL_OUT, ph_inet6); 431 ipfmain.ipf_interror = 130014; 432 goto pfil_error; 433 } 434 # else 435 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT, 436 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh); 437 if (error) { 438 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT, 439 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh); 440 IPFERROR(130014); 441 goto pfil_error; 442 } 443 # endif 444 # endif 445 446 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET) 447 if (ph_ifsync != NULL) 448 (void) pfil_add_hook((void *)ipf_pfilsync, NULL, 449 PFIL_IFNET, ph_ifsync); 450 # endif 451 #endif 452 453 #if (__NetBSD_Version__ >= 499005500) 454 for (i = 0; i < IPL_LOGSIZE; i++) 455 selinit(&ipfmain.ipf_selwait[i]); 456 #else 457 bzero((char *)ipfmain.ipf_selwait, sizeof(ipfmain.ipf_selwait)); 458 #endif 459 ipf_savep = ipf_checkp; 460 ipf_checkp = ipf_check; 461 462 #ifdef INET 463 if (softc->ipf_control_forwarding & 1) 464 ipforwarding = 1; 465 #endif 466 467 ipid = 0; 468 469 SPL_X(s); 470 471 #if (__NetBSD_Version__ >= 104010000) 472 # if (__NetBSD_Version__ >= 499002000) 473 callout_init(&softc->ipf_slow_ch, 0); 474 # else 475 callout_init(&softc->ipf_slow_ch); 476 # endif 477 callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * 
IPF_HZ_MULT, 478 ipf_timer_func, softc); 479 #else 480 timeout(ipf_timer_func, softc, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT); 481 #endif 482 483 return 0; 484 485 #if __NetBSD_Version__ >= 105110000 486 pfil_error: 487 SPL_X(s); 488 ipf_fini_all(softc); 489 return error; 490 #endif 491 } 492 493 static void 494 ipf_timer_func(void *ptr) 495 { 496 ipf_main_softc_t *softc = ptr; 497 SPL_INT(s); 498 499 SPL_NET(s); 500 READ_ENTER(&softc->ipf_global); 501 502 if (softc->ipf_running > 0) 503 ipf_slowtimer(softc); 504 505 if (softc->ipf_running == -1 || softc->ipf_running == 1) { 506 #if NETBSD_GE_REV(104240000) 507 callout_reset(&softc->ipf_slow_ch, hz / 2, 508 ipf_timer_func, softc); 509 #else 510 timeout(ipf_timer_func, softc, 511 (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT); 512 #endif 513 } 514 RWLOCK_EXIT(&softc->ipf_global); 515 SPL_X(s); 516 } 517 518 519 /* 520 * Disable the filter by removing the hooks from the IP input/output 521 * stream. 522 */ 523 int 524 ipfdetach(ipf_main_softc_t *softc) 525 { 526 SPL_INT(s); 527 #if (__NetBSD_Version__ >= 499005500) 528 int i; 529 #endif 530 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000) 531 int error = 0; 532 # if __NetBSD_Version__ >= 105150000 533 pfil_head_t *ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET); 534 # ifdef USE_INET6 535 pfil_head_t *ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6); 536 # endif 537 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET) 538 struct pfil_head *ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0); 539 # endif 540 # endif 541 #endif 542 543 SPL_NET(s); 544 545 #if (__NetBSD_Version__ >= 104010000) 546 if (softc->ipf_running > 0) 547 callout_stop(&softc->ipf_slow_ch); 548 #else 549 untimeout(ipf_slowtimer, NULL); 550 #endif /* NetBSD */ 551 552 ipf_checkp = ipf_savep; 553 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE|FR_INACTIVE); 554 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE); 555 556 #ifdef INET 557 if (softc->ipf_control_forwarding & 2) 558 
ipforwarding = 0; 559 #endif 560 561 #ifdef NETBSD_PF 562 # if (__NetBSD_Version__ >= 104200000) 563 # if __NetBSD_Version__ >= 105110000 564 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET) 565 (void) pfil_remove_hook((void *)ipf_pfilsync, NULL, 566 PFIL_IFNET, ph_ifsync); 567 # endif 568 569 if (ph_inet != NULL) 570 error = pfil_remove_hook((void *)ipf_check_wrapper, NULL, 571 PFIL_IN|PFIL_OUT, ph_inet); 572 else 573 error = 0; 574 # else 575 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT, 576 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh); 577 # endif 578 if (error) { 579 SPL_X(s); 580 IPFERROR(130011); 581 return error; 582 } 583 # else 584 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT); 585 # endif 586 # ifdef USE_INET6 587 # if __NetBSD_Version__ >= 105110000 588 if (ph_inet6 != NULL) 589 error = pfil_remove_hook((void *)ipf_check_wrapper6, NULL, 590 PFIL_IN|PFIL_OUT, ph_inet6); 591 else 592 error = 0; 593 # else 594 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT, 595 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh); 596 # endif 597 if (error) { 598 SPL_X(s); 599 IPFERROR(130012); 600 return error; 601 } 602 # endif 603 #endif 604 SPL_X(s); 605 606 #if (__NetBSD_Version__ >= 499005500) 607 for (i = 0; i < IPL_LOGSIZE; i++) 608 seldestroy(&ipfmain.ipf_selwait[i]); 609 #endif 610 611 ipf_fini_all(softc); 612 613 return 0; 614 } 615 616 617 /* 618 * Filter ioctl interface. 
 */
int
ipfioctl(dev_t dev, u_long cmd,
#if (__NetBSD_Version__ >= 499001000)
    void *data,
#else
    caddr_t data,
#endif
    int mode
#if (NetBSD >= 199511)
# if (__NetBSD_Version__ >= 399001400)
    , struct lwp *p
#  if (__NetBSD_Version__ >= 399002000)
#   define	UID(l)	kauth_cred_getuid((l)->l_cred)
#  else
#   define	UID(l)	((l)->l_proc->p_cred->p_ruid)
#  endif
# else
    , struct proc *p
#  define	UID(p)	((p)->p_cred->p_ruid)
# endif
#endif
)
{
	int error = 0, unit = 0;
	SPL_INT(s);

	/* Writes require firewall configuration privilege. */
#if (__NetBSD_Version__ >= 399002000)
	if ((mode & FWRITE) &&
	    kauth_authorize_network(p->l_cred, KAUTH_NETWORK_FIREWALL,
		KAUTH_REQ_NETWORK_FIREWALL_FW, NULL,
		NULL, NULL)) {
		ipfmain.ipf_interror = 130005;
		return EPERM;
	}
#else
	if ((securelevel >= 2) && (mode & FWRITE)) {
		ipfmain.ipf_interror = 130001;
		return EPERM;
	}
#endif

	/* The minor number selects the ipfilter sub-device (log unit). */
	unit = GET_MINOR(dev);
	if ((IPL_LOGMAX < unit) || (unit < 0)) {
		ipfmain.ipf_interror = 130002;
		return ENXIO;
	}

	/*
	 * While the filter is disabled, only a small set of enable/query
	 * ioctls on the main device is permitted.
	 */
	if (ipfmain.ipf_running <= 0) {
		if (unit != IPL_LOGIPF && cmd != SIOCIPFINTERROR) {
			ipfmain.ipf_interror = 130003;
			return EIO;
		}
		if (cmd != SIOCIPFGETNEXT && cmd != SIOCIPFGET &&
		    cmd != SIOCIPFSET && cmd != SIOCFRENB &&
		    cmd != SIOCGETFS && cmd != SIOCGETFF &&
		    cmd != SIOCIPFINTERROR) {
			ipfmain.ipf_interror = 130004;
			return EIO;
		}
	}

	SPL_NET(s);

	error = ipf_ioctlswitch(&ipfmain, unit, data, cmd, mode, UID(p), p);
	if (error != -1) {
		SPL_X(s);
		return error;
	}

	SPL_X(s);
	return error;
}


/*
 * ipf_send_reset - this could conceivably be a call to tcp_respond(), but that
 * requires a large amount of setting up and isn't any more efficient.
 *
 * Build and transmit a TCP RST in reply to the packet described by fin.
 * Returns 0 on success, -1 on failure (or if the input was itself a RST,
 * to avoid a feedback loop).
 */
int
ipf_send_reset(fr_info_t *fin)
{
	struct tcphdr *tcp, *tcp2;
	int tlen = 0, hlen;
	struct mbuf *m;
#ifdef USE_INET6
	ip6_t *ip6;
#endif
	ip_t *ip;

	tcp = fin->fin_dp;
	if (tcp->th_flags & TH_RST)
		return -1;		/* feedback loop */

	if (ipf_checkl4sum(fin) == -1)
		return -1;

	/* Sequence space consumed by the offending segment (SYN/FIN count). */
	tlen = fin->fin_dlen - (TCP_OFF(tcp) << 2) +
	    ((tcp->th_flags & TH_SYN) ? 1 : 0) +
	    ((tcp->th_flags & TH_FIN) ? 1 : 0);

#ifdef USE_INET6
	hlen = (fin->fin_v == 6) ? sizeof(ip6_t) : sizeof(ip_t);
#else
	hlen = sizeof(ip_t);
#endif
#ifdef MGETHDR
	MGETHDR(m, M_DONTWAIT, MT_HEADER);
#else
	MGET(m, M_DONTWAIT, MT_HEADER);
#endif
	if (m == NULL)
		return -1;
	if (sizeof(*tcp2) + hlen > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		/* NOTE(review): MCLGET does not NULL m on failure; this
		 * check looks dead and the M_EXT test below is the real
		 * failure path — confirm against mbuf(9). */
		if (m == NULL)
			return -1;
		if ((m->m_flags & M_EXT) == 0) {
			FREE_MB_T(m);
			return -1;
		}
	}

	m->m_len = sizeof(*tcp2) + hlen;
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len;
	m_reset_rcvif(m);
	ip = mtod(m, struct ip *);
	bzero((char *)ip, hlen);
#ifdef USE_INET6
	ip6 = (ip6_t *)ip;
#endif
	bzero((char *)ip, sizeof(*tcp2) + hlen);
	tcp2 = (struct tcphdr *)((char *)ip + hlen);
	/* Reply goes back the way it came: swap ports. */
	tcp2->th_sport = tcp->th_dport;
	tcp2->th_dport = tcp->th_sport;

	if (tcp->th_flags & TH_ACK) {
		tcp2->th_seq = tcp->th_ack;
		tcp2->th_flags = TH_RST;
		tcp2->th_ack = 0;
	} else {
		tcp2->th_seq = 0;
		tcp2->th_ack = ntohl(tcp->th_seq);
		tcp2->th_ack += tlen;
		tcp2->th_ack = htonl(tcp2->th_ack);
		tcp2->th_flags = TH_RST|TH_ACK;
	}
	tcp2->th_x2 = 0;
	TCP_OFF_A(tcp2, sizeof(*tcp2) >> 2);
	tcp2->th_win = tcp->th_win;
	tcp2->th_sum = 0;
	tcp2->th_urp = 0;

#ifdef USE_INET6
	if (fin->fin_v == 6) {
		ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_hlim = 0;
		ip6->ip6_src = fin->fin_dst6.in6;
		ip6->ip6_dst = fin->fin_src6.in6;
		tcp2->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(*ip6), sizeof(*tcp2));
		return ipf_send_ip(fin, m);
	}
#endif
#ifdef INET
	ip->ip_p = IPPROTO_TCP;
	ip->ip_len = htons(sizeof(struct tcphdr));
	ip->ip_src.s_addr = fin->fin_daddr;
	ip->ip_dst.s_addr = fin->fin_saddr;
	tcp2->th_sum = in_cksum(m, hlen + sizeof(*tcp2));
	/* ipf_send_ip() expects ip_len in host byte order. */
	ip->ip_len = hlen + sizeof(*tcp2);
	return ipf_send_ip(fin, m);
#else
	return 0;
#endif
}


/*
 * Finish off a locally generated reply packet (RST or ICMP error) and
 * hand it to ipf_fastroute() for transmission.
 * Expects ip_len to be in host byte order when called.
 */
static int
ipf_send_ip(fr_info_t *fin, mb_t *m)
{
	fr_info_t fnew;
#ifdef INET
	ip_t *oip;
#endif
	ip_t *ip;
	int hlen;

	ip = mtod(m, ip_t *);
	bzero((char *)&fnew, sizeof(fnew));
	fnew.fin_main_soft = fin->fin_main_soft;

	IP_V_A(ip, fin->fin_v);
	switch (fin->fin_v)
	{
#ifdef INET
	case 4 :
		oip = fin->fin_ip;
		hlen = sizeof(*oip);
		fnew.fin_v = 4;
		fnew.fin_p = ip->ip_p;
		fnew.fin_plen = ntohs(ip->ip_len);
		HTONS(ip->ip_len);
		IP_HL_A(ip, sizeof(*oip) >> 2);
		/* TOS copied from the offending packet per convention. */
		ip->ip_tos = oip->ip_tos;
		ip->ip_id = ipf_nextipid(fin);
		ip->ip_off = htons(ip_mtudisc ? IP_DF : 0);
		ip->ip_ttl = ip_defttl;
		ip->ip_sum = 0;
		break;
#endif
#ifdef USE_INET6
	case 6 :
	{
		ip6_t *ip6 = (ip6_t *)ip;

		ip6->ip6_vfc = 0x60;
		ip6->ip6_hlim = IPDEFTTL;

		hlen = sizeof(*ip6);
		fnew.fin_p = ip6->ip6_nxt;
		fnew.fin_v = 6;
		fnew.fin_plen = ntohs(ip6->ip6_plen) + hlen;
		break;
	}
#endif
	default :
		return EINVAL;
	}
#ifdef KAME_IPSEC
	m_reset_rcvif(m);
#endif

	/* Describe the new packet in a fresh fr_info for fastroute. */
	fnew.fin_ifp = fin->fin_ifp;
	fnew.fin_flx = FI_NOCKSUM;
	fnew.fin_m = m;
	fnew.fin_ip = ip;
	fnew.fin_mp = &m;
	fnew.fin_hlen = hlen;
	fnew.fin_dp = (char *)ip + hlen;
	(void) ipf_makefrip(hlen, ip, &fnew);

	return ipf_fastroute(m, &m, &fnew, NULL);
}


/*
 * Build and transmit an ICMP (or ICMPv6) error of the given type in reply
 * to the packet described by fin.  If dst is non-zero the reply's source
 * address is taken from the packet's destination rather than looked up on
 * the interface.  Returns 0 on success, -1 on failure.
 */
int
ipf_send_icmp_err(int type, fr_info_t *fin, int dst)
{
	int err, hlen, xtra, iclen, ohlen, avail;
	struct in_addr dst4;
	struct icmp *icmp;
	struct mbuf *m;
	i6addr_t dst6;
	void *ifp;
#ifdef USE_INET6
	int code;
	ip6_t *ip6;
#endif
	ip_t *ip, *ip2;

	if ((type < 0) || (type > ICMP_MAXTYPE))
		return -1;

#ifdef USE_INET6
	code = fin->fin_icode;
	if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int)))
		return -1;
#endif

	if (ipf_checkl4sum(fin) == -1)
		return -1;
#ifdef MGETHDR
	MGETHDR(m, M_DONTWAIT, MT_HEADER);
#else
	MGET(m, M_DONTWAIT, MT_HEADER);
#endif
	if (m == NULL)
		return -1;
	avail = MHLEN;

	xtra = 0;
	hlen = 0;
	ohlen = 0;
	dst4.s_addr = 0;
	ifp = fin->fin_ifp;
	if (fin->fin_v == 4) {
		/*
		 * Only ever answer ICMP queries with an ICMP error;
		 * never generate an error in response to an error.
		 */
		if ((fin->fin_p == IPPROTO_ICMP) && !(fin->fin_flx & FI_SHORT))
			switch (ntohs(fin->fin_data[0]) >> 8)
			{
			case ICMP_ECHO :
			case ICMP_TSTAMP :
			case ICMP_IREQ :
			case ICMP_MASKREQ :
				break;
			default :
				FREE_MB_T(m);
				return 0;
			}

		if (dst == 0) {
			if (ipf_ifpaddr(&ipfmain, 4, FRI_NORMAL, ifp,
			    &dst6, NULL) == -1) {
				FREE_MB_T(m);
				return -1;
			}
			dst4 = dst6.in4;
		} else
			dst4.s_addr = fin->fin_daddr;

		hlen = sizeof(ip_t);
		ohlen = fin->fin_hlen;
		iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
		/* Quote up to 8 bytes of the offending payload. */
		if (fin->fin_hlen < fin->fin_plen)
			xtra = MIN(fin->fin_dlen, 8);
		else
			xtra = 0;
	}

#ifdef USE_INET6
	else if (fin->fin_v == 6) {
		hlen = sizeof(ip6_t);
		ohlen = sizeof(ip6_t);
		iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
		/* Map the v4 type/code to their ICMPv6 counterparts. */
		type = icmptoicmp6types[type];
		if (type == ICMP6_DST_UNREACH)
			code = icmptoicmp6unreach[code];

		if (iclen + max_linkhdr + fin->fin_plen > avail) {
			MCLGET(m, M_DONTWAIT);
			if (m == NULL)
				return -1;
			if ((m->m_flags & M_EXT) == 0) {
				FREE_MB_T(m);
				return -1;
			}
			avail = MCLBYTES;
		}
		/* ICMPv6 errors quote as much as fits in the minimum MTU. */
		xtra = MIN(fin->fin_plen, avail - iclen - max_linkhdr);
		xtra = MIN(xtra, IPV6_MMTU - iclen);
		if (dst == 0) {
			if (ipf_ifpaddr(&ipfmain, 6, FRI_NORMAL, ifp,
			    &dst6, NULL) == -1) {
				FREE_MB_T(m);
				return -1;
			}
		} else
			dst6 = fin->fin_dst6;
	}
#endif
	else {
		FREE_MB_T(m);
		return -1;
	}

	avail -= (max_linkhdr + iclen);
	if (avail < 0) {
		FREE_MB_T(m);
		return -1;
	}
	if (xtra > avail)
		xtra = avail;
	iclen += xtra;
	m->m_data += max_linkhdr;
	m_reset_rcvif(m);
	m->m_pkthdr.len = iclen;
	m->m_len = iclen;
	ip = mtod(m, ip_t *);
	icmp = (struct icmp *)((char *)ip + hlen);
	ip2 = (ip_t *)&icmp->icmp_ip;

	icmp->icmp_type = type;
	icmp->icmp_code = fin->fin_icode;
	icmp->icmp_cksum = 0;
#ifdef icmp_nextmtu
	if (type == ICMP_UNREACH && fin->fin_icode == ICMP_UNREACH_NEEDFRAG) {
		if (fin->fin_mtu != 0) {
			icmp->icmp_nextmtu = htons(fin->fin_mtu);

		} else if (ifp != NULL) {
			icmp->icmp_nextmtu = htons(GETIFMTU_4(ifp));

		} else {	/* make up a number...
 */
			icmp->icmp_nextmtu = htons(fin->fin_plen - 20);
		}
	}
#endif

	bcopy((char *)fin->fin_ip, (char *)ip2, ohlen);

#if defined(M_CSUM_IPv4)
	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csuminfo = 0;
#endif /* __NetBSD__ && M_CSUM_IPv4 */

#ifdef USE_INET6
	ip6 = (ip6_t *)ip;
	if (fin->fin_v == 6) {
		ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
		ip6->ip6_plen = htons(iclen - hlen);
		ip6->ip6_nxt = IPPROTO_ICMPV6;
		ip6->ip6_hlim = 0;
		ip6->ip6_src = dst6.in6;
		ip6->ip6_dst = fin->fin_src6.in6;
		if (xtra > 0)
			bcopy((char *)fin->fin_ip + ohlen,
			    (char *)&icmp->icmp_ip + ohlen, xtra);
		icmp->icmp_cksum = in6_cksum(m, IPPROTO_ICMPV6,
		    sizeof(*ip6), iclen - hlen);
	} else
#endif
	{
		ip->ip_p = IPPROTO_ICMP;
		ip->ip_src.s_addr = dst4.s_addr;
		ip->ip_dst.s_addr = fin->fin_saddr;

		if (xtra > 0)
			bcopy((char *)fin->fin_ip + ohlen,
			    (char *)&icmp->icmp_ip + ohlen, xtra);
		icmp->icmp_cksum = ipf_cksum((u_short *)icmp,
		    sizeof(*icmp) + 8);
		/* ipf_send_ip() expects host byte order ip_len. */
		ip->ip_len = iclen;
		ip->ip_p = IPPROTO_ICMP;
	}
	err = ipf_send_ip(fin, m);
	return err;
}


/*
 * Send a packet out an interface chosen by ipfilter (fastroute / to / dup-to),
 * bypassing the normal IP output path, fragmenting if required.
 *
 * m0 - pointer to mbuf where the IP packet starts
 * mpp - pointer to the mbuf pointer that is the start of the mbuf chain
 */
int
ipf_fastroute(mb_t *m0, mb_t **mpp, fr_info_t *fin, frdest_t *fdp)
{
	register struct ip *ip, *mhip;
	register struct mbuf *m = *mpp;
	register struct route *ro;
	int len, off, error = 0, hlen, code;
	struct ifnet *ifp, *sifp;
	ipf_main_softc_t *softc;
#if __NetBSD_Version__ >= 499001100
	union {
		struct sockaddr		dst;
		struct sockaddr_in	dst4;
	} u;
#else
	struct sockaddr_in *dst4;
#endif
	struct sockaddr *dst;
	u_short ip_off, ip_len;
	struct route iproute;
	struct rtentry *rt;
	frdest_t node;
	frentry_t *fr;

	/* IPv6 is handled by its own routine. */
	if (fin->fin_v == 6) {
#ifdef USE_INET6
		error = ipf_fastroute6(m0, mpp, fin, fdp);
#else
		error = EPROTONOSUPPORT;
#endif
		if ((error != 0) && (*mpp != NULL))
			FREE_MB_T(*mpp);
		return error;
	}
#ifndef INET
	FREE_MB_T(*mpp);
	return EPROTONOSUPPORT;
#else

	hlen = fin->fin_hlen;
	ip = mtod(m0, struct ip *);
	softc = fin->fin_main_soft;
	rt = NULL;
	ifp = NULL;

# if defined(M_CSUM_IPv4)
	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m0->m_pkthdr.csuminfo = 0;
# endif /* __NetBSD__ && M_CSUM_IPv4 */

	/*
	 * Route packet.
	 */
	ro = &iproute;
	memset(ro, 0, sizeof(*ro));
	fr = fin->fin_fr;

	/* Round-robin destination selection for dstlist rules. */
	if ((fr != NULL) && !(fr->fr_flags & FR_KEEPSTATE) && (fdp != NULL) &&
	    (fdp->fd_type == FRD_DSTLIST)) {
		if (ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &node) == 0)
			fdp = &node;
	}
	if (fdp != NULL)
		ifp = fdp->fd_ptr;
	else
		ifp = fin->fin_ifp;

	if ((ifp == NULL) && ((fr == NULL) || !(fr->fr_flags & FR_FASTROUTE))) {
		error = -2;	/* special: tells caller no route was forced */
		goto bad;
	}

# if __NetBSD_Version__ >= 499001100
	if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
		sockaddr_in_init(&u.dst4, &fdp->fd_ip, 0);
	else
		sockaddr_in_init(&u.dst4, &ip->ip_dst, 0);
	dst = &u.dst;
	rtcache_setdst(ro, dst);
	rt = rtcache_init(ro);
# else
	dst4 = (struct sockaddr_in *)&ro->ro_dst;
	dst = (struct sockaddr *)dst4;
	dst4->sin_family = AF_INET;
	dst4->sin_addr = ip->ip_dst;

	if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
		dst4->sin_addr = fdp->fd_ip;

	dst4->sin_len = sizeof(*dst);
	rtalloc(ro);
	rt = ro->ro_rt;
# endif
	if ((ifp == NULL) && (rt != NULL))
		ifp = rt->rt_ifp;
	if ((rt == NULL) || (ifp == NULL)) {
#ifdef INET
		if (in_localaddr(ip->ip_dst))
			error = EHOSTUNREACH;
		else
#endif
			error = ENETUNREACH;
		goto bad;
	}


	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;

	rt->rt_use++;

	/*
	 * For input packets which are being "fastrouted", they won't
	 * go back through output filtering and miss their chance to get
	 * NAT'd and counted.  Duplicated packets aren't considered to be
	 * part of the normal packet stream, so do not NAT them or pass
	 * them through stateful checking, etc.
	 */
	/* NOTE(review): &fr->fr_dif is evaluated even when fr may be NULL
	 * (only the pointer is formed, never dereferenced) — long-standing
	 * idiom here, but technically undefined; verify upstream. */
	if ((fdp != &fr->fr_dif) && (fin->fin_out == 0)) {
		sifp = fin->fin_ifp;
		fin->fin_ifp = ifp;
		fin->fin_out = 1;
		(void) ipf_acctpkt(fin, NULL);
		fin->fin_fr = NULL;
		if (!fr || !(fr->fr_flags & FR_RETMASK)) {
			u_32_t pass;

			(void) ipf_state_check(fin, &pass);
		}

		switch (ipf_nat_checkout(fin, NULL))
		{
		case 0 :
			break;
		case 1 :
			ip->ip_sum = 0;
			break;
		case -1 :
			error = -1;
			goto bad;
			break;
		}

		fin->fin_ifp = sifp;
		fin->fin_out = 0;
	} else
		ip->ip_sum = 0;
	/*
	 * If small enough for interface, can just send directly.
	 */
	m_set_rcvif(m, ifp);

	ip_len = ntohs(ip->ip_len);
	if (ip_len <= ifp->if_mtu) {
# if defined(M_CSUM_IPv4)
#  if (__NetBSD_Version__ >= 105009999)
		if (ifp->if_csum_flags_tx & M_CSUM_IPv4)
			m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
#  else
		if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
			m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
#  endif /* (__NetBSD_Version__ >= 105009999) */
		else if (ip->ip_sum == 0)
			ip->ip_sum = in_cksum(m, hlen);
# else
		if (!ip->ip_sum)
			ip->ip_sum = in_cksum(m, hlen);
# endif /* M_CSUM_IPv4 */

		error = if_output_lock(ifp, ifp, m, dst, rt);
		goto done;
	}

	/*
	 * Too large for interface; fragment if possible.
	 * Must be able to put at least 8 bytes per fragment.
	 */
	ip_off = ntohs(ip->ip_off);
	if (ip_off & IP_DF) {
		error = EMSGSIZE;
		goto bad;
	}
	len = (ifp->if_mtu - hlen) &~ 7;
	if (len < 8) {
		error = EMSGSIZE;
		goto bad;
	}

    {
	int mhlen, firstlen = len;
	struct mbuf **mnext = &m->m_act;

	/*
	 * Loop through length of segment after first fragment,
	 * make new header and copy data of each part and link onto chain.
	 */
	m0 = m;
	mhlen = sizeof (struct ip);
	for (off = hlen + len; off < ip_len; off += len) {
# ifdef MGETHDR
		MGETHDR(m, M_DONTWAIT, MT_HEADER);
# else
		MGET(m, M_DONTWAIT, MT_HEADER);
# endif
		if (m == 0) {
			m = m0;
			error = ENOBUFS;
			goto bad;
		}
		m->m_data += max_linkhdr;
		mhip = mtod(m, struct ip *);
		bcopy((char *)ip, (char *)mhip, sizeof(*ip));
#ifdef INET
		/* Copy only those IP options meant for every fragment. */
		if (hlen > sizeof (struct ip)) {
			mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
			IP_HL_A(mhip, mhlen >> 2);
		}
#endif
		m->m_len = mhlen;
		mhip->ip_off = ((off - hlen) >> 3) + ip_off;
		if (off + len >= ip_len)
			len = ip_len - off;
		else
			mhip->ip_off |= IP_MF;
		mhip->ip_len = htons((u_short)(len + mhlen));
		m->m_next = m_copy(m0, off, len);
		if (m->m_next == 0) {
			error = ENOBUFS;	/* ??? */
			goto sendorfree;
		}
		m->m_pkthdr.len = mhlen + len;
		m_reset_rcvif(m);
		mhip->ip_off = htons((u_short)mhip->ip_off);
		mhip->ip_sum = 0;
#ifdef INET
		mhip->ip_sum = in_cksum(m, mhlen);
#endif
		*mnext = m;
		mnext = &m->m_act;
	}
	/*
	 * Update first fragment by trimming what's been copied out
	 * and updating header, then send each fragment (in order).
	 */
	m_adj(m0, hlen + firstlen - ip_len);
	ip->ip_len = htons((u_short)(hlen + firstlen));
	ip->ip_off = htons((u_short)IP_MF);
	ip->ip_sum = 0;
#ifdef INET
	ip->ip_sum = in_cksum(m0, hlen);
#endif
sendorfree:
	for (m = m0; m; m = m0) {
		m0 = m->m_act;
		m->m_act = 0;
		if (error == 0) {
			KERNEL_LOCK(1, NULL);
			error = (*ifp->if_output)(ifp, m, dst, rt);
			KERNEL_UNLOCK_ONE(NULL);
		} else {
			FREE_MB_T(m);
		}
	}
    }
done:
	if (!error)
		softc->ipf_frouteok[0]++;
	else
		softc->ipf_frouteok[1]++;

# if __NetBSD_Version__ >= 499001100
	rtcache_unref(rt, ro);
	rtcache_free(ro);
# else
	if (rt) {
		RTFREE(rt);
	}
# endif
	return error;
bad:
	/* Couldn't fragment: tell the sender the needed MTU. */
	if (error == EMSGSIZE) {
		sifp = fin->fin_ifp;
		code = fin->fin_icode;
		fin->fin_icode = ICMP_UNREACH_NEEDFRAG;
		fin->fin_ifp = ifp;
		(void) ipf_send_icmp_err(ICMP_UNREACH, fin, 1);
		fin->fin_ifp = sifp;
		fin->fin_icode = code;
	}
	FREE_MB_T(m);
	goto done;
#endif /* INET */
}


#if defined(USE_INET6)
/*
 * This is the IPv6 specific fastroute code.  It doesn't clean up the mbuf's
 * or ensure that it is an IPv6 packet that is being forwarded, those are
 * expected to be done by the caller (ipf_fastroute).
 */
/* ------------------------------------------------------------------------ */
/* Function:    ipf_fastroute6                                              */
/* Returns:     int    - 0 == success, else errno (EHOSTUNREACH/EMSGSIZE)   */
/* Parameters:  m0(I)  - mbuf chain holding the packet to route             */
/*              mpp(I) - pointer to caller's mbuf pointer (unused here;     */
/*                       kept for interface parity with ipf_fastroute)      */
/*              fin(I) - pointer to packet information                      */
/*              fdp(I) - "fastroute" destination (rule-supplied), or NULL   */
/* ------------------------------------------------------------------------ */
static int
ipf_fastroute6(struct mbuf *m0, struct mbuf **mpp, fr_info_t *fin,
    frdest_t *fdp)
{
# if __NetBSD_Version__ >= 499001100
	struct route ip6route;
	const struct sockaddr *dst;
	union {
		struct sockaddr dst;
		struct sockaddr_in6 dst6;
	} u;
	struct route *ro;
# else
	struct route_in6 ip6route;
	struct sockaddr_in6 *dst6;
	struct route_in6 *ro;
# endif
	struct rtentry *rt;
	struct ifnet *ifp;
	u_long mtu;
	int error;

	error = 0;
	ro = &ip6route;

	/* Rule-specified interface overrides the packet's inbound one. */
	if (fdp != NULL)
		ifp = fdp->fd_ptr;
	else
		ifp = fin->fin_ifp;
	memset(ro, 0, sizeof(*ro));
# if __NetBSD_Version__ >= 499001100
	/* Route to the rule's next-hop if one was given, else to fi_dst. */
	if (fdp != NULL && IP6_NOTZERO(&fdp->fd_ip6))
		sockaddr_in6_init(&u.dst6, &fdp->fd_ip6.in6, 0, 0, 0);
	else
		sockaddr_in6_init(&u.dst6, &fin->fin_fi.fi_dst.in6, 0, 0, 0);
	dst = &u.dst;
	rtcache_setdst(ro, dst);

	rt = rtcache_init(ro);
	if ((ifp == NULL) && (rt != NULL))
		ifp = rt->rt_ifp;
# else
	dst6 = (struct sockaddr_in6 *)&ro->ro_dst;
	dst6->sin6_family = AF_INET6;
	dst6->sin6_len = sizeof(struct sockaddr_in6);
	dst6->sin6_addr = fin->fin_fi.fi_dst.in6;

	if (fdp != NULL) {
		if (IP6_NOTZERO(&fdp->fd_ip6))
			dst6->sin6_addr = fdp->fd_ip6.in6;
	}

	rtalloc((struct route *)ro);

	if ((ifp == NULL) && (ro->ro_rt != NULL))
		ifp = ro->ro_rt->rt_ifp;
	rt = ro->ro_rt;
# endif
	if ((rt == NULL) || (ifp == NULL)) {

		error = EHOSTUNREACH;
		goto bad;
	}

	/* KAME: embed the scope (interface index) in link-local addresses. */
# if __NetBSD_Version__ >= 499001100
	if (IN6_IS_ADDR_LINKLOCAL(&u.dst6.sin6_addr))
		u.dst6.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
# else
	if (IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr))
		dst6->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
# endif

	{
# if (__NetBSD_Version__ >= 106010000) && !defined(IN6_LINKMTU)
		struct in6_ifextra *ife;
# endif
		/* Send via the gateway when the route is indirect. */
		if (rt->rt_flags & RTF_GATEWAY)
# if __NetBSD_Version__ >= 499001100
			dst = rt->rt_gateway;
# else
			dst6 = (struct sockaddr_in6 *)rt->rt_gateway;
# endif
		rt->rt_use++;

		/* Determine path MTU. */
# if (__NetBSD_Version__ <= 106009999)
		mtu = nd_ifinfo[ifp->if_index].linkmtu;
# else
#  ifdef IN6_LINKMTU
		mtu = IN6_LINKMTU(ifp);
#  else
		ife = (struct in6_ifextra *)(ifp)->if_afdata[AF_INET6];
		mtu = ife->nd_ifinfo[ifp->if_index].linkmtu;
#  endif
# endif
		/*
		 * No IPv6 fragmentation is done here; anything larger than
		 * the link MTU is rejected.  (error is always 0 at this
		 * point; the check mirrors the v4 path's structure.)
		 */
		if ((error == 0) && (m0->m_pkthdr.len <= mtu)) {
# if __NetBSD_Version__ >= 499001100
			error = nd6_output(ifp, ifp, m0, satocsin6(dst), rt);
# else
			error = nd6_output(ifp, ifp, m0, dst6, rt);
# endif
		} else {
			error = EMSGSIZE;
		}
	}
bad:
# if __NetBSD_Version__ >= 499001100
	rtcache_unref(rt, ro);
	rtcache_free(ro);
# else
	if (ro->ro_rt != NULL) {
		RTFREE(((struct route *)ro)->ro_rt);
	}
# endif
	return error;
}
#endif /* INET6 */


/* ------------------------------------------------------------------------ */
/* Function:    ipf_verifysrc                                               */
/* Returns:     int - 1 == source routes back out the receiving interface,  */
/*                    0 == it does not (or no route found)                  */
/* Parameters:  fin(I) - pointer to packet information                      */
/*                                                                          */
/* Reverse-path check: look up a route to the packet's source address and   */
/* compare the route's interface with the one the packet arrived on.        */
/* ------------------------------------------------------------------------ */
int
ipf_verifysrc(fr_info_t *fin)
{
#if __NetBSD_Version__ >= 499001100
	union {
		struct sockaddr dst;
		struct sockaddr_in dst4;
	} u;
	struct rtentry *rt;
#else
	struct sockaddr_in *dst;
#endif
	struct route iproute;
	int rc;

	/*
	 * NOTE(review): iproute is not zeroed before use in either branch;
	 * rtcache_setdst()/rtalloc() are handed a stack struct with
	 * indeterminate contents -- confirm those APIs tolerate this.
	 */
#if __NetBSD_Version__ >= 499001100
	sockaddr_in_init(&u.dst4, &fin->fin_src, 0);
	rtcache_setdst(&iproute, &u.dst);
	rt = rtcache_init(&iproute);
	if (rt == NULL)
		rc = 0;
	else
		rc = (fin->fin_ifp == rt->rt_ifp);
	rtcache_unref(rt, &iproute);
	rtcache_free(&iproute);
#else
	dst = (struct sockaddr_in *)&iproute.ro_dst;
	dst->sin_len = sizeof(*dst);
	dst->sin_family = AF_INET;
	dst->sin_addr = fin->fin_src;
	rtalloc(&iproute);
	if (iproute.ro_rt == NULL)
		return 0;
	rc = (fin->fin_ifp == iproute.ro_rt->rt_ifp);
	RTFREE(iproute.ro_rt);
#endif
	return rc;
}


/*
 * return the first IP Address associated with an interface
 *
 * v selects the address family (4 or 6); atype picks the interface,
 * broadcast or peer address (FRI_BROADCAST/FRI_PEERADDR); the result and
 * its netmask are written to *inp/*inpmask.  Returns 0 on success, -1 if
 * the interface pointer is invalid or no suitable address exists.
 * For IPv6, link-local and loopback addresses are skipped.
 */
int
ipf_ifpaddr(ipf_main_softc_t *softc, int v, int atype, void *ifptr,
    i6addr_t *inp, i6addr_t *inpmask)
{
#ifdef USE_INET6
	struct in6_addr *inp6 = NULL;
#endif
	struct sockaddr *sock, *mask;
	struct sockaddr_in *sin;
	struct ifaddr *ifa;
	struct ifnet *ifp;

	if ((ifptr == NULL) || (ifptr == (void *)-1))
		return -1;

	ifp = ifptr;
	mask = NULL;

	if (v == 4)
		inp->in4.s_addr = 0;
#ifdef USE_INET6
	else if (v == 6)
		bzero((char *)inp, sizeof(*inp));
#endif

	/* Walk the interface's address list for a usable address. */
	ifa = IFADDR_READER_FIRST(ifp);
	sock = ifa ? ifa->ifa_addr : NULL;
	while (sock != NULL && ifa != NULL) {
		sin = (struct sockaddr_in *)sock;
		if ((v == 4) && (sin->sin_family == AF_INET))
			break;
#ifdef USE_INET6
		if ((v == 6) && (sin->sin_family == AF_INET6)) {
			inp6 = &((struct sockaddr_in6 *)sin)->sin6_addr;
			if (!IN6_IS_ADDR_LINKLOCAL(inp6) &&
			    !IN6_IS_ADDR_LOOPBACK(inp6))
				break;
		}
#endif
		ifa = IFADDR_READER_NEXT(ifa);
		if (ifa != NULL)
			sock = ifa->ifa_addr;
	}
	if (ifa == NULL || sock == NULL)
		return -1;

	mask = ifa->ifa_netmask;
	if (atype == FRI_BROADCAST)
		sock = ifa->ifa_broadaddr;
	else if (atype == FRI_PEERADDR)
		sock = ifa->ifa_dstaddr;

#ifdef USE_INET6
	if (v == 6)
		return ipf_ifpfillv6addr(atype, (struct sockaddr_in6 *)sock,
		    (struct sockaddr_in6 *)mask,
		    inp, inpmask);
#endif
	return ipf_ifpfillv4addr(atype, (struct sockaddr_in *)sock,
	    (struct sockaddr_in *)mask,
	    &inp->in4, &inpmask->in4);
}


/*
 * Generate a new TCP initial sequence number for the flow described by fin.
 * On NetBSD >= 1.5T this defers to the kernel's tcp_new_iss1(); otherwise a
 * local MD5(src, dst, ports, secret) + stepped offset scheme is used.
 * NOTE(review): the !INET fallback returns ENOSYS through a u_32_t return
 * type, so callers see it as a (poor) sequence number, not an error.
 */
u_32_t
ipf_newisn(fr_info_t *fin)
{
#if __NetBSD_Version__ >= 105190000	/* 1.5T */
	size_t asz;

	if (fin->fin_v == 4)
		asz = sizeof(struct in_addr);
	else if (fin->fin_v == 6)
		asz = sizeof(fin->fin_src);
	else	/* XXX: no way to return error */
		return 0;
#ifdef INET
	return tcp_new_iss1((void *)&fin->fin_src, (void *)&fin->fin_dst,
	    fin->fin_sport, fin->fin_dport, asz, 0);
#else
	return ENOSYS;
#endif
#else
	static int iss_seq_off = 0;
	u_char hash[16];
	u_32_t newiss;
	MD5_CTX ctx;

	/*
	 * Compute the base value of the ISS.  It is a hash
	 * of (saddr, sport, daddr, dport, secret).
	 */
	MD5Init(&ctx);

	MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_src,
	    sizeof(fin->fin_fi.fi_src));
	MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_dst,
	    sizeof(fin->fin_fi.fi_dst));
	MD5Update(&ctx, (u_char *) &fin->fin_dat, sizeof(fin->fin_dat));

	MD5Update(&ctx, ipf_iss_secret, sizeof(ipf_iss_secret));

	MD5Final(hash, &ctx);

	memcpy(&newiss, hash, sizeof(newiss));

	/*
	 * Now increment our "timer", and add it in to
	 * the computed value.
	 *
	 * XXX Use `addin'?
	 * XXX TCP_ISSINCR too large to use?
	 */
	iss_seq_off += 0x00010000;
	newiss += iss_seq_off;
	return newiss;
#endif
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nextipid                                                */
/* Returns:     int - 0 == success, -1 == error (packet should be droppped) */
/* Parameters:  fin(I) - pointer to packet information                      */
/*                                                                          */
/* Returns the next IPv4 ID to use for this packet.
*/ 1652 /* ------------------------------------------------------------------------ */ 1653 u_short 1654 ipf_nextipid(fr_info_t *fin) 1655 { 1656 #ifdef USE_MUTEXES 1657 ipf_main_softc_t *softc = fin->fin_main_soft; 1658 #endif 1659 u_short id; 1660 1661 MUTEX_ENTER(&softc->ipf_rw); 1662 id = ipid++; 1663 MUTEX_EXIT(&softc->ipf_rw); 1664 1665 return id; 1666 } 1667 1668 1669 EXTERN_INLINE int 1670 ipf_checkv4sum(fr_info_t *fin) 1671 { 1672 #ifdef M_CSUM_TCP_UDP_BAD 1673 int manual, pflag, cflags, active; 1674 mb_t *m; 1675 1676 if ((fin->fin_flx & FI_NOCKSUM) != 0) 1677 return 0; 1678 1679 if ((fin->fin_flx & FI_SHORT) != 0) 1680 return 1; 1681 1682 if (fin->fin_cksum != FI_CK_NEEDED) 1683 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1; 1684 1685 manual = 0; 1686 m = fin->fin_m; 1687 if (m == NULL) { 1688 manual = 1; 1689 goto skipauto; 1690 } 1691 1692 switch (fin->fin_p) 1693 { 1694 case IPPROTO_UDP : 1695 pflag = M_CSUM_UDPv4; 1696 break; 1697 case IPPROTO_TCP : 1698 pflag = M_CSUM_TCPv4; 1699 break; 1700 default : 1701 pflag = 0; 1702 manual = 1; 1703 break; 1704 } 1705 1706 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag; 1707 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA; 1708 cflags = m->m_pkthdr.csum_flags & active; 1709 1710 if (pflag != 0) { 1711 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) { 1712 fin->fin_flx |= FI_BAD; 1713 fin->fin_cksum = FI_CK_BAD; 1714 } else if (cflags == (pflag | M_CSUM_DATA)) { 1715 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0) { 1716 fin->fin_flx |= FI_BAD; 1717 fin->fin_cksum = FI_CK_BAD; 1718 } else { 1719 fin->fin_cksum = FI_CK_SUMOK; 1720 } 1721 } else if (cflags == pflag) { 1722 fin->fin_cksum = FI_CK_SUMOK; 1723 } else { 1724 manual = 1; 1725 } 1726 } 1727 skipauto: 1728 if (manual != 0) { 1729 if (ipf_checkl4sum(fin) == -1) { 1730 fin->fin_flx |= FI_BAD; 1731 return -1; 1732 } 1733 } 1734 #else 1735 if (ipf_checkl4sum(fin) == -1) { 1736 fin->fin_flx |= FI_BAD; 1737 return -1; 1738 } 1739 #endif 1740 return 0; 
1741 } 1742 1743 1744 #ifdef USE_INET6 1745 EXTERN_INLINE int 1746 ipf_checkv6sum(fr_info_t *fin) 1747 { 1748 # ifdef M_CSUM_TCP_UDP_BAD 1749 int manual, pflag, cflags, active; 1750 mb_t *m; 1751 1752 if ((fin->fin_flx & FI_NOCKSUM) != 0) 1753 return 0; 1754 1755 if ((fin->fin_flx & FI_SHORT) != 0) 1756 return 1; 1757 1758 if (fin->fin_cksum != FI_CK_SUMOK) 1759 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1; 1760 1761 1762 manual = 0; 1763 m = fin->fin_m; 1764 1765 switch (fin->fin_p) 1766 { 1767 case IPPROTO_UDP : 1768 pflag = M_CSUM_UDPv6; 1769 break; 1770 case IPPROTO_TCP : 1771 pflag = M_CSUM_TCPv6; 1772 break; 1773 default : 1774 pflag = 0; 1775 manual = 1; 1776 break; 1777 } 1778 1779 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag; 1780 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA; 1781 cflags = m->m_pkthdr.csum_flags & active; 1782 1783 if (pflag != 0) { 1784 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) { 1785 fin->fin_flx |= FI_BAD; 1786 } else if (cflags == (pflag | M_CSUM_DATA)) { 1787 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0) 1788 fin->fin_flx |= FI_BAD; 1789 } else if (cflags == pflag) { 1790 ; 1791 } else { 1792 manual = 1; 1793 } 1794 } 1795 if (manual != 0) { 1796 if (ipf_checkl4sum(fin) == -1) { 1797 fin->fin_flx |= FI_BAD; 1798 return -1; 1799 } 1800 } 1801 # else 1802 if (ipf_checkl4sum(fin) == -1) { 1803 fin->fin_flx |= FI_BAD; 1804 return -1; 1805 } 1806 # endif 1807 return 0; 1808 } 1809 #endif /* USE_INET6 */ 1810 1811 1812 size_t 1813 mbufchainlen(struct mbuf *m0) 1814 { 1815 size_t len; 1816 1817 if ((m0->m_flags & M_PKTHDR) != 0) { 1818 len = m0->m_pkthdr.len; 1819 } else { 1820 struct mbuf *m; 1821 1822 for (m = m0, len = 0; m != NULL; m = m->m_next) 1823 len += m->m_len; 1824 } 1825 return len; 1826 } 1827 1828 1829 /* ------------------------------------------------------------------------ */ 1830 /* Function: ipf_pullup */ 1831 /* Returns: NULL == pullup failed, else pointer to protocol header */ 1832 /* Parameters: 
xmin(I)- pointer to buffer where data packet starts         */
/*              fin(I) - pointer to packet information                      */
/*              len(I) - number of bytes to pullup                          */
/*                                                                          */
/* Attempt to move at least len bytes (from the start of the buffer) into a */
/* single buffer for ease of access.  Operating system native functions are */
/* used to manage buffers - if necessary.  If the entire packet ends up in  */
/* a single buffer, set the FI_COALESCE flag even though ipf_coalesce() has */
/* not been called.  Both fin_ip and fin_dp are updated before exiting _IF_ */
/* and ONLY if the pullup succeeds.                                         */
/*                                                                          */
/* We assume that 'xmin' is a pointer to a buffer that is part of the chain */
/* of buffers that starts at *fin->fin_mp.                                  */
/* ------------------------------------------------------------------------ */
void *
ipf_pullup(mb_t *xmin, fr_info_t *fin, int len)
{
	int dpoff, ipoff;
	mb_t *m = xmin;
	char *ip;

	if (m == NULL)
		return NULL;

	ip = (char *)fin->fin_ip;
	/* Already contiguous: nothing to do. */
	if ((fin->fin_flx & FI_COALESCE) != 0)
		return ip;

	/* Remember fin_dp's offset from the IP header so it can be rebuilt. */
	ipoff = fin->fin_ipoff;
	if (fin->fin_dp != NULL)
		dpoff = (char *)fin->fin_dp - (char *)ip;
	else
		dpoff = 0;

	if (M_LEN(m) < len) {
		mb_t *n = *fin->fin_mp;
		/*
		 * Assume that M_PKTHDR is set and just work with what is left
		 * rather than check..
		 * Should not make any real difference, anyway.
		 */
		if (m != n) {
			/*
			 * Record the mbuf that points to the mbuf that we're
			 * about to go to work on so that we can update the
			 * m_next appropriately later.
			 */
			for (; n->m_next != m; n = n->m_next)
				;
		} else {
			n = NULL;
		}

#ifdef MHLEN
		if (len > MHLEN)
#else
		if (len > MLEN)
#endif
		{
#ifdef HAVE_M_PULLDOWN
			if (m_pulldown(m, 0, len, NULL) == NULL)
				m = NULL;
#else
			/* Too big to pull up and no m_pulldown: give up. */
			FREE_MB_T(*fin->fin_mp);
			m = NULL;
			n = NULL;
#endif
		} else
		{
			m = m_pullup(m, len);
		}
		/* Re-link the (possibly replaced) sub-chain into the chain. */
		if (n != NULL)
			n->m_next = m;
		if (m == NULL) {
			/*
			 * When n is non-NULL, it indicates that m pointed to
			 * a sub-chain (tail) of the mbuf and that the head
			 * of this chain has not yet been free'd.
			 */
			if (n != NULL) {
				FREE_MB_T(*fin->fin_mp);
			}

			*fin->fin_mp = NULL;
			fin->fin_m = NULL;
			return NULL;
		}

		if (n == NULL)
			*fin->fin_mp = m;

		/*
		 * Skip any leading zero-length mbufs.
		 * NOTE(review): assumes a non-empty mbuf exists in the chain;
		 * m->m_next being NULL here would dereference NULL -- confirm
		 * the pullup above guarantees M_LEN(m) >= len > 0.
		 */
		while (M_LEN(m) == 0) {
			m = m->m_next;
		}
		fin->fin_m = m;
		ip = MTOD(m, char *) + ipoff;

		/* Recompute cached header pointers against the new buffer. */
		fin->fin_ip = (ip_t *)ip;
		if (fin->fin_dp != NULL)
			fin->fin_dp = (char *)fin->fin_ip + dpoff;
		if (fin->fin_fraghdr != NULL)
			fin->fin_fraghdr = (char *)ip +
			    ((char *)fin->fin_fraghdr -
			     (char *)fin->fin_ip);
	}

	if (len == fin->fin_plen)
		fin->fin_flx |= FI_COALESCE;
	return ip;
}


/*
 * Re-inject a packet into the stack: inbound packets are queued onto the
 * IPv4 input packet queue, outbound packets are handed to ip_output() with
 * IP_FORWARDING.  The mbuf is consumed either way.  Returns 0 or an errno.
 */
int
ipf_inject(fr_info_t *fin, mb_t *m)
{
	int error;

	if (fin->fin_out == 0) {
		if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
			FREE_MB_T(m);
			error = ENOBUFS;
		} else {
			error = 0;
		}
	} else {
		error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
	}
	return error;
}


/*
 * Return a 32-bit random number from the kernel RNG (cprng on newer
 * NetBSD, arc4random otherwise).
 */
u_32_t
ipf_random(void)
{
	int number;

#ifdef _CPRNG_H
	number = cprng_fast32();
#else
	number = arc4random();
#endif
	return number;
}


/*
 * routines below for saving IP headers to buffer
 */
/*
 * Device open: validate the minor number against the known ipf devices
 * and, on newer kernels, mark the module as actively referenced so it
 * cannot be unloaded while open.
 */
static int ipfopen(dev_t dev, int flags
#if (NetBSD >= 199511)
    , int devtype, PROC_T *p
#endif
    )
{
	u_int unit = GET_MINOR(dev);
	int error;

	if (IPL_LOGMAX < unit) {
		error = ENXIO;
	} else {
		switch (unit)
		{
		case IPL_LOGIPF :
		case IPL_LOGNAT :
		case IPL_LOGSTATE :
		case IPL_LOGAUTH :
		case IPL_LOGLOOKUP :
		case IPL_LOGSYNC :
#ifdef IPFILTER_SCAN
		case IPL_LOGSCAN :
#endif
			error = 0;
			break;
		default :
			error = ENXIO;
			break;
		}
	}
#if (__NetBSD_Version__ >= 799003000)
	/* Block module unload while a device node is open. */
	if (error == 0) {
		mutex_enter(&ipf_ref_mutex);
		ipf_active = 1;
		mutex_exit(&ipf_ref_mutex);
	}
#endif
	return error;
}


/*
 * Device close: drop the active-reference flag so module unload becomes
 * possible again.  NOTE(review): ipf_active is a flag, not a counter, so
 * closing one of several concurrently-open units clears it for all.
 */
static int ipfclose(dev_t dev, int flags
#if (NetBSD >= 199511)
    , int devtype, PROC_T *p
#endif
    )
{
	u_int unit = GET_MINOR(dev);

	if (IPL_LOGMAX < unit)
		return ENXIO;
	else {
#if (__NetBSD_Version__ >= 799003000)
		mutex_enter(&ipf_ref_mutex);
		ipf_active = 0;
		mutex_exit(&ipf_ref_mutex);
#endif
		return 0;
	}
}

/*
 * ipfread/ipflog
 * both of these must operate with at least splnet() lest they be
 * called during packet processing and cause an inconsistancy to appear in
 * the filter lists.
 */
static int ipfread(dev_t dev, struct uio *uio, int ioflag)
{

	if (ipfmain.ipf_running < 1) {
		/* 130006: read attempted while the filter is not running. */
		ipfmain.ipf_interror = 130006;
		return EIO;
	}

	if (GET_MINOR(dev) == IPL_LOGSYNC)
		return ipf_sync_read(&ipfmain, uio);

#ifdef IPFILTER_LOG
	return ipf_log_read(&ipfmain, GET_MINOR(dev), uio);
#else
	ipfmain.ipf_interror = 130007;
	return ENXIO;
#endif
}


/*
 * ipfwrite
 * both of these must operate with at least splnet() lest they be
 * called during packet processing and cause an inconsistancy to appear in
 * the filter lists.
2072 */ 2073 static int ipfwrite(dev_t dev, struct uio *uio, int ioflag) 2074 { 2075 2076 if (ipfmain.ipf_running < 1) { 2077 ipfmain.ipf_interror = 130008; 2078 return EIO; 2079 } 2080 2081 if (GET_MINOR(dev) == IPL_LOGSYNC) 2082 return ipf_sync_write(&ipfmain, uio); 2083 ipfmain.ipf_interror = 130009; 2084 return ENXIO; 2085 } 2086 2087 2088 static int ipfpoll(dev_t dev, int events, PROC_T *p) 2089 { 2090 u_int unit = GET_MINOR(dev); 2091 int revents = 0; 2092 2093 if (IPL_LOGMAX < unit) { 2094 ipfmain.ipf_interror = 130010; 2095 return ENXIO; 2096 } 2097 2098 switch (unit) 2099 { 2100 case IPL_LOGIPF : 2101 case IPL_LOGNAT : 2102 case IPL_LOGSTATE : 2103 #ifdef IPFILTER_LOG 2104 if ((events & (POLLIN | POLLRDNORM)) && 2105 ipf_log_canread(&ipfmain, unit)) 2106 revents |= events & (POLLIN | POLLRDNORM); 2107 #endif 2108 break; 2109 case IPL_LOGAUTH : 2110 if ((events & (POLLIN | POLLRDNORM)) && 2111 ipf_auth_waiting(&ipfmain)) 2112 revents |= events & (POLLIN | POLLRDNORM); 2113 break; 2114 case IPL_LOGSYNC : 2115 if ((events & (POLLIN | POLLRDNORM)) && 2116 ipf_sync_canread(&ipfmain)) 2117 revents |= events & (POLLIN | POLLRDNORM); 2118 if ((events & (POLLOUT | POLLWRNORM)) && 2119 ipf_sync_canwrite(&ipfmain)) 2120 revents |= events & (POLLOUT | POLLWRNORM); 2121 break; 2122 case IPL_LOGSCAN : 2123 case IPL_LOGLOOKUP : 2124 default : 2125 break; 2126 } 2127 2128 if ((revents == 0) && (((events & (POLLIN|POLLRDNORM)) != 0))) 2129 selrecord(p, &ipfmain.ipf_selwait[unit]); 2130 return revents; 2131 } 2132 2133 u_int 2134 ipf_pcksum(fr_info_t *fin, int hlen, u_int sum) 2135 { 2136 struct mbuf *m; 2137 u_int sum2; 2138 int off; 2139 2140 m = fin->fin_m; 2141 off = (char *)fin->fin_dp - (char *)fin->fin_ip; 2142 m->m_data += hlen; 2143 m->m_len -= hlen; 2144 sum2 = in_cksum(fin->fin_m, fin->fin_plen - off); 2145 m->m_len += hlen; 2146 m->m_data -= hlen; 2147 2148 /* 2149 * Both sum and sum2 are partial sums, so combine them together. 
2150 */ 2151 sum += ~sum2 & 0xffff; 2152 while (sum > 0xffff) 2153 sum = (sum & 0xffff) + (sum >> 16); 2154 sum2 = ~sum & 0xffff; 2155 return sum2; 2156 } 2157 2158 #if (__NetBSD_Version__ >= 799003000) 2159 2160 /* NetBSD module interface */ 2161 2162 MODULE(MODULE_CLASS_DRIVER, ipl, "bpf_filter"); 2163 2164 static int ipl_init(void *); 2165 static int ipl_fini(void *); 2166 static int ipl_modcmd(modcmd_t, void *); 2167 2168 #ifdef _MODULE 2169 static devmajor_t ipl_cmaj = -1, ipl_bmaj = -1; 2170 #endif 2171 2172 static int 2173 ipl_modcmd(modcmd_t cmd, void *opaque) 2174 { 2175 2176 switch (cmd) { 2177 case MODULE_CMD_INIT: 2178 return ipl_init(opaque); 2179 case MODULE_CMD_FINI: 2180 return ipl_fini(opaque); 2181 default: 2182 return ENOTTY; 2183 } 2184 } 2185 2186 static int 2187 ipl_init(void *opaque) 2188 { 2189 int error; 2190 2191 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK, 2192 ipf_listener_cb, NULL); 2193 2194 if ((error = ipf_load_all()) != 0) 2195 return error; 2196 2197 if (ipf_create_all(&ipfmain) == NULL) { 2198 ipf_unload_all(); 2199 return ENODEV; 2200 } 2201 2202 /* Initialize our mutex and reference count */ 2203 mutex_init(&ipf_ref_mutex, MUTEX_DEFAULT, IPL_NONE); 2204 ipf_active = 0; 2205 2206 #ifdef _MODULE 2207 /* 2208 * Insert ourself into the cdevsw list. 2209 */ 2210 error = devsw_attach("ipl", NULL, &ipl_bmaj, &ipl_cdevsw, &ipl_cmaj); 2211 if (error) 2212 ipl_fini(opaque); 2213 #endif 2214 2215 return error; 2216 } 2217 2218 static int 2219 ipl_fini(void *opaque) 2220 { 2221 2222 #ifdef _MODULE 2223 (void)devsw_detach(NULL, &ipl_cdevsw); 2224 #endif 2225 2226 /* 2227 * Grab the mutex, verify that there are no references 2228 * and that there are no running filters. If either 2229 * of these exists, reinsert our cdevsw entry and return 2230 * an error. 
2231 */ 2232 mutex_enter(&ipf_ref_mutex); 2233 if (ipf_active != 0 || ipfmain.ipf_running > 0) { 2234 #ifdef _MODULE 2235 (void)devsw_attach("ipl", NULL, &ipl_bmaj, 2236 &ipl_cdevsw, &ipl_cmaj); 2237 #endif 2238 mutex_exit(&ipf_ref_mutex); 2239 return EBUSY; 2240 } 2241 2242 /* Clean up the rest of our state before being unloaded */ 2243 2244 mutex_exit(&ipf_ref_mutex); 2245 mutex_destroy(&ipf_ref_mutex); 2246 ipf_destroy_all(&ipfmain); 2247 ipf_unload_all(); 2248 kauth_unlisten_scope(ipf_listener); 2249 2250 return 0; 2251 } 2252 #endif /* (__NetBSD_Version__ >= 799003000) */ 2253