/*-
 * Copyright (c) 2009-2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF main: dynamic load/initialisation and unload routines.
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_os.c,v 1.21 2021/01/27 17:39:13 christos Exp $");

#ifdef _KERNEL_OPT
#include "pf.h"
#if NPF > 0
#error "NPF and PF are mutually exclusive; please select one"
#endif
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/module.h>
#include <sys/pserialize.h>
#include <sys/socketvar.h>
#include <sys/uio.h>

#include <netinet/in.h>
#include <netinet6/in6_var.h>
#endif

#include "npf_impl.h"
#include "npfkern.h"

#ifdef _KERNEL
#ifndef _MODULE
#include "opt_modular.h"
#include "opt_net_mpsafe.h"
#endif
#include "ioconf.h"
#endif

/*
 * Module and device structures.
 */
#ifndef _MODULE
/*
 * Modular kernels load drivers too early, and we need percpu to be inited,
 * so we make this misc; a better way would be to have early boot and late
 * boot drivers.
 */
MODULE(MODULE_CLASS_MISC, npf, "bpf");
#else
/* This module autoloads via /dev/npf so it needs to be a driver */
MODULE(MODULE_CLASS_DRIVER, npf, "bpf");
#endif

#define	NPF_IOCTL_DATA_LIMIT	(4 * 1024 * 1024)

static int	npf_pfil_register(bool);
static void	npf_pfil_unregister(bool);

static int	npf_dev_open(dev_t, int, int, lwp_t *);
static int	npf_dev_close(dev_t, int, int, lwp_t *);
static int	npf_dev_ioctl(dev_t, u_long, void *, int, lwp_t *);
static int	npf_dev_poll(dev_t, int, lwp_t *);
static int	npf_dev_read(dev_t, struct uio *, int);

const struct cdevsw npf_cdevsw = {
	.d_open		= npf_dev_open,
	.d_close	= npf_dev_close,
	.d_read		= npf_dev_read,
	.d_write	= nowrite,
	.d_ioctl	= npf_dev_ioctl,
	.d_stop		= nostop,
	.d_tty		= notty,
	.d_poll		= npf_dev_poll,
	.d_mmap		= nommap,
	.d_kqfilter	= nokqfilter,
	.d_discard	= nodiscard,
	.d_flag		= D_OTHER | D_MPSAFE
};

static const char *	npf_ifop_getname(npf_t *, ifnet_t *);
static ifnet_t *	npf_ifop_lookup(npf_t *, const char *);
static void		npf_ifop_flush(npf_t *, void *);
static void *		npf_ifop_getmeta(npf_t *, const ifnet_t *);
static void		npf_ifop_setmeta(npf_t *, ifnet_t *, void *);

static const unsigned	nworkers = 1;

static bool		pfil_registered = false;
static pfil_head_t *	npf_ph_if = NULL;
static pfil_head_t *	npf_ph_inet = NULL;
static pfil_head_t *	npf_ph_inet6 = NULL;

static const npf_ifops_t kern_ifops = {
	.getname	= npf_ifop_getname,
	.lookup		= npf_ifop_lookup,
	.flush		= npf_ifop_flush,
	.getmeta	= npf_ifop_getmeta,
	.setmeta	= npf_ifop_setmeta,
};

static int
npf_fini(void)
{
	npf_t *npf = npf_getkernctx();

	/* At first, detach device and remove pfil hooks. */
#ifdef _MODULE
	devsw_detach(NULL, &npf_cdevsw);
#endif
	npf_pfil_unregister(true);
	npfk_destroy(npf);
	npfk_sysfini();
	return 0;
}

static int
npf_init(void)
{
	npf_t *npf;
	int error = 0;

	error = npfk_sysinit(nworkers);
	if (error)
		return error;
	npf = npfk_create(0, NULL, &kern_ifops, NULL);
	npf_setkernctx(npf);
	npf_pfil_register(true);

#ifdef _MODULE
	devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;

	/* Attach /dev/npf device. */
	error = devsw_attach("npf", NULL, &bmajor, &npf_cdevsw, &cmajor);
	if (error) {
		/* It will call devsw_detach(), which is safe. */
		(void)npf_fini();
	}
#endif
	return error;
}


/*
 * Module interface.
 */
static int
npf_modcmd(modcmd_t cmd, void *arg)
{
	switch (cmd) {
	case MODULE_CMD_INIT:
		return npf_init();
	case MODULE_CMD_FINI:
		return npf_fini();
	case MODULE_CMD_AUTOUNLOAD:
		if (npf_autounload_p()) {
			return EBUSY;
		}
		break;
	default:
		return ENOTTY;
	}
	return 0;
}

void
npfattach(int nunits)
{
	/* Nothing */
}

static int
npf_dev_open(dev_t dev, int flag, int mode, lwp_t *l)
{
	/* Available only for super-user. */
	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL)) {
		return EPERM;
	}
	return 0;
}

static int
npf_dev_close(dev_t dev, int flag, int mode, lwp_t *l)
{
	return 0;
}

static int
npf_stats_export(npf_t *npf, void *data)
{
	uint64_t *fullst, *uptr = *(uint64_t **)data;
	int error;

	fullst = kmem_alloc(NPF_STATS_SIZE, KM_SLEEP);
	npfk_stats(npf, fullst); /* will zero the buffer */
	error = copyout(fullst, uptr, NPF_STATS_SIZE);
	kmem_free(fullst, NPF_STATS_SIZE);
	return error;
}

/*
 * npfctl_switch: enable or disable packet inspection.
 */
static int
npfctl_switch(void *data)
{
	const bool onoff = *(int *)data ? true : false;
	int error;

	if (onoff) {
		/* Enable: add pfil hooks. */
		error = npf_pfil_register(false);
	} else {
		/* Disable: remove pfil hooks. */
		npf_pfil_unregister(false);
		error = 0;
	}
	return error;
}

static int
npf_dev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	npf_t *npf = npf_getkernctx();
	nvlist_t *req, *resp;
	int error;

	/* Available only for super-user. */
	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL)) {
		return EPERM;
	}

	switch (cmd) {
	case IOC_NPF_VERSION:
		*(int *)data = NPF_VERSION;
		return 0;
	case IOC_NPF_SWITCH:
		return npfctl_switch(data);
	case IOC_NPF_TABLE:
		return npfctl_table(npf, data);
	case IOC_NPF_STATS:
		return npf_stats_export(npf, data);
	case IOC_NPF_LOAD:
	case IOC_NPF_SAVE:
	case IOC_NPF_RULE:
	case IOC_NPF_CONN_LOOKUP:
	case IOC_NPF_TABLE_REPLACE:
		/* nvlist_ref_t argument, handled below */
		break;
	default:
		return EINVAL;
	}

	error = nvlist_copyin(data, &req, NPF_IOCTL_DATA_LIMIT);
	if (__predict_false(error)) {
#ifdef __NetBSD__
		/* Until the version bump. */
		if (cmd != IOC_NPF_SAVE) {
			return error;
		}
		req = nvlist_create(0);
#else
		return error;
#endif
	}
	resp = nvlist_create(0);

	if ((error = npfctl_run_op(npf, cmd, req, resp)) == 0) {
		error = nvlist_copyout(data, resp);
	}

	nvlist_destroy(resp);
	nvlist_destroy(req);

	return error;
}

static int
npf_dev_poll(dev_t dev, int events, lwp_t *l)
{
	return ENOTSUP;
}

static int
npf_dev_read(dev_t dev, struct uio *uio, int flag)
{
	return ENOTSUP;
}

bool
npf_autounload_p(void)
{
	if (npf_active_p())
		return false;

	npf_t *npf = npf_getkernctx();

	npf_config_enter(npf);
	bool pass = npf_default_pass(npf);
	npf_config_exit(npf);

	return pass;
}

/*
 * Interface operations.
 */
341 */ 342 343 static const char * 344 npf_ifop_getname(npf_t *npf __unused, ifnet_t *ifp) 345 { 346 return ifp->if_xname; 347 } 348 349 static ifnet_t * 350 npf_ifop_lookup(npf_t *npf __unused, const char *name) 351 { 352 return ifunit(name); 353 } 354 355 static void 356 npf_ifop_flush(npf_t *npf __unused, void *arg) 357 { 358 ifnet_t *ifp; 359 360 KERNEL_LOCK(1, NULL); 361 IFNET_GLOBAL_LOCK(); 362 IFNET_WRITER_FOREACH(ifp) { 363 ifp->if_npf_private = arg; 364 } 365 IFNET_GLOBAL_UNLOCK(); 366 KERNEL_UNLOCK_ONE(NULL); 367 } 368 369 static void * 370 npf_ifop_getmeta(npf_t *npf __unused, const ifnet_t *ifp) 371 { 372 return ifp->if_npf_private; 373 } 374 375 static void 376 npf_ifop_setmeta(npf_t *npf __unused, ifnet_t *ifp, void *arg) 377 { 378 ifp->if_npf_private = arg; 379 } 380 381 #ifdef _KERNEL 382 383 /* 384 * Wrapper of the main packet handler to pass the kernel NPF context. 385 */ 386 static int 387 npfos_packet_handler(void *arg, struct mbuf **mp, ifnet_t *ifp, int di) 388 { 389 npf_t *npf = npf_getkernctx(); 390 return npfk_packet_handler(npf, mp, ifp, di); 391 } 392 393 /* 394 * npf_ifhook: hook handling interface changes. 395 */ 396 static void 397 npf_ifhook(void *arg, unsigned long cmd, void *arg2) 398 { 399 npf_t *npf = npf_getkernctx(); 400 ifnet_t *ifp = arg2; 401 402 switch (cmd) { 403 case PFIL_IFNET_ATTACH: 404 npfk_ifmap_attach(npf, ifp); 405 npf_ifaddr_sync(npf, ifp); 406 break; 407 case PFIL_IFNET_DETACH: 408 npfk_ifmap_detach(npf, ifp); 409 npf_ifaddr_flush(npf, ifp); 410 break; 411 } 412 } 413 414 static void 415 npf_ifaddrhook(void *arg, u_long cmd, void *arg2) 416 { 417 npf_t *npf = npf_getkernctx(); 418 struct ifaddr *ifa = arg2; 419 420 switch (cmd) { 421 case SIOCSIFADDR: 422 case SIOCAIFADDR: 423 case SIOCDIFADDR: 424 #ifdef INET6 425 case SIOCSIFADDR_IN6: 426 case SIOCAIFADDR_IN6: 427 case SIOCDIFADDR_IN6: 428 #endif 429 KASSERT(ifa != NULL); 430 break; 431 default: 432 return; 433 } 434 npf_ifaddr_sync(npf, ifa->ifa_ifp); 435 } 436 437 /* 438 * npf_pfil_register: register pfil(9) hooks. 439 */ 440 static int 441 npf_pfil_register(bool init) 442 { 443 npf_t *npf = npf_getkernctx(); 444 int error = 0; 445 446 SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE(); 447 448 /* Init: interface re-config and attach/detach hook. */ 449 if (!npf_ph_if) { 450 npf_ph_if = pfil_head_get(PFIL_TYPE_IFNET, 0); 451 if (!npf_ph_if) { 452 error = ENOENT; 453 goto out; 454 } 455 456 error = pfil_add_ihook(npf_ifhook, NULL, 457 PFIL_IFNET, npf_ph_if); 458 KASSERT(error == 0); 459 460 error = pfil_add_ihook(npf_ifaddrhook, NULL, 461 PFIL_IFADDR, npf_ph_if); 462 KASSERT(error == 0); 463 } 464 if (init) { 465 goto out; 466 } 467 468 /* Check if pfil hooks are not already registered. */ 469 if (pfil_registered) { 470 error = EEXIST; 471 goto out; 472 } 473 474 /* Capture points of the activity in the IP layer. */ 475 npf_ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET); 476 npf_ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6); 477 if (!npf_ph_inet && !npf_ph_inet6) { 478 error = ENOENT; 479 goto out; 480 } 481 482 /* Packet IN/OUT handlers for IP layer. */ 483 if (npf_ph_inet) { 484 error = pfil_add_hook(npfos_packet_handler, npf, 485 PFIL_ALL, npf_ph_inet); 486 KASSERT(error == 0); 487 } 488 if (npf_ph_inet6) { 489 error = pfil_add_hook(npfos_packet_handler, npf, 490 PFIL_ALL, npf_ph_inet6); 491 KASSERT(error == 0); 492 } 493 494 /* 495 * It is necessary to re-sync all/any interface address tables, 496 * since we did not listen for any changes. 
497 */ 498 npf_ifaddr_syncall(npf); 499 pfil_registered = true; 500 out: 501 SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); 502 503 return error; 504 } 505 506 /* 507 * npf_pfil_unregister: unregister pfil(9) hooks. 508 */ 509 static void 510 npf_pfil_unregister(bool fini) 511 { 512 npf_t *npf = npf_getkernctx(); 513 514 SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE(); 515 516 if (fini && npf_ph_if) { 517 (void)pfil_remove_ihook(npf_ifhook, NULL, 518 PFIL_IFNET, npf_ph_if); 519 (void)pfil_remove_ihook(npf_ifaddrhook, NULL, 520 PFIL_IFADDR, npf_ph_if); 521 } 522 if (npf_ph_inet) { 523 (void)pfil_remove_hook(npfos_packet_handler, npf, 524 PFIL_ALL, npf_ph_inet); 525 } 526 if (npf_ph_inet6) { 527 (void)pfil_remove_hook(npfos_packet_handler, npf, 528 PFIL_ALL, npf_ph_inet6); 529 } 530 pfil_registered = false; 531 532 SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); 533 } 534 535 bool 536 npf_active_p(void) 537 { 538 return pfil_registered; 539 } 540 541 #endif 542 543 #ifdef __NetBSD__ 544 545 /* 546 * Epoch-Based Reclamation (EBR) wrappers: in NetBSD, we rely on the 547 * passive serialization mechanism (see pserialize(9) manual page), 548 * which provides sufficient guarantees for NPF. 549 */ 550 551 ebr_t * 552 npf_ebr_create(void) 553 { 554 return pserialize_create(); 555 } 556 557 void 558 npf_ebr_destroy(ebr_t *ebr) 559 { 560 pserialize_destroy(ebr); 561 } 562 563 void 564 npf_ebr_register(ebr_t *ebr) 565 { 566 KASSERT(ebr != NULL); (void)ebr; 567 } 568 569 void 570 npf_ebr_unregister(ebr_t *ebr) 571 { 572 KASSERT(ebr != NULL); (void)ebr; 573 } 574 575 int 576 npf_ebr_enter(ebr_t *ebr) 577 { 578 KASSERT(ebr != NULL); (void)ebr; 579 return pserialize_read_enter(); 580 } 581 582 void 583 npf_ebr_exit(ebr_t *ebr, int s) 584 { 585 KASSERT(ebr != NULL); (void)ebr; 586 pserialize_read_exit(s); 587 } 588 589 void 590 npf_ebr_full_sync(ebr_t *ebr) 591 { 592 pserialize_perform(ebr); 593 } 594 595 bool 596 npf_ebr_incrit_p(ebr_t *ebr) 597 { 598 KASSERT(ebr != NULL); (void)ebr; 599 return pserialize_in_read_section(); 600 } 601 602 #endif 603