/*	$NetBSD: npf_conn.c,v 1.23 2017/01/29 00:15:54 christos Exp $	*/

/*-
 * Copyright (c) 2014-2015 Mindaugas Rasiukevicius <rmind at netbsd org>
 * Copyright (c) 2010-2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF connection tracking for stateful filtering and translation.
 *
 * Overview
 *
 *	Connection direction is identified by the direction of its first
 *	packet.  Packets can be incoming or outgoing with respect to an
 *	interface.  To describe a packet in the context of the connection
 *	direction we use the terms "forwards stream" and "backwards
 *	stream".  All connections have two keys and thus two entries:
 *
 *		npf_conn_t::c_forw_entry for the forwards stream and
 *		npf_conn_t::c_back_entry for the backwards stream.
 *
 *	The keys are formed from the 5-tuple (source/destination address,
 *	source/destination port and the protocol).  Additional matching
 *	is performed on the interface (the common behaviour is equivalent
 *	to a 6-tuple lookup which includes the interface ID).  Note that
 *	the key may be formed using translated values in the case of NAT.
 *
 *	Connections can serve two purposes: implicit passing or
 *	accommodating dynamic NAT.  Connections for the former purpose
 *	are created by rules with the "stateful" attribute and are used
 *	for stateful filtering.  Such connections indicate that packets
 *	of the backwards stream should be passed without inspection of
 *	the ruleset.  The other purpose is to associate a dynamic NAT
 *	mechanism with a connection.  Such connections are created by
 *	the NAT policies and have a relationship with the NAT translation
 *	structure via npf_conn_t::c_nat.  A single connection can serve
 *	both purposes, which is a common case.
 *
 * Connection life-cycle
 *
 *	Connections are established when a packet matches a stateful
 *	rule or a NAT policy.  Both keys of the established connection
 *	are inserted into the connection database.
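 *
 *	For instance (an illustrative example only), a TCP connection
 *	initiated from 10.0.0.1:12345 to 10.0.0.2:80 is tracked with a
 *	forwards key of (TCP, 10.0.0.1:12345 -> 10.0.0.2:80) and a
 *	backwards key of (TCP, 10.0.0.2:80 -> 10.0.0.1:12345); a reply
 *	packet matches the latter entry.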
 *
 *	A garbage collection thread periodically scans all connections
 *	and, depending on the connection properties (e.g. last activity
 *	time, protocol), removes the connection entries and expires the
 *	actual connections.
 *
 *	Each connection has a reference count.  The reference is acquired
 *	on lookup and should be released by the caller.  It guarantees
 *	that the connection will not be destroyed, although it may be
 *	expired.
 *
 * Synchronisation
 *
 *	The connection database is accessed in a lock-less manner by the
 *	main routines: npf_conn_inspect() and npf_conn_establish().  Since
 *	they are always called from a software interrupt, the database is
 *	protected using passive serialisation.  The main place which can
 *	destroy a connection is npf_conn_worker().  The database itself
 *	can be replaced and destroyed in npf_conn_load().
 *
 * ALG support
 *
 *	Application-level gateways (ALGs) can override the generic
 *	connection inspection (the npf_alg_conn() call in the
 *	npf_conn_inspect() function) by performing their own lookup
 *	using a different key.  A recursive call to npf_conn_inspect()
 *	is not allowed.  The ALGs ought to use the npf_conn_lookup()
 *	function for this purpose.
 *
 * Lock order
 *
 *	npf_config_lock ->
 *		conn_lock ->
 *			npf_conn_t::c_lock
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_conn.c,v 1.23 2017/01/29 00:15:54 christos Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <net/pfil.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/systm.h>
#endif

#define __NPF_CONN_PRIVATE
#include "npf_conn.h"
#include "npf_impl.h"

/*
 * Connection flags: PFIL_IN and PFIL_OUT values are reserved for direction.
 */
CTASSERT(PFIL_ALL == (0x001 | 0x002));
#define	CONN_ACTIVE	0x004	/* visible on inspection */
#define	CONN_PASS	0x008	/* perform implicit passing */
#define	CONN_EXPIRE	0x010	/* explicitly expire */
#define	CONN_REMOVED	0x020	/* "forw/back" entries removed */

enum { CONN_TRACKING_OFF, CONN_TRACKING_ON };

static void	npf_conn_destroy(npf_t *, npf_conn_t *);

/*
 * npf_conn_{init,fini}: initialise/destroy the connection tracking module.
 */

void
npf_conn_init(npf_t *npf, int flags)
{
	npf->conn_cache = pool_cache_init(sizeof(npf_conn_t), coherency_unit,
	    0, 0, "npfconpl", NULL, IPL_NET, NULL, NULL, NULL);
	mutex_init(&npf->conn_lock, MUTEX_DEFAULT, IPL_NONE);
	npf->conn_tracking = CONN_TRACKING_OFF;
	npf->conn_db = npf_conndb_create();

	if ((flags & NPF_NO_GC) == 0) {
		npf_worker_register(npf, npf_conn_worker);
	}
}

void
npf_conn_fini(npf_t *npf)
{
	/* Note: the caller should have flushed the connections. */
	KASSERT(npf->conn_tracking == CONN_TRACKING_OFF);
	npf_worker_unregister(npf, npf_conn_worker);

	npf_conndb_destroy(npf->conn_db);
	pool_cache_destroy(npf->conn_cache);
	mutex_destroy(&npf->conn_lock);
}

/*
 * npf_conn_load: perform the load by flushing the current connection
 * database and replacing it with the new one, or by just destroying it.
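 * If 'ndb' is NULL, the current connection database is left in place.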
 *
 * => The caller must disable the connection tracking and ensure that
 *    there are no connection database lookups or references in-flight.
 */
void
npf_conn_load(npf_t *npf, npf_conndb_t *ndb, bool track)
{
	npf_conndb_t *odb = NULL;

	KASSERT(npf_config_locked_p(npf));

	/*
	 * The connection database is in the quiescent state.
	 * Prevent the G/C thread from running and install a new database.
	 */
	mutex_enter(&npf->conn_lock);
	if (ndb) {
		KASSERT(npf->conn_tracking == CONN_TRACKING_OFF);
		odb = npf->conn_db;
		npf->conn_db = ndb;
		membar_sync();
	}
	if (track) {
		/* After this point, lookups start flying in. */
		npf->conn_tracking = CONN_TRACKING_ON;
	}
	mutex_exit(&npf->conn_lock);

	if (odb) {
		/*
		 * Flush all connections; no sync is needed since the
		 * caller did it for us.  Also, release the pool cache
		 * memory.
		 */
		npf_conn_gc(npf, odb, true, false);
		npf_conndb_destroy(odb);
		pool_cache_invalidate(npf->conn_cache);
	}
}

/*
 * npf_conn_tracking: enable/disable connection tracking.
 */
void
npf_conn_tracking(npf_t *npf, bool track)
{
	KASSERT(npf_config_locked_p(npf));
	npf->conn_tracking = track ? CONN_TRACKING_ON : CONN_TRACKING_OFF;
}

static inline bool
npf_conn_trackable_p(const npf_cache_t *npc)
{
	const npf_t *npf = npc->npc_ctx;

	/*
	 * Check whether connection tracking is on.  Also, if the layer 3
	 * and 4 headers are not cached, the protocol is not supported or
	 * the packet is invalid.
	 */
	if (npf->conn_tracking != CONN_TRACKING_ON) {
		return false;
	}
	if (!npf_iscached(npc, NPC_IP46) || !npf_iscached(npc, NPC_LAYER4)) {
		return false;
	}
	return true;
}

static uint32_t
connkey_setkey(npf_connkey_t *key, uint16_t proto, const void *ipv,
    const uint16_t *id, unsigned alen, bool forw)
{
	uint32_t isrc, idst, *k = key->ck_key;
	const npf_addr_t * const *ips = ipv;

	if (__predict_true(forw)) {
		isrc = NPF_SRC, idst = NPF_DST;
	} else {
		isrc = NPF_DST, idst = NPF_SRC;
	}

	/*
	 * Construct a key formed out of 32-bit integers.  The key layout:
	 *
	 *	Field: | proto | alen | src-id | dst-id | src-addr | dst-addr |
	 *	       +-------+------+--------+--------+----------+----------+
	 *	Bits:  |  16   |  16  |   16   |   16   |  32-128  |  32-128  |
	 *
	 * The source and destination are inverted if the key is for the
	 * backwards stream (forw == false).  The address length depends
	 * on the 'alen' field; it is a length in bytes, either 4 or 16.
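	 *
	 * As an illustrative example (hypothetical values), a forwards key
	 * for a TCP packet (protocol 6) from 10.0.0.1 to 10.0.0.2 over
	 * IPv4 (alen == 4) would be built as:
	 *
	 *	k[0] = (6 << 16) | 4
	 *	k[1] = (sport << 16) | dport	(IDs as taken from the header,
	 *					 i.e. in network byte order)
	 *	k[2] = the 32-bit word of 10.0.0.1
	 *	k[3] = the 32-bit word of 10.0.0.2
	 *
	 * and the returned key length would be 4 * sizeof(uint32_t), i.e.
	 * 16 bytes.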
	 */

	k[0] = ((uint32_t)proto << 16) | (alen & 0xffff);
	k[1] = ((uint32_t)id[isrc] << 16) | id[idst];

	if (__predict_true(alen == sizeof(in_addr_t))) {
		k[2] = ips[isrc]->word32[0];
		k[3] = ips[idst]->word32[0];
		return 4 * sizeof(uint32_t);
	} else {
		const u_int nwords = alen >> 2;
		memcpy(&k[2], ips[isrc], alen);
		memcpy(&k[2 + nwords], ips[idst], alen);
		return (2 + (nwords * 2)) * sizeof(uint32_t);
	}
}

static void
connkey_getkey(const npf_connkey_t *key, uint16_t *proto, npf_addr_t *ips,
    uint16_t *id, uint16_t *alen)
{
	const uint32_t *k = key->ck_key;

	*proto = k[0] >> 16;
	*alen = k[0] & 0xffff;
	id[NPF_SRC] = k[1] >> 16;
	id[NPF_DST] = k[1] & 0xffff;

	switch (*alen) {
	case sizeof(struct in6_addr):
	case sizeof(struct in_addr):
		memcpy(&ips[NPF_SRC], &k[2], *alen);
		memcpy(&ips[NPF_DST], &k[2 + ((unsigned)*alen >> 2)], *alen);
		return;
	default:
		KASSERT(0);
	}
}

/*
 * npf_conn_conkey: construct a key for the connection lookup.
 *
 * => Returns the key length in bytes or zero on failure.
 */
unsigned
npf_conn_conkey(const npf_cache_t *npc, npf_connkey_t *key, const bool forw)
{
	const u_int proto = npc->npc_proto;
	const u_int alen = npc->npc_alen;
	const struct tcphdr *th;
	const struct udphdr *uh;
	uint16_t id[2];

	switch (proto) {
	case IPPROTO_TCP:
		KASSERT(npf_iscached(npc, NPC_TCP));
		th = npc->npc_l4.tcp;
		id[NPF_SRC] = th->th_sport;
		id[NPF_DST] = th->th_dport;
		break;
	case IPPROTO_UDP:
		KASSERT(npf_iscached(npc, NPC_UDP));
		uh = npc->npc_l4.udp;
		id[NPF_SRC] = uh->uh_sport;
		id[NPF_DST] = uh->uh_dport;
		break;
	case IPPROTO_ICMP:
		if (npf_iscached(npc, NPC_ICMP_ID)) {
			const struct icmp *ic = npc->npc_l4.icmp;
			id[NPF_SRC] = ic->icmp_id;
			id[NPF_DST] = ic->icmp_id;
			break;
		}
		return 0;
	case IPPROTO_ICMPV6:
		if (npf_iscached(npc, NPC_ICMP_ID)) {
			const struct icmp6_hdr *ic6 = npc->npc_l4.icmp6;
			id[NPF_SRC] = ic6->icmp6_id;
			id[NPF_DST] = ic6->icmp6_id;
			break;
		}
		return 0;
	default:
		/* Unsupported protocol. */
		return 0;
	}
	return connkey_setkey(key, proto, npc->npc_ips, id, alen, forw);
}

static __inline void
connkey_set_addr(npf_connkey_t *key, const npf_addr_t *naddr, const int di)
{
	const u_int alen = key->ck_key[0] & 0xffff;
	uint32_t *addr = &key->ck_key[2 + ((alen >> 2) * di)];

	KASSERT(alen > 0);
	memcpy(addr, naddr, alen);
}

static __inline void
connkey_set_id(npf_connkey_t *key, const uint16_t id, const int di)
{
	const uint32_t oid = key->ck_key[1];
	const u_int shift = 16 * !di;
	const uint32_t mask = 0xffff0000 >> shift;

	key->ck_key[1] = ((uint32_t)id << shift) | (oid & mask);
}

static inline void
conn_update_atime(npf_conn_t *con)
{
	struct timespec tsnow;

	getnanouptime(&tsnow);
	con->c_atime = tsnow.tv_sec;
}

/*
 * npf_conn_ok: check if the connection is active, and has the right direction.
 */
static bool
npf_conn_ok(const npf_conn_t *con, const int di, bool forw)
{
	const uint32_t flags = con->c_flags;

	/*
	 * Check if connection is active and not expired.
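	 * That is, the CONN_ACTIVE flag must be set and the CONN_EXPIRE
	 * flag must be clear.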
	 */
	bool ok = (flags & (CONN_ACTIVE | CONN_EXPIRE)) == CONN_ACTIVE;
	if (__predict_false(!ok)) {
		return false;
	}

	/* Check if the direction is consistent. */
	bool pforw = (flags & PFIL_ALL) == (unsigned)di;
	if (__predict_false(forw != pforw)) {
		return false;
	}
	return true;
}

/*
 * npf_conn_lookup: look up an established connection.
 *
 * => If found, we will hold a reference for the caller.
 */
npf_conn_t *
npf_conn_lookup(const npf_cache_t *npc, const int di, bool *forw)
{
	npf_t *npf = npc->npc_ctx;
	const nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	npf_connkey_t key;
	u_int cifid;

	/* Construct a key and look up the connection in the store. */
	if (!npf_conn_conkey(npc, &key, true)) {
		return NULL;
	}
	con = npf_conndb_lookup(npf->conn_db, &key, forw);
	if (con == NULL) {
		return NULL;
	}
	KASSERT(npc->npc_proto == con->c_proto);

	/* Check if the connection is active and not expired. */
	if (!npf_conn_ok(con, di, *forw)) {
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}

	/*
	 * Match the interface and the direction of the connection entry
	 * against the packet.
	 */
	cifid = con->c_ifid;
	if (__predict_false(cifid && cifid != nbuf->nb_ifid)) {
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}

	/* Update the last activity time. */
	conn_update_atime(con);
	return con;
}

/*
 * npf_conn_inspect: look up a connection and inspect the protocol data.
 *
 * => If found, we will hold a reference for the caller.
 */
npf_conn_t *
npf_conn_inspect(npf_cache_t *npc, const int di, int *error)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	bool forw, ok;

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	if (!npf_conn_trackable_p(npc)) {
		return NULL;
	}

	/* Query the ALGs, which may look up the connection for us. */
	if ((con = npf_alg_conn(npc, di)) != NULL) {
		/* Note: a reference is held. */
		return con;
	}
	if (nbuf_head_mbuf(nbuf) == NULL) {
		*error = ENOMEM;
		return NULL;
	}
	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));

	/* Main lookup of the connection. */
	if ((con = npf_conn_lookup(npc, di, &forw)) == NULL) {
		return NULL;
	}

	/* Inspect the protocol data and handle state changes. */
	mutex_enter(&con->c_lock);
	ok = npf_state_inspect(npc, &con->c_state, forw);
	mutex_exit(&con->c_lock);

	/* If the state is invalid, let the rules deal with it. */
	if (__predict_false(!ok)) {
		npf_conn_release(con);
		npf_stats_inc(npc->npc_ctx, NPF_STAT_INVALID_STATE);
		return NULL;
	}

	/*
	 * If this is a multi-end state, then specially tag the packet
	 * so it will be just passed-through on other interfaces.
	 */
	if (con->c_ifid == 0 && nbuf_add_tag(nbuf, NPF_NTAG_PASS) != 0) {
		npf_conn_release(con);
		*error = ENOMEM;
		return NULL;
	}
	return con;
}

/*
 * npf_conn_establish: create a new connection and insert it into the
 * global list.
 *
 * => The connection is created with the reference held for the caller.
 * => The connection will be activated on the first reference release.
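 *
 * An illustrative caller sequence for a stateful rule (a sketch only;
 * the exact details live with the caller):
 *
 *	con = npf_conn_establish(npc, di, true);
 *	if (con) {
 *		npf_conn_setpass(con, &mi, rp);
 *		npf_conn_release(con);		-- activates the connection
 *	}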
 */
npf_conn_t *
npf_conn_establish(npf_cache_t *npc, int di, bool per_if)
{
	npf_t *npf = npc->npc_ctx;
	const nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	int error = 0;

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));

	if (!npf_conn_trackable_p(npc)) {
		return NULL;
	}

	/* Allocate and initialise the new connection. */
	con = pool_cache_get(npf->conn_cache, PR_NOWAIT);
	if (__predict_false(!con)) {
		npf_worker_signal(npf);
		return NULL;
	}
	NPF_PRINTF(("NPF: create conn %p\n", con));
	npf_stats_inc(npf, NPF_STAT_CONN_CREATE);

	mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);
	con->c_flags = (di & PFIL_ALL);
	con->c_refcnt = 0;
	con->c_rproc = NULL;
	con->c_nat = NULL;

	/* Initialise the protocol state. */
	if (!npf_state_init(npc, &con->c_state)) {
		npf_conn_destroy(npf, con);
		return NULL;
	}

	KASSERT(npf_iscached(npc, NPC_IP46));
	npf_connkey_t *fw = &con->c_forw_entry;
	npf_connkey_t *bk = &con->c_back_entry;

	/*
	 * Construct the "forwards" and "backwards" keys.  Also, set the
	 * interface ID for this connection (unless it is global).
	 */
	if (!npf_conn_conkey(npc, fw, true) ||
	    !npf_conn_conkey(npc, bk, false)) {
		npf_conn_destroy(npf, con);
		return NULL;
	}
	fw->ck_backptr = bk->ck_backptr = con;
	con->c_ifid = per_if ? nbuf->nb_ifid : 0;
	con->c_proto = npc->npc_proto;

	/*
	 * Set the last activity time for the new connection and acquire
	 * a reference for the caller before we make it visible.
	 */
	conn_update_atime(con);
	con->c_refcnt = 1;

	/*
	 * Insert both keys (the entries representing the two directions)
	 * of the connection.  At this point it becomes visible, but we
	 * activate the connection later.
	 */
	mutex_enter(&con->c_lock);
	if (!npf_conndb_insert(npf->conn_db, fw, con)) {
		error = EISCONN;
		goto err;
	}
	if (!npf_conndb_insert(npf->conn_db, bk, con)) {
		npf_conn_t *ret __diagused;
		ret = npf_conndb_remove(npf->conn_db, fw);
		KASSERT(ret == con);
		error = EISCONN;
		goto err;
	}
err:
	/*
	 * If we have hit a duplicate: mark the connection as expired
	 * and let the G/C thread take care of it.  We cannot do it
	 * here since there might be references acquired already.
	 */
	if (error) {
		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		atomic_dec_uint(&con->c_refcnt);
		npf_stats_inc(npf, NPF_STAT_RACE_CONN);
	} else {
		NPF_PRINTF(("NPF: establish conn %p\n", con));
	}

	/* Finally, insert into the connection list. */
	npf_conndb_enqueue(npf->conn_db, con);
	mutex_exit(&con->c_lock);

	return error ? NULL : con;
}

static void
npf_conn_destroy(npf_t *npf, npf_conn_t *con)
{
	KASSERT(con->c_refcnt == 0);

	if (con->c_nat) {
		/* Release any NAT structures. */
		npf_nat_destroy(con->c_nat);
	}
	if (con->c_rproc) {
		/* Release the rule procedure. */
		npf_rproc_release(con->c_rproc);
	}

	/* Destroy the state. */
	npf_state_destroy(&con->c_state);
	mutex_destroy(&con->c_lock);

	/* Free the structure and increase the counter. */
	pool_cache_put(npf->conn_cache, con);
	npf_stats_inc(npf, NPF_STAT_CONN_DESTROY);
	NPF_PRINTF(("NPF: conn %p destroyed\n", con));
}

/*
 * npf_conn_setnat: associate a NAT entry with the connection, update and
 * re-insert the connection entry using the translation values.
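 * For example, with outbound NAT (NPF_NATOUT) the destination of the
 * "backwards" key is rewritten to the translated address/port, so that
 * reply packets addressed to the translation match this connection.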
 *
 * => The caller must be holding a reference.
 */
int
npf_conn_setnat(const npf_cache_t *npc, npf_conn_t *con,
    npf_nat_t *nt, u_int ntype)
{
	static const u_int nat_type_dimap[] = {
		[NPF_NATOUT] = NPF_DST,
		[NPF_NATIN] = NPF_SRC,
	};
	npf_t *npf = npc->npc_ctx;
	npf_connkey_t key, *bk;
	npf_conn_t *ret __diagused;
	npf_addr_t *taddr;
	in_port_t tport;
	u_int tidx;

	KASSERT(con->c_refcnt > 0);

	npf_nat_gettrans(nt, &taddr, &tport);
	KASSERT(ntype == NPF_NATOUT || ntype == NPF_NATIN);
	tidx = nat_type_dimap[ntype];

	/* Construct a "backwards" key. */
	if (!npf_conn_conkey(npc, &key, false)) {
		return EINVAL;
	}

	/* Acquire the lock and check for races. */
	mutex_enter(&con->c_lock);
	if (__predict_false(con->c_flags & CONN_EXPIRE)) {
		/* The connection got expired. */
		mutex_exit(&con->c_lock);
		return EINVAL;
	}
	KASSERT((con->c_flags & CONN_REMOVED) == 0);

	if (__predict_false(con->c_nat != NULL)) {
		/* Race with a duplicate packet. */
		mutex_exit(&con->c_lock);
		npf_stats_inc(npc->npc_ctx, NPF_STAT_RACE_NAT);
		return EISCONN;
	}

	/* Remove the "backwards" entry. */
	ret = npf_conndb_remove(npf->conn_db, &con->c_back_entry);
	KASSERT(ret == con);

	/* Set the source/destination IDs to the translation values. */
	bk = &con->c_back_entry;
	connkey_set_addr(bk, taddr, tidx);
	if (tport) {
		connkey_set_id(bk, tport, tidx);
	}

	/* Finally, re-insert the "backwards" entry. */
	if (!npf_conndb_insert(npf->conn_db, bk, con)) {
		/*
		 * Race: we have hit a duplicate; remove the "forwards"
		 * entry and expire our connection, as it is no longer valid.
		 */
		ret = npf_conndb_remove(npf->conn_db, &con->c_forw_entry);
		KASSERT(ret == con);

		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		mutex_exit(&con->c_lock);

		npf_stats_inc(npc->npc_ctx, NPF_STAT_RACE_NAT);
		return EISCONN;
	}

	/* Associate the NAT entry and release the lock. */
	con->c_nat = nt;
	mutex_exit(&con->c_lock);
	return 0;
}

/*
 * npf_conn_expire: explicitly mark the connection as expired.
 */
void
npf_conn_expire(npf_conn_t *con)
{
	/* KASSERT(con->c_refcnt > 0); XXX: npf_nat_freepolicy() */
	atomic_or_uint(&con->c_flags, CONN_EXPIRE);
}

/*
 * npf_conn_pass: return true if the connection is a "pass" one,
 * otherwise false.
 */
bool
npf_conn_pass(const npf_conn_t *con, npf_match_info_t *mi, npf_rproc_t **rp)
{
	KASSERT(con->c_refcnt > 0);
	if (__predict_true(con->c_flags & CONN_PASS)) {
		*mi = con->c_mi;
		*rp = con->c_rproc;
		return true;
	}
	return false;
}

/*
 * npf_conn_setpass: mark the connection as a "pass" one and associate
 * the rule procedure with it.
 */
void
npf_conn_setpass(npf_conn_t *con, const npf_match_info_t *mi, npf_rproc_t *rp)
{
	KASSERT((con->c_flags & CONN_ACTIVE) == 0);
	KASSERT(con->c_refcnt > 0);
	KASSERT(con->c_rproc == NULL);

	/*
	 * No need for atomics since the connection is not yet active.
	 * If rproc is set, the caller transfers its reference to us,
	 * which will be released in npf_conn_destroy().
	 */
	atomic_or_uint(&con->c_flags, CONN_PASS);
	con->c_rproc = rp;
	if (rp)
		con->c_mi = *mi;
}

/*
 * npf_conn_release: release a reference, which might allow the G/C
 * thread to destroy this connection.
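 *
 * => The first release of a newly established connection also sets
 *    CONN_ACTIVE, making it visible to inspection.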
 */
void
npf_conn_release(npf_conn_t *con)
{
	if ((con->c_flags & (CONN_ACTIVE | CONN_EXPIRE)) == 0) {
		/* Activate: after this, the connection is globally visible. */
		atomic_or_uint(&con->c_flags, CONN_ACTIVE);
	}
	KASSERT(con->c_refcnt > 0);
	atomic_dec_uint(&con->c_refcnt);
}

/*
 * npf_conn_getnat: return the associated NAT data entry and indicate
 * whether it is a "forwards" or "backwards" stream.
 */
npf_nat_t *
npf_conn_getnat(npf_conn_t *con, const int di, bool *forw)
{
	KASSERT(con->c_refcnt > 0);
	*forw = (con->c_flags & PFIL_ALL) == (u_int)di;
	return con->c_nat;
}

/*
 * npf_conn_expired: criterion to check if the connection is expired.
 */
static inline bool
npf_conn_expired(const npf_conn_t *con, uint64_t tsnow)
{
	const int etime = npf_state_etime(&con->c_state, con->c_proto);
	int elapsed;

	if (__predict_false(con->c_flags & CONN_EXPIRE)) {
		/* Explicitly marked to be expired. */
		return true;
	}

	/*
	 * Note: another thread may update 'atime' and it might
	 * become greater than 'now'.
	 */
	elapsed = (int64_t)tsnow - con->c_atime;
	return elapsed > etime;
}

/*
 * npf_conn_gc: garbage collect the expired connections.
 *
 * => Must run in a single-threaded manner.
 * => If it is a flush request, then destroy all connections.
 * => If 'sync' is true, then perform passive serialisation.
 */
void
npf_conn_gc(npf_t *npf, npf_conndb_t *cd, bool flush, bool sync)
{
	npf_conn_t *con, *prev, *gclist = NULL;
	struct timespec tsnow;

	getnanouptime(&tsnow);

	/*
	 * Scan all connections and check them for expiration.
	 */
	prev = NULL;
	con = npf_conndb_getlist(cd);
	while (con) {
		npf_conn_t *next = con->c_next;

		/* Expired?  Flushing all? */
		if (!npf_conn_expired(con, tsnow.tv_sec) && !flush) {
			prev = con;
			con = next;
			continue;
		}

		/* Remove both entries of the connection. */
		mutex_enter(&con->c_lock);
		if ((con->c_flags & CONN_REMOVED) == 0) {
			npf_conn_t *ret __diagused;

			ret = npf_conndb_remove(cd, &con->c_forw_entry);
			KASSERT(ret == con);
			ret = npf_conndb_remove(cd, &con->c_back_entry);
			KASSERT(ret == con);
		}

		/* Flag the removal and expiration. */
		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		mutex_exit(&con->c_lock);

		/* Move to the G/C list. */
		npf_conndb_dequeue(cd, con, prev);
		con->c_next = gclist;
		gclist = con;

		/* Next.. */
		con = next;
	}
	npf_conndb_settail(cd, prev);

	/*
	 * Ensure it is safe to destroy the connections.
	 * Note: drop the conn_lock (see the lock order).
	 */
	if (sync) {
		mutex_exit(&npf->conn_lock);
		if (gclist) {
			npf_config_enter(npf);
			npf_config_sync(npf);
			npf_config_exit(npf);
		}
	}

	/*
	 * Garbage collect all expired connections.
	 * May need to wait for the references to drain.
	 */
	con = gclist;
	while (con) {
		npf_conn_t *next = con->c_next;

		/*
		 * Destroy only if removed and no references.
		 * Otherwise, wait for a tiny moment.
		 */
		if (__predict_false(con->c_refcnt)) {
			kpause("npfcongc", false, 1, NULL);
			continue;
		}
		npf_conn_destroy(npf, con);
		con = next;
	}
}

/*
 * npf_conn_worker: G/C to run from a worker thread.
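 * Expired connections are moved to a private G/C list and destroyed once
 * their reference counts drain.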
 */
void
npf_conn_worker(npf_t *npf)
{
	mutex_enter(&npf->conn_lock);
	/* Note: the conn_lock will be released (sync == true). */
	npf_conn_gc(npf, npf->conn_db, false, true);
}

/*
 * npf_conndb_export: construct a list of connections prepared for saving.
 * Note: this is expected to be an expensive operation.
 */
int
npf_conndb_export(npf_t *npf, prop_array_t conlist)
{
	npf_conn_t *con, *prev;

	/*
	 * Note: acquire the conn_lock to prevent destruction of the
	 * database and to keep the G/C thread out.
	 */
	mutex_enter(&npf->conn_lock);
	if (npf->conn_tracking != CONN_TRACKING_ON) {
		mutex_exit(&npf->conn_lock);
		return 0;
	}
	prev = NULL;
	con = npf_conndb_getlist(npf->conn_db);
	while (con) {
		npf_conn_t *next = con->c_next;
		prop_dictionary_t cdict;

		if ((cdict = npf_conn_export(npf, con)) != NULL) {
			prop_array_add(conlist, cdict);
			prop_object_release(cdict);
		}
		prev = con;
		con = next;
	}
	npf_conndb_settail(npf->conn_db, prev);
	mutex_exit(&npf->conn_lock);
	return 0;
}

static prop_dictionary_t
npf_connkey_export(const npf_connkey_t *key)
{
	uint16_t id[2], alen, proto;
	prop_dictionary_t kdict;
	npf_addr_t ips[2];
	prop_data_t d;

	kdict = prop_dictionary_create();
	connkey_getkey(key, &proto, ips, id, &alen);

	prop_dictionary_set_uint16(kdict, "proto", proto);

	prop_dictionary_set_uint16(kdict, "sport", id[NPF_SRC]);
	prop_dictionary_set_uint16(kdict, "dport", id[NPF_DST]);

	d = prop_data_create_data(&ips[NPF_SRC], alen);
	prop_dictionary_set_and_rel(kdict, "saddr", d);

	d = prop_data_create_data(&ips[NPF_DST], alen);
	prop_dictionary_set_and_rel(kdict, "daddr", d);

	return kdict;
}

/*
 * npf_conn_export: serialise a single connection.
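 * Note: only active, non-expired connections are exported; NULL is
 * returned otherwise.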
 */
prop_dictionary_t
npf_conn_export(npf_t *npf, const npf_conn_t *con)
{
	prop_dictionary_t cdict, kdict;
	prop_data_t d;

	if ((con->c_flags & (CONN_ACTIVE|CONN_EXPIRE)) != CONN_ACTIVE) {
		return NULL;
	}
	cdict = prop_dictionary_create();
	prop_dictionary_set_uint32(cdict, "flags", con->c_flags);
	prop_dictionary_set_uint32(cdict, "proto", con->c_proto);
	if (con->c_ifid) {
		const char *ifname = npf_ifmap_getname(npf, con->c_ifid);
		prop_dictionary_set_cstring(cdict, "ifname", ifname);
	}

	d = prop_data_create_data(&con->c_state, sizeof(npf_state_t));
	prop_dictionary_set_and_rel(cdict, "state", d);

	kdict = npf_connkey_export(&con->c_forw_entry);
	prop_dictionary_set_and_rel(cdict, "forw-key", kdict);

	kdict = npf_connkey_export(&con->c_back_entry);
	prop_dictionary_set_and_rel(cdict, "back-key", kdict);

	if (con->c_nat) {
		npf_nat_export(cdict, con->c_nat);
	}
	return cdict;
}

static uint32_t
npf_connkey_import(prop_dictionary_t kdict, npf_connkey_t *key)
{
	prop_object_t sobj, dobj;
	npf_addr_t const * ips[2];
	uint16_t alen, proto, id[2];

	if (!prop_dictionary_get_uint16(kdict, "proto", &proto))
		return 0;

	if (!prop_dictionary_get_uint16(kdict, "sport", &id[NPF_SRC]))
		return 0;

	if (!prop_dictionary_get_uint16(kdict, "dport", &id[NPF_DST]))
		return 0;

	sobj = prop_dictionary_get(kdict, "saddr");
	if ((ips[NPF_SRC] = prop_data_data_nocopy(sobj)) == NULL)
		return 0;

	dobj = prop_dictionary_get(kdict, "daddr");
	if ((ips[NPF_DST] = prop_data_data_nocopy(dobj)) == NULL)
		return 0;

	alen = prop_data_size(sobj);
	if (alen != prop_data_size(dobj))
		return 0;

	return connkey_setkey(key, proto, ips, id, alen, true);
}

/*
 * npf_conn_import: fully reconstruct a single connection from a
 * dictionary and insert it into the given database.
 */
int
npf_conn_import(npf_t *npf, npf_conndb_t *cd, prop_dictionary_t cdict,
    npf_ruleset_t *natlist)
{
	npf_conn_t *con;
	npf_connkey_t *fw, *bk;
	prop_object_t obj;
	const char *ifname;
	const void *d;

	/* Allocate a connection and initialise it (clear it first). */
	con = pool_cache_get(npf->conn_cache, PR_WAITOK);
	memset(con, 0, sizeof(npf_conn_t));
	mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);
	npf_stats_inc(npf, NPF_STAT_CONN_CREATE);

	prop_dictionary_get_uint32(cdict, "proto", &con->c_proto);
	prop_dictionary_get_uint32(cdict, "flags", &con->c_flags);
	con->c_flags &= PFIL_ALL | CONN_ACTIVE | CONN_PASS;
	conn_update_atime(con);

	if (prop_dictionary_get_cstring_nocopy(cdict, "ifname", &ifname) &&
	    (con->c_ifid = npf_ifmap_register(npf, ifname)) == 0) {
		goto err;
	}

	obj = prop_dictionary_get(cdict, "state");
	if ((d = prop_data_data_nocopy(obj)) == NULL ||
	    prop_data_size(obj) != sizeof(npf_state_t)) {
		goto err;
	}
	memcpy(&con->c_state, d, sizeof(npf_state_t));

	/* Reconstruct the NAT association, if any. */
	if ((obj = prop_dictionary_get(cdict, "nat")) != NULL &&
	    (con->c_nat = npf_nat_import(npf, obj, natlist, con)) == NULL) {
		goto err;
	}

	/*
	 * Fetch and copy the keys for each direction.
	 */
	obj = prop_dictionary_get(cdict, "forw-key");
	fw = &con->c_forw_entry;
	if (obj == NULL || !npf_connkey_import(obj, fw)) {
		goto err;
	}

	obj = prop_dictionary_get(cdict, "back-key");
	bk = &con->c_back_entry;
	if (obj == NULL || !npf_connkey_import(obj, bk)) {
		goto err;
	}

	fw->ck_backptr = bk->ck_backptr = con;

	/* Insert the entries and the connection itself. */
	if (!npf_conndb_insert(cd, fw, con)) {
		goto err;
	}
	if (!npf_conndb_insert(cd, bk, con)) {
		npf_conndb_remove(cd, fw);
		goto err;
	}

	NPF_PRINTF(("NPF: imported conn %p\n", con));
	npf_conndb_enqueue(cd, con);
	return 0;
err:
	npf_conn_destroy(npf, con);
	return EINVAL;
}

int
npf_conn_find(npf_t *npf, prop_dictionary_t idict, prop_dictionary_t *odict)
{
	prop_dictionary_t kdict;
	npf_connkey_t key;
	npf_conn_t *con;
	uint16_t dir;
	bool forw;

	if ((kdict = prop_dictionary_get(idict, "key")) == NULL)
		return EINVAL;

	if (!npf_connkey_import(kdict, &key))
		return EINVAL;

	if (!prop_dictionary_get_uint16(idict, "direction", &dir))
		return EINVAL;

	con = npf_conndb_lookup(npf->conn_db, &key, &forw);
	if (con == NULL) {
		return ESRCH;
	}

	if (!npf_conn_ok(con, dir, true)) {
		atomic_dec_uint(&con->c_refcnt);
		return ESRCH;
	}

	*odict = npf_conn_export(npf, con);
	if (*odict == NULL) {
		atomic_dec_uint(&con->c_refcnt);
		return ENOSPC;
	}
	atomic_dec_uint(&con->c_refcnt);

	return 0;
}

#if defined(DDB) || defined(_NPF_TESTING)

void
npf_conn_print(const npf_conn_t *con)
{
	const u_int alen = NPF_CONN_GETALEN(&con->c_forw_entry);
	const uint32_t *fkey = con->c_forw_entry.ck_key;
	const uint32_t *bkey = con->c_back_entry.ck_key;
	const u_int proto = con->c_proto;
	struct timespec tspnow;
	const void *src, *dst;
	int etime;

	getnanouptime(&tspnow);
	etime = npf_state_etime(&con->c_state, proto);

	printf("%p:\n\tproto %d flags 0x%x tsdiff %ld etime %d\n", con,
	    proto, con->c_flags, (long)(tspnow.tv_sec - con->c_atime), etime);

	src = &fkey[2], dst = &fkey[2 + (alen >> 2)];
	printf("\tforw %s:%d", npf_addr_dump(src, alen), ntohs(fkey[1] >> 16));
	printf("-> %s:%d\n", npf_addr_dump(dst, alen), ntohs(fkey[1] & 0xffff));

	src = &bkey[2], dst = &bkey[2 + (alen >> 2)];
	printf("\tback %s:%d", npf_addr_dump(src, alen), ntohs(bkey[1] >> 16));
	printf("-> %s:%d\n", npf_addr_dump(dst, alen), ntohs(bkey[1] & 0xffff));

	npf_state_dump(&con->c_state);
	if (con->c_nat) {
		npf_nat_dump(con->c_nat);
	}
}

#endif